content: string (lengths 0 to 894k)
type: string (2 classes)
import numpy as np


## Wan-Ting borrowed this function from io in the stmpy folder.
def _make_attr(self, attr, names, data):
    '''
    Tries to give the object an attribute from self.data by looking through
    each key in names. It will add only the first match, so the order of
    names dictates the preference.

    Inputs:
        attr  - Required : Name of new attribute
        names - Required : List of names to search for
        data  - Required : Name of a current attribute in which the new
                           attribute is stored.

    Returns:
        1 - If successfully added the attribute
        0 - If name is not found.

    History:
        2017-08-11 - HP : Initial commit.
        2017-08-24 - HP : Now uses grid z value for Z attribute.
    '''
    dat = getattr(self, data)
    for name in names:
        if name in dat.keys():
            setattr(self, attr, dat[name])
            return 1
    return 0


def loadsm4(filePath):
    '''
    load_sm4 can now output several attributes, including:
    I, iv, LIY, didv, didvStd, Z, en

    Inputs:
        filePath - Required : Name of the file

    Returns:
        self.info    - information of the pages
        self.header  - details of the pages
        self.data    - all the data from all of the pages
        self.en      - x axis for the spectroscopy data
        self.Z       - topography of the data
        self.I       - spectroscopy of the current data
        self.iv      - average of the current spectroscopy data
        self.LIY     - spectroscopy of the dI/dV data
        self.didv    - average of the dI/dV spectroscopy data
        self.didvStd - standard deviation of all the dI/dV spectroscopy data

    History:
        2020-07-15 - WT : Initial commit.
    '''
    import rhk_stmpy.rhk_sm4 as sm4
    f = sm4.load_sm4(filePath)
    self = Spy()
    self.info = {}
    self.info = f.print_info()
    name = f.print_info().iloc[:, 0].to_numpy()
    it = f.print_info().iloc[:, 1].to_numpy()
    namef = np.char.strip(it.astype(str), 'DATA_')
    names = namef + name
    label = {}
    for ix, item in zip(range(0, len(names)), names):
        label[ix] = item
    self.data = {}
    for ix, line in enumerate(f):
        self.data[ix] = f[ix].data
    self.header = {}
    for ix, line in enumerate(f):
        self.header[ix] = f[ix].attrs

    def getf(channel):
        res = 100
        for key in label:
            if label[key] == channel:
                res = list(label.values()).index(channel)
        return res

    liy = getf('LINELIA Current')
    i = getf('LINECurrent')
    z = getf('IMAGETopography')
    self.en = {}
    if liy < 100:
        self.en = f[liy].coords[1][1]
    else:
        self.en = f[0].coords[1][1]
    if _make_attr(self, 'LIY', [liy], 'data'):
        self.didv = np.mean(self.LIY, axis=0)
        self.didvStd = np.std(self.LIY, axis=0)
    else:
        print('ERR: LIY channel not found')
    if _make_attr(self, 'I', [i], 'data'):
        self.iv = np.mean(self.I, axis=0)
    else:
        print('ERR: Current not found')
    if _make_attr(self, 'Z', [z], 'data'):
        self.Z = self.Z
    else:
        print('ERR: Z channel not found')
    return self


class Spy(object):
    def __init__(self):
        pass
python
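A minimal usage sketch for the loader above; the file name is a placeholder, and it assumes an RHK .sm4 spectroscopy map containing the channels the loader looks for ('LINELIA Current', 'LINECurrent', 'IMAGETopography').

# Hypothetical usage of loadsm4; 'map001.sm4' is a placeholder file name.
s = loadsm4('map001.sm4')
print(s.en)       # bias axis of the spectroscopy data
print(s.didv)     # spatially averaged dI/dV curve
print(s.Z)        # topography channel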
#!/usr/bin/env python3 # Author: Ali Assaf <[email protected]> # Copyright: (C) 2010 Ali Assaf # License: GNU General Public License <http://www.gnu.org/licenses/> from itertools import product def solve_sudoku(size, grid): """ An efficient Sudoku solver using Algorithm X. >>> grid = [ ... [5, 3, 0, 0, 7, 0, 0, 0, 0], ... [6, 0, 0, 1, 9, 5, 0, 0, 0], ... [0, 9, 8, 0, 0, 0, 0, 6, 0], ... [8, 0, 0, 0, 6, 0, 0, 0, 3], ... [4, 0, 0, 8, 0, 3, 0, 0, 1], ... [7, 0, 0, 0, 2, 0, 0, 0, 6], ... [0, 6, 0, 0, 0, 0, 2, 8, 0], ... [0, 0, 0, 4, 1, 9, 0, 0, 5], ... [0, 0, 0, 0, 8, 0, 0, 7, 9]] >>> for solution in solve_sudoku((3, 3), grid): ... print(*solution, sep='\\n') [5, 3, 4, 6, 7, 8, 9, 1, 2] [6, 7, 2, 1, 9, 5, 3, 4, 8] [1, 9, 8, 3, 4, 2, 5, 6, 7] [8, 5, 9, 7, 6, 1, 4, 2, 3] [4, 2, 6, 8, 5, 3, 7, 9, 1] [7, 1, 3, 9, 2, 4, 8, 5, 6] [9, 6, 1, 5, 3, 7, 2, 8, 4] [2, 8, 7, 4, 1, 9, 6, 3, 5] [3, 4, 5, 2, 8, 6, 1, 7, 9] """ R, C = size N = R * C X = ([("rc", rc) for rc in product(range(N), range(N))] + [("rn", rn) for rn in product(range(N), range(1, N + 1))] + [("cn", cn) for cn in product(range(N), range(1, N + 1))] + [("bn", bn) for bn in product(range(N), range(1, N + 1))]) Y = dict() for r, c, n in product(range(N), range(N), range(1, N + 1)): b = (r // R) * R + (c // C) # Box number Y[(r, c, n)] = [ ("rc", (r, c)), ("rn", (r, n)), ("cn", (c, n)), ("bn", (b, n))] X, Y = exact_cover(X, Y) for i, row in enumerate(grid): for j, n in enumerate(row): if n: select(X, Y, (i, j, n)) for solution in solve(X, Y, []): for (r, c, n) in solution: grid[r][c] = n yield grid def exact_cover(X, Y): X = {j: set() for j in X} for i, row in Y.items(): for j in row: X[j].add(i) return X, Y def solve(X, Y, solution): if not X: yield list(solution) else: c = min(X, key=lambda c: len(X[c])) for r in list(X[c]): solution.append(r) cols = select(X, Y, r) for s in solve(X, Y, solution): yield s deselect(X, Y, r, cols) solution.pop() def select(X, Y, r): cols = [] for j in Y[r]: for i in X[j]: for k in Y[i]: if k != j: X[k].remove(i) cols.append(X.pop(j)) return cols def deselect(X, Y, r, cols): for j in reversed(Y[r]): X[j] = cols.pop() for i in X[j]: for k in Y[i]: if k != j: X[k].add(i) if __name__ == "__main__": import doctest doctest.testmod()
python
import unittest

from game_classes import Card


class TestCard(unittest.TestCase):

    def test_init(self):
        test_card = Card()
        self.assertEqual(test_card.counter, 0)
        self.assertEqual(len(test_card.selected_numbers), 15)
        self.assertEqual(len(test_card.card), 3)

    def test_print_card(self):
        test_card = Card()
        player = '#1'
        print()
        test_card.print_card(player)
        self.assertEqual(player, '#1')

    def test_check_number_in_card(self):
        test_card = Card()
        number = 90
        player = '#1'
        test_card.check_number_in_card(number, player)
        self.assertEqual(test_card.counter, 0)
python
# An empty class has a dictionary that ...
# holds the attributes of the object.
class A(object):
    pass


A = A()
A.__dict__ = {
    'key11': 1,
    'key2': 2,
}
A.__dict__['key2'] = 3
print(A.__dict__['key2'])  # 3
python
qtde = int(input('Qual a Qtde: '))
valor = float(input('Qual valor unitário desse produto: '))
preco_total = qtde * valor
print('O preço total é: {}'.format(preco_total))
python
"""Application settings.""" import os import pydantic class Settings(pydantic.BaseSettings): """Main application config. It takes settings from environment variables. """ sqlalchemy_uri: str = os.environ['SQLALCHEMY_URI'] import_token: str = os.environ['AUTH_IMPORT_TOKEN']
python
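A small usage sketch for the settings class above; it assumes the two environment variables are exported before the module is imported, and the values shown are placeholders.

# Hypothetical usage: the class reads SQLALCHEMY_URI and AUTH_IMPORT_TOKEN at import time.
#   export SQLALCHEMY_URI=postgresql://localhost/app
#   export AUTH_IMPORT_TOKEN=secret
settings = Settings()
print(settings.sqlalchemy_uri)
print(settings.import_token)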
import math import requests from typing import Tuple, List AUTH_KEY = 'GOOGLE API KEY' PI = math.pi LatLng = Tuple[float, float] Polygon = List[LatLng] """ Various mathematical formulas for use in Google's isLocationOnEdge and containsLocation algorithms. Unless otherwise specified all math utilities have been ported from: Google's android-map-utils PolyUtil class: https://github.com/googlemaps/android-maps-utils/blob/master/library/src/main/java/com/google/maps/android/PolyUtil.java Google's android-map-utils MathUtil class: https://github.com/googlemaps/android-maps-utils/blob/master/library/src/main/java/com/google/maps/android/MathUtil.java """ def decode(point_str: str) -> Polygon: """ The following method although present in Google's android-map-utils PolyUtil class, this method was ported from tuvtran's PopMap placerequest.py https://github.com/tuvtran/PopMap Decodes a polyline that has been encoded using Google's algorithm http://code.google.com/apis/maps/documentation/polylinealgorithm.html This is a generic method that returns a list of (latitude, longitude) tuples. """ coord_chunks = [[]] for char in point_str: value = ord(char) - 63 split_after = not (value & 0x20) value &= 0x1F coord_chunks[-1].append(value) if split_after: coord_chunks.append([]) del coord_chunks[-1] coords = [] for coord_chunk in coord_chunks: coord = 0 for i, chunk in enumerate(coord_chunk): coord |= chunk << (i * 5) if coord & 0x1: coord = ~coord # invert coord >>= 1 coord /= 100000.0 coords.append(coord) points = [] prev_x = 0 prev_y = 0 for i in range(0, len(coords) - 1, 2): if coords[i] == 0 and coords[i + 1] == 0: continue prev_x += coords[i + 1] prev_y += coords[i] points.append((round(prev_x, 6), round(prev_y, 6))) return points def intersects(lat1, lat2, lng2, lat3, lng3, geodesic): if (lng3 >= 0 and lng3 >= lng2) or (lng3 < 0 and lng3 < lng2): return False if lat3 <= -PI / 2: return False if lat1 <= -PI / 2 or lat2 <= -PI / 2 or lat1 >= PI / 2 or lat2 >= PI / 2: return False if lng2 <= -PI: return False linear_lat = (lat1 * (lng2 - lng3) + lat2 * lng3) / lng2 if lat1 >= 0 and lat2 >= 0 and lat3 < linear_lat: return False if lat1 <= 0 and lat2 <= 0 and lat3 >= linear_lat: return True if lat3 >= PI / 2: return True return math.tan(lat3) >= tan_lat_gc(lat1, lat2, lng2, lng3) if geodesic else mercator(lat3) >= mercator_rhumb( lat1, lat2, lng2, lng3) def mercator_rhumb(lat1, lat2, lng2, lng3): return (mercator(lat1) * (lng2 - lng3) + mercator(lat2) * lng3) / lng2 def mercator(lat): return math.log(math.tan(lat * 0.5 + PI / 4)) def tan_lat_gc(lat1, lat2, lng2, lng3): return (math.tan(lat1) * math.sin(lng2 - lng3) + math.tan(lat2) * math.sin(lng3)) / math.sin(lng2) def to_radians(degrees): return degrees * PI / 180 def wrap(n, minimum, maximum): return n if minimum <= n < maximum else mod(n - minimum, maximum - minimum) + minimum def mod(x, m): return ((x % m) + m) % m def hav(x): sin_half = math.sin(x * 0.5) return sin_half * sin_half def clamp(x, low, high): return low if x < low else (high if x > high else x) def hav_distance(lat1, lat2, d_lng): return hav(lat1 - lat2) + hav(d_lng) * math.cos(lat1) * math.cos(lat2) def inverse_mercator(y): return 2.0 * math.atan(math.exp(y)) - 1.5707963267948966 def sin_delta_bearing(lat1, lng1, lat2, lng2, lat3, lng3): sin_lat1 = math.sin(lat1) cos_lat2 = math.cos(lat2) cos_lat3 = math.cos(lat3) lat31 = lat3 - lat1 lng31 = lng3 - lng1 lat21 = lat2 - lat1 lng21 = lng2 - lng1 a = math.sin(lng31) * cos_lat3 c = math.sin(lng21) * cos_lat2 b = math.sin(lat31) + 2.0 * 
sin_lat1 * cos_lat3 * hav(lng31) d = math.sin(lat21) + 2.0 * sin_lat1 * cos_lat2 * hav(lng21) denom = (a * a + b * b) * (c * c + d * d) return 1.0 if denom <= 0.0 else (a * d - b * c) / math.sqrt(denom) def sin_sum_from_hav(x, y): a = math.sqrt(x * (1.0 - x)) b = math.sqrt(y * (1.0 - y)) return 2.0 * (a + b - 2.0 * (a * y + b * x)) def hav_from_sin(x): x2 = x * x return x2 / (1.0 + math.sqrt(1.0 - x2)) * 0.5 def sin_from_hav(h): return 2.0 * math.sqrt(h * (1.0 - h)) """ Methods below have not been imported from any standalone API or package and simply exist to aide in the function of this entire package """ def within_city_bounds(origin: LatLng, destination: LatLng) -> bool: la_bounds = [(33.8641899712294, -118.281468637671), (33.8627792, -118.2814372), (33.862734758137, -118.281534783721), (33.8415, -118.2825), (33.8415, -118.2965), (33.8135, -118.293), (33.803, -118.2965), (33.803, -118.2685), (33.81, -118.265), (33.81, -118.2545), (33.803, -118.251), (33.7995, -118.23), (33.81, -118.2265), (33.824, -118.2335), (33.8345, -118.23), (33.8345, -118.223), (33.824, -118.2195), (33.789, -118.223), (33.7855, -118.216), (33.7785, -118.216), (33.7645, -118.2405), (33.754, -118.237), (33.754, -118.244), (33.7155, -118.2265), (33.6875, -118.223), (33.6875, -118.237), (33.67, -118.251), (33.6595, -118.272), (33.656, -118.321), (33.6595, -118.349), (33.67, -118.3665), (33.7295, -118.335), (33.733, -118.3245), (33.7505, -118.321), (33.7505, -118.314), (33.8695, -118.314), (33.873, -118.2965), (33.9465, -118.2965), (33.936, -118.3035), (33.936, -118.3175), (33.9675, -118.321), (33.964, -118.335), (33.978, -118.3385), (33.978, -118.3665), (33.9605, -118.3665), (33.957, -118.3735), (33.957, -118.3665), (33.9325, -118.363), (33.9255, -118.3665), (33.929, -118.4225), (33.9115, -118.419), (33.9115, -118.503), (33.9535, -118.5275), (33.964, -118.5415), (33.971, -118.5415), (34.0165, -118.4505), (34.0235, -118.454), (34.041, -118.475), (34.0375, -118.4855), (34.0445, -118.4925), (33.9815, -118.552), (33.985, -118.573), (34.041, -118.5695), (34.0655, -118.573), (34.069, -118.601), (34.076, -118.6045), (34.1285, -118.5695), (34.1425, -118.608), (34.1425, -118.6325), (34.16, -118.6465), (34.167, -118.664), (34.174, -118.664), (34.1775, -118.671), (34.2125, -118.671), (34.216, -118.664), (34.2405, -118.65), (34.2405, -118.636), (34.272, -118.636), (34.279, -118.629), (34.279, -118.5975), (34.307, -118.5905), (34.3, -118.5485), (34.3105, -118.552), (34.321, -118.5485), (34.3175, -118.5345), (34.342, -118.5065), (34.335, -118.4925), (34.335, -118.405), (34.3245, -118.4015), (34.321, -118.391), (34.3035, -118.4015), (34.3035, -118.384), (34.2895, -118.377), (34.2895, -118.3665), (34.2825, -118.3595), (34.2895, -118.3035), (34.2965, -118.3035), (34.2965, -118.2825), (34.2825, -118.2825), (34.286, -118.2755), (34.2825, -118.2335), (34.265, -118.2335), (34.2615, -118.251), (34.251, -118.251), (34.2475, -118.2615), (34.2195, -118.2615), (34.216, -118.3315), (34.202, -118.3385), (34.1985, -118.3595), (34.167, -118.3525), (34.1495, -118.342), (34.16, -118.3245), (34.16, -118.314), (34.167, -118.3105), (34.16, -118.2755), (34.125, -118.258), (34.1285, -118.2405), (34.139, -118.2405), (34.139, -118.2335), (34.153, -118.23), (34.1495, -118.209), (34.1565, -118.195), (34.153, -118.181), (34.141965071875, -118.181), (34.1418339, -118.180908), (34.1412999, -118.180757), (34.1412019, -118.180646), (34.1411289, -118.180513), (34.1410909, -118.180082), (34.1408809, -118.180097), (34.1408179, -118.180198), (34.1407129, 
-118.180766), (34.1407352709369, -118.181), (34.132, -118.181), (34.1285, -118.1635), (34.118, -118.1635), (34.111, -118.167), (34.111, -118.174), (34.104, -118.174), (34.104, -118.153), (34.0585, -118.16), (34.0585, -118.188), (34.0095, -118.188), (34.0095, -118.237), (33.985, -118.2335), (33.985, -118.251), (33.957, -118.251), (33.957, -118.23), (33.95, -118.223), (33.9255, -118.2265), (33.9255, -118.251), (33.9185, -118.251), (33.9185, -118.279)] return inside_polygon(origin, la_bounds) and inside_polygon(destination, la_bounds) def find_distance(latlng1: LatLng, latlng2: LatLng) -> float: """ Computes the distance between two tuples of latitude and longitudes in meters """ lat1 = latlng1[0] lng1 = latlng1[1] lat2 = latlng2[0] lng2 = latlng2[1] earth_radius = 6371.00 phi1 = to_radians(lat1) phi2 = to_radians(lat2) delta_phi = to_radians(lat2 - lat1) delta_lambda = to_radians(lng2 - lng1) haversine_a = math.sin(delta_phi / 2) * math.sin(delta_phi / 2) + math.cos(phi1) * math.cos(phi2) * math.sin( delta_lambda / 2) * math.sin(delta_lambda / 2) haversine_c = 2 * math.atan2(math.sqrt(haversine_a), math.sqrt(1 - haversine_a)) haversine_d = (earth_radius * haversine_c) * 1000 return haversine_d def inside_polygon(point, polygon): n = len(polygon) inside = False x = point[0] y = point[1] p1x, p1y = polygon[0] for i in range(n + 1): x_ints = 0 p2x, p2y = polygon[i % n] if y > min(p1y, p2y): if y <= max(p1y, p2y): if x <= max(p1x, p2x): if p1y != p2y: x_ints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x if p1x == p2x or x <= x_ints: inside = not inside p1x, p1y = p2x, p2y return inside def get_accidents(lat: float, lng: float, radius: float, buckets: dict, decoded_polyline: List) -> List[dict]: near_accidents = [] tagged_accidents = [] tagged_buckets_keys = [] tagged_accidents.extend(buckets['b0']['accidents']) for point in decoded_polyline: for key in buckets: if key not in tagged_buckets_keys and key != 'b0': bucket = buckets[key] if inside_polygon((point[1], point[0]), bucket['bucket_border']): tagged_buckets_keys.append(key) tagged_accidents.extend(bucket['accidents']) break for tagged_accident in tagged_accidents: if find_distance((lat, lng), (tagged_accident['lat'], tagged_accident['lng'])) <= radius: near_accidents.append(tagged_accident) return near_accidents def find_directions(origin: LatLng, destination: LatLng, method: str) -> dict: parameters = { "origin": f'{origin[0]},{origin[1]}', "destination": f'{destination[0]},{destination[1]}', "mode": method, "alternatives": "true", "key": AUTH_KEY } # print(f'https://maps.googleapis.com/maps/api/directions/json?origin={origin[0]},{origin[1]}&destination={destination[0]},{destination[1]}&mode={method}&alternative=true&key={AUTH_KEY}') response = requests.get( "https://maps.googleapis.com/maps/api/directions/json?", params=parameters) json_data = response.json() status = json_data["status"] if status == 'OK': return {'status': 'OK', 'routes': json_data['routes']} elif status == 'ZERO_RESULTS': return {'status': 'ZERO_RESULTS', 'user_error_msg': 'SafeWays API Found No SafePaths for the Origin-Destination Combination', 'log_error_google': 'Google Directions API found zero results'} elif status == 'REQUEST_DENIED': return {'status': 'REQUEST_DENIED', 'user_error_msg': 'SafeWays API Encountered an Internal Key Validation Error', 'log_error_google': json_data["error_message"]} else: return {'status': 'SERVER_SIDE_ERROR', 'user_error_msg': 'SafeWays API Encountered a Internal Server Error', 'log_error_google': json_data["error_message"]}
python
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: streamlit/proto/DeckGlJsonChart.proto from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='streamlit/proto/DeckGlJsonChart.proto', package='', syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n%streamlit/proto/DeckGlJsonChart.proto\"M\n\x0f\x44\x65\x63kGlJsonChart\x12\x0c\n\x04json\x18\x01 \x01(\t\x12\x0f\n\x07tooltip\x18\x02 \x01(\t\x12\x1b\n\x13use_container_width\x18\x04 \x01(\x08\x62\x06proto3' ) _DECKGLJSONCHART = _descriptor.Descriptor( name='DeckGlJsonChart', full_name='DeckGlJsonChart', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='json', full_name='DeckGlJsonChart.json', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='tooltip', full_name='DeckGlJsonChart.tooltip', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_container_width', full_name='DeckGlJsonChart.use_container_width', index=2, number=4, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=41, serialized_end=118, ) DESCRIPTOR.message_types_by_name['DeckGlJsonChart'] = _DECKGLJSONCHART _sym_db.RegisterFileDescriptor(DESCRIPTOR) DeckGlJsonChart = _reflection.GeneratedProtocolMessageType('DeckGlJsonChart', (_message.Message,), { 'DESCRIPTOR' : _DECKGLJSONCHART, '__module__' : 'streamlit.proto.DeckGlJsonChart_pb2' # @@protoc_insertion_point(class_scope:DeckGlJsonChart) }) _sym_db.RegisterMessage(DeckGlJsonChart) # @@protoc_insertion_point(module_scope)
python
import os


def skip_if_environ(name):
    if name in os.environ:
        def skip_inner(func):
            return lambda x: None
        return skip_inner

    def inner(func):
        return func
    return inner
python
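A short sketch of how the decorator above could be applied to a test method; SKIP_SLOW_TESTS and the test class are illustrative names, not part of the original module.

import os
import unittest


class SlowTests(unittest.TestCase):
    # When SKIP_SLOW_TESTS is set in the environment, the decorator replaces
    # the method with a no-op; otherwise the test runs unchanged.
    @skip_if_environ("SKIP_SLOW_TESTS")
    def test_expensive_path(self):
        self.assertEqual(sum(range(1000)), 499500)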
from selenium import webdriver import pandas as pd import time import os # load product file product = pd.read_csv('../dataset/glowpick_products.csv') # urls product_urls = product.product_url.unique() url = 'https://www.glowpick.com' # driver driver = webdriver.Chrome() # information dataframe info_df = pd.DataFrame() # if there's file, load file and concatenate if os.path.isfile('../dataset/glowpick_info.csv'): df = pd.read_csv('../dataset/glowpick_info.csv') info_df = pd.concat([info_df, df], axis=0) print('out info_df.shape: ',info_df.shape) # crawling information of product for p_url in product_urls: print('='*100) print('in info_df.shape: ',info_df.shape) print('product url: ',p_url) driver.get(url + p_url) driver.implicitly_wait(5) # if category in total df, continue if info_df.shape[0] > 0: if p_url in info_df.product_url.unique(): continue # name name = driver.find_element_by_xpath('//*[@id="gp-product-detail"]/div/ul[1]/li[2]/div/section[1]/h1/span').text print('product: ',name) # description describe = driver.find_element_by_css_selector('.product-detail__description-box.product-detail__tr > td > div').text print('describe: ',describe) # tags tags = driver.find_element_by_css_selector('.product-detail__tag-list.product-detail__tr > td > p') spans = tags.find_elements_by_tag_name('span') t_lst = [] for span in spans: t_lst.append(span.text) tags = '/'.join(t_lst) print('tags: ',tags) # make dataframe df = pd.DataFrame({'product_url':[p_url], 'description':[describe], 'tag':[tags]}) info_df = pd.concat([info_df, df], axis=0) info_df.to_csv('../dataset/glowpick_info.csv', index=False) print()
python
import os import sys PROJECT_DIR = os.path.dirname(os.path.abspath(__file__)) BASE_DIR = os.path.dirname(PROJECT_DIR) sys.path.append(os.path.dirname(BASE_DIR)) SECRET_KEY = '@$n=(b+ih211@e02_kup2i26e)o4ovt6ureh@xbkfz!&@b(hh*' DEBUG = True ALLOWED_HOSTS = [] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.sites', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'bootstrap3', 'django_extensions', 'picker.apps.PickerConfig', 'demo', ) MIDDLEWARE = ( 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) TEMPLATES = [{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'demo/templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'demo.context_processors.demo', ], }, }] WSGI_APPLICATION = 'demo.wsgi.application' DATABASES = {'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.environ.get('DEMO_DB_NAME', os.path.join(BASE_DIR, 'db.sqlite3')), }} SITE_ID = 1 ROOT_URLCONF = 'demo.urls' LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/New_York' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATICFILES_DIRS = ( os.path.join(PROJECT_DIR, 'static'), ) MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media') DEMO = { 'dump_post_data': True } PICKER = { 'FAKE_DATETIME_NOW': None, 'NFL': { 'TEAM_PICKER_WIDGET': 'demo.forms.TemplateTeamChoice', }, 'HQ': { 'TEAM_PICKER_WIDGET': 'demo.forms.TemplateTeamChoice', } } from freezegun import freeze_time freezer = freeze_time("2019-09-14 12:00:01") freezer.start()
python
from django import forms

from .models import AddressEntry


class AddressEntryForm(forms.ModelForm):
    class Meta:
        model = AddressEntry
        fields = [
            'address',
        ]
python
""" Lines 5 and 6 were adapted from SO code: http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python """ import sys sys.path.insert(0, '..') """ END """ import main as program import pytest def test_int_0(): assert '0' == program._get_binary(0,1) def test_int_5(): assert '101'== program._get_binary(5,3) def test_int_1_with_larger_r(): assert '00001' == program._get_binary(1,5)
python
from enum import Enum, auto


class DatabaseActionType(Enum):
    WRITE_DATA_STORAGE = auto()  # Writes do not require a response on the request
    WRITE_STORAGE_INDEX = auto()
    READ_CONNECTED_DEVICES = auto()  # Reads need response to get requested data
    READ_DEVICE = auto()
    # RPC CALL
    DELETE_OLD_DATA = auto()
python
import os
import imp

import setuptools

version = imp.load_source("ssh2.version", os.path.join("ssh2", "version.py")).version

setuptools.setup(
    name="python-ssh",
    version=version,
    packages=setuptools.find_packages(include=["ssh2", "ssh2.*"]),
    package_dir={"ssh2": "ssh2"},
    license="MIT",
    author="Deric Degagne",
    author_email="[email protected]",
    description="A library to execute commands on remote hosts.",
    url="https://github.com/degagne/python-ssh",
    project_urls={
        "Bug Tracker": "https://github.com/degagne/python-ssh/issues",
        "Documentation": "https://python-ssh.readthedocs.io/en/latest/index.html"
    },
    install_requires=[
        "paramiko",
        "rich"
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
)
python
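The setup script above relies on the long-deprecated imp module (removed in Python 3.12). A hedged alternative that reads the same version attribute with importlib, assuming the original layout (ssh2/version.py defining a `version` variable) holds:

# Sketch: importlib equivalent of imp.load_source (assumes ssh2/version.py defines `version`).
import importlib.util
import os

spec = importlib.util.spec_from_file_location("ssh2.version", os.path.join("ssh2", "version.py"))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
version = module.version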
#!/usr/bin/env python3 """Demo on how to run the simulation using the Gym environment This demo creates a SimRearrangeDiceEnv environment and runs one episode using a dummy policy. """ from rrc_example_package import rearrange_dice_env from rrc_example_package.example import PointAtDieGoalPositionsPolicy def main(): env = rearrange_dice_env.SimRearrangeDiceEnv( goal=None, # passing None to sample a random goal action_type=rearrange_dice_env.ActionType.POSITION, visualization=True, ) is_done = False observation = env.reset() t = 0 policy = PointAtDieGoalPositionsPolicy(env.action_space, env.current_goal) while not is_done: action = policy.predict(observation, t) observation, reward, is_done, info = env.step(action) t = info["time_index"] if __name__ == "__main__": main()
python
#!/usr/bin/python3 # # Copyright (c) 2019-2021 Ruben Perez Hidalgo (rubenperez038 at gmail dot com) # # Distributed under the Boost Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # import requests from bs4 import BeautifulSoup import os from os import path REPO_BASE = path.abspath(path.join(path.dirname(__file__), '..')) DOC_PATH = path.join(REPO_BASE, 'doc', 'html') def list_doc_files(): all_files = [] for base_dir, _, files in os.walk(DOC_PATH): all_files += [path.join(base_dir, f) for f in files if f.endswith('.html')] return all_files def get_href(elm, current_file): try: res = elm['href'] except KeyError: return None if res.startswith('http://') or res.startswith('https://'): if '#error_er_' in res: return res.split('#error_er_')[0] else: return res else: curdir = path.dirname(current_file) return path.realpath(path.join(curdir, res.split('#')[0])) def extract_links(): external_links = {} internal_links = {} for fname in list_doc_files(): with open(fname, 'rt') as f: html_doc = f.read() soup = BeautifulSoup(html_doc, 'html.parser') links = [get_href(elm, fname) for elm in soup.find_all('a')] internal_links.update({ elm: fname for elm in links if elm is not None and elm.startswith('/')}) external_links.update({ elm: fname for elm in links if elm is not None and \ (elm.startswith('http://') or elm.startswith('https://'))}) return (external_links, internal_links) def check_external_links(links): s = requests.Session() for url in sorted(links.keys()): print('Checking ', url) response = s.head(url, allow_redirects=True) if response.status_code != 200: print(' ++++ {} response code: {}'.format(url, response.status_code)) def check_internal_links(links): for target, link_file in links.items(): if not path.exists(target): print(' ++++ Link {} in file {} does not exist'.format(target, link_file)) def main(): external, internal = extract_links() check_external_links(external) check_internal_links(internal) if __name__ == '__main__': main()
python
""" Copyright 2016 Stephen Boyd, Enzo Busseti, Steven Diamond, BlackRock Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import cvxpy as cvx import numpy as np import pandas as pd from ..policies import SinglePeriodOpt from ..costs import HcostModel, TcostModel from ..returns import ReturnsForecast from ..risks import FullSigma from .base_test import BaseTest DIR = os.path.dirname(__file__) + os.path.sep class TestOptimizer(BaseTest): def setUp(self): self.sigma = pd.read_csv(DIR+'sigmas.csv', index_col=0, parse_dates=[0]) self.returns = pd.read_csv(DIR+'returns.csv', index_col=0, parse_dates=[0]) self.volume = pd.read_csv(DIR+'volumes.csv', index_col=0, parse_dates=[0]) self.a, self.b, self.s = 0.0005, 1., 0. self.s = self.s + 1e-3 self.universe = self.returns.columns self.times = self.returns.index def test_single_period_opt(self): """Test single period optimizer. """ # Alpha source gamma = 100. n = len(self.universe) alpha_model = ReturnsForecast(self.returns) emp_Sigma = np.cov(self.returns.as_matrix().T) + np.eye(n)*1e-3 risk_model = FullSigma(emp_Sigma) tcost_model = TcostModel(0, self.b, self.sigma, self.volume, power=2) hcost_model = HcostModel(self.s*0, self.s) pol = SinglePeriodOpt(alpha_model, [gamma*risk_model, tcost_model, hcost_model], [], solver=cvx.ECOS) t = self.times[1] p_0 = pd.Series(index=self.universe, data=1E6) z = pol.get_trades(p_0, t) self.assertAlmostEqual(z.sum(), 0) # Compare with CP calculation. h = z + p_0 rho = self.b*self.sigma.loc[t]*(sum(p_0)/self.volume.loc[t]) rho = np.hstack([rho, 0]) A = 2*gamma*emp_Sigma + 2*np.diag(rho) s_val = pd.Series(index=self.returns.columns, data=self.s) s_val['cash'] = 0. b = self.returns.loc[t] + 2*rho*(p_0/sum(p_0)) + s_val h0 = np.linalg.solve(A, b) offset = np.linalg.solve(A, np.ones(n)) nu = (1 - h0.sum())/offset.sum() hstar = h0 + nu*offset self.assertAlmostEqual(hstar.sum(), 1) self.assertItemsAlmostEqual(h/sum(p_0), hstar, places=4) # def test_multi_period(self): # """Test multiperiod optimizer. # """ # # Alpha source # bmark = pd.Series(index=self.universe, data=0.) # bmark.cash=1 # gamma = 100. # n = len(self.universe) # alpha_model = ReturnsForecast(self.returns) # emp_Sigma = np.cov(self.returns.as_matrix().T) + np.eye(n)*1e-3 # risk_model = FullSigma(emp_Sigma,gamma_half_life=np.inf) # tcost_model = TcostModel(self.volume, self.sigma, # self.a*0, self.b, power=2) # hcost_model = HcostModel(self.s*0, self.s) # pol = MultiPeriodOpt(list(self.times)[:3], bmark, 2, alpha_model, # [gamma*risk_model, tcost_model, hcost_model], # [], solver=cvx.ECOS) # # t = self.times[1] # p_0 =pd.Series(index=self.universe, data=1E6) # z = pol.get_trades(p_0, t) # self.assertAlmostEqual(z.sum(), 0) # # Compare with CP calculation. Terminal constraint. 
# h = z + p_0 # rho=self.b*self.sigma.loc[t]*(sum(p_0)/self.volume.loc[t]) # rho=np.hstack([rho, 0]) # A = 2*gamma*emp_Sigma + 4*np.diag(rho) # s_val = self.s.loc[t] # s_val['cash'] = 0 # b = self.returns.loc[t] + 2*rho*(p_0/sum(p_0) + bmark) + s_val # h0 = np.linalg.solve(A, b) + bmark # offset = np.linalg.solve(A, np.ones(n)) # nu = (1 - h0.sum())/offset.sum() # hstar = h0 + nu*offset # self.assertAlmostEqual(hstar.sum(), 1) # self.assertItemsAlmostEqual(h/sum(p_0), hstar, places=4) # # # pol = MultiPeriodOpt(2, alpha_model, [risk_model, tcost_model, # hcost_model], [], solver=cvx.ECOS, # terminal_constr=False) # # t = self.times[1] # p_0 = pd.Series(index=self.universe, data=1E6) # z = pol.get_trades(p_0, t) # self.assertAlmostEqual(z.sum(), 0) # # Compare with CP calculation. # h = z + p_0 # rho = self.b*self.sigma.loc[t]*(sum(p_0)/self.volume.loc[t]) # rho = np.hstack([rho, 0]) # D = np.diag(rho) # A = np.bmat([[2*gamma*emp_Sigma + 4*D, -2*D, np.ones((n,1)), # np.zeros((n,1))], # [-2*D, 2*gamma*emp_Sigma, np.zeros((n,1)), # np.ones((n,1))], # [np.ones((1,n)), np.zeros((1,n+2))], # [np.zeros((1,n)), np.ones((1, n)), np.zeros((1,2))]]) # s_val = self.s.loc[t] # s_val['cash'] = 0 # b = self.returns.loc[t] + 2*rho*p_0/sum(p_0) + s_val # b = np.hstack([b, self.returns.loc[t] + s_val, 1, 1]) # x = np.linalg.solve(A, b) # w1 = x[:n] # w2 = x[n:2*n] # self.assertAlmostEqual(w1.sum(), 1) # self.assertAlmostEqual(w2.sum(), 1) # self.assertItemsAlmostEqual(h/sum(p_0), w1, places=4)
python
# Generated by Django 3.2.1 on 2021-05-09 13:39

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tenure', models.FloatField()),
                ('preferredlogindevice', models.CharField(max_length=20)),
                ('citytier', models.FloatField()),
                ('warehousetohome', models.FloatField()),
                ('preferredpaymenthome', models.CharField(max_length=20)),
                ('gender', models.CharField(max_length=10)),
                ('hourspendonapp', models.FloatField()),
                ('numberofdeviceregistered', models.FloatField()),
                ('preferedordercat', models.CharField(max_length=20)),
                ('satisfactionscore', models.FloatField()),
                ('maritalstatus', models.CharField(max_length=20)),
                ('noofaaddress', models.FloatField()),
                ('complain', models.FloatField()),
                ('orderamounthikefromlastyear', models.FloatField()),
                ('couponused', models.FloatField()),
                ('ordercount', models.FloatField()),
                ('daysincelastorder', models.FloatField()),
                ('cashbackamount', models.FloatField()),
                ('userid', models.CharField(max_length=30)),
                ('password', models.CharField(max_length=20)),
            ],
        ),
    ]
python
from .BaseNeuralBatch import BaseNeuralBatch from ..nu import v1 from .. import Ports import numpy as np class CubicBatch(BaseNeuralBatch): def __init__( self, name, parent, cell_pos, shape, unit_distance, nu_type=v1, receive_modulators=False, nu_params={}): super().__init__( name, parent, cell_pos, nu_type, receive_modulators, nu_params) self.unit_distance = unit_distance self.shape = shape start_pos = ( self.cell_pos[0] - (self.shape[0] - 1) / 2 * self.unit_distance, self.cell_pos[1] - (self.shape[1] - 1) / 2 * self.unit_distance, self.cell_pos[2] - (self.shape[2] - 1) / 2 * self.unit_distance) i = 0 for z in range(self.shape[2]): for y in range(self.shape[1]): for x in range(self.shape[0]): pos = (start_pos[0] + x * self.unit_distance, start_pos[1] + y * self.unit_distance, start_pos[2] + z * self.unit_distance) unit_name = self.name + "-NU-" + str(i) temp_nu = self.nu_type(unit_name, self, pos, **self.nu_params) if self.receive_modulators: self.couple( self.in_ports[Ports.NEUROMODULATORS], temp_nu.in_ports[Ports.NEUROMODULATORS]) i += 1 def interconnect_full(self, synapse_type, synapse_params): for src_unit in self.children: for target_unit in self.children: if src_unit is not target_unit: new_synapse = target_unit.connect( src_unit, synapse_type, synapse_params) self.couple( src_unit.out_ports[Ports.AP], target_unit.in_ports[new_synapse.name]) def interconnect_prob(self, prob, synapse_type, synapse_params): for src_unit in self.children: for target_unit in self.children: if src_unit is not target_unit: if np.random.uniform() <= prob: new_synapse = target_unit.connect( src_unit, synapse_type, synapse_params) self.couple( src_unit.out_ports[Ports.AP], target_unit.in_ports[new_synapse.name]) def get_index_from_pos(self, pos): return pos[0] + pos[1] * self.shape[0] + pos[2] * self.shape[0] * self.shape[1] def __getitem__(self, index): if type(index) == int: return self.children[index] elif len(index) == 3: output = [] for z in range(index[2].start, index[2].stop, index[2].step): for y in range(index[1].start, index[1].stop, index[1].step): for x in range(index[0].start, index[0].stop, index[0].step): output.append(self[self.get_index_from_pos((x, y, z))]) return output else: raise ValueError( "CubicBatch only receives tuple of length 1 or 3.") def __str__(self): output = self.name + "\n" output += "unit_distance: "+str(self.unit_distance) + "\n" output += "cell_pos: "+str(self.cell_pos) + "\n" output += "shape: " + str(self.cell_pos) + "\n" temp = str(self.membrane_type).split(".") output += "membrane_type: " + temp[len(temp) - 1][:-2] + "\n" for key in self.membrane_params: if type(self.membrane_params[key]) == type: temp = str(self.membrane_params[key]).split(".") output += key + ": " + temp[len(temp) - 1][:-2] + "\n" else: output += key + ": " + str(self.membrane_params[key]) + "\n" return output def asNumpyArr(self): arr = np.empty(self.shape) for z in range(0, self.shape[2]): for y in range(0, self.shape[1]): for x in range(0, self.shape[0]): weights = [ s.w for s in self[self.get_index_from_pos((x, y, z))].synapses] avg = sum(weights)/len(weights) if not len(weights) == 0 else 0 arr[x, y, z] = avg return arr
python
from datetime import datetime import discord from discord.ext import commands class cat_debug(commands.Cog, name="Debug commands"): """Documentation""" def __init__(self, bot): self.bot = bot @commands.command() async def tell_me_about_yourself(self, ctx): print( f"[{datetime.now()}] Command Issued: tell_me_about_yourself\n - message: {ctx.message.content}\n - debug: {ctx.message}" ) text = "My name is XikoBot!\n. My creator is XikoCat. Check him out on twitter: https://twitter.com/xikocat\nType %help, to get a list of commands.\n :)" await ctx.send(text) @commands.command(help="Prints details of Author") async def whats_my_name(self, ctx): print( f"[{datetime.now()}] Command Issued: whats_my_name\n - message: {ctx.message.content}\n - debug: {ctx.message}" ) await ctx.send(f"Hello {ctx.author.name}") @commands.command(help="Prints details of Server") async def where_am_i(self, ctx): print( f"[{datetime.now()}] Command Issued: where_am_i\n - message: {ctx.message.content}\n - debug: {ctx.message}" ) owner = str(ctx.guild.owner) region = str(ctx.guild.region) guild_id = str(ctx.guild.id) memberCount = str(ctx.guild.member_count) icon = str(ctx.guild.icon_url) desc = ctx.guild.description embed = discord.Embed( title=ctx.guild.name + " Server Information", description=desc, color=discord.Color.blue(), ) embed.set_thumbnail(url=icon) embed.add_field(name="Owner", value=owner, inline=True) embed.add_field(name="Server ID", value=guild_id, inline=True) embed.add_field(name="Region", value=region, inline=True) embed.add_field(name="Member Count", value=memberCount, inline=True) await ctx.send(embed=embed) def setup(bot): bot.add_cog(cat_debug(bot))
python
'''This module computes ''' import argparse import csv import io import os.path from datetime import datetime from urllib.request import urlopen from stockjournal.operator import gmean csv_header = "Date,Open,High,Low,Close,Volume,Adj Close" parser = argparse.ArgumentParser(description='Stock stats tool using data \ from Yahoo Finance service or local file.') parser.add_argument('src', help="csv file with Yahoo Finance format (%s) or \ a valid stock symbol name to fetch from Yahoo Finance" % csv_header) # months in yahoo finance starts from 0 # http://chart.finance.yahoo.com/table.csv?s=JPM&a=11&b=30&c=1983&d=1&e=16&f=2017&g=d&ignore=.csv # all: ## "http://chart.finance.yahoo.com/table.csv?s=JPM&d=1&e=16&f=2017&g=d&ignore=.csv" def read_from_yahoo(name): now = datetime.now() params = "s={}&d={}&e={}&f={}&g=d&ignore=.csv".format( name.upper(), now.month - 1, now.day, now.year) url = 'http://chart.finance.yahoo.com/table.csv?' + params with urlopen(url) as f: return get_values(io.TextIOWrapper(f, encoding='ascii')) def read_from_file(filename): with open(filename) as f: return get_values(f) def get_values(resource): h = resource.readline()[:-1] assert h == csv_header,\ 'csv header must be:\n%s got:\n%s' % (csv_header, h) reader = csv.reader(resource) vals = [float(r[4]) for r in reader] return vals def main(): args = parser.parse_args() if os.path.exists(args.src): vals = read_from_file(args.src) else: try: vals = read_from_yahoo(args.src) except Exception as e: print("Can't get the stock data from Yahoo Finance service.\ Probably the stock code is wrong: %s\n" % args.src, e) return print(gmean(vals)) main()
python
# Author: Smit Patel
# Date: 25/07/2018
# File: chatbot_trainer.py
# Licence: MIT

from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
import os

bot = ChatBot('Bot')
bot.set_trainer(ListTrainer)

while True:
    message = input('You:')
    if message.strip() != 'Bye':
        reply = bot.get_response(message)
        print('ChatBot :', reply)
    if message.strip() == 'Bye':
        print('ChatBot : Bye, see u again')
        break
python
#!/usr/bin/python3 # ''' ### Desafio de request de url ### Extrair o nono e o quarto campos do arquivo CSV sobre região de influencia das Cidades Ignorar a primeira linha que é o cabechalho do arquivo dados = entrada.read().decode('latin1') Arquivo IBGE esta no formato ISO-8859-1 (aka latin1) Essa linha baixa o arquivo para a memoria do computador for cidade in csv.reader(dados.splitlines()): Sem o uso do splitlines, o csv.reader vai processar caracter por caracter (e não linha por linha), desde forma a variável linhas sempre terá apenas um elemento, e por isso linhas[8] ou linhas[3] vai levantar a exceção: list index out of range 9 Coluna = Indice 8 4 Coluna = Indice 3 read(r'http://files.cod3r.com.br/curso-python/desafio-ibge.csv') Faz com que o python nao interprete de forma indevida os caracteres da url exemplo de uso, imprimindo o caractere \n print(\\n\\n\\n) # OU print(r'\n\n\n') ''' import csv from urllib import request def read(url): with request.urlopen(url) as entrada: print('Baixando o CSV...') dados = entrada.read().decode('latin1') print('Download completo!') for cidade in csv.reader(dados.splitlines()): print(f'{cidade[8]}: {cidade[3]}') if __name__ == '__main__': read(r'http://files.cod3r.com.br/curso-python/desafio-ibge.csv') # Fontes: # Curso Python 3 - Curso Completo do Básico ao Avançado Udemy Aula 97 a 107 # https://github.com/cod3rcursos/curso-python/tree/master/manipulacao_arquivos
python
class TennisGame():
    def __init__(self, first_player_name="player1", second_player_name="player2"):
        self.first_player_name = first_player_name
        self.second_player_name = second_player_name
        self.first_player_score = 0
        self.second_player_score = 0

    @property
    def first_player_score(self):
        return self._first_player_score

    @first_player_score.setter
    def first_player_score(self, score):
        self._first_player_score = score

    @property
    def second_player_score(self):
        return self._second_player_score

    @second_player_score.setter
    def second_player_score(self, score):
        self._second_player_score = score

    def score(self):
        self.score_lookup = {
            "0": "Love",
            "1": "Fifteen",
            "2": "Thirty",
            "3": "Forty",
            "4": "Adv",
            "5": "Win"
        }
        if self.is_same_score():
            return self.get_high_same_score_result() if self.is_both_score_high_than_forty() else self.get_low_same_score_result()
        else:
            return self.get_high_diff_score_result() if self.is_both_score_high_than_forty() else self.get_low_diff_score_result()

    def get_low_same_score_result(self):
        return f"{self.score_lookup[str(self.first_player_score)]}-All"

    def get_low_diff_score_result(self):
        return f"{self.score_lookup[str(self.first_player_score)]}-{self.score_lookup[str(self.second_player_score)]}"

    def get_high_same_score_result(self):
        return "Deuce"

    def get_high_diff_score_result(self):
        return f"{self.get_winner()} {self.get_adv_statue_by_max_score()}"

    def get_winner(self):
        return self.first_player_name if self.first_player_score > self.second_player_score else self.second_player_name

    def get_adv_statue_by_max_score(self):
        return self.score_lookup[str(max(self.first_player_score, self.second_player_score))]

    def is_both_score_high_than_forty(self):
        return min(self.first_player_score, self.second_player_score) >= 3

    def is_same_score(self):
        return self.first_player_score == self.second_player_score
python
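A brief usage sketch for the scorer above; the scores are set directly through the properties the class already exposes, and the player names are placeholders.

game = TennisGame("Ann", "Bob")
game.first_player_score = 3
game.second_player_score = 3
print(game.score())   # "Deuce"
game.first_player_score = 4
print(game.score())   # "Ann Adv"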
# @Time : 2020/11/14 # @Author : Gaole He # @Email : [email protected] # UPDATE: # @Time : 2020/12/3 # @Author : Tianyi Tang # @Email : [email protected] # UPDATE # @Time : 2021/4/12 # @Author : Lai Xu # @Email : [email protected] """ textbox.evaluator.bleu_evaluator ####################################### """ import numpy as np from fast_bleu import BLEU from textbox.evaluator.sentence_bleu import sentence_bleu, SmoothingFunction from textbox.evaluator.abstract_evaluator import AbstractEvaluator class BleuEvaluator(AbstractEvaluator): r"""Bleu Evaluator. Now, we support metrics `'bleu'` """ def __init__(self, task_type): self.n_grams = [1, 2, 3, 4] self.task_type = task_type self.weights = self._generate_weights() def _generate_weights(self): weight = [0] * max(self.n_grams) weights = {} for n_gram in self.n_grams: weight[n_gram - 1] = 1.0 weights['bleu-{}'.format(n_gram)] = tuple(weight) weight[n_gram - 1] = 0.0 avg_weight = [1. / n_gram] * n_gram avg_weight.extend([0. for index in range(max(self.n_grams) - n_gram)]) weights['bleu-{}-avg'.format(n_gram)] = tuple(avg_weight) return weights def _calc_fast_bleu(self, generate_corpus, reference_corpus): r""" Calculate the BLEU metrics of the generated corpus in referenced corpus. Args: generate_corpus (List[List[str]]): the generated corpus reference_corpus (List[List[str]]): the referenced corpus n_grams (List): the n-gram metric to be calculated Returns: list: the BLEU results and average BLEU scores """ bleu = BLEU(reference_corpus, self.weights) scores = bleu.get_score(generate_corpus) return scores def _calc_metrics_info(self, generate_corpus, reference_corpus): r"""get metrics result Args: generate_corpus: the generated corpus reference_corpus: the referenced corpus Returns: dict: a dict of metrics <metric> which record the results according to self.n_grams """ bleu_dict = {} for n_gram in self.n_grams: bleu_dict['bleu-{}'.format(n_gram)] = [] for n_gram in self.n_grams: bleu_dict['bleu-{}-avg'.format(n_gram)] = [] if self.task_type: results = self._calc_fast_bleu(generate_corpus=generate_corpus, reference_corpus=reference_corpus) for n_gram in self.n_grams: bleu_dict['bleu-{}'.format(n_gram)].append(np.array(results['bleu-{}'.format(n_gram)]).mean()) bleu_dict['bleu-{}-avg'.format(n_gram)].append(np.array(results['bleu-{}-avg'.format(n_gram)]).mean()) else: for i in range(len(generate_corpus)): pred_sent = generate_corpus[i] gold_sent = reference_corpus[i] results = sentence_bleu( hypothesis=pred_sent, references=[gold_sent], weights=self.weights, smoothing_function=SmoothingFunction().method1 ) for n_gram in self.n_grams: bleu_dict['bleu-{}'.format(n_gram)].append(np.array(results['bleu-{}'.format(n_gram)]).mean()) bleu_dict['bleu-{}-avg'.format(n_gram)].append( np.array(results['bleu-{}-avg'.format(n_gram)]).mean() ) return bleu_dict
python
import torch import numpy as np import pandas as pd from os.path import join from pathlib import Path from torch.utils.data import Dataset from torch.nn.utils.rnn import pad_sequence class DSet(Dataset): ''' This is the WSJ parser ''' def __init__(self, path, split): # Setup self.path = path self.wav_form = join(path, 'wav', '{}.wav') self.phn_form = join(path, 'phn', '{}.pt') # List all wave files self.file_list = [] for s in split: s_list = pd.read_csv(join(path,'meta',s+'_phn.csv'),header=None)[0].tolist() assert len(s_list) > 0, "No data found @ {}".format(join(path,s)) self.file_list += s_list def __getitem__(self, index): fid = self.file_list[index] return self.wav_form.format(fid), self.phn_form.format(fid) def __len__(self): return len(self.file_list) def collect_batch(batch, audio_transform, audio_max_frames, mode): '''Collects a batch, should be list of <str> file_path ''' # Load Batch file_id, audio_feat, phn_seq, audio_len = [], [], [], [] with torch.no_grad(): for wav,phn in batch: file_id.append(wav.rsplit('/',1)[-1].replace('.wav','')) # Audio feature (sequence) on-the-fly x = audio_transform(filepath=wav) # Phn label sequence (test set shouldn't be cropped) if mode =='test': phn = phn.replace('.pt','_nocrop.pt') y = torch.load(phn)+1 # 0 = pad # Crop to avoid batch too large x,y = _crop(x,y,audio_max_frames, mode) audio_feat.append(x) audio_len.append(len(x)) phn_seq.append(y[:len(x)]) # Descending audio length within each batch audio_len, audio_feat, phn_seq, file_id = zip(*[(fl, f, phn, fid) for fl, f, phn, fid in sorted(zip(audio_len, audio_feat, phn_seq, file_id), reverse=True, key=lambda x:x[0])]) # Zero padding audio_feat = pad_sequence(audio_feat, batch_first=True) phn_seq = pad_sequence(phn_seq, batch_first=True) return file_id, audio_feat, audio_len, phn_seq def _crop(x, y, max_len, mode): if len(x)>len(y): if mode == 'test': raise NotImplementedError('Test set are not supposed to be cropped') else: # Crop files that are too long x = x[:len(y)] if len(x) > max_len: return x[:max_len],y[:max_len] else: return x,y
python
#!/usr/bin/env python import astropy.units as u __all__ = ['toltec_info', ] toltec_info = { 'instru': 'toltec', 'name': 'TolTEC', 'name_long': 'TolTEC Camera', 'array_physical_diameter': 127.049101 << u.mm, 'fov_diameter': 4. << u.arcmin, 'fg_names': ['fg0', 'fg1', 'fg2', 'fg3'], 'fg0': { 'index': 0, 'det_pa': 0. << u.deg, }, 'fg1': { 'index': 1, 'det_pa': 45. << u.deg, }, 'fg2': { 'index': 2, 'det_pa': 90. << u.deg, }, 'fg3': { 'index': 3, 'det_pa': 135. << u.deg, }, 'array_names': ['a1100', 'a1400', 'a2000'], 'a1100': { 'index': 0, 'name': 'a1100', 'name_long': 'TolTEC 1.1 mm array', 'wl_center': 1.1 << u.mm, 'array_mounting_angle': 90. << u.deg }, 'a1400': { 'index': 1, 'name': 'a1400', 'name_long': 'TolTEC 1.4 mm array', 'wl_center': 1.4 << u.mm, 'array_mounting_angle': -90. << u.deg }, 'a2000': { 'index': 2, 'name': 'a2000', 'name_long': 'TolTEC 2.0 mm array', 'wl_center': 2.0 << u.mm, 'array_mounting_angle': -90. << u.deg }, 'nws': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'interfaces': [ 'toltec0', 'toltec1', 'toltec2', 'toltec3', 'toltec4', 'toltec5', 'toltec6', 'toltec7', 'toltec8', 'toltec9', 'toltec10', 'toltec11', 'toltec12', 'hwpr', 'wyatt', 'tel', 'toltec_hk'], 'toltec0': { 'name': 'toltec0', 'nw': 0, 'array_name': 'a1100', }, 'toltec1': { 'name': 'toltec1', 'nw': 1, 'array_name': 'a1100', }, 'toltec2': { 'name': 'toltec2', 'nw': 2, 'array_name': 'a1100', }, 'toltec3': { 'name': 'toltec3', 'nw': 3, 'array_name': 'a1100', }, 'toltec4': { 'name': 'toltec4', 'nw': 4, 'array_name': 'a1100', }, 'toltec5': { 'name': 'toltec5', 'nw': 5, 'array_name': 'a1100', }, 'toltec6': { 'name': 'toltec6', 'nw': 6, 'array_name': 'a1100', }, 'toltec7': { 'name': 'toltec7', 'nw': 7, 'array_name': 'a1400', }, 'toltec8': { 'name': 'toltec8', 'nw': 8, 'array_name': 'a1400', }, 'toltec9': { 'name': 'toltec9', 'nw': 9, 'array_name': 'a1400', }, 'toltec10': { 'name': 'toltec10', 'nw': 10, 'array_name': 'a1400', }, 'toltec11': { 'name': 'toltec11', 'nw': 11, 'array_name': 'a2000', }, 'toltec12': { 'name': 'toltec12', 'nw': 12, 'array_name': 'a2000', }, 'hwpr': { 'name': 'hwpr', }, 'wyatt': { 'name': 'wyatt', }, 'tel': { 'name': 'tel', }, 'toltec_hk': { 'name': 'toltec_hk' }, }
python
"""\ Examples For the development.ini you must supply the paster app name: %(prog)s development.ini --app-name app --init --clear """ from pyramid.paster import get_app import atexit import logging import os.path import select import shutil import sys EPILOG = __doc__ logger = logging.getLogger(__name__) def main(): import argparse parser = argparse.ArgumentParser( description="Run development servers", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument('--app-name', help="Pyramid app name in configfile") parser.add_argument('config_uri', help="path to configfile") parser.add_argument('--clear', action="store_true", help="Clear existing data") parser.add_argument('--init', action="store_true", help="Init database") parser.add_argument('--load', action="store_true", help="Load test set") parser.add_argument('--datadir', default='/tmp/clincoded', help="path to datadir") args = parser.parse_args() logging.basicConfig() # Loading app will have configured from config file. Reconfigure here: logging.getLogger('clincoded').setLevel(logging.DEBUG) from clincoded.tests import elasticsearch_fixture, postgresql_fixture from contentbase.elasticsearch import create_mapping datadir = os.path.abspath(args.datadir) pgdata = os.path.join(datadir, 'pgdata') esdata = os.path.join(datadir, 'esdata') if args.clear: for dirname in [pgdata, esdata]: if os.path.exists(dirname): shutil.rmtree(dirname) if args.init: postgresql_fixture.initdb(pgdata, echo=True) postgres = postgresql_fixture.server_process(pgdata, echo=True) elasticsearch = elasticsearch_fixture.server_process(esdata, echo=True) processes = [postgres, elasticsearch] @atexit.register def cleanup_process(): for process in processes: if process.poll() is None: process.terminate() for process in processes: try: for line in process.stdout: sys.stdout.write(line.decode('utf-8')) except IOError: pass process.wait() if args.init: app = get_app(args.config_uri, args.app_name) create_mapping.run(app) if args.load: from webtest import TestApp environ = { 'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST', } testapp = TestApp(app, environ) from clincoded.loadxl import load_all from pkg_resources import resource_filename inserts = resource_filename('clincoded', 'tests/data/inserts/') docsdir = [resource_filename('clincoded', 'tests/data/documents/')] load_all(testapp, inserts, docsdir) print('Started. ^C to exit.') stdouts = [p.stdout for p in processes] # Ugly should probably use threads instead while True: readable, writable, err = select.select(stdouts, [], stdouts, 5) for stdout in readable: for line in iter(stdout.readline, b''): sys.stdout.write(line.decode('utf-8')) if err: for stdout in err: for line in iter(stdout.readline, b''): sys.stdout.write(line.decode('utf-8')) break if __name__ == '__main__': main()
python
import logging import os.path DEFAULT_LOG_PATH = None DEFAULT_LOG_DIR = os.path.join(os.path.dirname(__file__), "logs") if not os.path.exists(DEFAULT_LOG_DIR): try: os.mkdir(DEFAULT_LOG_DIR) except OSError: DEFAULT_LOG_DIR = None if DEFAULT_LOG_DIR: DEFAULT_LOG_PATH = os.path.join(DEFAULT_LOG_DIR, "search.log") def with_logging_methods(methods): """ Class decorator to add logging methods like info(), warning(), ... to logger class :param methods: A list of string method names :return: Class decorator """ def logger_decorator(clazz): def create_log_method(name): def inner(self, msg, force_console_print=False): if logging.root.isEnabledFor(self.log_level_mappings()[name]): getattr(logging, name)(msg) elif force_console_print: print(msg) return inner for level in methods: setattr(clazz, level, create_log_method(level)) return clazz return logger_decorator @with_logging_methods(("info", "error", "warning", "debug", "critical")) class SearchLogger: _instance = None @classmethod def get_logger(cls): if not cls._instance: raise RuntimeError( "Logger should be initialized before the first use. Use SearchLogger.init_logger() to do so." ) return cls._instance @classmethod def init_logger(cls, *args, **kwargs): if not cls._instance: cls._instance = cls(*args, **kwargs) @classmethod def log_level_mappings(cls): return { "info": logging.INFO, "error": logging.ERROR, "warning": logging.WARNING, "debug": logging.DEBUG, "critical": logging.CRITICAL } @classmethod def get_actual_log_level(cls, level): return cls.log_level_mappings().get(level, logging.INFO) def __init__(self, path=DEFAULT_LOG_PATH, log_to_console=True, level="info"): log_level = self.__class__.get_actual_log_level(level) handlers = [] if path: handlers.append(logging.FileHandler(path, mode='w')) if log_to_console or not path: handlers.append(logging.StreamHandler()) logging.root.handlers = [] logging.basicConfig( level=log_level, format="%(asctime)s [%(levelname)s] %(message)s", handlers=handlers )
python
# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None


class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Head node of the return list
        headNode = ListNode(0)
        # A pointer that moves around; note we don't want to use the
        # headNode, otherwise we would lose the starting point
        dummyPointer = headNode
        # A placeholder for the carry
        carry = 0
        # We need carry in the loop condition in case l1 and l2 only have one
        # element and their sum is greater than 10
        while l1 or l2 or carry:
            val = (l1.val if l1 else 0) + (l2.val if l2 else 0) + carry
            dummyPointer.next = ListNode(val % 10)
            dummyPointer = dummyPointer.next
            carry = val // 10  # integer division so the carry stays an int on Python 3
            # Move on to the next node
            l1 = l1.next if l1 else None
            l2 = l2.next if l2 else None
        headNode = headNode.next
        return headNode
python
# coding: utf-8
from mhw_armor_edit.ftypes import StructFile, Struct


class WpDatEntry(Struct):
    STRUCT_SIZE = 65
    id: "<I"
    unk1: "<H"
    base_model_id: "<H"
    part1_id: "<H"
    part2_id: "<H"
    color: "<B"
    tree_id: "<B"
    is_fixed_upgrade: "<B"
    crafting_cost: "<I"
    rarity: "<B"
    kire_id: "<B"
    handicraft: "<B"
    raw_damage: "<H"
    defense: "<H"
    affinity: "<b"
    element_id: "<B"
    element_damage: "<H"
    hidden_element_id: "<B"
    hidden_element_damage: "<H"
    elderseal: "<B"
    num_gem_slots: "<B"
    gem_slot1_lvl: "<B"
    gem_slot2_lvl: "<B"
    gem_slot3_lvl: "<B"
    wep1_id: "<H"
    wep2_id: "<H"
    unk2: "<I"
    unk3: "<I"
    unk4: "<I"
    tree_position: "<B"
    order: "<H"
    gmd_name_index: "<H"
    gmd_description_index: "<H"
    skill_id: "<H"
    unk5: "<H"


class WpDat(StructFile):
    EntryFactory = WpDatEntry
    MAGIC = 0x0186
python
# Generated by Django 3.2.4 on 2021-06-20 12:31 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('auth', '0012_alter_user_first_name_max_length'), ] operations = [ migrations.CreateModel( name='Token', fields=[ ('id', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='auth.user')), ('token', models.JSONField()), ], ), migrations.CreateModel( name='WalletIncome', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[('regular', 'Regular'), ('casual', 'Casual')], default='regular', max_length=32)), ('name', models.CharField(max_length=64)), ('value', models.DecimalField(decimal_places=2, max_digits=20)), ('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)), ('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)), ('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='WalletHouse', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=64)), ('value', models.DecimalField(decimal_places=3, max_digits=20)), ('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 
2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)), ('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='WalletExpense', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[('life', 'Life'), ('tickets', 'Ticket'), ('house rent', 'House rent')], default='life', max_length=32)), ('name', models.CharField(max_length=64)), ('value', models.DecimalField(decimal_places=2, max_digits=20)), ('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)), ('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)), ('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='WalletDeposit', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=64)), ('value', models.DecimalField(decimal_places=2, 
max_digits=20)), ('rate', models.DecimalField(decimal_places=2, max_digits=20)), ('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)), ('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)), ('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='WalletCredit', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=64)), ('value', models.DecimalField(decimal_places=2, max_digits=20)), ('rate', models.DecimalField(decimal_places=2, max_digits=20)), ('balance', models.DecimalField(decimal_places=2, max_digits=20)), ('interest', models.DecimalField(decimal_places=2, max_digits=20)), ('capital', models.DecimalField(decimal_places=2, max_digits=20)), ('insurance', models.DecimalField(decimal_places=2, max_digits=20)), ('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 
2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)), ('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)), ('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='WalletCar', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('car', models.CharField(max_length=64)), ('exploitation', models.IntegerField()), ('payment', models.DecimalField(decimal_places=2, max_digits=20)), ('refuelling', models.DecimalField(decimal_places=2, max_digits=20)), ('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)), ('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)), ('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='WalletAccount', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[('wallet', 'Wallet'), ('bank account', 'Bank account'), ('mobile account', 'Mobile account')], default='wallet', max_length=32)), ('name', models.CharField(max_length=64)), ('value', models.DecimalField(decimal_places=2, max_digits=20)), ('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 
2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)), ('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)), ('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='ReminderGroup', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('color', models.CharField(choices=[('red', 'Red'), ('orange', 'Orange'), ('blue', 'Blue'), ('green', 'Green'), ('black', 'Black'), ('grey', 'Grey'), ('brown', 'Brown'), ('yellow', 'Yellow'), ('magenta', 'Magenta')], default='orange', max_length=32)), ('name', models.CharField(max_length=64)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Reminder', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=64)), ('when', models.DateTimeField(blank=True, null=True)), ('repeat', models.CharField(blank=True, choices=[('1d', 'Every day'), ('7d', 'Every week'), ('14d', 'Every 2 weeks'), ('30d', 'Every 30 days'), ('1m', 'Every month'), ('2m', 'Every 2 months'), ('3m', 'Every 3 months'), ('1y', 'Every year')], max_length=32, null=True)), ('priority', models.CharField(choices=[('low', 'Low'), ('normal', 'Normal'), ('high', 'High')], default='normal', max_length=32)), ('modified', models.DateTimeField(auto_now=True)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.remindergroup')), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Log', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(choices=[('wallet', 'Wallet'), ('calendar', 'Calendar'), ('reminder', 'Reminder'), ('note', 'Note')], default='note', max_length=32)), ('info', models.CharField(max_length=64)), ('json', models.TextField(blank=True, max_length=512, 
null=True)), ('modified', models.DateTimeField(auto_now=True)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
python
from setuptools import setup
from os import path

with open('README.md') as f:
    long_description = f.read()

setup(
    name='itrcnt',
    # 'module' is not a setuptools keyword; py_modules is the correct way to
    # register a single-file module.
    py_modules=['itrcnt'],
    version='0.1.2',
    license='BSD',
    author='mao2009',
    url='https://github.com/mao2009/Python_Counter',
    description='Alternative for Range and Enumerator',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='range enumerator'
)
python
""" testing for agent's config """ import os import pytest import yaml from eha.agent.config import load @pytest.mark.parametrize('content, envs, result', ( ( """ foo: 123 bar: 234 """, {}, { 'foo': 123, 'bar': 234, } ), ( """ foo: 123 bar: 234 """, { 'EHA_AGENT_FOO': 'abc', 'EHA_AGENT_BAR': '234', }, { 'foo': 'abc', 'bar': '234', } ), )) def test_load(content, envs, result, mocker, monkeypatch): patched_open = mocker.mock_open(read_data=content) mocker.patch('builtins.open', patched_open) mocker.patch('os.path.isfile', bool) with monkeypatch.context() as patch: for key, value in envs.items(): patch.setenv(key, value) config = load() assert config == result
python
default_app_config = 'kolibri.content.apps.KolibriContentConfig'
python
import os

import pandas as pd


def read_parquet(data_path, num_partitions=None, random=False, verbose=True, columns=None):
    files = os.listdir(data_path)
    if random:
        import random
        random.shuffle(files)

    if num_partitions is None:
        num_partitions = len(files)

    data = []
    num_reads = 0
    for file_path in files:
        if num_reads >= num_partitions:
            break
        root, ext = os.path.splitext(file_path)
        # exclude non-parquet files (e.g. gitkeep, other folders)
        if ext == '.parquet':
            fp = os.path.join(data_path, file_path)
            if verbose:
                print('Reading in data from {}'.format(fp))
            data.append(pd.read_parquet(os.path.join(data_path, file_path), columns=columns))
            if verbose:
                print('Data of shape {}'.format(data[-1].shape))
            num_reads += 1
        else:
            continue

    data = pd.concat(data, axis=0)
    if verbose:
        print('Total dataframe of shape {}'.format(data.shape))

    return data


def feature_label_split(data, model_features, label='label', qid='qid'):
    # assumes data of same QIDs are grouped together
    X = data[model_features]
    y = data[label]
    qid = data[qid].value_counts(sort=False).sort_index()
    return X, y, qid
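

# Usage sketch (illustrative only; the directory layout and column names below
# are assumptions made for the example, not part of the module above).
if __name__ == '__main__':
    df = read_parquet('data/train', num_partitions=2, random=True, verbose=True)
    features = [c for c in df.columns if c not in ('label', 'qid')]
    X, y, qid_counts = feature_label_split(df, features, label='label', qid='qid')
    print(X.shape, y.shape, qid_counts.sum())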
python
#!/usr/bin/env python

import os
import sys

fn_read_keys = None
dn_sstable_keys = None

read_keys = []
key_sstgen = {}


def LoadReadKeys():
    global read_keys
    print "loading read keys from %s ..." % fn_read_keys
    with open(fn_read_keys) as fo:
        for line in fo.readlines():
            read_keys.append(line.strip().lower())


def LoadSSTableKeys():
    global dn_sstable_keys
    global key_sstgen
    print "loading sstable keys from %s ..." % dn_sstable_keys
    sst_gen = 0
    while True:
        sst_gen += 1
        fn = "%s/keys-%d" % (dn_sstable_keys, sst_gen)
        if not os.path.isfile(fn):
            break
        with open(fn) as fo:
            for line in fo.readlines():
                key = line.strip()
                if key not in key_sstgen:
                    key_sstgen[key] = []
                key_sstgen[key].append(sst_gen)
    print "len(key_sstgen)=%d" % len(key_sstgen)


def CheckDupKeys():
    print "Checking duplicate keys ..."
    for k, v in key_sstgen.iteritems():
        if len(v) > 1:
            print k, v


def CountReadsBySSTables():
    sstgen_readcnt_first = {}
    sstgen_readcnt_all = {}
    memtable_read_cnt = 0
    print "len(read_keys)=%d" % len(read_keys)
    for rk in read_keys:
        # If a read key is not in any of the sstables, it may be in the memtable
        if rk not in key_sstgen:
            memtable_read_cnt += 1
            continue
        # Get the youngest sstable, which is the last one in the list
        sstgen = key_sstgen[rk][-1]
        if sstgen not in sstgen_readcnt_first:
            sstgen_readcnt_first[sstgen] = 1
        else:
            sstgen_readcnt_first[sstgen] += 1
        for sstgen in key_sstgen[rk]:
            if sstgen not in sstgen_readcnt_all:
                sstgen_readcnt_all[sstgen] = 1
            else:
                sstgen_readcnt_all[sstgen] += 1
    print "memtable_read_cnt=%d" % memtable_read_cnt
    print "sstable_readcnt: sstgen first_hit all_hit:"
    for k, v in sorted(sstgen_readcnt_first.iteritems()):
        print "  %2d %6d %6d" % (k, v, sstgen_readcnt_all[k])


def main(argv):
    if len(argv) != 3:
        print "Usage: %s fn_read_keys dn_sstable_keys" % (argv[0])
        print "  E.g.: %s data/read-keys-15-11-26-18:15:55 ../check-keys-in-sstables/standard1-2d180380949311e5945a1d822de6a4f1" % (argv[0])
        sys.exit(1)

    global fn_read_keys
    global dn_sstable_keys
    fn_read_keys = argv[1]
    dn_sstable_keys = argv[2]

    LoadReadKeys()
    LoadSSTableKeys()
    #CheckDupKeys()
    CountReadsBySSTables()

    # Stopping when the max timestamp of a sstable is older than the current timestamp is not simulated.
    # We assume that bigger sstable gens have younger keys (records)


if __name__ == "__main__":
    sys.exit(main(sys.argv))
python
#
# Copyright (c) Sinergise, 2019 -- 2021.
#
# This file belongs to subproject "field-delineation" of project NIVA (www.niva4cap.eu).
# All rights reserved.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
#

from typing import Callable, List, Any
from concurrent.futures import ProcessPoolExecutor

from fs_s3fs import S3FS
from dataclasses import dataclass
from tqdm.auto import tqdm

from sentinelhub import SHConfig


@dataclass
class BaseConfig:
    bucket_name: str
    aws_access_key_id: str
    aws_secret_access_key: str
    aws_region: str


def prepare_filesystem(config: BaseConfig) -> S3FS:
    return S3FS(bucket_name=config.bucket_name,
                aws_access_key_id=config.aws_access_key_id,
                aws_secret_access_key=config.aws_secret_access_key,
                region=config.aws_region)


def set_sh_config(config: BaseConfig) -> SHConfig:
    """ Set AWS and SH credentials in SHConfig file to allow usage of download and io tasks """
    sh_config = SHConfig()

    sh_config.aws_access_key_id = config.aws_access_key_id
    sh_config.aws_secret_access_key = config.aws_secret_access_key

    if all(key in config.__annotations__.keys() for key in ['sh_client_id', 'sh_client_secret']):
        sh_config.sh_client_id = config.sh_client_id
        sh_config.sh_client_secret = config.sh_client_secret

    sh_config.save()

    return sh_config


def multiprocess(process_fun: Callable, arguments: List[Any], max_workers: int = 4) -> List[Any]:
    """
    Executes multiprocessing with tqdm.

    Parameters
    ----------
    process_fun: A function that processes a single item.
    arguments: Arguments with which the function is called.
    max_workers: Max workers for the process pool executor.

    Returns
    -------
    A list of results.
    """
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        results = list(tqdm(executor.map(process_fun, arguments), total=len(arguments)))
    return results
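

# Usage sketch for multiprocess() (illustrative only): the worker function and
# argument list below are assumptions made for the example, not part of the
# module above.
def _square(x: int) -> int:
    # Must be a top-level function so ProcessPoolExecutor can pickle it.
    return x * x


if __name__ == '__main__':
    # The __main__ guard matters here: spawn-based platforms re-import the module
    # in each worker process.
    print(multiprocess(_square, list(range(10)), max_workers=2))  # [0, 1, 4, ..., 81]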
python
# Copyright 2016
# Drewan Tech, LLC
# ALL RIGHTS RESERVED

db_user = 'web_service_admin'
db_password = 'web_service_admin'
db_host = 'postgres'
db_port = '5432'

users_to_manage = {'random_matrix': {'authorized_databases': ['matrix_database'],
                                     'password': 'random_matrix'},
                   'matrix_mult': {'authorized_databases': ['matrix_database'],
                                   'password': 'matrix_mult'}}
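

# Illustrative sketch only: one plausible way these constants might be consumed,
# e.g. building a SQLAlchemy-style connection URL per managed user. The URL
# format and the iteration over users_to_manage are assumptions, not part of
# the configuration file above.
def connection_url(user, password, database, host=db_host, port=db_port):
    return 'postgresql://{}:{}@{}:{}/{}'.format(user, password, host, port, database)


if __name__ == '__main__':
    for user, settings in users_to_manage.items():
        for database in settings['authorized_databases']:
            print(connection_url(user, settings['password'], database))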
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2014 Arulalan.T <[email protected]>
#
# This file is part of 'open-tamil/txt2ipa' package examples
#

import sys
sys.path.append("../..")

from tamil.txt2ipa.ipaconvert import ipa, broad
from tamil.txt2ipa.transliteration import tam2lat

text = "வணக்கம் தமிழகம் "

t1 = tam2lat(text)
t2 = " " + t1 + " "

t2 = ipa(t2)
t3 = broad(t2)

print("after tam2lat", t1)
print("after ipa", t2)
print("after broad", t3)
python
import tkinter as tk
import tkinter.messagebox as msg
import socket
import configparser
import threading
import time
import os


def warning(message):
    msg.showwarning("Предупреждение", message)


def error(message, error=None):
    msg.showerror("Ошибка", message)
    print(error)


class Server(socket.socket):
    def __init__(self, host, port, max_connections, warning_disconnect=0, warning_connection_attempt=0):
        self.connections = []
        self.max_connections = max_connections
        self.warning_disconnect = warning_disconnect
        self.warning_connection_attempt = warning_connection_attempt
        self.address = host, port

        try:
            super().__init__(socket.AF_INET, socket.SOCK_STREAM)
            self.bind(self.address)
            self.listen(max_connections)
        except OSError as e:
            error(f"{e}\n{':'.join(map(str, self.address))} - Этот адрес уже используется или IP не действительный\nПопробуйте сменить порт или IP")
            print(f"{e}\n{':'.join(map(str, self.address))} - Этот адрес уже используется или IP не действительный")

    def start(self):
        while True:
            connection, sockname = self.accept()
            if len(self.connections) + 1 > self.max_connections:
                connection.close()
                if self.warning_connection_attempt:
                    warning(f"Попытка подключения, превышающее макс. кол-во подключений\nID({sockname[1]})")
                continue
            self.connections.append((connection, sockname[1]))
            print(f"\nID({sockname[1]}) подключился")

    def send_data(self, data):
        for connection in self.connections:
            try:
                connection[0].send(data.encode("utf-8"))
            except ConnectionError as e:
                print(f"\n{e}\nID({connection[1]}) закрыл соединение")
                self.connections.remove(connection)
                if self.warning_disconnect:
                    warning(f"ID({connection[1]}) закрыл соединение")
                continue


class App:
    def __init__(self):
        self.buffer_text = None

        self.config = configparser.ConfigParser()
        self.config.read(os.path.split(__file__)[0] + "/settings.conf")
        try:
            self.SHOW_WINDOW = int(self.config["APP"]["SHOW_WINDOW"])
            self.SHOW_DATA = int(self.config["APP"]["SHOW_DATA"])
            self.SERVER_PORT = int(self.config["SERVER"]["SERVER_PORT"])
            self.SERVER_HOST = self.config["SERVER"]["SERVER_HOST"]
            self.MAX_CONNECTIONS = int(self.config["CONNECTIONS"]["MAX_CONNECTIONS"])
            self.STUDENT_DISCONNECTION = int(self.config["SHOW_WARNINGS"]["STUDENT_DISCONNECTION"])
            self.CONNECTION_ATTEMPT = int(self.config["SHOW_WARNINGS"]["CONNECTION_ATTEMPT"])
        except KeyError as e:
            error("Неправильно составлен или отсутствует файл settings.conf", error=e)
        except ValueError as e:
            error("Неправильное значение параметров в файле settings.conf", error=e)

        self.server = Server(
            self.SERVER_HOST,
            self.SERVER_PORT,
            max_connections=self.MAX_CONNECTIONS,
            warning_disconnect=self.STUDENT_DISCONNECTION,
            warning_connection_attempt=self.CONNECTION_ATTEMPT
        )

        self.root = tk.Tk()
        self.root.withdraw()
        self.root.resizable(False, False)
        if self.SHOW_WINDOW:
            self.root.deiconify()
            self.root.title("App")
            self.root.geometry(f"200x{100*self.SHOW_DATA}")
            self.root.wm_attributes("-topmost", True)
        if self.SHOW_DATA:
            self.root.resizable(True, True)
            self.text_data = tk.Text(self.root, text=self.buffer_text, state="disabled")
            self.text_data.config(bd=0, highlightthickness=0)
            self.text_data.pack(expand=True, fill="both")

        threading.Thread(target=self.check_buffer, daemon=True).start()
        threading.Thread(target=self.server.start, daemon=True).start()

        self.root.mainloop()

    def check_buffer(self):
        while True:
            time.sleep(0.1)
            try:
                self.server.send_data("¤")
                clipboard_text = self.root.clipboard_get()
                if clipboard_text != self.buffer_text:
                    self.buffer_text = clipboard_text
                    self._send_buffer()
            except tk.TclError:
                continue

    def _send_buffer(self):
        self.buffer_text = self.root.clipboard_get()
        self.server.send_data(self.buffer_text)
        if self.SHOW_DATA:
            self._show_data()

    def _show_data(self):
        self.text_data.configure(state="normal")
        self.text_data.delete(1.0, "end")
        self.text_data.insert(1.0, self.buffer_text)
        self.text_data.configure(state="disabled")


if __name__ == "__main__":
    try:
        App()
    except KeyboardInterrupt:
        print("\nПриложение принудительно остановлено")
    except Exception as e:
        print(f"Ошибка: {e}")
python
import pytest

from text_normalizer.tokenization import replace_bigrams


@pytest.mark.benchmark(group='ivr_convert')
def test_benchmark_replace_synonyms(benchmark, tokenize, benchmark_text):
    tokens = list(tokenize(benchmark_text))
    benchmark(lambda: list(replace_bigrams(tokens)))
python
from product import product
from company import company
from pathlib import Path

# Loading products info
products = []
products_list_file = open(str(Path(__file__).resolve().parent) + "/products_list.txt", "r")
for p in products_list_file:
    p = p.replace("\n", "")
    p = p.split(",")
    products.append(product(p[0], float(p[1]), float(p[2])))

# Loading companies info
companies = []
companies_list_file = open(str(Path(__file__).resolve().parent) + "/companies_list.txt", "r")
for c in companies_list_file:
    c = c.replace("\n", "")
    c = c.split(",")
    if len(c) == 6:
        companies.append(company(c[0], float(c[1]), float(c[2]), float(c[3]), float(c[4]), float(c[5])))
    else:
        companies.append(company(c[0], float(c[1]), float(c[2])))

# Showing results
for p in products:
    print("Product info:")
    print("Name: {}".format(p.get_name()))
    print("Distance: {:.2f}".format(p.get_distance()))
    print("Weight: {:.2f}".format(p.get_weight()))
    print()
    print("Budgets:")
    for c in companies:
        print("{}: R$ {:.2f}".format(c.get_name(), c.calculate_budget(p)))
    print("---")
python
# Title: Trapping Rain Water
# Link: https://leetcode.com/problems/trapping-rain-water/

import sys
from heapq import heappop, heappush

sys.setrecursionlimit(10 ** 6)


class Solution():
    def trap(self, heights: list) -> int:
        water = 0
        walls = []
        for i, height in enumerate(heights):
            last_level = 0
            while walls:
                left_height, left_index = heappop(walls)
                if left_height <= height:
                    water += (i - left_index - 1) * (left_height - last_level)
                    last_level = left_height
                else:
                    water += (i - left_index - 1) * (height - last_level)
                    heappush(walls, (left_height, left_index))
                    break
            heappush(walls, (height, i))
        return water


def main():
    solution = Solution()
    height = [4, 2, 0, 3, 2, 5]
    print(solution.trap(height))


if __name__ == '__main__':
    main()
python
from __future__ import unicode_literals

from django.contrib import admin
from authtools.admin import NamedUserAdmin
from .models import Profile, TokenFirebase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from import_export.admin import ImportExportModelAdmin
from import_export import resources

User = get_user_model()


class UserProfileInline(admin.StackedInline):
    model = Profile
    #model = Persona


class NewUserAdmin(NamedUserAdmin):
    inlines = [UserProfileInline]
    list_display = ('is_active', 'email', 'name', 'permalink',
                    'is_superuser', 'is_staff',)

    # 'View on site' didn't work since the original User model needs to
    # have get_absolute_url defined. So showing on the list display
    # was a workaround.
    def permalink(self, obj):
        url = reverse("profiles:show", kwargs={"slug": obj.profile.slug})
        # Unicode hex b6 is the Pilcrow sign
        return '<a href="{}">{}</a>'.format(url, '\xb6')
    permalink.allow_tags = True


class ProfileResource(resources.ModelResource):
    class Meta:
        model = Profile
        exclude = ('id',)
        import_id_fields = ('id_persona',)
        skip_unchanged = True
        fields = ['id_persona', 'email_verified', 'nombre', 'segundo_nombre',
                  'apellido_pa', 'apellido_ma', 'tipo_documento', 'numero_documento',
                  'sexo', 'correo', 'fecha_nac']


class ProfileAdmin(ImportExportModelAdmin):
    resource_class = ProfileResource


admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(TokenFirebase)
python
from decimal import Decimal from django.apps import apps from rest_framework import serializers from rest_flex_fields import FlexFieldsModelSerializer from ....checkout.utils import get_taxes_for_checkout from ....glovo.utils import glovo_get_lowest_price from ....runningbox.utils import runningbox_order_estimate from ...fields import MoneyField, TaxedMoneyField from ..shipping_method import ShippingMethodSerializer from .checkout_line import CheckoutLineSerializer from .glovo_order import GlovoOrderSerializer from .runningbox_order import RunningBoxOrderSerializer __all__ = [ 'CheckoutSerializer', ] Checkout = apps.get_model(*'checkout.Checkout'.split()) CheckoutLine = apps.get_model(*'checkout.CheckoutLine'.split()) Address = apps.get_model(*'account.Address'.split()) ShippingMethod = apps.get_model(*'shipping.ShippingMethod'.split()) PhysicalStore = apps.get_model(*'store.PhysicalStore'.split()) GlovoOrder = apps.get_model(*'glovo.GlovoOrder'.split()) RunningBoxOrder = apps.get_model(*'runningbox.RunningBoxOrder'.split()) class CheckoutSerializer(FlexFieldsModelSerializer): """Serializer for :model:`checkout.Checkout`: `**Fields:**` 01. `billing_address` : `ForeignKey` [:model:`account.Address`] 02. `created` : `DateTimeField` 03. `discount_amount` : `DecimalField` 04. `discount_name` : `CharField` 05. `email` : `CharField` 06. `last_change` : `DateTimeField` 07. `note` : `TextField` 08. `quantity` : `PositiveIntegerField` 09. `shipping_address` : `ForeignKey` [:model:`account.Address`] 10. `shipping_method` : `ForeignKey` [:model:`shipping.ShippingMethod`] 11. `token` : `UUIDField` 12. `translated_discount_name` : `CharField` 13. `user` : `ForeignKey` [:model:`account.User`] 14. `voucher_code` : `CharField` **Reverse Fields:** 01. `lines` : `ForeignKey` [:model:`checkout.CheckoutLine`] 02. 
`payments` : `ForeignKey` [:model:`payment.Payment`] """ lines = serializers.PrimaryKeyRelatedField( queryset=CheckoutLine.objects.all(), allow_null=False, required=False, many=True, ) shipping_address = serializers.PrimaryKeyRelatedField( queryset=Address.objects.all(), allow_null=True, required=True ) billing_address = serializers.PrimaryKeyRelatedField( queryset=Address.objects.all(), allow_null=True, required=False ) glovo_order = serializers.PrimaryKeyRelatedField( queryset=GlovoOrder.objects.all(), allow_null=True, required=False ) runningbox_order = serializers.PrimaryKeyRelatedField( queryset=RunningBoxOrder.objects.all(), allow_null=True, required=False ) discount_amount = MoneyField() total = serializers.SerializerMethodField() subtotal = serializers.SerializerMethodField() shipping_price = serializers.SerializerMethodField() applicable_shipping_methods = serializers.SerializerMethodField() expandable_fields = { 'lines': ( CheckoutLineSerializer, { 'fields': [ 'id', 'quantity', 'variant', ], 'many': True } ), 'shipping_method': ( ShippingMethodSerializer, { 'fields': [ 'id', 'name', 'price', ] } ), 'glovo_order': ( GlovoOrderSerializer, { 'fields': [ 'id', 'price', ] } ), 'runningbox_order': ( RunningBoxOrder, { 'fields': [ 'id', 'price', ] } ) } class Meta: model = Checkout fields = [ # Fields 'token', 'created', 'user', 'email', 'quantity', 'voucher_code', 'discount_name', 'discount_amount', 'shipping_type', 'shipping_address', 'shipping_method', 'billing_type', 'billing_address', 'note', # 'last_change', # 'translated_discount_name', # Reverse Fields 'lines', # 'payments', 'glovo_order', 'runningbox_order', # other fields 'subtotal', 'total', 'shipping_price', 'applicable_shipping_methods', ] read_only_fields = [] # def create(self, validated_data): # return super().create(validated_data) # def update(self, instance, validated_data): # return super().update(instance, validated_data) def get_subtotal(self, obj): discounts = None taxes = None context = self.context.get('request', None) if context is not None: discounts = context.discounts taxes = context.taxes subtotal = obj.get_subtotal(discounts, taxes) return TaxedMoneyField().to_representation(subtotal) def get_shipping_price(self, obj): taxes = None context = self.context.get('request', None) if context is not None: taxes = context.taxes shipping_price = obj.get_shipping_price(taxes) return TaxedMoneyField().to_representation(shipping_price) def get_total(self, obj): discounts = None taxes = None context = self.context.get('request', None) if context is not None: discounts = context.discounts taxes = context.taxes total = obj.get_total(discounts, taxes) return TaxedMoneyField().to_representation(total) def get_applicable_shipping_methods(self, obj): if obj.shipping_address is None: return None request = self.context.get('request', None) discounts = None taxes = None if request is None: discounts = request.discounts taxes = get_taxes_for_checkout(obj, request.taxes) # country_code = obj.shipping_address.country.code shpping_methods = ShippingMethod.objects.applicable_shipping_methods( price=obj.get_subtotal(discounts, taxes).gross, weight=obj.get_total_weight(), address=obj.shipping_address ) shpping_methods = ShippingMethodSerializer(shpping_methods, many=True) shpping_methods = shpping_methods.data stores = PhysicalStore.objects.filter( glovo_delivery_permission__glovo_enabled=True) if stores.exists(): if getattr(obj.shipping_address, 'position', None): glovo_shipping_method = glovo_get_lowest_price( stores, 
obj.shipping_address) if glovo_shipping_method is not None: glovo_shipping_method['price']['amount'] = Decimal( str(glovo_shipping_method['price']['amount'] / 100)) glovo_shipping_method['name'] = 'Glovo' glovo_shipping_method['id'] = 'shipping-with-glovo' shpping_methods.append(glovo_shipping_method) stores = PhysicalStore.objects.filter( runningbox_delivery_permission__runningbox_enabled=True) if stores.exists(): if getattr(obj.shipping_address, 'ubigeo', None): runningbox_shipping_method = runningbox_order_estimate( obj.get_total_weight().value, obj.shipping_address.ubigeo, 'EXPRESS' ) if runningbox_shipping_method is not None: runningbox_shipping_method['name'] = 'RunningBox' runningbox_shipping_method['id'] = 'shipping-with-runningbox' shpping_methods.append(runningbox_shipping_method) return shpping_methods
python
import requests
import json
import re


class RestApi(object):
    # base_url example http://aaa.co.com/webhdfs
    def __init__(self, base_url, username, password):
        self.name = "nhso core api" + base_url
        self.base_url = base_url
        self.username = username
        self.password = password
        self.token = ""

    def __check_over_retry(self, retry):
        if retry < 0:
            raise Exception("Retry lost")

    # On error, try again up to `retry` times
    def __request_retry(self, http_method, url, headers, data, retry=5):
        try:
            return requests.request(http_method, url, headers=headers, data=data)
        except Exception as ex:
            if retry <= 0:
                raise ex
            else:
                print("Req retry " + str(retry) + " " + url)
                return self.__request_retry(http_method, url, headers, data, retry - 1)

    # On error, try again up to `retry` times
    def __request_retry_file(self, http_method, url, headers, data, file, retry=5):
        try:
            return requests.request(
                http_method, url, headers=headers, data=data, files=file
            )
        except Exception as ex:
            if retry <= 0:
                raise ex
            else:
                print("Req retry " + str(retry) + " " + url)
                return self.__request_retry_file(
                    http_method, url, headers, data, file, retry - 1
                )

    # Request a token for the configured user
    # return token
    def __auth_core(self):
        api_url = self.base_url + "/auth-jwt"
        print(api_url)
        payload = json.dumps({"username": self.username, "password": self.password})
        headers = {"Content-Type": "application/json"}
        response = self.__request_retry("POST", api_url, headers=headers, data=payload)
        status = response.status_code
        if status == 200:
            token = response.json()["token"]
            return token
        else:
            raise Exception(api_url + " code " + str(status))

    # Check whether the token is still usable
    # return bool
    def __verify_token_core(self):
        api_url = self.base_url + "/auth-jwt-verify"
        payload = json.dumps({"token": self.token})
        headers = {"Content-Type": "application/json"}
        response = self.__request_retry("POST", api_url, headers=headers, data=payload)
        status = response.status_code
        print(api_url + " status code " + str(status))
        if status == 200:
            return True
        else:
            return False

    # Verify the current token first; if verification fails, request a new token
    def __auth(self):
        verify = self.__verify_token_core()
        if verify == False:
            self.token = self.__auth_core()

    # List files in a directory
    def __list_file(self, dir_parth, retry=3):
        self.__check_over_retry(retry)
        api_url = self.base_url + "/v1/" + dir_parth + "?op=LISTSTATUS"
        print(api_url + " deep:" + str(retry))
        payload = {}
        headers = {"Authorization": "JWT " + self.token}
        response = self.__request_retry("GET", api_url, headers=headers, data=payload)
        status = response.status_code
        if status == 200:
            return response.json()
        elif status == 401:
            self.__auth()
            return self.__list_file(dir_parth, retry - 1)
        else:
            raise Exception(api_url + " code " + str(status))

    def list_file(self, dir_parth):
        return self.__list_file(dir_parth, 5)

    # Create directories like `mkdir -p`; no return value
    def __mkdirs(self, dir_parth, retry=3):
        self.__check_over_retry(retry)
        api_url = self.base_url + "/v1/" + dir_parth + "?op=MKDIRS"
        print(api_url + " deep:" + str(retry))
        payload = {}
        headers = {"Authorization": "JWT " + self.token}
        response = self.__request_retry("PUT", api_url, headers=headers, data=payload)
        status = response.status_code
        # if status != 200:
        #     raise Exception(api_url + " code " + str(status))
        if status == 401:
            self.__auth()
            self.__mkdirs(dir_parth, retry - 1)

    def mkdirs(self, dir_parth):
        self.__mkdirs(dir_parth, 5)

    # Does the given file or directory exist?
    def exists(self, dir_or_file_parth):
        print("call Check exists file")
        try:
            self.list_file(dir_or_file_parth)
            print("Check exists file true")
            return True
        except:
            print("Check exists file false")
            return False

    def __move_file_and_rename(self, source_path, destination_path, retry=3):
        self.__check_over_retry(retry)
        api_url = (
            self.base_url
            + "/v1/"
            + source_path
            + "?op=RENAME&destination=/"
            + destination_path
        )
        print(api_url + " deep:" + str(retry))
        payload = {}
        headers = {"Authorization": "JWT " + self.token}
        response = self.__request_retry("PUT", api_url, headers=headers, data=payload)
        status = response.status_code
        if status == 401:
            self.__auth()
            self.__move_file_and_rename(source_path, destination_path, retry - 1)

    def move_file_and_rename(self, source_path, destination_path):
        self.__move_file_and_rename(source_path, destination_path, 5)

    def __delete(self, dir_or_file_parth, retry=3):
        self.__check_over_retry(retry)
        api_url = self.base_url + "/v1/" + dir_or_file_parth + "?op=DELETE"
        print(api_url + " deep:" + str(retry))
        payload = {}
        headers = {"Authorization": "JWT " + self.token}
        response = self.__request_retry(
            "DELETE", api_url, headers=headers, data=payload
        )
        status = response.status_code
        if status == 401:
            # 401 Unauthorized
            self.__auth()
            self.__delete(dir_or_file_parth, retry - 1)
        elif status == 500:
            # 500: a file or folder still exists inside, so it cannot be deleted
            raise Exception(api_url + " code " + str(status))
        elif status == 200:
            # No such file: 200 with {"boolean": false}
            pass

    def delete(self, dir_or_file_parth):
        self.__delete(dir_or_file_parth)

    # Extract the file name from a full path
    def __get_file_name(self, full_parth):
        p = re.compile("/?.+/(.+)$")
        return p.match(full_parth).groups()[0]

    # Upload a file
    def __upload_and_overwrite(self, local_file_path, nhso_file_path, retry=3):
        self.__check_over_retry(retry)
        self.__auth()  # re-auth up front, because this is a slow call
        api_url = self.base_url + "/v1/" + nhso_file_path + "?op=CREATE"
        print(api_url + " deep:" + str(retry))
        filename = self.__get_file_name(local_file_path)
        payload = {}
        headers = {"Authorization": "JWT " + self.token}
        files = [
            (
                "file",
                (
                    filename,
                    open(local_file_path, "rb"),
                    "application/octet-stream",
                ),
            )
        ]
        response = self.__request_retry_file(
            "PUT", api_url, headers=headers, data=payload, file=files
        )
        status = response.status_code
        if status == 401:
            # 401 Unauthorized
            self.__auth()
            self.__upload_and_overwrite(local_file_path, nhso_file_path, retry - 1)

    def upload_and_overwrite(self, local_file_path, nhso_file_path):
        self.__upload_and_overwrite(local_file_path, nhso_file_path, 3)
python
import threading import time import socket import sys import copy import pprint pp = pprint.PrettyPrinter(indent=2) # global variables turn = 1 convergence = 0 round = 1 update_occured = 0 nodes = { "0" : {"name": "A", "index": 0, "port": 10000, "update": 1}, "1" : {"name": "B", "index": 1, "port": 10001, "update": 1}, "2" : {"name": "C", "index": 2, "port": 10002, "update": 1}, "3" : {"name": "D", "index": 3, "port": 10003, "update": 1}, "4" : {"name": "E", "index": 4, "port": 10004, "update": 1} } updates = [1,1,1,1,1] final_output = {"0":None, "1":None, "2":None, "3":None, "4":None} final_round = 0 def set_next(next): global turn turn = next def set_unset_update(value): global update_occured update_occured = value def server_thread_task(port, old_dv_matrix, updated_dv_matrix, node_index): global round global final_round # Create a TCP/IP socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Bind the socket to the port server_address = ('localhost', port) #print('node %d server thread : starting up on %s port %s' % (node_index, server_address[0], server_address[1])) sock.bind(server_address) # Listen for incoming connections sock.listen(1) last_updated_dv_matrix = copy.deepcopy(updated_dv_matrix) while True: connection, client_address = sock.accept() try: while True: data = connection.recv(1024) if data: message = data.decode() from_node_index = int(message.split(":")[0]) received_dv_estimate = message.split(":")[1].split(",") receiver = nodes[str(node_index)]["name"] sender = nodes[str(from_node_index)]["name"] print("Node %s received DV from %s" % (receiver, sender)) for i in range(len(received_dv_estimate)): received_dv_estimate[i] = int(received_dv_estimate[i]) #------------update neighbor's row------------------- updated_dv_matrix[from_node_index] = received_dv_estimate #print(updated_dv_matrix[from_node_index]) #------------recalculate own dv estimate------------- self_row = updated_dv_matrix[node_index] for i in range(len(self_row)): if(i != node_index): new_value = updated_dv_matrix[from_node_index][node_index] + updated_dv_matrix[from_node_index][i] existing_value = updated_dv_matrix[node_index][i] updated_dv_matrix[node_index][i] = min(new_value, existing_value) #----------check if DV estimate is different--------- if(updated_dv_matrix[node_index] == last_updated_dv_matrix[node_index]): print("No change in DV at node %s" % (receiver)) else: updates[node_index] = 1 print("Updating DV matrix at node %s" % (receiver)) print("New DV matrix at node %s = " % (receiver)) pp.pprint(updated_dv_matrix) last_updated_dv_matrix = copy.deepcopy(updated_dv_matrix) final_round = round #-----------sending data back to the client---------- connection.sendall(data) else: break finally: # Clean up the connection connection.close() def send_dv_to_neighbor(neighbor_name, port, fromNodeIndex, message): # Create a TCP/IP socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Connect the socket to the port where the server is listening server_address = ('localhost', port) #print('connecting to %s port %s' % server_address) sock.connect(server_address) try: # Send data print("\nSending DV to node %s" % (neighbor_name)) sock.sendall(message.encode()) # response from server response = sock.recv(1024).decode() #print('received "%s"' % response) finally: #print('closing socket') sock.close() #print(response) return response def init_nxn_matrix(n): initial_nxn_matrix = [] for i in range(n): row = [] for j in range(n): row.append(999) initial_nxn_matrix.append(row) return 
initial_nxn_matrix def populate_nxn_matrix(updated_dv_matrix, node_index, neighbor_info): for i in range(len(updated_dv_matrix[node_index])): if(i in neighbor_info.keys()): updated_dv_matrix[node_index][i] = neighbor_info[i] updated_dv_matrix[node_index][node_index] = 0 def create_server_thread(port, old_dv_matrix, updated_dv_matrix, node_index): server_thread = threading.Thread(target=server_thread_task, args=(port, old_dv_matrix, updated_dv_matrix, node_index), daemon=True) server_thread.start() time.sleep(2) def create_dv_msg(updated_dv_matrix, node_index): weight_list = [] for value in updated_dv_matrix[node_index]: weight_list.append(str(value)) delimeter = "," dv_msg = str(node_index) + ":" + delimeter.join(weight_list) return dv_msg def send_update(node_index, neighbor_info, updated_dv_matrix): dv_msg = create_dv_msg(updated_dv_matrix, node_index) neighbors = [] for key in neighbor_info.keys(): neighbors.append(key) neighbors.sort() #--------------- bigger = [] smaller = [] for value in neighbors: if value > node_index: bigger.append(value) else: smaller.append(value) neighbors_list = bigger + smaller #--------------- i = 0 while i < len(neighbors_list): neighbor_port = nodes[str(neighbors_list[i])]["port"] neighbor_name = nodes[str(neighbors_list[i])]["name"] response = send_dv_to_neighbor(neighbor_name, neighbor_port, node_index, dv_msg) if(response): i += 1 def print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round): print("---------------------------------------------------------------------") print("Round %d : %s" % (round, nodes[str(node_index)]["name"])) #print("Current DV matrix = ", str(updated_dv_matrix)) print("Current DV matrix = ") pp.pprint(updated_dv_matrix) #print("Last DV matrix = ", str(old_dv_matrix)) print("Last DV matrix = ") pp.pprint(old_dv_matrix) def node_thread(lock, node_index, port, neighbor_info): global turn global convergence global round global final_round print("node %s started" % nodes[str(node_index)]["name"]) #initialize nxn matrix old_dv_matrix = init_nxn_matrix(5) #populate nxn matrix with neighbor info populate_nxn_matrix(old_dv_matrix, node_index, neighbor_info) updated_dv_matrix = copy.deepcopy(old_dv_matrix) #--------------server thread------------------------- create_server_thread(port, old_dv_matrix, updated_dv_matrix, node_index) #--------------server thread------------------------- while True: lock.acquire() if(turn == 1 and node_index == 0): if(convergence == 1): #print('nothing to do %s' % nodes[str(node_index)]["name"]) set_next(2) final_output["0"] = updated_dv_matrix lock.release() break print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round) if(updates[node_index] == 1): print("Updated from last DV matrix or the same? Updated") #------------send data to neighbors---------------- send_update(node_index, neighbor_info, updated_dv_matrix) #------------update 'update' flag------------------ updates[node_index] = 0 #-----------update the old dv---------------------- old_dv_matrix = copy.deepcopy(updated_dv_matrix) else: print("Updated from last DV matrix or the same? 
Same") if(sum(updates) == 0): convergence = 1 set_next(2) final_output["0"] = updated_dv_matrix lock.release() break else: set_next(2) round += 1 time.sleep(1) if(turn == 2 and node_index == 1): if(convergence == 1): #print('nothing to do %s' % nodes[str(node_index)]["name"]) set_next(3) final_output["1"] = updated_dv_matrix lock.release() break print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round) if(updates[node_index] == 1): print("Updated from last DV matrix or the same? Updated") #------------send data to neighbors---------------- send_update(node_index, neighbor_info, updated_dv_matrix) #------------update 'update' flag------------------ updates[node_index] = 0 #-----------update the old dv---------------------- old_dv_matrix = copy.deepcopy(updated_dv_matrix) else: print("Updated from last DV matrix or the same? Same") if(sum(updates) == 0): convergence = 1 set_next(3) final_output["1"] = updated_dv_matrix lock.release() break else: set_next(3) round += 1 time.sleep(1) if(turn == 3 and node_index == 2): if(convergence == 1): #print('nothing to do %s' % nodes[str(node_index)]["name"]) set_next(4) final_output["2"] = updated_dv_matrix lock.release() break print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round) if(updates[node_index] == 1): print("Updated from last DV matrix or the same? Updated") #------------send data to neighbors---------------- send_update(node_index, neighbor_info, updated_dv_matrix) #------------update 'update' flag------------------ updates[node_index] = 0 #-----------update the old dv---------------------- old_dv_matrix = copy.deepcopy(updated_dv_matrix) else: print("Updated from last DV matrix or the same? Same") if(sum(updates) == 0): convergence = 1 set_next(4) final_output["2"] = updated_dv_matrix lock.release() break else: set_next(4) round += 1 time.sleep(1) if(turn == 4 and node_index == 3): if(convergence == 1): #print('nothing to do %s' % nodes[str(node_index)]["name"]) set_next(5) final_output["3"] = updated_dv_matrix lock.release() break print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round) if(updates[node_index] == 1): print("Updated from last DV matrix or the same? Updated") #------------send data to neighbors---------------- send_update(node_index, neighbor_info, updated_dv_matrix) #------------update 'update' flag------------------ updates[node_index] = 0 #-----------update the old dv---------------------- old_dv_matrix = copy.deepcopy(updated_dv_matrix) else: print("Updated from last DV matrix or the same? Same") if(sum(updates) == 0): convergence = 1 set_next(5) final_output["3"] = updated_dv_matrix lock.release() break else: set_next(5) round += 1 time.sleep(1) if(turn == 5 and node_index == 4): if(convergence == 1): #print('nothing to do %s' % nodes[str(node_index)]["name"]) set_next(1) final_output["4"] = updated_dv_matrix lock.release() break print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round) if(updates[node_index] == 1): print("Updated from last DV matrix or the same? Updated") #------------send data to neighbors---------------- send_update(node_index, neighbor_info, updated_dv_matrix) #------------update 'update' flag------------------ updates[node_index] = 0 #-----------update the old dv---------------------- old_dv_matrix = copy.deepcopy(updated_dv_matrix) else: print("Updated from last DV matrix or the same? 
Same") if(sum(updates) == 0): convergence = 1 set_next(1) final_output["4"] = updated_dv_matrix lock.release() break else: set_next(1) round += 1 time.sleep(1) lock.release() def get_adjacency_matrix(): file_name = "network1.txt" adjacency_matrix = [] with open(file_name) as fp: lines = fp.readlines() for line in lines: row = line.strip().split() for i in range(len(row)): row[i] = int(row[i]) adjacency_matrix.append(row) return adjacency_matrix def get_neighbor_info_list(adjacency_matrix): neighbor_info_list = [] for node_index in range(len(adjacency_matrix)): neighbor_info = {} for adj_node_index in range(len(adjacency_matrix[node_index])): if(adjacency_matrix[node_index][adj_node_index] != 0): neighbor_info[adj_node_index] = adjacency_matrix[node_index][adj_node_index] neighbor_info_list.append(neighbor_info) return neighbor_info_list def main_task(): #adjacency matrix & neighbor info adjacency_matrix= get_adjacency_matrix() neighbor_info_list = get_neighbor_info_list(adjacency_matrix) #print(neighbor_info_list) #node index nodeA_index = nodes["0"]["index"] nodeB_index = nodes["1"]["index"] nodeC_index = nodes["2"]["index"] nodeD_index = nodes["3"]["index"] nodeE_index = nodes["4"]["index"] #node ports nodeA_port = nodes["0"]["port"] nodeB_port = nodes["1"]["port"] nodeC_port = nodes["2"]["port"] nodeD_port = nodes["3"]["port"] nodeE_port = nodes["4"]["port"] # creating a lock lock = threading.Lock() # creating threads thread_A = threading.Thread(target=node_thread, args=(lock, nodeA_index, nodeA_port, neighbor_info_list[0])) thread_B = threading.Thread(target=node_thread, args=(lock, nodeB_index, nodeB_port, neighbor_info_list[1])) thread_C = threading.Thread(target=node_thread, args=(lock, nodeC_index, nodeC_port, neighbor_info_list[2])) thread_D = threading.Thread(target=node_thread, args=(lock, nodeD_index, nodeD_port, neighbor_info_list[3])) thread_E = threading.Thread(target=node_thread, args=(lock, nodeE_index, nodeE_port, neighbor_info_list[4])) # start threads thread_A.start() thread_B.start() thread_C.start() thread_D.start() thread_E.start() # wait until threads finish their job thread_A.join() thread_B.join() thread_C.join() thread_D.join() thread_E.join() #final output print("---------------------------------------------------------------------") print("Final output: \n") print("Node A DV = ") pp.pprint(final_output["0"]) print("Node B DV = ") pp.pprint(final_output["1"]) print("Node C DV = ") pp.pprint(final_output["2"]) print("Node D DV = ") pp.pprint(final_output["3"]) print("Node E DV = ") pp.pprint(final_output["4"]) print("\nNumber of rounds till convergence (Round # when one of the nodes last updated its DV) = %d" % (final_round)) if __name__ == "__main__": main_task()
python
#!/usr/bin/env python

"""Setup script for the package."""

import os
import sys

import setuptools

PACKAGE_NAME = 'api'
MINIMUM_PYTHON_VERSION = 3, 6


def check_python_version():
    """Exit when the Python version is too low."""
    if sys.version_info < MINIMUM_PYTHON_VERSION:
        sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))


def read_package_variable(key):
    """Read the value of a variable from the package without importing."""
    module_path = os.path.join(PACKAGE_NAME, '__init__.py')
    with open(module_path) as module:
        for line in module:
            parts = line.strip().split(' ')
            if parts and parts[0] == key:
                return parts[-1].strip("'")
    assert 0, "'{0}' not found in '{1}'".format(key, module_path)


def read_descriptions():
    """Build a description for the project from documentation files."""
    try:
        readme = open("README.rst").read()
        changelog = open("CHANGELOG.rst").read()
    except IOError:
        return "<placeholder>"
    else:
        return readme + '\n' + changelog


check_python_version()

setuptools.setup(
    name=read_package_variable('__project__'),
    version=read_package_variable('__version__'),

    description="A place to track your code coverage metrics.",
    url='https://github.com/jacebrowning/coverage-space',
    author='Jace Browning',
    author_email='[email protected]',

    packages=setuptools.find_packages(),

    entry_points={'console_scripts': []},

    long_description=read_descriptions(),
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Software Development :: Testing',
    ],
)
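# A sketch of the lines read_package_variable() above looks for in
# api/__init__.py -- the parser splits each line on single spaces and strips
# quotes, so the dunder assignments must use exactly this form. The values
# shown here are placeholders, not the project's real metadata:
#
#     __project__ = 'example-api'
#     __version__ = '0.0.0'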
python
# Back compatibility -- use broad subdirectory for new code
from bcbio.broad.metrics import *
python
import copy

import torch
import numpy as np
from PIL import Image
from torchvision import transforms


class BlackBoxAttack(object):

    MEAN = np.array([0.485, 0.456, 0.406])
    STD = np.array([0.229, 0.224, 0.225])

    def __init__(self, model, input_size=224, epsilon=16,
                 num_iters=10000, early_stopping=False,
                 use_cuda=False, random_state=None):
        '''__INIT__

        reference:
        Guo C, Gardner J R, You Y, et al. Simple black-box adversarial attacks[J].
        arXiv preprint arXiv:1905.07121, 2019.

        model: model instance or list of model instances
        input_size: int, size of input tensor to model
        epsilon: int, limit on the perturbation size
        num_iters: int, number of iterations
        early_stopping: bool, if True, stop at once if adversarial image has been found
        use_cuda: bool, True or False, whether to use GPU
        random_state: int or None, for reproducing
        '''

        self.num_iters = num_iters
        self.epsilon = epsilon
        # self.epsilon = epsilon / 255
        self.early_stopping = early_stopping
        self.use_cuda = torch.cuda.is_available() and use_cuda
        self.nbits = int(np.ceil(np.log10(num_iters)) + 1)

        self.preprocess = transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            transforms.Normalize(self.MEAN, self.STD),
        ])

        if not isinstance(model, list):
            model = [model]
        model = [copy.deepcopy(m) for m in model]
        for m in model:
            m.eval()
            if self.use_cuda:
                m.cuda()
        self.model = model

        if random_state is not None:
            np.random.seed(seed=random_state)
        return

    def __call__(self, image_path, label, target=False):
        '''__CALL__

        image_path: string, path of uint8 input image
        label: int, the true label of input image if target is False,
               the target label to learn if target is True
        target: bool, if True, perform target adversarial attack;
                if False, perform non-target adversarial attack
        '''

        self.target = target
        src_image = np.array(Image.open(image_path))
        adv_image = self.forward(src_image, label)
        return adv_image.astype(np.uint8)

    def forward(self, src_image, label):
        image = src_image.copy().astype(float)
        n_dims = len(image.flatten())
        perm = np.random.permutation(n_dims)
        last_prob, _ = self.__predict(image, label)
        is_better = np.greater if self.target else np.less

        num_iters = min([self.num_iters, len(perm)])
        for i in range(num_iters):
            diff = np.zeros((n_dims))
            diff[perm[i]] = self.epsilon
            diff = diff.reshape(image.shape)

            left_image = np.clip(image - diff, 0.0, 255.0)
            left_prob, is_stop = self.__predict(left_image, label)
            if is_stop or is_better(left_prob, last_prob):
                image = left_image.copy()
                last_prob = left_prob
                if is_stop:
                    break
            else:
                right_image = np.clip(image + diff, 0.0, 255.0)
                right_prob, is_stop = self.__predict(right_image, label)
                if is_stop or is_better(right_prob, last_prob):
                    image = right_image.copy()
                    last_prob = right_prob
                    if is_stop:
                        break

            iter_msg = '[Running]-[Step:{}/{}]-[Prob:{:.6f}]'
            print(iter_msg.format(i + 1, num_iters, last_prob), end='\r')

        iter_msg = '\n[Stopped]-[Step:{}/{}]-[Prob:{:.6f}]'
        print(iter_msg.format(i + 1, num_iters, last_prob))
        return image

    def __predict(self, image, label):

        def get_prob(model, image_norm):
            pred = model(image_norm)
            probs = torch.softmax(pred, dim=1)
            probs = probs.data.cpu().detach().numpy().flatten()
            pred = np.argmax(probs)
            return probs[label], pred

        image_norm = self.__norm(image)
        if self.use_cuda:
            image_norm = image_norm.cuda()

        prob_preds = [get_prob(model, image_norm) for model in self.model]
        probs = [item[0] for item in prob_preds]
        prob = min(probs) if self.target else max(probs)
        preds = [item[1] for item in prob_preds]

        is_stop = False
        if self.early_stopping:
            if self.target and preds.count(label) == len(preds):
                is_stop = True
            elif (not self.target) and preds.count(label) == 0:
                is_stop = True

        return prob, is_stop

    def __norm(self, image):
        image_cp = Image.fromarray(image.astype(np.uint8))
        image_norm = self.preprocess(image_cp)
        image_norm = image_norm.unsqueeze(0)
        return image_norm
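# Usage sketch (not part of the original file): runs the attack against a single
# torchvision classifier. The image path and ImageNet label below are placeholder
# assumptions; any classifier that returns logits of shape (1, n_classes) should
# work the same way.
if __name__ == "__main__":
    from torchvision import models

    classifier = models.resnet18(pretrained=True)
    attacker = BlackBoxAttack(classifier, input_size=224, epsilon=16,
                              num_iters=2000, early_stopping=True,
                              use_cuda=False, random_state=42)
    # 'cat.jpg' and label 285 ('Egyptian cat') are illustrative values only.
    adversarial = attacker('cat.jpg', label=285, target=False)
    Image.fromarray(adversarial).save('cat_adv.png')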
python
import RPi.GPIO
import sys
import random
sys.path.append("../../")
from gfxlcd.driver.nju6450.gpio import GPIO
from gfxlcd.driver.nju6450.nju6450 import NJU6450

RPi.GPIO.setmode(RPi.GPIO.BCM)


def hole(o, x, y):
    o.draw_pixel(x+1, y)
    o.draw_pixel(x+2, y)
    o.draw_pixel(x+3, y)
    o.draw_pixel(x+1, y + 4)
    o.draw_pixel(x+2, y + 4)
    o.draw_pixel(x+3, y + 4)
    o.draw_pixel(x, y + 1)
    o.draw_pixel(x+4, y + 1)
    o.draw_pixel(x, y + 2)
    o.draw_pixel(x+4, y + 2)
    o.draw_pixel(x, y + 3)
    o.draw_pixel(x+4, y + 3)


def draw_points(o):
    for _ in range(0, 50):
        hole(o, random.randint(2, o.width-10), random.randint(2, o.height-10))


def draw_net(o):
    s = 0
    while s < o.width-1:
        o.draw_line(s, 0, s, o.height-1)
        s += 10
    s = 0
    while s < o.height-1:
        o.draw_line(0, s, o.width-1, s)
        s += 10


lcd_nju = NJU6450(122, 32, GPIO())
lcd_nju.init()
lcd_nju.auto_flush = False

lcd_nju.draw_circle(60, 15, 15)
lcd_nju.draw_circle(53, 10, 3)
lcd_nju.draw_circle(67, 10, 3)
lcd_nju.draw_arc(60, 15, 10, 45, 135)
lcd_nju.draw_line(60, 12, 57, 17)
lcd_nju.draw_line(60, 12, 63, 17)
lcd_nju.draw_arc(60, 15, 3, 45, 135)
lcd_nju.fill_rect(2, 2, 42, 29)
lcd_nju.fill_rect(119, 2, 109, 12)
lcd_nju.fill_rect(119, 17, 109, 19)
lcd_nju.draw_rect(77, 6, 105, 16)
lcd_nju.fill_rect(77, 16, 105, 25)

lcd_nju.flush(True)
python
from utils.functions import get_env

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'HOST': get_env("POSTGRES_HOST", "db"),
        'PORT': get_env("POSTGRES_PORT", "5432"),
        'NAME': get_env("POSTGRES_DB"),
        'USER': get_env("POSTGRES_USER"),
        'PASSWORD': get_env("POSTGRES_PASSWORD")
    }
}
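# For reference: utils.functions is not included in this settings file, so here
# is a minimal sketch of what the get_env helper above is assumed to do -- read
# an environment variable, falling back to an optional default:
#
#     import os
#
#     def get_env(key, default=None):
#         return os.environ.get(key, default)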
python
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Import utilities: Utilities related to imports and our lazy inits. """ import importlib.util import json import os import sys from collections import OrderedDict from functools import wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() _torch_version = "N/A" if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: _torch_available = importlib.util.find_spec("torch") is not None if _torch_available: try: _torch_version = importlib_metadata.version("torch") logger.info(f"PyTorch version {_torch_version} available.") except importlib_metadata.PackageNotFoundError: _torch_available = False else: logger.info("Disabling PyTorch because USE_TF is set") _torch_available = False _tf_version = "N/A" if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: _tf_available = importlib.util.find_spec("tensorflow") is not None if _tf_available: candidates = ( "tensorflow", "tensorflow-cpu", "tensorflow-gpu", "tf-nightly", "tf-nightly-cpu", "tf-nightly-gpu", "intel-tensorflow", "intel-tensorflow-avx512", "tensorflow-rocm", "tensorflow-macos", ) _tf_version = None # For the metadata, we have to look for both tensorflow and tensorflow-cpu for pkg in candidates: try: _tf_version = importlib_metadata.version(pkg) break except importlib_metadata.PackageNotFoundError: pass _tf_available = _tf_version is not None if _tf_available: if version.parse(_tf_version) < version.parse("2"): logger.info(f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum.") _tf_available = False else: logger.info(f"TensorFlow version {_tf_version} available.") else: logger.info("Disabling Tensorflow because USE_TORCH is set") _tf_available = False if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None if _flax_available: try: _jax_version = importlib_metadata.version("jax") _flax_version = importlib_metadata.version("flax") logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") except importlib_metadata.PackageNotFoundError: _flax_available = False else: _flax_available = False _datasets_available = importlib.util.find_spec("datasets") is not None try: # Check we're not importing a "datasets" directory somewhere but the actual library by trying to grab the version # AND checking it has an author field in the metadata that is HuggingFace. 
_ = importlib_metadata.version("datasets") _datasets_metadata = importlib_metadata.metadata("datasets") if _datasets_metadata.get("author", "") != "HuggingFace Inc.": _datasets_available = False except importlib_metadata.PackageNotFoundError: _datasets_available = False _detectron2_available = importlib.util.find_spec("detectron2") is not None try: _detectron2_version = importlib_metadata.version("detectron2") logger.debug(f"Successfully imported detectron2 version {_detectron2_version}") except importlib_metadata.PackageNotFoundError: _detectron2_available = False _faiss_available = importlib.util.find_spec("faiss") is not None try: _faiss_version = importlib_metadata.version("faiss") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib_metadata.PackageNotFoundError: try: _faiss_version = importlib_metadata.version("faiss-cpu") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib_metadata.PackageNotFoundError: _faiss_available = False _ftfy_available = importlib.util.find_spec("ftfy") is not None try: _ftfy_version = importlib_metadata.version("ftfy") logger.debug(f"Successfully imported ftfy version {_ftfy_version}") except importlib_metadata.PackageNotFoundError: _ftfy_available = False coloredlogs = importlib.util.find_spec("coloredlogs") is not None try: _coloredlogs_available = importlib_metadata.version("coloredlogs") logger.debug(f"Successfully imported sympy version {_coloredlogs_available}") except importlib_metadata.PackageNotFoundError: _coloredlogs_available = False sympy_available = importlib.util.find_spec("sympy") is not None try: _sympy_available = importlib_metadata.version("sympy") logger.debug(f"Successfully imported sympy version {_sympy_available}") except importlib_metadata.PackageNotFoundError: _sympy_available = False _tf2onnx_available = importlib.util.find_spec("tf2onnx") is not None try: _tf2onnx_version = importlib_metadata.version("tf2onnx") logger.debug(f"Successfully imported tf2onnx version {_tf2onnx_version}") except importlib_metadata.PackageNotFoundError: _tf2onnx_available = False _onnx_available = importlib.util.find_spec("onnxruntime") is not None try: _onxx_version = importlib_metadata.version("onnx") logger.debug(f"Successfully imported onnx version {_onxx_version}") except importlib_metadata.PackageNotFoundError: _onnx_available = False _scatter_available = importlib.util.find_spec("torch_scatter") is not None try: _scatter_version = importlib_metadata.version("torch_scatter") logger.debug(f"Successfully imported torch-scatter version {_scatter_version}") except importlib_metadata.PackageNotFoundError: _scatter_available = False _pytorch_quantization_available = importlib.util.find_spec("pytorch_quantization") is not None try: _pytorch_quantization_version = importlib_metadata.version("pytorch_quantization") logger.debug(f"Successfully imported pytorch-quantization version {_pytorch_quantization_version}") except importlib_metadata.PackageNotFoundError: _pytorch_quantization_available = False _soundfile_available = importlib.util.find_spec("soundfile") is not None try: _soundfile_version = importlib_metadata.version("soundfile") logger.debug(f"Successfully imported soundfile version {_soundfile_version}") except importlib_metadata.PackageNotFoundError: _soundfile_available = False _tensorflow_probability_available = importlib.util.find_spec("tensorflow_probability") is not None try: _tensorflow_probability_version = importlib_metadata.version("tensorflow_probability") 
logger.debug(f"Successfully imported tensorflow-probability version {_tensorflow_probability_version}") except importlib_metadata.PackageNotFoundError: _tensorflow_probability_available = False _timm_available = importlib.util.find_spec("timm") is not None try: _timm_version = importlib_metadata.version("timm") logger.debug(f"Successfully imported timm version {_timm_version}") except importlib_metadata.PackageNotFoundError: _timm_available = False _torchaudio_available = importlib.util.find_spec("torchaudio") is not None try: _torchaudio_version = importlib_metadata.version("torchaudio") logger.debug(f"Successfully imported torchaudio version {_torchaudio_version}") except importlib_metadata.PackageNotFoundError: _torchaudio_available = False _phonemizer_available = importlib.util.find_spec("phonemizer") is not None try: _phonemizer_version = importlib_metadata.version("phonemizer") logger.debug(f"Successfully imported phonemizer version {_phonemizer_version}") except importlib_metadata.PackageNotFoundError: _phonemizer_available = False _pyctcdecode_available = importlib.util.find_spec("pyctcdecode") is not None try: _pyctcdecode_version = importlib_metadata.version("pyctcdecode") logger.debug(f"Successfully imported pyctcdecode version {_pyctcdecode_version}") except importlib_metadata.PackageNotFoundError: _pyctcdecode_available = False _librosa_available = importlib.util.find_spec("librosa") is not None try: _librosa_version = importlib_metadata.version("librosa") logger.debug(f"Successfully imported librosa version {_librosa_version}") except importlib_metadata.PackageNotFoundError: _librosa_available = False # This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs. TORCH_FX_REQUIRED_VERSION = version.parse("1.10") TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION = version.parse("1.8") def is_torch_available(): return _torch_available def is_pyctcdecode_available(): return _pyctcdecode_available def is_librosa_available(): return _librosa_available def is_torch_cuda_available(): if is_torch_available(): import torch return torch.cuda.is_available() else: return False def is_torch_bf16_gpu_available(): if not is_torch_available(): return False import torch # since currently no utility function is available we build our own. # some bits come from https://github.com/pytorch/pytorch/blob/2289a12f21c54da93bf5d696e3f9aea83dd9c10d/torch/testing/_internal/common_cuda.py#L51 # with additional check for torch version # to succeed: # 1. torch >= 1.10 (1.9 should be enough for AMP API has changed in 1.10, so using 1.10 as minimal) # 2. the hardware needs to support bf16 (GPU arch >= Ampere, or CPU) # 3. if using gpu, CUDA >= 11 # 4. 
torch.autocast exists # XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's # really only correct for the 0th gpu (or currently set default device if different from 0) if version.parse(torch.__version__) < version.parse("1.10"): return False if torch.cuda.is_available() and torch.version.cuda is not None: if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False if int(torch.version.cuda.split(".")[0]) < 11: return False if not hasattr(torch.cuda.amp, "autocast"): return False else: return False return True def is_torch_bf16_cpu_available(): if not is_torch_available(): return False import torch if version.parse(torch.__version__) < version.parse("1.10"): return False try: # multiple levels of AttributeError depending on the pytorch version so do them all in one check _ = torch.cpu.amp.autocast except AttributeError: return False return True def is_torch_bf16_available(): return is_torch_bf16_cpu_available() or is_torch_bf16_gpu_available() def is_torch_tf32_available(): if not is_torch_available(): return False import torch if not torch.cuda.is_available() or torch.version.cuda is None: return False if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False if int(torch.version.cuda.split(".")[0]) < 11: return False if version.parse(torch.__version__) < version.parse("1.7"): return False return True torch_version = None _torch_fx_available = _torch_onnx_dict_inputs_support_available = False if _torch_available: torch_version = version.parse(importlib_metadata.version("torch")) _torch_fx_available = (torch_version.major, torch_version.minor) >= ( TORCH_FX_REQUIRED_VERSION.major, TORCH_FX_REQUIRED_VERSION.minor, ) _torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION def is_torch_fx_available(): return _torch_fx_available def is_torch_onnx_dict_inputs_support_available(): return _torch_onnx_dict_inputs_support_available def is_tf_available(): return _tf_available def is_coloredlogs_available(): return _coloredlogs_available def is_tf2onnx_available(): return _tf2onnx_available def is_onnx_available(): return _onnx_available def is_flax_available(): return _flax_available def is_ftfy_available(): return _ftfy_available def is_torch_tpu_available(): if not _torch_available: return False # This test is probably enough, but just in case, we unpack a bit. 
if importlib.util.find_spec("torch_xla") is None: return False if importlib.util.find_spec("torch_xla.core") is None: return False return importlib.util.find_spec("torch_xla.core.xla_model") is not None def is_torchdynamo_available(): return importlib.util.find_spec("torchdynamo") is not None def is_datasets_available(): return _datasets_available def is_detectron2_available(): return _detectron2_available def is_rjieba_available(): return importlib.util.find_spec("rjieba") is not None def is_psutil_available(): return importlib.util.find_spec("psutil") is not None def is_py3nvml_available(): return importlib.util.find_spec("py3nvml") is not None def is_apex_available(): return importlib.util.find_spec("apex") is not None def is_ipex_available(): return importlib.util.find_spec("intel_extension_for_pytorch") is not None def is_bitsandbytes_available(): return importlib.util.find_spec("bitsandbytes") is not None def is_faiss_available(): return _faiss_available def is_scipy_available(): return importlib.util.find_spec("scipy") is not None def is_sklearn_available(): if importlib.util.find_spec("sklearn") is None: return False return is_scipy_available() and importlib.util.find_spec("sklearn.metrics") def is_sentencepiece_available(): return importlib.util.find_spec("sentencepiece") is not None def is_protobuf_available(): if importlib.util.find_spec("google") is None: return False return importlib.util.find_spec("google.protobuf") is not None def is_accelerate_available(): return importlib.util.find_spec("accelerate") is not None def is_tokenizers_available(): return importlib.util.find_spec("tokenizers") is not None def is_vision_available(): return importlib.util.find_spec("PIL") is not None def is_pytesseract_available(): return importlib.util.find_spec("pytesseract") is not None def is_spacy_available(): return importlib.util.find_spec("spacy") is not None def is_in_notebook(): try: # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py get_ipython = sys.modules["IPython"].get_ipython if "IPKernelApp" not in get_ipython().config: raise ImportError("console") if "VSCODE_PID" in os.environ: raise ImportError("vscode") if "DATABRICKS_RUNTIME_VERSION" in os.environ: raise ImportError("databricks") return importlib.util.find_spec("IPython") is not None except (AttributeError, ImportError, KeyError): return False def is_scatter_available(): return _scatter_available def is_pytorch_quantization_available(): return _pytorch_quantization_available def is_tensorflow_probability_available(): return _tensorflow_probability_available def is_pandas_available(): return importlib.util.find_spec("pandas") is not None def is_sagemaker_dp_enabled(): # Get the sagemaker specific env variable. sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". sagemaker_params = json.loads(sagemaker_params) if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None def is_sagemaker_mp_enabled(): # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. 
smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None def is_training_run_on_sagemaker(): return "SAGEMAKER_JOB_NAME" in os.environ def is_soundfile_availble(): return _soundfile_available def is_timm_available(): return _timm_available def is_torchaudio_available(): return _torchaudio_available def is_speech_available(): # For now this depends on torchaudio but the exact dependency might evolve in the future. return _torchaudio_available def is_phonemizer_available(): return _phonemizer_available def torch_only_method(fn): def wrapper(*args, **kwargs): if not _torch_available: raise ImportError( "You need to install pytorch to use this method or class, " "or activate it with environment variables USE_TORCH=1 and USE_TF=0." ) else: return fn(*args, **kwargs) return wrapper # docstyle-ignore DATASETS_IMPORT_ERROR = """ {0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with: ``` pip install datasets ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install datasets ``` then restarting your kernel. Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or that python file if that's the case. """ # docstyle-ignore TOKENIZERS_IMPORT_ERROR = """ {0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with: ``` pip install tokenizers ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install tokenizers ``` """ # docstyle-ignore SENTENCEPIECE_IMPORT_ERROR = """ {0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones that match your environment. """ # docstyle-ignore PROTOBUF_IMPORT_ERROR = """ {0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones that match your environment. """ # docstyle-ignore FAISS_IMPORT_ERROR = """ {0} requires the faiss library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones that match your environment. """ # docstyle-ignore PYTORCH_IMPORT_ERROR = """ {0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. """ # docstyle-ignore SKLEARN_IMPORT_ERROR = """ {0} requires the scikit-learn library but it was not found in your environment. 
You can install it with: ``` pip install -U scikit-learn ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install -U scikit-learn ``` """ # docstyle-ignore TENSORFLOW_IMPORT_ERROR = """ {0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the installation page: https://www.tensorflow.org/install and follow the ones that match your environment. """ # docstyle-ignore DETECTRON2_IMPORT_ERROR = """ {0} requires the detectron2 library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones that match your environment. """ # docstyle-ignore FLAX_IMPORT_ERROR = """ {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. """ # docstyle-ignore FTFY_IMPORT_ERROR = """ {0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones that match your environment. """ # docstyle-ignore SCATTER_IMPORT_ERROR = """ {0} requires the torch-scatter library but it was not found in your environment. You can install it with pip as explained here: https://github.com/rusty1s/pytorch_scatter. """ # docstyle-ignore PYTORCH_QUANTIZATION_IMPORT_ERROR = """ {0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip: `pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com` """ # docstyle-ignore TENSORFLOW_PROBABILITY_IMPORT_ERROR = """ {0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as explained here: https://github.com/tensorflow/probability. """ # docstyle-ignore PANDAS_IMPORT_ERROR = """ {0} requires the pandas library but it was not found in your environment. You can install it with pip as explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html. """ # docstyle-ignore PHONEMIZER_IMPORT_ERROR = """ {0} requires the phonemizer library but it was not found in your environment. You can install it with pip: `pip install phonemizer` """ # docstyle-ignore SCIPY_IMPORT_ERROR = """ {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy` """ # docstyle-ignore SPEECH_IMPORT_ERROR = """ {0} requires the torchaudio library but it was not found in your environment. You can install it with pip: `pip install torchaudio` """ # docstyle-ignore TIMM_IMPORT_ERROR = """ {0} requires the timm library but it was not found in your environment. You can install it with pip: `pip install timm` """ # docstyle-ignore VISION_IMPORT_ERROR = """ {0} requires the PIL library but it was not found in your environment. You can install it with pip: `pip install pillow` """ # docstyle-ignore PYTESSERACT_IMPORT_ERROR = """ {0} requires the PyTesseract library but it was not found in your environment. You can install it with pip: `pip install pytesseract` """ # docstyle-ignore PYCTCDECODE_IMPORT_ERROR = """ {0} requires the pyctcdecode library but it was not found in your environment. 
You can install it with pip: `pip install pyctcdecode` """ # docstyle-ignore ACCELERATE_IMPORT_ERROR = """ {0} requires the accelerate library but it was not found in your environment. You can install it with pip: `pip install accelerate` """ BACKENDS_MAPPING = OrderedDict( [ ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)), ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)), ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)), ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)), ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)), ("scatter", (is_scatter_available, SCATTER_IMPORT_ERROR)), ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)), ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)), ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)), ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("vision", (is_vision_available, VISION_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)), ] ) def requires_backends(obj, backends): if not isinstance(backends, (list, tuple)): backends = [backends] name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ checks = (BACKENDS_MAPPING[backend] for backend in backends) failed = [msg.format(name) for available, msg in checks if not available()] if failed: raise ImportError("".join(failed)) class DummyObject(type): """ Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by `requires_backend` each time a user tries to access any method of that class. """ def __getattr__(cls, key): if key.startswith("_"): return super().__getattr__(cls, key) requires_backends(cls, cls._backends) def torch_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. @wraps(func) def wrapper(*args, **kwargs): if is_torch_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires PyTorch.") return wrapper def tf_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. @wraps(func) def wrapper(*args, **kwargs): if is_tf_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires TF.") return wrapper def is_torch_fx_proxy(x): if is_torch_fx_available(): import torch.fx return isinstance(x, torch.fx.Proxy) return False class _LazyModule(ModuleType): """ Module class that surfaces all objects but only performs associated imports when the objects are requested. 
""" # Very heavily inspired by optuna.integration._IntegrationModule # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): super().__init__(name) self._modules = set(import_structure.keys()) self._class_to_module = {} for key, values in import_structure.items(): for value in values: self._class_to_module[value] = key # Needed for autocompletion in an IDE self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) self.__file__ = module_file self.__spec__ = module_spec self.__path__ = [os.path.dirname(module_file)] self._objects = {} if extra_objects is None else extra_objects self._name = name self._import_structure = import_structure # Needed for autocompletion in an IDE def __dir__(self): result = super().__dir__() # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. for attr in self.__all__: if attr not in result: result.append(attr) return result def __getattr__(self, name: str) -> Any: if name in self._objects: return self._objects[name] if name in self._modules: value = self._get_module(name) elif name in self._class_to_module.keys(): module = self._get_module(self._class_to_module[name]) value = getattr(module, name) else: raise AttributeError(f"module {self.__name__} has no attribute {name}") setattr(self, name, value) return value def _get_module(self, module_name: str): try: return importlib.import_module("." + module_name, self.__name__) except Exception as e: raise RuntimeError( f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" f" traceback):\n{e}" ) from e def __reduce__(self): return (self.__class__, (self._name, self.__file__, self._import_structure)) class OptionalDependencyNotAvailable(BaseException): """Internally used error class for signalling an optional dependency was not found."""
python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Volume utilities for virt drivers.
"""

from os_brick.initiator import connector
from oslo_concurrency import processutils as putils

from jacket.compute import utils


def get_iscsi_initiator(execute=None):
    """Get iscsi initiator name for this machine."""

    root_helper = utils.get_root_helper()
    # so we can mock out the execute itself
    # in unit tests.
    if not execute:
        execute = putils.execute
    iscsi = connector.ISCSIConnector(root_helper=root_helper,
                                     execute=execute)
    return iscsi.get_initiator()
python
import tensorflow as tf


class Model:

    def __init__(self, image_size=224, n_classes=16, fc_size=1024):
        self.n_classes = n_classes

        tf.compat.v1.disable_eager_execution()
        self.dropout = tf.compat.v1.placeholder(tf.float32, name="dropout_rate")
        self.input_images = tf.compat.v1.placeholder(
            tf.float32, shape=[None, image_size, image_size, 3], name="input_images")

        ## First convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 3, 16], stddev=1e-1), name="conv1_weights")
        conv = tf.nn.conv2d(self.input_images, kernel, [1, 2, 2, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([16]))
        conv_with_bias = tf.nn.bias_add(conv, bias)
        # Rectifier see: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
        conv1 = tf.nn.leaky_relu(conv_with_bias, name="conv1")
        # local response normalization see: https://prateekvjoshi.com/2016/04/05/what-is-local-response-normalization-in-convolutional-neural-networks/
        lrn1 = tf.nn.lrn(conv1, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0)
        pooled_conv1 = tf.nn.max_pool2d(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name="pool1")

        ## Second convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 16, 64], stddev=1e-1), name="conv2_weights")
        conv = tf.nn.conv2d(pooled_conv1, kernel, [1, 2, 2, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([64]), name="conv2_bias")
        conv_with_bias = tf.nn.bias_add(conv, bias)
        conv2 = tf.nn.leaky_relu(conv_with_bias, name="conv2")
        lrn2 = tf.nn.lrn(conv2, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0)
        pooled_conv2 = tf.nn.max_pool2d(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name="pool2")

        ## Third convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 64, 128], stddev=1e-1), name="conv3_weights")
        conv = tf.nn.conv2d(pooled_conv2, kernel, [1, 1, 1, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([128]), name="conv3_bias")
        conv_with_bias = tf.nn.bias_add(conv, bias)
        conv3 = tf.nn.leaky_relu(conv_with_bias, name="conv3")

        ## Fourth convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 128, 256], stddev=1e-1), name="conv4_weights")
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([256]), name="conv4_bias")
        conv_with_bias = tf.nn.bias_add(conv, bias)
        conv4 = tf.nn.leaky_relu(conv_with_bias, name="conv4")

        ## Fifth convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 256, 384], stddev=1e-1), name="conv5_weights")
        conv = tf.nn.conv2d(conv4, kernel, [1, 2, 2, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([384]), name="conv5_bias")
        conv_with_bias = tf.nn.bias_add(conv, bias)
        conv5 = tf.nn.leaky_relu(conv_with_bias, name="conv5")

        ## 6th convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 384, 512], stddev=1e-1), name="conv6_weights")
        conv = tf.nn.conv2d(conv5, kernel, [1, 2, 2, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([512]), name="conv6_bias")
        conv_with_bias = tf.nn.bias_add(conv, bias)
        conv6 = tf.nn.leaky_relu(conv_with_bias, name="conv6")

        ## 7th convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 512, 768], stddev=1e-1), name="conv7_weights")
        conv = tf.nn.conv2d(conv6, kernel, [1, 2, 2, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([768]), name="conv7_bias")
        conv_with_bias = tf.nn.bias_add(conv, bias)
        conv7 = tf.nn.leaky_relu(conv_with_bias, name="conv7")

        ## 8th convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 768, 768], stddev=1e-1), name="conv8_weights")
        conv = tf.nn.conv2d(conv7, kernel, [1, 2, 2, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([768]), name="conv8_bias")
        conv_with_bias = tf.nn.bias_add(conv, bias)
        conv8 = tf.nn.leaky_relu(conv_with_bias, name="conv8")

        ## 9th convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 768, 768], stddev=1e-1), name="conv9_weights")
        conv = tf.nn.conv2d(conv8, kernel, [1, 2, 2, 1], padding="SAME")
        bias = tf.Variable(tf.random.truncated_normal([768]), name="conv9_bias")
        conv_with_bias = tf.nn.bias_add(conv, bias)
        conv9 = tf.nn.leaky_relu(conv_with_bias, name="conv9")

        ## Fully connected layers
        conv9 = tf.keras.layers.Flatten()(conv9)  # tf.flatten
        # fc_size_in = 768
        fc_size_in = conv9.shape[-1]

        # First fully connected layer
        weights = tf.Variable(tf.random.truncated_normal([fc_size_in, fc_size]), name="fc1_weights")
        bias = tf.Variable(tf.random.truncated_normal([fc_size]), name="fc1_bias")
        fc1 = tf.matmul(conv9, weights) + bias
        fc1 = tf.nn.leaky_relu(fc1, name="fc1")
        fc1 = tf.nn.dropout(fc1, rate=(self.dropout))

        # Second fully connected layer
        weights = tf.Variable(tf.random.truncated_normal([fc_size, fc_size]), name="fc2_weights")
        bias = tf.Variable(tf.random.truncated_normal([fc_size]), name="fc2_bias")
        fc2 = tf.matmul(fc1, weights) + bias
        fc2 = tf.nn.leaky_relu(fc2, name="fc2")
        fc2 = tf.nn.dropout(fc2, rate=(self.dropout))

        # Output layer
        weights = tf.Variable(tf.zeros([fc_size, n_classes]), name="output_weight")
        bias = tf.Variable(tf.random.truncated_normal([n_classes]), name="output_bias")
        self.out = tf.matmul(fc2, weights) + bias
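# Usage sketch (not part of the original file): builds the graph and runs one
# forward pass on a dummy batch through a TF1-style session, matching the
# compat.v1 placeholders defined above. Shapes and the dropout rate are
# illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    model = Model(image_size=224, n_classes=16)
    dummy_batch = np.zeros((1, 224, 224, 3), dtype=np.float32)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        logits = sess.run(model.out, feed_dict={model.input_images: dummy_batch,
                                                model.dropout: 0.0})
        print(logits.shape)  # expected: (1, 16)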
python
''' based on the noise model of https://github.com/paninski-lab/yass ''' import numpy as np from scipy.spatial.distance import pdist, squareform import os import torch def make_noise(n, spatial_SIG, temporal_SIG): """Make noise Parameters ---------- n: int Number of noise events to generate Returns ------ numpy.ndarray Noise """ n_neigh, _ = spatial_SIG.shape waveform_length, _ = temporal_SIG.shape # get noise noise = np.random.normal(size=(n, waveform_length, n_neigh)) for c in range(n_neigh): noise[:, :, c] = np.matmul(noise[:, :, c], temporal_SIG) reshaped_noise = np.reshape(noise, (-1, n_neigh)) the_noise = np.reshape(np.matmul(reshaped_noise, spatial_SIG), (n, waveform_length, n_neigh)) return the_noise def make_noise_torch(n, spatial_SIG, temporal_SIG): """Make noise in Pytorch Parameters ---------- n: int Number of noise events to generate spatial_SIG: torch array Spatial covariance matrix temporal_SIG: torch array Temporal covariance matrix Returns ------ torch array Noise """ n_neigh, _ = spatial_SIG.shape waveform_length, _ = temporal_SIG.shape # get noise dist = torch.distributions.normal.Normal(0.0, 1.0) noise = dist.sample(sample_shape=(n, waveform_length, n_neigh)) # multiple random gaussian; then multiply by covariance for c in range(n_neigh): noise[:, :, c] = torch.matmul(noise[:, :, c], temporal_SIG) reshaped_noise = torch.reshape(noise, (-1, n_neigh)) the_noise = torch.reshape(torch.matmul(reshaped_noise, spatial_SIG), (n, waveform_length, n_neigh)) return the_noise def kill_signal(recordings, threshold, window_size): """ Thresholds recordings, values above 'threshold' are considered signal (set to 0), a window of size 'window_size' is drawn around the signal points and those observations are also killed Returns ------- recordings: numpy.ndarray The modified recordings with values above the threshold set to 0 is_noise_idx: numpy.ndarray A boolean array with the same shap as 'recordings' indicating if the observation is noise (1) or was killed (0). 
""" recordings = np.copy(recordings) # print("rec shape:", recordings.shape) T, C = recordings.shape R = int((window_size-1)/2) # this will hold a flag 1 (noise), 0 (signal) for every obseration in the # recordings is_noise_idx = np.zeros((T, C)) # go through every neighboring channel for c in range(C): # get obserations where observation is above threshold idx_temp = np.where(np.abs(recordings[:, c]) > threshold)[0] # shift every index found for j in range(-R, R+1): # shift idx_temp2 = idx_temp + j # remove indexes outside range [0, T] idx_temp2 = idx_temp2[np.logical_and(idx_temp2 >= 0, idx_temp2 < T)] # set surviving indexes to nan recordings[idx_temp2, c] = np.nan # noise indexes are the ones that are not nan # FIXME: compare to np.nan instead is_noise_idx_temp = (recordings[:, c] == recordings[:, c]) # standarize data, ignoring nans recordings[:, c] = recordings[:, c]/np.nanstd(recordings[:, c]) # set non noise indexes to 0 in the recordings recordings[~is_noise_idx_temp, c] = 0 # save noise indexes is_noise_idx[is_noise_idx_temp, c] = 1 # print("recordings after kill sig:", recordings.shape, is_noise_idx.shape) # print("number of zeros:", np.sum(recordings==0)) return recordings, is_noise_idx def noise_cov(recordings, temporal_size, window_size, sample_size=1000, threshold=3.0, max_trials_per_sample=100, allow_smaller_sample_size=False): """Compute noise temporal and spatial covariance Parameters ---------- recordings: numpy.ndarray Recordings temporal_size: Waveform size sample_size: int Number of noise snippets of temporal_size to search threshold: float Observations below this number are considered noise Returns ------- spatial_SIG: numpy.ndarray temporal_SIG: numpy.ndarray """ #logger = logging.getLogger(__name__) # kill signal above threshold in recordings rec, is_noise_idx = kill_signal(recordings, threshold, window_size) # compute spatial covariance, output: (n_channels, n_channels) spatial_cov = np.divide(np.matmul(rec.T, rec), np.matmul(is_noise_idx.T, is_noise_idx)) # compute spatial sig w_spatial, v_spatial = np.linalg.eig(spatial_cov) spatial_SIG = np.matmul(np.matmul(v_spatial, np.diag(np.sqrt(w_spatial))), v_spatial.T) # apply spatial whitening to recordings spatial_whitener = np.matmul(np.matmul(v_spatial, np.diag(1/np.sqrt(w_spatial))), v_spatial.T) rec = np.matmul(rec, spatial_whitener) # search single noise channel snippets noise_wf = search_noise_snippets( rec, is_noise_idx, sample_size, temporal_size, channel_choices=None, max_trials_per_sample=max_trials_per_sample, allow_smaller_sample_size=allow_smaller_sample_size) print ('Computing temporal sig...') w, v = np.linalg.eig(np.cov(noise_wf.T)) temporal_SIG = np.matmul(np.matmul(v, np.diag(np.sqrt(w))), v.T) return spatial_SIG, temporal_SIG def search_noise_snippets(recordings, is_noise_idx, sample_size, temporal_size, channel_choices=None, max_trials_per_sample=100, allow_smaller_sample_size=False): """ Randomly search noise snippets of 'temporal_size' Parameters ---------- channel_choices: list List of sets of channels to select at random on each trial max_trials_per_sample: int, optional Maximum random trials per sample allow_smaller_sample_size: bool, optional If 'max_trials_per_sample' is reached and this is True, the noise snippets found up to that time are returned Raises ------ ValueError if after 'max_trials_per_sample' trials, no noise snippet has been found this exception is raised Notes ----- Channels selected at random using the random module from the standard library (not using np.random) """ #logger 
= logging.getLogger(__name__) T, C = recordings.shape if channel_choices is None: noise_wf = np.zeros((sample_size, temporal_size)) else: lenghts = set([len(ch) for ch in channel_choices]) if len(lenghts) > 1: raise ValueError('All elements in channel_choices must have ' 'the same length, got {}'.format(lenghts)) n_channels = len(channel_choices[0]) noise_wf = np.zeros((sample_size, temporal_size, n_channels)) count = 0 #logger.debug('Starting to search noise snippets...') trial = 0 # repeat until you get sample_size noise snippets while count < sample_size: # random number for the start of the noise snippet t_start = np.random.randint(T-temporal_size) if channel_choices is None: # random channel ch = np.random.randint(0, C - 1) else: ch = np.random.choice(channel_choices) t_slice = slice(t_start, t_start+temporal_size) # get a snippet from the recordings and the noise flags for the same # location snippet = recordings[t_slice, ch] snipped_idx_noise = is_noise_idx[t_slice, ch] # check if all observations in snippet are noise if snipped_idx_noise.all(): # add the snippet and increase count noise_wf[count] = snippet count += 1 trial = 0 #logger.debug('Found %i/%i...', count, sample_size) trial += 1 if trial == max_trials_per_sample: if allow_smaller_sample_size: return noise_wf[:count] else: raise ValueError("Couldn't find snippet {} of size {} after " "{} iterations (only {} found)" .format(count + 1, temporal_size, max_trials_per_sample, count)) return noise_wf def order_channels_by_distance(reference, channels, geom): """Order channels by distance using certain channel as reference Parameters ---------- reference: int Reference channel channels: np.ndarray Channels to order geom Geometry matrix Returns ------- numpy.ndarray 1D array with the channels ordered by distance using the reference channels numpy.ndarray 1D array with the indexes for the ordered channels """ coord_main = geom[reference] coord_others = geom[channels] idx = np.argsort(np.sum(np.square(coord_others - coord_main), axis=1)) return channels[idx], idx
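# Usage sketch (not part of the original module): estimates the spatial and
# temporal noise covariances from a recording and then samples synthetic noise.
# The synthetic (T, C) recording and the snippet sizes below are illustrative
# assumptions only, not values from the original project.
if __name__ == "__main__":
    demo_recordings = np.random.normal(size=(20000, 7))
    spatial_SIG, temporal_SIG = noise_cov(demo_recordings,
                                          temporal_size=61,
                                          window_size=61,
                                          sample_size=200,
                                          threshold=3.0)
    sampled_noise = make_noise(100, spatial_SIG, temporal_SIG)
    print(sampled_noise.shape)  # expected: (100, 61, 7)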
python
import matplotlib.pyplot as plt
import numpy as np

p_guess = [0.5, 0.55, 0.6, 0.7]
repeat_experiment = 30
n = 32
k = 5

plt.title('n = 32, k = 5')
plt.xlabel("Number of CRPs", fontsize=12)
plt.ylabel("Accuracy (x100%)", fontsize=12)

crps = np.load('./xorpuf'+str(k)+'_n'+str(n)+'_reps'+str(repeat_experiment)+'_crps.npy')

for i in range(len(p_guess)):
    accuracy_hpuf = np.load('./xorpuf'+str(k)+'_n'+str(n)+'_p'+str(p_guess[i])+'_reps'+str(repeat_experiment)+'_accuracy.npy')
    plt.plot(crps, accuracy_hpuf, label='p_guess = '+str(p_guess[i]))

plt.legend()
plt.show()
python
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test

CONF = config.CONF

LOG = logging.getLogger(__name__)


class TestLargeOpsScenario(manager.ScenarioTest):

    """
    Test large operations.

    This test below:
    * Spin up multiple instances in one nova call, and repeat three times
    * as a regular user
    * TODO: same thing for cinder
    """

    @classmethod
    def resource_setup(cls):
        if CONF.scenario.large_ops_number < 1:
            raise cls.skipException("large_ops_number not set to multiple "
                                    "instances")
        cls.set_network_resources()
        super(TestLargeOpsScenario, cls).resource_setup()

    def _wait_for_server_status(self, status):
        for server in self.servers:
            # Make sure nova list keeps working throughout the build process
            self.servers_client.list_servers()
            self.servers_client.wait_for_server_status(server['id'], status)

    def nova_boot(self):
        name = data_utils.rand_name('scenario-server-')
        flavor_id = CONF.compute.flavor_ref
        secgroup = self._create_security_group()
        self.servers_client.create_server(
            name,
            self.image,
            flavor_id,
            min_count=CONF.scenario.large_ops_number,
            security_groups=[secgroup])
        # needed because of bug 1199788
        params = {'name': name}
        _, server_list = self.servers_client.list_servers(params)
        self.servers = server_list['servers']
        for server in self.servers:
            # after deleting all servers - wait for all servers to clear
            # before cleanup continues
            self.addCleanup(self.servers_client.wait_for_server_termination,
                            server['id'])
        for server in self.servers:
            self.addCleanup_with_wait(
                waiter_callable=(self.servers_client.
                                 wait_for_server_termination),
                thing_id=server['id'], thing_id_param='server_id',
                cleanup_callable=self.delete_wrapper,
                cleanup_args=[self.servers_client.delete_server, server['id']])
        self._wait_for_server_status('ACTIVE')

    def _large_ops_scenario(self):
        self.glance_image_create()
        self.nova_boot()

    @test.services('compute', 'image')
    def test_large_ops_scenario_1(self):
        self._large_ops_scenario()

    @test.services('compute', 'image')
    def test_large_ops_scenario_2(self):
        self._large_ops_scenario()

    @test.services('compute', 'image')
    def test_large_ops_scenario_3(self):
        self._large_ops_scenario()
python
# -*- coding: UTF-8 -*- # @yasinkuyu import sys import time import config from BinanceAPI import * # trader.py --quantity -- symbol --profit --wait_time # ex: trader.py 1 IOTABTC 1.3 1 #int(sys.argv[0]) #quantity #sys.argv[1] #symbol #sys.argv[2] #percentage of profit #sys.argv[3] #wait_time TEST_MODE = False PROFIT = 1.3 #percentage of profit ORDER_ID = None TARGET_PRICE = 0 QUANTITY = 2 INCREASING = 0.00000001 TARGET_PROFITABLE_PRICE = None WAIT_TIME = 3 # default 3 seconds client = BinanceAPI(config.api_key, config.api_secret) def buy_limit(symbol, quantity, buyPrice): global TEST_MODE if not TEST_MODE: ret = client.buy_limit(symbol, quantity, buyPrice) if 'msg' in ret: errexit(ret['msg']) orderId = ret['orderId'] file = open("ORDER", "w") file.write("{}\n".format([symbol, orderId, quantity, buyPrice])) print "******************" print 'Order Id: %d' % orderId else: orderId = "100000" return orderId def sell_limit(symbol, quantity, orderId): global TEST_MODE global ORDER_ID global TARGET_PRICE global TARGET_PROFITABLE_PRICE ret = client.get_open_orders(symbol) if 'msg' in ret: errexit(ret['msg']) print "Orders" for order in ret: price = float(order['price']) origQty = float(order['origQty']) executedQty = float(order['executedQty']) if order['orderId'] == orderId: print "Order: %d: %lf\t%lf\t%lf" % (order['orderId'], price, origQty, executedQty) TARGET_PROFITABLE_PRICE = None ORDER_ID = None if not TEST_MODE: ret = client.sell_limit(symbol, quantity, TARGET_PRICE) print 'Sales were made at %s price.' % (TARGET_PRICE) print '---------------------------------------------' if 'msg' in ret: errexit(ret['msg']) print ret else: print "Order Id: %s. The test order is complete. Price %s" % (orderId, TARGET_PRICE) def cancel_order(symbol, orderId): global TEST_MODE if orderId is not None: if not TEST_MODE: ret = client.cancel(symbol, orderId) if 'msg' in ret: errexit(ret['msg']) print 'Order has been canceled.' def get_ticker(symbol): ret = client.get_ticker(symbol) return float(ret["lastPrice"]) def errexit(msg): print("Error: " + msg) exit(1) def action(symbol): global ORDER_ID global QUANTITY global TARGET_PRICE global TARGET_PROFITABLE_PRICE file = open("ORDER", "r") #print file.read() lastPrice = get_ticker(symbol) ret = client.get_orderbooks(symbol, 5) lastBid = float(ret['bids'][0][0]) lastAsk = float(ret['asks'][0][0]) btcPrice = get_ticker("BTCUSDT") buyPrice = lastBid + INCREASING sellPrice = lastAsk - INCREASING profitablePrice = buyPrice + (buyPrice * PROFIT / 100) earnTotal = sellPrice - buyPrice TARGET_PRICE = sellPrice if ORDER_ID is None: print 'price:%.8f buyp:%.8f sellp:%.8f-bid:%.8f ask:%.8f BTC:$%.1f' % (lastPrice, buyPrice, sellPrice, lastBid, lastAsk, btcPrice) if lastAsk >= profitablePrice: TARGET_PROFITABLE_PRICE = profitablePrice ORDER_ID = buy_limit(symbol, QUANTITY, buyPrice) print "Percentage of %s profit. Order created from %.8f. Profit: %.8f BTC" % (PROFIT, sellPrice, earnTotal) print "#####################" else: TARGET_PROFITABLE_PRICE = None cancel_order(symbol, ORDER_ID) else: print "Target sell price: %.8f " % TARGET_PROFITABLE_PRICE if lastAsk >= TARGET_PROFITABLE_PRICE: sell_limit(symbol, QUANTITY, ORDER_ID) def main(): symbol = 'IOTABTC' print "@yasinkuyu, 2017" print "Auto Trading for Binance.com (Beta). Enter your symbol. 
Ex: %s" % symbol name = raw_input() if name != "": symbol = name print '%%%s profit for scanning %s' % (PROFIT, symbol) if TEST_MODE: print "Test mode active" while True: startTime = time.time() action(symbol) endTime = time.time() if endTime - startTime < WAIT_TIME: time.sleep(WAIT_TIME - (endTime - startTime)) if __name__ == "__main__": main()
python
from typing import List, Dict, Optional, Set, Any, Tuple, Type

from Dataset import GraphDataset
from Models.EmbeddingLayers import EmbeddingLayer
from Models.GnnLayers import GCNLayer, GATLayer, HGCNLayer, IHGNNLayer
from Models.PredictionLayers import HemPredictionLayer
from Helpers.Torches import *
from Helpers.GlobalSettings import Gs, Gsv

class RawGnn(nn.Module):

    _saved_output_feature: Tensor = None

    def __init__(self,
                 device: torch.device,
                 dataset: GraphDataset,
                 embedding_size: int,
                 gnn_layer_type: Type,
                 gnn_layer_count: int,
                 feature_interaction_order: int,
                 phase2_attention: bool,
                 predictions: Type,
                 lambda_muq: float):
        """Initialize a model.

        Arguments:
            device: the device to run on.
            dataset: the GraphDataset providing node counts and the node index offsets in the graph.
            embedding_size: target embedding dimension for user, query and item nodes.
            gnn_layer_type: class of the GNN layer to stack (GCN / GAT / HGCN / IHGNN).
            gnn_layer_count: number of GNN layers.
            feature_interaction_order: feature-interaction order used by IHGNN layers.
            phase2_attention: whether IHGNN layers use attention in phase 2.
            predictions: class of the prediction layer.
            lambda_muq: hyperparameter forwarded to the HEM prediction layer.
        """
        super().__init__()

        # Record the arguments
        self.device = device
        self.dataset = dataset
        self.embedding_size = embedding_size
        self.gnn_layer_type = gnn_layer_type
        self.gnn_layer_count = gnn_layer_count
        self.feature_interaction_order = feature_interaction_order
        self.phase2_attention = phase2_attention
        self.prediction_layer_type = predictions
        self.output_feature_size = embedding_size * (1 + self.gnn_layer_count)

        # Embedding layer
        self.embeddings = EmbeddingLayer(
            dataset=dataset,
            embedding_size=embedding_size
        )

        # GNN layers (stacked)
        self.gnns = []
        for layer in range(self.gnn_layer_count):
            if gnn_layer_type in [ GCNLayer, GATLayer, HGCNLayer ]:
                self.gnns.append(
                    gnn_layer_type(
                        device=device,
                        dataset=dataset,
                        input_dimension=embedding_size,
                        output_dimension=embedding_size
                    )
                )
            elif gnn_layer_type in [ IHGNNLayer ]:
                # Only the first layer uses a feature-interaction order greater than 1.
                fi_order_here = feature_interaction_order
                if fi_order_here > 1 and layer > 0:
                    fi_order_here = 1
                self.gnns.append(
                    gnn_layer_type(
                        device=device,
                        dataset=dataset,
                        input_dimension=embedding_size,
                        output_dimension=embedding_size,
                        feature_interaction_order=fi_order_here,
                        phase2_attention=phase2_attention
                    )
                )
            else:
                raise NotImplementedError(f'Unsupported GNN layer type: {gnn_layer_type}')

        for i, gnn in enumerate(self.gnns):
            self.add_module(f'gnn_{i}', gnn)

        # Prediction layer
        if predictions == HemPredictionLayer:
            self.prediction_layer = HemPredictionLayer(
                feature_dimension=self.output_feature_size,
                lambda_muq=lambda_muq,
                item_count=dataset.item_count
            )
        else:
            raise NotImplementedError(f'Unsupported prediction layer type: {predictions}')

    def forward(self,
                user_indices: Tensor,
                query_indices: Tensor,
                item_indices: Optional[Tensor] = None):
        """Arguments: the user/query/item indices here start from 0; they are not the node
        indices in the adjacency matrix's node list.
        Returns: a 1-D tensor of scores, one per interaction, giving the likelihood of that
        interaction."""

        # Training mode
        if self._saved_output_feature is None:

            # Compute the features of every node in the graph, producing the node feature matrix X
            input_features = torch.cat(self.embeddings(None, None, None))

            # Run the GNN layers and concatenate all outputs into the output node feature matrix X'
            gnn_outputs = [input_features]
            gnn_output = input_features
            for gnn in self.gnns:
                gnn_output = gnn(gnn_output)
                gnn_outputs.append(gnn_output)

            # When measuring high-order feature weights, stop here
            if Gs.Debug._calculate_highorder_info:
                return

            output_feature = torch.cat(gnn_outputs, 1)

        # Test mode
        else:
            output_feature = self._saved_output_feature

        # Extract the user, query and item features separately
        output_user_feature = output_feature[user_indices]
        output_query_feature = output_feature[query_indices + self.dataset.query_start_index_in_graph]
        if item_indices is not None:
            output_item_feature = output_feature[item_indices + self.dataset.item_start_index_in_graph]
        else:
            output_item_feature = output_feature[self.dataset.item_start_index_in_graph:]

        # Make the prediction
        if self.prediction_layer_type == HemPredictionLayer:
            similarity: Tensor = self.prediction_layer(
                output_user_feature, output_query_feature, output_item_feature, item_indices
            )

        return similarity

    def save_features_for_test(self) -> None:
        '''In test mode (no gradients), cache the outputs of all GNN layers to speed up testing.'''
        input_features = torch.cat(self.embeddings(None, None, None))
        gnn_outputs = [input_features]
        gnn_output = input_features
        for gnn in self.gnns:
            gnn_output = gnn(gnn_output)
            gnn_outputs.append(gnn_output)

        self._saved_output_feature = torch.cat(gnn_outputs, 1)

    def clear_saved_feature(self) -> None:
        self._saved_output_feature = None
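# A minimal sketch (not part of the original project) of how the cached-feature path above
# might be driven: `model` is a RawGnn instance and `eval_batches` is a hypothetical iterable
# of (user_indices, query_indices) tensors.
import torch

def evaluate(model, eval_batches):
    """Score eval batches with the GNN outputs computed once, instead of once per batch."""
    model.eval()
    with torch.no_grad():
        model.save_features_for_test()            # run embeddings + all GNN layers a single time
        for user_idx, query_idx in eval_batches:  # item_indices=None -> score against all items
            scores = model(user_idx, query_idx)
            # ... rank items by `scores` and accumulate metrics here ...
    model.clear_saved_feature()                   # training mode recomputes features per forward pass
    model.train()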
python
import matplotlib.pyplot as plt
import numpy as np


# Define a main() function that prints statistics about the population data.
def main():
    data = np.loadtxt('data/populations.txt')
    year, hares, lynxes, carrots = data.T  # trick: columns to variables

    plt.axes([0.1, 0.1, 0.5, 0.8])
    plt.plot(year, hares, year, lynxes, year, carrots)
    plt.legend(('Hare', 'Lynx', 'Carrot'), loc=(1.05, 0.5))
    plt.show()

    # Computing the mean and std for each species (column) separately is slower overall:
    # each per-column mean takes ~3.06 µs, while a single call over the whole array
    # with axis=0 takes ~4.68 µs.
    populations = data[:, 1:]
    print("Means by species: {}".format(populations.mean(axis=0)))
    print("Standard deviation by species: {}".format(populations.std(axis=0)))

    # calculate the year in which each species had its largest population
    max_populations = np.argmax(populations, axis=0)
    print("Max populations in years: {}".format(year[max_populations]))

    # calculate which species has the largest population in each year
    max_species_idx = np.argmax(populations, axis=1)
    max_species = np.array(['H', 'L', 'C'])[max_species_idx]
    print("Max species: {}".format(tuple(zip(year, max_species))))

    # calculate the years when any of the populations is above 50000
    above_mask = np.any(np.greater(populations, 50000), axis=1)
    print("Years any population above 50000: {}".format(year[above_mask]))

    # find the two years with the lowest population for each species
    sorted_indices = populations.argsort(axis=0)
    years_sorted = year[sorted_indices]
    print("Years with the two lowest populations per species: {}".format(years_sorted[:2, :]))

    # compare (plot) the change in hare population and the number of lynxes
    hare_gradients = np.gradient(hares)
    plt.axes([0.1, 0.1, 0.7, 0.8])
    plt.plot(year, hare_gradients, year, lynxes)
    plt.legend(('Hare', 'Lynx'), loc=(1.05, 0.5))
    plt.show()

    # calculate the correlation
    print("Hares and lynxes correlation: {}".format(np.corrcoef(hare_gradients, lynxes)[0, 1]))


# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
    main()
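# A quick check of the timing claim in the comments above (a sketch; the exact microsecond
# figures depend on the machine, and the random array here merely stands in for the real data).
import timeit
import numpy as np

populations = np.random.randint(0, 80000, size=(21, 3)).astype(float)

# Three separate per-column reductions ...
t_separate = timeit.timeit(lambda: [populations[:, i].mean() for i in range(3)], number=10000)
# ... versus one vectorised reduction along axis=0.
t_axis0 = timeit.timeit(lambda: populations.mean(axis=0), number=10000)
print("separate: {:.4f}s, axis=0: {:.4f}s (10000 repetitions)".format(t_separate, t_axis0))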
python
from ..datapack import DataPack from ..logging import logging from .data_utils import make_coord_array import numpy as np import os import astropy.time as at def make_example_datapack(Nd,Nf,Nt,pols=None, time_corr=50.,dir_corr=0.5*np.pi/180.,tec_scale=0.02,tec_noise=1e-3,name='test.hdf5',clobber=False): logging.info("=== Creating example datapack ===") name = os.path.abspath(name) if os.path.isfile(name) and clobber: os.unlink(name) datapack = DataPack(name,readonly=False) with datapack: datapack.add_antennas() datapack.add_sources(np.random.normal(np.pi/4.,np.pi/180.*2.5,size=[Nd,2])) _, directions = datapack.sources _, antennas = datapack.antennas ref_dist = np.linalg.norm(antennas - antennas[0:1,:],axis=1)[None,None,:,None]#1,1,Na,1 times = at.Time(np.linspace(0,Nt*8,Nt)[:,None],format='gps').mjd*86400.#mjs freqs = np.linspace(120,160,Nf)*1e6 if pols is not None: use_pols = True assert isinstance(pols,(tuple,list)) else: use_pols = False pols = ['XX'] tec_conversion = -8.440e9/freqs #Nf X = make_coord_array(directions/dir_corr, times/time_corr)# Nd*Nt, 3 X2 = np.sum((X[:,:,None] - X.T[None,:,:])**2, axis=1)#N,N K = tec_scale**2 * np.exp(-0.5*X2) L = np.linalg.cholesky(K + 1e-6*np.eye(K.shape[0]))#N,N Z = np.random.normal(size=(K.shape[0],len(pols)))#N,npols tec = np.einsum("ab,bc->ac",L,Z)#N,npols tec = tec.reshape((Nd,Nt,len(pols))).transpose((2,0,1))#Npols,Nd,Nt tec = tec[:,:,None,:]*(0.2+ref_dist/np.max(ref_dist))#Npols,Nd,Na,Nt # print(tec) tec += tec_noise*np.random.normal(size=tec.shape) phase = tec[:,:,:,None,:]*tec_conversion[None,None,None,:,None]##Npols,Nd,Na,Nf,Nt # print(phase) phase = np.angle(np.exp(1j*phase)) if not use_pols: phase = phase[0,...] pols = None datapack.add_freq_dep_tab('phase',times=times[:,0],freqs=freqs,pols=pols,vals=phase) datapack.phase = phase return datapack
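# Illustrative call of the helper above (assuming this module is imported from inside its
# package so the relative imports resolve; the argument values are arbitrary).
datapack = make_example_datapack(
    Nd=5,             # number of simulated directions
    Nf=10,            # number of frequency channels
    Nt=20,            # number of time slots
    pols=['XX', 'YY'],
    tec_scale=0.02,
    name='example.hdf5',
    clobber=True,     # overwrite an existing file with the same name
)
# The returned DataPack is backed by example.hdf5 and already contains the simulated
# phase table written via add_freq_dep_tab above.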
python
#! /usr/bin/env python # If you ever need to modify example JSON data that is shown in the sampleData.js file, you can use this script to generate it. import sys import os from pathlib import Path sys.path.append(str(Path(os.path.dirname(__file__)).parent)) import json from cloudsplaining.shared.validation import check_authorization_details_schema from cloudsplaining.scan.authorization_details import AuthorizationDetails account_authorization_details_file = os.path.abspath(os.path.join( os.path.dirname(__file__), os.path.pardir, "examples", "files", "example.json", ) ) with open(account_authorization_details_file) as json_file: account_authorization_details_cfg = json.load(json_file) results_file = os.path.abspath(os.path.join( os.path.dirname(__file__), "example-iam-data.json", ) ) def generate_example_iam_data(): check_authorization_details_schema(account_authorization_details_cfg) authorization_details = AuthorizationDetails(account_authorization_details_cfg) results = authorization_details.results print(f"Top-level keys of results dictionary: {results.keys()}") # Write the results if os.path.exists(results_file): os.remove(results_file) with open(results_file, "w") as file: json.dump(results, file, indent=4) print(f"Wrote new example IAM data file to: {results_file}") # print(json.dumps(results, indent=4)) return results def replace_sample_data_js(results): sample_data_js_file = os.path.abspath(os.path.join( os.path.dirname(__file__), os.path.pardir, "cloudsplaining", "output", "src", "sampleData.js" )) content = f"""var sample_iam_data = {json.dumps(results, indent=4)} exports.sample_iam_data = sample_iam_data; """ if os.path.exists(sample_data_js_file): print(f"Removing existing file and replacing its contents") os.remove(sample_data_js_file) with open(sample_data_js_file, "w") as f: f.write(content) if __name__ == '__main__': results = generate_example_iam_data() print("Replacing sampleData.js content with the most recent content") replace_sample_data_js(results) print("Replaced sampleData.js content")
python
#!/usr/bin/env python3 # # id3v1.py # From the stagger project: http://code.google.com/p/stagger/ # # Copyright (c) 2009-2011 Karoly Lorentey <[email protected]> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ###################################################################### # This test automatically downloads the ID3v1 test suite by Martin Nilsson, # and runs stagger's id3v1 decoder on all 274 test cases, comparing # decoded field values to expected values listed in the test suite's # generation.log file. # # Nilsson's tests are rather strict -- stagger intentionally accepts broken # id3v1 tags, so it only complains on test case 3 (bad tag header). # # Test cases 7 and 8 (junk after string terminator) include NUL characters # in field values in the log file, which is likely a mistake. Their # description prescribes that the NULs and the data after them should # not show up for the user, so I override the test case's field values to check that. # # Test case 12 has leading spaces in the year field which are intentionally # stripped by stagger. # # In two test cases, Nilsson uses genre names that differ from most other # sources/implementations: # # Test case Genre # Genre in test Genre elsewhere # 151 136 Christian Christian Gangsta Rap # 155 140 Contemporary Contemporary Christian # # Stagger follows the de facto ID3v1 standard and resolves 136 and 140 to # the insane genres on the right. 
import unittest import os import os.path import re import string import urllib.request import tarfile import random import io import warnings from stagger.errors import * import stagger.id3v1 testsuite_url = r"http://id3.org/Developer%20Information?action=AttachFile&do=get&target=id3v1_test_suite.tar.gz" testsuite_file = os.path.join(os.path.dirname(__file__), "id3v1_test_suite.tar.gz") testsuite_log = "id3v1/generation.log" def download_testsuite(): try: with open(testsuite_file, "rb") as file: pass except IOError: urllib.request.urlretrieve(testsuite_url, testsuite_file) class ID3v1TestCase(unittest.TestCase): def parse_log(self): log = self.tar.extractfile(testsuite_log) try: tests = [] tag = {} for bline in log: line = bline.decode('iso-8859-1') m = re.match(r'^Test case ([0-9]+)$', line) if m is not None: tag["id"] = int(m.group(1)) continue m = re.match(r'^Generated test file "([a-zA-Z0-9_.]+)"$', line) if m is not None: tag["filename"] = m.group(1) continue m = re.match(r'^([a-z]+) *: "([^"]*)"$', line) if m is not None: tag[m.group(1)] = m.group(2) continue m = re.match(r'^version: (1\.[01])$', line) if m is not None: tag["version"] = m.group(1) continue m = re.match(r'^genre : ([0-9]+ \(.*\))$', line) if m is not None: tag["genre"] = m.group(1) continue m = re.match(r'^$', line) if m is not None and tag: tests.append(tag) tag = {} return tests finally: log.close() def setUp(self): download_testsuite() self.tar = tarfile.open(testsuite_file) def tearDown(self): self.tar.close() def testID3v1Conformance(self): for test in self.parse_log(): # Fix expected values in test cases 7-8 (junk after string terminator). if test["id"] in [7, 8]: for field in ["title", "artist", "album", "comment"]: test[field] = "12345" # Fix expected value in test case 12 (strip year field). if test["id"] == 12: test["year"] = test["year"].strip(string.whitespace) # Fix expected genre names in test cases 151 and 155 to de-facto standard values. if test["id"] == 151: test["genre"] = '136 (Christian Gangsta Rap)' if test["id"] == 155: test["genre"] = '140 (Contemporary Christian)' filename = 'id3v1/' + test["filename"] file = self.tar.extractfile(filename) try: # Test case 3 contains no valid ID3v1 tag. if test["id"] == 3: self.assertRaises(NoTagError, stagger.id3v1.Tag1.read, file) continue tag = stagger.id3v1.Tag1.read(file) for field in ["title", "artist", "album", "year", "comment", "track", "genre"]: if field in test: self.assertEqual(test[field], getattr(tag, field), "Value mismatch in field " + field + " of testcase " + str(test["id"]) + ": '" + test[field] + "' vs '" + getattr(tag, field) + "'") # Try encoding the tag and comparing binary data if test["id"] not in [7, 8, 12]: data = tag.encode() file.seek(-128, 2) data2 = file.read(128) self.assertEqual(data, data2, "Data mismatch in testcase " + str(test["id"])) finally: file.close() suite = unittest.TestLoader().loadTestsFromTestCase(ID3v1TestCase) if __name__ == "__main__": warnings.simplefilter("always", stagger.Warning) unittest.main(defaultTest="suite")
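# A minimal read example using the same API the test exercises above; "song.mp3" is a
# placeholder path, and Tag1.read expects a seekable binary file object.
import stagger.id3v1
from stagger.errors import NoTagError

with open("song.mp3", "rb") as fileobj:
    try:
        tag = stagger.id3v1.Tag1.read(fileobj)
        print(tag.title, tag.artist, tag.album, tag.year, tag.genre)
    except NoTagError:
        print("no ID3v1 tag in the last 128 bytes of this file")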
python
import numpy as np

a = np.arange(30).reshape((2, 3, 5))
a[a > 5]  # boolean mask indexing: returns a flat 1-D array of the 24 elements greater than 5
python
import os import sys from cseg import cut_file msr_test = 'corpus/msr_test.utf8' msr_test_gold = 'corpus/msr_test_gold.utf8' msr_out = ['output/msr_test_2_add1', 'output/msr_test_2_ad', 'output/msr_test_2_kn', 'output/msr_test_1', 'output/msr_test_2_add1_hmm', 'output/msr_test_2_ad_hmm', 'output/msr_test_2_kn_hmm', 'output/msr_test_1_hmm'] pku_test = 'corpus/pku_test.utf8' pku_test_gold = 'corpus/pku_test_gold.utf8' pku_out = ['output/pku_test_2_add1', 'output/pku_test_2_ad', 'output/pku_test_2_kn', 'output/pku_test_1', 'output/pku_test_2_add1_hmm', 'output/pku_test_2_ad_hmm', 'output/pku_test_2_kn_hmm', 'output/pku_test_1_hmm'] weibo_test = 'corpus/nlpcc2016-wordseg-dev.dat' weibo_test_gold = 'corpus/nlpcc2016-wordseg-dev_gold.dat' weibo_out = ['output/weibo_test_2_add1', 'output/weibo_test_2_ad', 'output/weibo_test_2_kn', 'output/weibo_test_1', 'output/weibo_test_2_add1_hmm', 'output/weibo_test_2_ad_hmm', 'output/weibo_test_2_kn_hmm', 'output/weibo_test_1_hmm'] tips = ["2-gram, +1平滑:", "2-gram, 绝对减值平滑:", "2-gram, Kneser-Ney平滑:", "1-gram:", "HMM: 2-gram, +1平滑:", "HMM: 2-gram, 绝对减值平滑:", "HMM: 2-gram, Kneser-Ney平滑:", "HMM: 1-gram:"] use_hmm = [False, False, False, False, True, True, True, True] use_2gram = [True, True, True, False, True, True, True, False] smooth = ['add1', 'abs', 'kneser_ney', '', 'add1', 'abs', 'kneser_ney', ''] tests = {'msr': msr_test, 'pku': pku_test, 'weibo': weibo_test } test_golds = {'msr': msr_test_gold, 'pku': pku_test_gold, 'weibo': weibo_test_gold } outs = {'msr': msr_out, 'pku': pku_out, 'weibo': weibo_out } if __name__ == '__main__': argv = sys.argv[1:] if len(argv) < 1: print('test.py msr|pku|weibo') sys.exit() else: if argv[0] not in ['msr', 'pku', 'weibo']: print('test.py msr|pku|weibo') sys.exit() print("开始切分... ") test = tests[argv[0]] test_gold = test_golds[argv[0]] out = outs[argv[0]] for i in range(len(out)): cut_file(test, out[i], use_hmm[i], use_2gram[i], smooth[i]) print("%s 测试结果: " % argv[0]) for i in range(len(out)): print(tips[i]) os.system("python eval.py %s %s %s" % (test_gold, out[i], out[i]+'_err'))
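# eval.py is not shown in this file; a minimal sketch of the span-based precision/recall/F1
# computation such a segmentation scorer usually performs (all names here are hypothetical).
def to_spans(words):
    """Turn a list of words into a set of (start, end) character spans."""
    spans, pos = set(), 0
    for w in words:
        spans.add((pos, pos + len(w)))
        pos += len(w)
    return spans

def prf(gold_line, pred_line):
    gold, pred = to_spans(gold_line.split()), to_spans(pred_line.split())
    correct = len(gold & pred)
    p = correct / len(pred) if pred else 0.0
    r = correct / len(gold) if gold else 0.0
    f1 = 2 * p * r / (p + r) if p + r else 0.0
    return p, r, f1

print(prf("我 爱 北京", "我 爱 北 京"))  # (0.5, 0.666..., 0.571...)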
python
""" created by ldolin """ """ 正则表达式 动机: 1.经常性文本处理 2.文本内容的快速搜索,定位,提取比较复杂 3.产生正则表达式 定义: 正则即是文本的高级匹配模式,提供搜索,替代,查找等功能, 本质是由一系列特殊符号和字符组成的字符串 特点: 1.方便检索和修改文本内容的操作 2.支持多种编程语言 3.灵活多样 目标: 1.能够看懂并编写基本简单的正则表达式 2.能够使用python操作正则表达式 设计原则: 1.正确性:能够正确匹配要求内容 2.唯一性:只要需要的 3.全面性:对目标特征考虑全面 """ # import re # # # def main(): # tel = input("请输入手机号:") # # ret = re.match(r"1[35678]\d{9}", tel) # # 由于手机号位数大于11位也能匹配成功,所以修改如下: # ret = re.match(r"^1[35678]\d{9}$", tel) # if ret: # print("匹配成功") # else: # print("匹配失败") # # # if __name__ == "__main__": # main() """ python操作正则表达式-re模块-处理正则表达式 1.导入模块 import re 元字符:在正则表达式中有一定特殊含义的符号 1.re.findall(pattern.string): 功能:使用正则表达式匹配字符串 参数: pattern:表达式字符串 string:目标字符串 返回值:返回匹配到的字符串 1.普通字符: 元字符:a b c & # 匹配规则:匹配字符本身 """ import re # s = 'abcdefg' # str1 = re.findall('abc', s) # print(str1) # s1 = '你们好' # str2 = re.findall('你好', s1) # print(str2) """ 2.或 元字符:| 匹配规则:匹配|两边任意一个正则表达式 注意:1.竖线的两边不要有空格 2.匹配过的不会再匹配 """ # s3 = '你好你们好' # str3 = re.findall('你好|你们好', s3) # print(str3) """ 3.匹配单一字符 元字符:. 匹配规则:匹配除了\n以外任意的字符 比如: a.c --> abc adc a@c a!c """ # s3 = 'abc adc a@c a!c' # str3 = re.findall('a.c', s3) # print(str3) """ 4.匹配开始位置 元字符:^ 匹配规则:匹配一个字符的开始位置 """ # s4 = 'hello python' # str5 = re.findall('^hello', s4) # print(str5) """ 5.匹配结尾位置 元字符:$ 匹配规则:匹配字符串的结尾位置 """ # s5 = 'regx_demo.py' # str6 = re.findall('py$', s5) # print(str6) """ 6.匹配重复字符 元字符:* 匹配规则:匹配前面的正则表达式,重复0到多次 """ # s5 = 'abababcdfghacbca' # str6 = re.findall('ab*', s5) # print(str6) """ 7.匹配重复 元字符:+ 匹配规则:匹配前面的正则表达式,重复1到多次 """ # s5 = 'abbbbbbbbbbbbdecbcccaaaaaabbbbbb' # str6 = re.findall('a+b+', s5) # print(str6) """ 8.匹配重复 元字符:? 匹配规则:匹配前面出现的元字符,重复0或1次 """ # s5 = 'abbbbbbbbbbbbdecbcccaaaaaabbbbbb' # str6 = re.findall('ab?', s5) # print(str6) """ 9.匹配重复 元字符:{n} 匹配规则:匹配前面的正则表达式n次 """ # s5 = 'abbbbbbbbbbbbdecbcccaaaaaabbbbbbab' # str6 = re.findall('ab{3}', s5) # b 重复3次 # print(str6) """ 10.匹配重复 元字符:{m,n} 匹配规则:匹配前面的正则表达式出现m到n次 """ # s5 = 'abbbbbbbbbbbbdecbcccaaaaaabbbbbbabbbb' # str6 = re.findall('ab{3,15}', s5) # b 重复3-15次 a 1次 # print(str6) """ 11.匹配字符集合 元字符:[字符集] 匹配规则:匹配括号内任意一个字符 """ # s5 = 'abcd 123456' # str6 = re.findall('[ab12]', s5) # print(str6) """ 12.匹配字符集合 元字符:[^字符集] 匹配规则:匹配除了字符集中任意一个字符 """ # s5 = 'abcd 1 2 3 4 5 6' # str6 = re.findall('[^ab 12]', s5) # print(str6) """ 13.匹配任意数字字符(非数字) 元字符:\d \D 匹配规则: \d : 匹配任意数字字符[0-9] \D : 匹配任意非数字字符[^0-9] """ # s5 = '13789721034' # str6 = re.findall('^1[3567]\d{9}$', s5) # print(str6) """ 14.匹配任意普通字符---数字字母下划线 元字符:\w \W 匹配规则: \w : 匹配任意一个普通字符 [_0-9a-zA-Z] \W : 匹配任意一个非普通字符 [^_0-9a-zA-Z] """ # s5 = 'hello 中国 @ $' # str6 = re.findall('\W+', s5) # print(str6) """ 15.匹配空与非空字符---空格 \r \t \n \0 元字符:\s \S 匹配规则: \s : 匹配任意空字符 \S : 匹配任意非空字符 """ # s5 = 'hello python' # str6 = re.findall('\s+', s5) # str7 = re.findall('\S+', s5) # print(str6) # print(str7) """ 元字符: 1.匹配单个字符:a . \d \D \w \W \s \S [] [^] 匹配重复:* + ? {n} {m,n} 匹配位置:^ $ 其他:| 如果匹配?咋匹配 可使用\进行转义,也可在表达式前加小写r,代表原生字符,不进行转义 """ # s5 = 'hello python?你\好' # str6 = re.findall('\?', s5) # str7 = re.findall(r'\\', s5) # print(str6) # print(str7) """ 贪婪与非贪婪: 贪婪模式: 正则表达式在匹配正确的情况下,尽可能多的向后匹配 如:* + ? {m,n} 非贪婪模式: 正则表达式在匹配正确的情况下,尽可能少的向后匹配 贪婪转非贪婪 如:*? +? ?? {m,n}? 
""" # s5 = 'abbbbcd 1 2 3 4 5 6' # str6 = re.findall('ab+?', s5) # print(str6) """ 2.re.split: 格式:re.split(pattern, string) 功能:通过正则表达式分割字符串 参数: pattern:正则表达式 string:目标字符串 返回分割后的字符串 """ # s5 = 'abbbbcd 1 2 3 4 5 6' # print(re.split('\s', s5)) # print(re.split('[ ]', s5)) # print(re.split(' ', s5)) """ 3.re.sub: 格式:re.sub(pattern, replaceStr, string, max) 替换正则匹配到的内容 参数: pattern:正则 replaceStr:要换入的新的内容 string:目标字符串 max:最多替换几处 """ # s5 = 'abbbbcd,1,2,3,4,5,6' # a = re.sub('[,]', '#', s5, 2) # b = re.sub('[,]', '?', a, 4) # print(b) """ 4.re.match: 匹配目标字符串开头 格式:re.match(pattern, string) 参数: pattern:正则 string:目标字符串 返回值:返回匹配到的内容 注意:取值需要通过group(1)来取值,取第一个,如果失败返回None,则不能返回match对象的属性 """ # s = """hello,python # hello,java # hello,c++ # """ # str1 = re.match('hello', s).group() # print(str1) """ 5.re.search(pattern, string) 功能:匹配目标字符串,只能到第一处 """ s = 'AabbbbAc' b = re.findall('[A-Z][a-z]*', s) print(b) s1 = 'a0,1,2,3,4,5,6,9,88s' b1 = re.findall('[1-98]{1,2}', s1) print(b1) s2 = 'a01234569sAss_sssssssssssssss' s3 = re.search('[a-zA-Z0-9_]{8,20}', s2).group() print(s3) s4 = 'python = 9999, c = 7890, c++ = 12345' s5 = re.findall('[0-9]{4,5}', s4) print(s5) s6 = '阅读次数为 9999' s7 = re.search(r'\d+', s6).group() print(s7) s8 = 'python = 997' s9 = re.sub('[0-9]{3}', '998', s8, 1) print(s9) s10 = 'info:xiaozhang 33 shandong' s11 = re.split('[: ]', s10) s14 = re.split('\W', s10) print(s11) print(s14) # search(),match(),sub() 返回字符串 # findall(),split() 返回列表 import random L = [] random.shuffle(L) # a = re.findall('<title>(.*)</title>', html)
python
from datetime import timezone, timedelta, datetime, date, time import databases import pytest import sqlalchemy import ormar from tests.settings import DATABASE_URL database = databases.Database(DATABASE_URL, force_rollback=True) metadata = sqlalchemy.MetaData() class DateFieldsModel(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) created_date: datetime = ormar.DateTime( default=datetime.now(tz=timezone(timedelta(hours=3))), timezone=True ) updated_date: datetime = ormar.DateTime( default=datetime.now(tz=timezone(timedelta(hours=3))), name="modification_date", timezone=True, ) class SampleModel(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) updated_at: datetime = ormar.DateTime() class TimeModel(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) elapsed: time = ormar.Time() class DateModel(ormar.Model): class Meta: database = database metadata = metadata id: int = ormar.Integer(primary_key=True) creation_date: date = ormar.Date() @pytest.fixture(autouse=True, scope="module") def create_test_database(): engine = sqlalchemy.create_engine(DATABASE_URL) metadata.drop_all(engine) metadata.create_all(engine) yield metadata.drop_all(engine) @pytest.mark.asyncio async def test_model_crud_with_timezone(): async with database: datemodel = await DateFieldsModel().save() assert datemodel.created_date is not None assert datemodel.updated_date is not None @pytest.mark.asyncio async def test_query_with_datetime_in_filter(): async with database: creation_dt = datetime(2021, 5, 18, 0, 0, 0, 0) sample = await SampleModel.objects.create(updated_at=creation_dt) current_dt = datetime(2021, 5, 19, 0, 0, 0, 0) outdated_samples = await SampleModel.objects.filter( updated_at__lt=current_dt ).all() assert outdated_samples[0] == sample @pytest.mark.asyncio async def test_query_with_date_in_filter(): async with database: sample = await TimeModel.objects.create(elapsed=time(0, 20, 20)) await TimeModel.objects.create(elapsed=time(0, 12, 0)) await TimeModel.objects.create(elapsed=time(0, 19, 55)) sample4 = await TimeModel.objects.create(elapsed=time(0, 21, 15)) threshold = time(0, 20, 0) samples = await TimeModel.objects.filter(TimeModel.elapsed >= threshold).all() assert len(samples) == 2 assert samples[0] == sample assert samples[1] == sample4 @pytest.mark.asyncio async def test_query_with_time_in_filter(): async with database: await DateModel.objects.create(creation_date=date(2021, 5, 18)) sample2 = await DateModel.objects.create(creation_date=date(2021, 5, 19)) sample3 = await DateModel.objects.create(creation_date=date(2021, 5, 20)) outdated_samples = await DateModel.objects.filter( creation_date__in=[date(2021, 5, 19), date(2021, 5, 20)] ).all() assert len(outdated_samples) == 2 assert outdated_samples[0] == sample2 assert outdated_samples[1] == sample3
python
import json import os from pathlib import Path import shutil from appdirs import user_data_dir from elpis.engines.common.objects.fsobject import FSObject from elpis.engines.common.utilities import hasher from elpis.engines.common.utilities.logger import Logger from elpis.engines.common.errors import InterfaceError from elpis.engines.common.objects.dataset import Dataset from elpis.engines.common.objects.pron_dict import PronDict class Interface(FSObject): _config_file = 'interface.json' def __init__(self, path: Path = None, use_existing=False): """ :param Boolean use_existing: If this flag is enabled and an interface already exists at the specified ``path``, then load the interface at the ``path``. When ``path`` is not specified or if the interface is not at the ``path``, then a new interface is created. """ path_was_none = False if path is None: path_was_none = True name = hasher.new() parent_path = Path(user_data_dir('elpis')).joinpath('interfaces') path = parent_path.joinpath(name) # super().__init__( # parent_path=Path(user_data_dir('elpis')), # dir_name=name, # pre_allocated_hash=name, # name=name # ) path = Path(path).absolute() # === Check if the existing interface is valid =================== # If any of the below nested if-statements fail, the existing (if # it exists) interface is not valid. In that case, wipe the # path directory and start a new interface directory. class InvalidInterfaceError(Exception): pass config_file_path = path.joinpath(Interface._config_file) try: if (use_existing is True and path.exists() and path.is_dir() and config_file_path.exists() and config_file_path.is_file()): # a valid interface exists. (this is a shallow check) pass else: raise InvalidInterfaceError # === Create a new interface object ============================== except InvalidInterfaceError: # Must wipe the interface and make a new one if path.exists(): # Tempted to use shutil.rmtree? It breaks if we have mounted /state from # local filesystem into the docker container. # Error is "Device or resource busy: '/state'" # We need to keep the dir and delete the contents... for root, subdirectories, files in os.walk(path): for file_ in files: os.unlink(os.path.join(root, file_)) for directory in subdirectories: shutil.rmtree(os.path.join(root, directory)) super().__init__( parent_path=path.parent, dir_name=path.name, pre_allocated_hash=(path.name if path_was_none else None), name=(path.name if path_was_none else None) ) self.config['loggers'] = [] self.config['datasets'] = {} self.config['pron_dicts'] = {} self.config['models'] = {} self.config['transcriptions'] = {} # === Use existing interface object ============================== else: # Create a new interface without wiping the directory. # Uses existing _config_file. 
super().__init__( parent_path=path.parent, dir_name=path.name ) # ensure object directories exist self.datasets_path = self.path.joinpath('datasets') self.datasets_path.mkdir(parents=True, exist_ok=True) self.pron_dicts_path = self.path.joinpath('pron_dicts') self.pron_dicts_path.mkdir(parents=True, exist_ok=True) self.models_path = self.path.joinpath('models') self.models_path.mkdir(parents=True, exist_ok=True) self.loggers_path = self.path.joinpath('loggers') self.loggers_path.mkdir(parents=True, exist_ok=True) self.transcriptions_path = self.path.joinpath('transcriptions') # config objects self.loggers = [] self.datasets = {} self.pron_dicts = {} self.models = {} self.transcriptions = {} # make a default logger self.new_logger(default=True) # set during runtime self.engine = None @classmethod def load(cls, base_path: Path): self = super().load(base_path) self.datasets_path = self.path.joinpath('datasets') self.datasets_path.mkdir(parents=True, exist_ok=True) self.pron_dicts_path = self.path.joinpath('pron_dicts') self.pron_dicts_path.mkdir(parents=True, exist_ok=True) self.models_path = self.path.joinpath('models') self.models_path.mkdir(parents=True, exist_ok=True) self.loggers_path = self.path.joinpath('loggers') self.loggers_path.mkdir(parents=True, exist_ok=True) self.transcriptions_path = self.path.joinpath('transcriptions') # config objects self.loggers = [] self.datasets = {} self.pron_dicts = {} self.models = {} self.transcriptions = {} return self def new_logger(self, default=False): logger = Logger(self.loggers_path) self.config['loggers'] += [logger.hash] if default: self.logger = logger return logger def new_dataset(self, dsname): existing_names = self.list_datasets() if dsname in self.config['datasets'].keys(): raise InterfaceError( f'Tried adding \'{dsname}\' which is already in {existing_names} with hash {self.config["datasets"][dsname]}.', human_message=f'Dataset with name "{dsname}" already exists' ) ds = Dataset(parent_path=self.datasets_path, name=dsname) datasets = self.config['datasets'] datasets[dsname] = ds.hash self.config['datasets'] = datasets return ds def get_dataset(self, dsname): if dsname not in self.list_datasets(): raise InterfaceError(f'Tried to load a dataset called "{dsname}" that does not exist') hash_dir = self.config['datasets'][dsname] return Dataset.load(self.datasets_path.joinpath(hash_dir)) def list_datasets(self): names = [name for name in self.config['datasets'].keys()] return names def new_pron_dict(self, pdname): existing_names = self.list_pron_dicts() if pdname in self.config['pron_dicts'].keys(): raise InterfaceError( f'Tried adding \'{pdname}\' which is already in {existing_names} with hash {self.config["pron_dicts"][pdname]}.', human_message=f'Pronunciation dictionary with name "{pdname}" already exists' ) pd = PronDict(parent_path=self.pron_dicts_path, name=pdname) pron_dicts = self.config['pron_dicts'] pron_dicts[pdname] = pd.hash self.config['pron_dicts'] = pron_dicts return pd def get_pron_dict(self, pdname): if pdname not in self.list_pron_dicts(): raise InterfaceError(f'Tried to load a pron dict called "{pdname}" that does not exist') hash_dir = self.config['pron_dicts'][pdname] pd = PronDict.load(self.pron_dicts_path.joinpath(hash_dir)) pd.dataset = self.get_dataset(pd.config['dataset_name']) return pd def list_pron_dicts(self): names = [name for name in self.config['pron_dicts'].keys()] return names def list_pron_dicts_verbose(self): pron_dicts = [] names = [name for name in self.config['pron_dicts'].keys()] for name in names: 
pd = self.get_pron_dict(name) pron_dicts.append({"name": name, "dataset_name": pd.dataset.name}) return pron_dicts def new_model(self, mname): if self.engine is None: raise RuntimeError("Engine must be set before model creation") existing_names = self.list_models() if mname in self.config['models'].keys(): raise InterfaceError( f'Tried adding \'{mname}\' which is already in {existing_names} with hash {self.config["models"][mname]}.', human_message=f'Model with name "{mname}" already exists' ) m = self.engine.model(parent_path=self.models_path, name=mname) models = self.config['models'] models[mname] = m.hash self.config['models'] = models return m def get_model(self, mname): if self.engine is None: raise RuntimeError("Engine must be set to get a model") if mname not in self.list_models(): raise InterfaceError(f'Tried to load a model called "{mname}" that does not exist') hash_dir = self.config['models'][mname] m = self.engine.model.load(self.models_path.joinpath(hash_dir)) m.dataset = self.get_dataset(m.config['dataset_name']) if m.config['pron_dict_name'] is not None: m.pron_dict = self.get_pron_dict(m.config['pron_dict_name']) return m def list_models(self): models = [] for hash_dir in os.listdir(f'{self.models_path}'): if not hash_dir.startswith('.'): with self.models_path.joinpath(hash_dir, "model.json").open() as fin: name = json.load(fin)['name'] models.append(name) return models def list_models_verbose(self): models = [] for hash_dir in os.listdir(f'{self.models_path}'): if not hash_dir.startswith('.'): config_file_path = self.models_path.joinpath(hash_dir, "model.json") if os.path.isfile(config_file_path): with config_file_path.open() as model_config_file: model = json.load(model_config_file) model_info = { 'name': model['name'], 'dataset_name': model['dataset_name'], 'engine_name': model['engine_name'], 'pron_dict_name': model['pron_dict_name'], 'status': model['status'], 'results': model['results'] } models.append(model_info) return models def new_transcription(self, tname): if self.engine is None: raise RuntimeError("Engine must be set prior to transcription") print("{}".format(self.engine)) t = self.engine.transcription(parent_path=self.transcriptions_path, name=tname) transcriptions = self.config['transcriptions'] transcriptions[tname] = t.hash self.config['transcriptions'] = transcriptions return t def get_transcription(self, tname): if tname not in self.list_transcriptions(): raise InterfaceError(f'Tried to load a transcription called "{tname}" that does not exist') hash_dir = self.config['transcriptions'][tname] t = self.engine.transcription.load(self.transcriptions_path.joinpath(hash_dir)) t.model = self.get_model(t.config['model_name']) return t def list_transcriptions(self): if self.engine is None: raise RuntimeError("Engine must be set to list transcriptions") names = [] if not Path(f'{self.transcriptions_path}').exists(): return names # no directory -> no items in list for hash_dir in os.listdir(f'{self.transcriptions_path}'): if not hash_dir.startswith('.'): with self.transcriptions_path.joinpath( hash_dir, self.engine.transcription._config_file).open() as fin: name = json.load(fin)['name'] names.append(name) return names def set_engine(self, engine): self.engine = engine
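# Sketch of the expected call sequence for the Interface above; the state directory path is a
# placeholder and the engine object would come from one of the elpis engines.
from pathlib import Path

interface = Interface(path=Path('./state'), use_existing=True)

ds = interface.new_dataset('recordings1')
# ... populate/process `ds` via the Dataset API before training ...

pd = interface.new_pron_dict('letter_to_sound1')

# interface.set_engine(engine)          # required before any model/transcription call
# m = interface.new_model('model1')
# t = interface.new_transcription('tx1')

print(interface.list_datasets())        # ['recordings1']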
python
import torch from torch import nn from torch.utils.data import Dataset, DataLoader, sampler import h5py import numpy as np import pandas as pd import matplotlib.pyplot as plt from itertools import cycle import seaborn as sns from matplotlib.colors import ListedColormap import matplotlib as mpl from matplotlib.font_manager import FontProperties class HDF5Dataset(Dataset): """ Args: h5data_path(str): path of h5 file train(boolean): whether use train data or not transform(optional) """ def __init__(self, h5data_path,train=True, transform=None): self.h5data = h5py.File(h5data_path,'r') self.transform = transform self.train = train self.train_x = np.array(self.h5data["train_in_seq"]) self.train_y = np.array(self.h5data["train_out"]) self.valid_x = np.array(self.h5data["valid_in_seq"]) self.valid_y = np.array(self.h5data["valid_out"]) def __getitem__(self, index): if self.train: x = self.train_x[index,...] y = self.train_y[index,...] else: x = self.valid_x[index,...] y = self.valid_y[index,...] if self.transform: x = self.transform(x) else: x = torch.from_numpy(x) y = torch.from_numpy(y) # convert datatype x = x.type('torch.cuda.FloatTensor') y = y.type('torch.cuda.FloatTensor') return (x, y) def __len__(self): if self.train: return self.train_x.shape[0] else: return self.valid_x.shape[0] # test purpose # def __len__(self): # return 200 class RMdata(Dataset): def __init__(self, data_path, use_embedding,length,mode): """ Inputs: mode: train, valid, test """ self.data_path = data_path self.mode = mode self.use_embedding = use_embedding self.radius = length // 2 if self.mode == 'train': self.train_x = pd.read_hdf(self.data_path,'train_in') self.train_y = pd.read_hdf(self.data_path,'train_out').to_numpy() self.valid_x = pd.read_hdf(self.data_path,'valid_in') self.valid_y = pd.read_hdf(self.data_path,'valid_out').to_numpy() if self.use_embedding: print('Using pre-trained embeddings!'+'-' * 60) self.train_x = pd.read_hdf(self.data_path,'train_in_3_mers') self.valid_x = pd.read_hdf(self.data_path,'valid_in_3_mers') total_length = self.train_x.shape[1] middle_index = total_length // 2 self.train_x = self.train_x.iloc[:,middle_index-self.radius+1:middle_index+self.radius-1+1].to_numpy() # print(self.train_x.shape[1]) self.valid_x = self.valid_x.iloc[:,middle_index-self.radius+1:middle_index+self.radius-1+1].to_numpy() else: # cropping the sequence one_hot encoding total_length = self.train_x.shape[1] middle_index = total_length // 2 # print(middle_index) self.train_x = self.train_x.iloc[:,2000-self.radius*4:2004+self.radius*4].to_numpy() self.valid_x = self.valid_x.iloc[:,2000-self.radius*4:2004+self.radius*4].to_numpy() else: self.valid_x = pd.read_hdf(self.data_path,'valid_in') self.valid_y = pd.read_hdf(self.data_path,'valid_out').to_numpy() self.test_x = pd.read_hdf(self.data_path,'test_in') self.test_y = pd.read_hdf(self.data_path,'test_out').to_numpy() if self.use_embedding: self.valid_x = pd.read_hdf(self.data_path,'valid_in_3_mers') self.test_x = pd.read_hdf(self.data_path,'test_in_3_mers') # cropping the sequence accroding to its length total_length = self.valid_x.shape[1] middle_index = total_length // 2 # print(self.train_x.shape[1]) self.valid_x = self.valid_x.iloc[:,middle_index-self.radius+1:middle_index+self.radius-1+1].to_numpy() self.test_x = self.test_x.iloc[:,middle_index-self.radius+1:middle_index+self.radius-1+1].to_numpy() else: # cropping the sequence one_hot encoding total_length = self.valid_x.shape[1] middle_index = total_length // 2 # print(middle_index) self.valid_x = 
self.valid_x.iloc[:,2000-self.radius*4:2004+self.radius*4].to_numpy() # print(self.train_x.shape[1]) self.test_x = self.test_x.iloc[:,2000-self.radius*4:2004+self.radius*4].to_numpy() self.class_name = list(pd.read_hdf(self.data_path,'test_out').columns) def __getitem__(self,index): if self.mode == 'train': x = self.train_x[index,...] y = self.train_y[index,...] elif self.mode == 'valid': x = self.valid_x[index,...] y = self.valid_y[index,...] elif self.mode == 'test': x = self.test_x[index,...] y = self.test_y[index,...] x = torch.from_numpy(x) y = torch.from_numpy(y) x = x.type('torch.cuda.FloatTensor') y = y.type('torch.cuda.FloatTensor') return (x, y) def __len__(self): if self.mode == 'train': return self.train_x.shape[0] elif self.mode == 'valid': return self.valid_x.shape[0] elif self.mode == 'test': return self.test_x.shape[0] def load_RM_data(path,batch_size,length,use_embedding,balanced_sampler=False): train = RMdata(path,use_embedding=use_embedding, length= length,mode='train') valid = RMdata(path,use_embedding=use_embedding, length=length, mode='valid') if not balanced_sampler: train_loader = DataLoader(dataset=train,batch_size=batch_size,shuffle=True) else: weights_train = make_weights_for_balanced_classes(train) # weights_valid = make_weights_for_balanced_classes(valid) weights_train = torch.cuda.DoubleTensor(weights_train) # weights_valid = torch.cuda.DoubleTensor(weights_valid) sampler_train = sampler.WeightedRandomSampler(weights_train, len(weights_train)) # sampler_valid = sampler.WeightedRandomSampler(weights_valid, len(weights_valid)) train_loader = DataLoader(dataset=train,batch_size=batch_size,sampler=sampler_train) # valid_loader = DataLoader(dataset=valid,batch_size=batch_size,sampler=sampler_valid) valid_loader = DataLoader(dataset=valid,batch_size=batch_size,shuffle=True) return train_loader, valid_loader def make_weights_for_balanced_classes(dataset): X, y = dataset[:] num_examples = len(y) nclasses = len(y[1]) + 1 count = np.zeros(nclasses) y = y.cpu().numpy() for i in range(num_examples): count[np.concatenate([np.squeeze(y[i,:]),np.array([0])])==1] += 1 # negative class weight count[-1] = num_examples - np.sum([count[i] for i in range(nclasses)]) weight_per_class = np.zeros(nclasses) N = float(sum(count)) for i in range(nclasses): weight_per_class[i] = N/float(count[i]) weight = [0] * num_examples for i in range(num_examples): if not list(np.squeeze(y[i,:])) == list(np.zeros(len(y[1]))): weight[i] = np.mean(weight_per_class[np.concatenate([np.squeeze(y[i,:]),np.array([0])])==1]) else: # negative cases weight[i] = weight_per_class[-1] return weight def cal_precision(y_true, y_pred,eps=1e-7): true_positives = torch.sum(torch.round(torch.clamp(y_true * y_pred, 0, 1))) predicted_positives = torch.sum(torch.round(torch.clamp(y_pred, 0, 1))) precision = true_positives / (predicted_positives + eps) return precision def cal_recall(y_true, y_pred,eps=1e-7): true_positives = torch.sum(torch.round(torch.clamp(y_true * y_pred, 0, 1))) possible_positives = torch.sum(torch.round(torch.clamp(y_true, 0, 1))) recall = true_positives / (possible_positives + eps) return recall def cal_accuary(y_true, y_pred): acc = torch.mean((torch.round(torch.clamp(y_pred,0,1))==y_true).type('torch.cuda.FloatTensor')) return acc def precision_multi(y_true,y_pred): """ Input: y_true, y_pred with shape: [n_samples, n_classes] Output: example-based precision """ n_samples = y_true.shape[0] result = 0 for i in range(n_samples): if not (y_pred[i] == 0).all(): true_posi = y_true[i] * y_pred[i] 
n_true_posi = np.sum(true_posi) n_pred_posi = np.sum(y_pred[i]) result += n_true_posi / n_pred_posi return result / n_samples def recall_multi(y_true,y_pred): """ Input: y_true, y_pred with shape: [n_samples, n_classes] Output: example-based recall """ n_samples = y_true.shape[0] result = 0 for i in range(n_samples): if not (y_true[i] == 0).all(): true_posi = y_true[i] * y_pred[i] n_true_posi = np.sum(true_posi) n_ground_true = np.sum(y_true[i]) result += n_true_posi / n_ground_true return result / n_samples def f1_multi(y_true,y_pred): """ Input: y_true, y_pred with shape: [n_samples, n_classes] Output: example-based recall """ n_samples = y_true.shape[0] result = 0 for i in range(n_samples): if not ((y_true[i] == 0).all() and (y_pred[i] == 0).all()): true_posi = y_true[i] * y_pred[i] n_true_posi = np.sum(true_posi) n_ground_true = np.sum(y_true[i]) n_pred_posi = np.sum(y_pred[i]) f1 = 2*(n_true_posi) / (n_ground_true+n_pred_posi) result += f1 return result / n_samples def hamming_loss(y_true,y_pred): """ Input: y_true, y_pred with shape: [n_samples, n_classes] Output: hamming loss """ n_samples = y_true.shape[0] n_classes = y_true.shape[1] loss = 0 for i in range(n_samples): xor = np.sum((y_true[i] + y_pred[i]) % 2) loss += xor / n_classes return loss / n_samples def cal_metrics(model_out,label,plot=False,class_names=None,plot_name=None): """ Inputs: class_name: for plot purpose """ from sklearn.metrics import recall_score,precision_score,roc_auc_score,roc_curve, average_precision_score from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_recall_curve num_task = len(model_out) # threshold_list = [0.5 for i in range(num_task)] # thresholds standard threshold_list = [0.002887,0.004897,0.001442,0.010347,0.036834,0.028677, 0.009135,0.095019,0.001394,0.007883,0.113931,0.125591] # thresholds for multirm # # threshold_list = [0.004554,0.014769,0.005969,0.043316,0.076438,0.091157, # 0.121174,0.175164,0.006239,0.001260,0.051128,0.255274] # thresholds for hmm # threshold_list = [0.007389,0.050478,0.046165,0.068021,0.088967,0.150652, # thresholds for CNN+Lstm # 0.080001,0.317348,0.003866,0.013430,0.090117,0.256765] metrics = {'recall':[],'precision':[],'accuracy':[],'auc':[],'auc_2':[], 'sn':[],'sp':[],'acc_2':[],'mcc':[], 'ap':[], 'ap_2':[]} # auc_2: auc across all samples # auc: auc across one single class metrics_avg = {'recall':0, 'precision':0,'accuracy':0,'auc':0,'auc_2':0} # Compute ROC curve and ROC area for each class fpr,tpr = dict(), dict() fpr_2,tpr_2 = dict(), dict() precisions, recalls = dict(), dict() precisions_m, recalls_m = dict(), dict() label = label.cpu().numpy() Y_pred = np.zeros(label.shape) for i in range(num_task): y_true = label[:,i] y_pred = torch.clamp(model_out[i].cpu().detach(),0,1).numpy() y_pred = np.array([0 if instance < threshold_list[i] else 1 for instance in list(y_pred)]) Y_pred[:,i] = y_pred y_score = model_out[i].cpu().detach().numpy() # if i==0: # print(y_pred[y_true==1]) # recall = recall_score(y_true,y_pred,zero_division=1) # precision = precision_score(y_true,y_pred,zero_division=1) acc = np.mean(y_true==y_pred) # handle one_class problem # test binary auc auc = roc_auc_score(y_true[i*100:(i+1)*100],y_score[i*100:(i+1)*100]) # test binary ap ap = average_precision_score(y_true[i*100:(i+1)*100],y_score[i*100:(i+1)*100]) # test multiclass auc auc_2 = roc_auc_score(y_true,y_score) # test multi ap ap_2 = average_precision_score(y_true,y_score) fpr[i], tpr[i], thresholds = roc_curve(y_true[i*100:(i+1)*100], 
y_score[i*100:(i+1)*100]) fpr_2[i], tpr_2[i], thresholds_2 = roc_curve(y_true, y_score) precisions[i], recalls[i], _ = precision_recall_curve(y_true[i*100:(i+1)*100], y_score[i*100:(i+1)*100]) precisions_m[i], recalls_m[i], _ = precision_recall_curve(y_true, y_score) gmeans = np.sqrt(tpr_2[i] * (1-fpr_2[i])) # locate the index of the largest g-mean ix = np.argmax(gmeans) print('Best Threshold=%f, G-Mean=%.3f' % (thresholds_2[ix], gmeans[ix])) best_threshold = thresholds_2[ix] y_pred_new = np.array([0 if instance < best_threshold else 1 for instance in list(y_score)]) # binary based confusion_matrix # tn, fp, fn, tp = confusion_matrix(y_true[i*100:(i+1)*100], y_pred_new[i*100:(i+1)*100]).ravel() # multiclass based confusion_matrix tn, fp, fn, tp = confusion_matrix(y_true, y_pred_new).ravel() pp = tp+fn pn = tn+fp sensitivity = tp / pp specificity = tn / pn recall = sensitivity precision = tp / (tp + fp) acc_2 = (tp+tn) / (pp+pn) # mcc = acc_2 / np.sqrt((1+(fp-fn)/pp)*(1+(fn-fp)/pn)) mcc = ((tp*tn)-(fp*fn))/np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)) # update dictionary metrics['auc_2'].append(auc_2) metrics['sn'].append(sensitivity) metrics['sp'].append(specificity) metrics['acc_2'].append(acc_2) metrics['mcc'].append(mcc) metrics['ap'].append(ap) metrics['ap_2'].append(ap_2) metrics['recall'].append(recall) metrics['precision'].append(precision) metrics['accuracy'].append(acc) metrics['auc'].append(auc) metrics_avg['recall'] += recall metrics_avg['precision'] += precision metrics_avg['accuracy'] += acc # metrics_avg['auc'] += auc precision_multi_ = precision_multi(label,Y_pred) recall_multi_ = recall_multi(label,Y_pred) f1_multi_ = f1_multi(label,Y_pred) hamming_loss_ = hamming_loss(label,Y_pred) print("precision multi: %f"%(precision_multi_)) print("recall multi: %f"%(recall_multi_)) print("f1 multi: %f"%(f1_multi_)) print("hamming loss: %f"%(hamming_loss_)) metrics_avg['recall'] /= num_task metrics_avg['precision'] /= num_task metrics_avg['accuracy'] /= num_task # metrics_avg['auc'] /= num_task print(plot) if plot: # define colors colors = [(39,64,139),(0,128,128),(31, 119, 180), (44, 160, 44), (152, 223, 138), (174, 199, 232), (255, 127, 14), (255, 187, 120),(214, 39, 40), (255, 152, 150), (148, 103, 189), (197, 176, 213)] for i in range(len(colors)): r, g, b = colors[i] colors[i] = (r / 255., g / 255., b / 255.) 
# modifying parameters for plot from math import sqrt golden_mean = (sqrt(5)-1.0)/2.0 #used for size= fig_width = 6 # fig width in inches fig_height = fig_width*golden_mean # fig height in inches mpl.rcParams['axes.labelsize'] = 10 mpl.rcParams['axes.titlesize'] = 10 mpl.rcParams['font.size'] = 10 mpl.rcParams['legend.fontsize'] = 10 mpl.rcParams['xtick.labelsize'] = 8 mpl.rcParams['ytick.labelsize'] = 8 mpl.rcParams['text.usetex'] = False mpl.rcParams['font.family'] = 'serif' # params = {'axes.labelsize': 10, # fontsize for x and y labels (was 10) # 'axes.titlesize': 10, # 'font.size': 10, # 'legend.fontsize': 10, # 'xtick.labelsize': 8, # 'ytick.labelsize': 8, # 'text.usetex': False, # 'font.family': 'serif' # } lw = 2 #fig, axes = plt.subplots(nrows=1,ncols=2,figsize=(13,4),gridspec_kw={'width_ratios': [1, 2.2]}) # roc curve fig, axes = plt.subplots(nrows=1,ncols=2,figsize=(fig_width*2+0.7,fig_height+0.1)) # PR curve fig_2, axes_2 = plt.subplots(nrows=1,ncols=2,figsize=(fig_width*2+0.7,fig_height+0.1)) # matplotlib.rcParams.update(params) # set color palettes for i, class_name in zip(range(num_task), class_names): axes[0].plot(fpr[i], tpr[i], color=colors[i],lw=lw) axes[0].plot([0, 1], [0, 1], 'k--', lw=lw) axes[0].set_xlim([0.0, 1.0]) axes[0].set_ylim([0.0, 1.0]) axes[0].tick_params(axis='x',which='both',top=False) axes[0].tick_params(axis='y',which='both',right=False) axes[0].set_aspect('equal', adjustable='box') axes[0].set_xlabel('False Positive Rate') axes[0].set_ylabel('True Positive Rate') axes[0].set_title('ROC curves (binary)') axes_2[0].plot(recalls[i], precisions[i], color=colors[i],lw=lw) axes_2[0].plot([0, 1], [0.5, 0.5], 'k--', lw=lw) axes_2[0].set_xlim([0.0, 1.0]) axes_2[0].set_ylim([0.45, 1.0]) axes_2[0].tick_params(axis='x',which='both',top=False) axes_2[0].tick_params(axis='y',which='both',right=False) xmin, xmax = axes_2[0].get_xlim() ymin, ymax = axes_2[0].get_ylim() axes_2[0].set_aspect(abs((xmax-xmin)/(ymax-ymin)), adjustable='box') axes_2[0].set_xlabel('Recall') axes_2[0].set_ylabel('Precision') axes_2[0].set_title('PR curves (binary)') if class_name == 'Atol': class_name = 'A-to-I' elif class_name == 'hPsi': class_name = 'Psi' elif class_name[-1] == 'm': class_name = class_name[1:] else: # tmp = class_name[2:] # num = class_name[1] # class_name = 'm^{%s}%s'%(num,tmp) class_name = class_name[1:] axes[1].plot(fpr_2[i], tpr_2[i], color=colors[i],lw=lw, label ='%s ($AUC_{b}$ = %.2f, $AUC_{m}$ = %.2f)'%(class_name, metrics['auc'][i],metrics['auc_2'][i])) axes[1].set_xlim([0.0, 1.0]) axes[1].set_ylim([0.0, 1.0]) axes[1].tick_params(axis='x',which='both',top=False) axes[1].tick_params(axis='y',which='both',right=False,left=False,labelleft=False) axes[1].set_aspect('equal', adjustable='box') axes[1].set_xlabel('False Positive Rate') axes[1].set_ylabel('True Positive Rate') axes[1].set_title('ROC curves (multiple)') axes_2[1].plot(recalls_m[i], precisions_m[i], color=colors[i],lw=lw, label ='%s ($AP_{b}$ = %.2f, $AP_{m}$ = %.2f)'%(class_name, metrics['ap'][i],metrics['ap_2'][i])) axes_2[1].set_xlim([0.0, 1.0]) axes_2[1].set_ylim([0.0, 1.0]) axes_2[1].tick_params(axis='x',which='both',top=False) axes_2[1].tick_params(axis='y',which='both',right=False,left=False,labelleft=True) xmin, xmax = axes_2[1].get_xlim() ymin, ymax = axes_2[1].get_ylim() axes_2[1].set_aspect(abs((xmax-xmin)/(ymax-ymin)), adjustable='box') axes_2[1].set_xlabel('Recall') axes_2[1].set_ylabel('Precision') axes_2[1].set_title('PR curves (multiple)') # Shrink current axis by 20% # box = 
axes[1].get_position() # print(box) # axes[1].set_position([box.x0, box.y0, box.x1-box.width * 0.5, box.height]) # print(axes[1].get_position()) axes[1].plot([0, 1], [0, 1], 'k--', lw=lw, label='no skill') axes_2[1].plot([0, 1], [0.04, 0.04], 'k--', lw=lw, label = 'no skill') # Put a legend to the right of the current axis axes[1].legend(loc='upper left', bbox_to_anchor=(1.05, 1),borderaxespad=0.,frameon=False) axes_2[1].legend(loc='upper left', bbox_to_anchor=(1.05, 1),borderaxespad=0.,frameon=False) fig.tight_layout() fig_2.tight_layout() fig.savefig('../Figs/roc_curve_%s.pdf'%(plot_name)) fig_2.savefig('../Figs/precision_recall_curve_%s.pdf'%(plot_name)) print('Successfully save figure to ../Figs/roc_curve_%s.pdf'%(plot_name)) print('Successfully save figure to ../Figs/precision_recall_curve_%s.pdf'%(plot_name)) return metrics,metrics_avg def cal_metrics_sampling(model_out,label): from sklearn.metrics import recall_score,precision_score,roc_auc_score,roc_curve, average_precision_score from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_recall_curve label = label.cpu().numpy() Y_pred = np.zeros(label.shape) num_task = len(model_out) metrics = {i : {'acc':[],'auc':[], 'ap':[], 'fprs':[], 'tprs':[],'precisions':[],'recalls':[]} for i in range(num_task)} total_num = 304661 posi_num = np.array([1591, 1878,1471,2253,16346,3207,3696,65178,2447,1036,3137,52618]) neg_num = total_num - posi_num ratio = np.round(neg_num / posi_num).astype(int) iterations = 2000 for i in range(num_task): y_true_pos = label[label[:,i]==1,i] y_true_neg = label[label[:,i]!=1,i] y_pred = model_out[i].cpu().detach().numpy() y_pred_pos = y_pred[label[:,i]==1] y_pred_neg = y_pred[label[:,i]!=1] for iter in range(iterations): pos_num = len(label[:,i]==1) pos_idx = np.random.randint(0,len(y_true_pos),pos_num) neg_idx = np.random.randint(0, len(y_true_neg),pos_num*ratio[i]) y_true = np.concatenate([y_true_pos[pos_idx], y_true_neg[neg_idx]]) y_score = np.concatenate([y_pred_pos[pos_idx], y_pred_neg[neg_idx]]) y_pred_label = y_score > 0.5 acc = np.mean(y_true==y_pred_label) auc = roc_auc_score(y_true,y_score) ap = average_precision_score(y_true,y_score) fprs, tprs, thresholds = roc_curve(y_true, y_score) precisions, recalls, _ = precision_recall_curve(y_true, y_score) metrics[i]['acc'].append(acc) metrics[i]['auc'].append(auc) metrics[i]['ap'].append(ap) metrics[i]['fprs'] = fprs.tolist() metrics[i]['tprs'] = tprs.tolist() metrics[i]['precisions'] = precisions.tolist() metrics[i]['recalls'] = recalls.tolist() metrics_avg = dict() metrics_avg['acc'] = [np.mean(metrics[i]['acc']) for i in range(num_task)] metrics_avg['auc'] = [np.mean(metrics[i]['auc']) for i in range(num_task)] metrics_avg['ap'] = [np.mean(metrics[i]['ap']) for i in range(num_task)] return metrics, metrics_avg
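# The example-based multi-label metrics defined above can be sanity-checked on a tiny array
# (assumes precision_multi, recall_multi, f1_multi and hamming_loss are in scope).
import numpy as np

y_true = np.array([[1, 0, 1],
                   [0, 1, 0]])
y_pred = np.array([[1, 0, 0],
                   [0, 1, 1]])

print(precision_multi(y_true, y_pred))  # (1/1 + 1/2) / 2 = 0.75
print(recall_multi(y_true, y_pred))     # (1/2 + 1/1) / 2 = 0.75
print(f1_multi(y_true, y_pred))         # (2/3 + 2/3) / 2 ≈ 0.667
print(hamming_loss(y_true, y_pred))     # (1/3 + 1/3) / 2 ≈ 0.333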
python
from django import forms from django.contrib.auth import login, authenticate from django.contrib.auth.forms import UserCreationForm from django.contrib.auth.models import User from .models import Ore class CreateNewOreupdate(forms.ModelForm): class Meta: model = Ore fields = ('oret','contrattiok','contrattiko','nomecognome','statuse',) widgets = { 'data' : forms.DateInput( attrs={ 'class': 'md-form' } ), 'user' : forms.Select( attrs={ 'class': 'custom-select', } ), 'contrattiok' : forms.NumberInput( attrs={ 'class': 'form-control', } ), 'oret' : forms.NumberInput( attrs={ 'class': 'form-control' } ), 'nomecognome' : forms.TextInput( attrs={ 'class': 'form-control' } ), } class CreateNewOre(forms.ModelForm): class Meta: model = Ore exclude = ('statuse','contrattiko', ) fields = ('user','oret','contrattiok','contrattiko','nomecognome','statuse',) widgets = { 'data' : forms.DateInput( attrs={ 'class': 'md-form' } ), 'user' : forms.Select( attrs={ 'class': 'custom-select', } ), 'contrattiok' : forms.NumberInput( attrs={ 'class': 'form-control' } ), 'oret' : forms.NumberInput( attrs={ 'class': 'form-control' } ), 'nomecognome' : forms.TextInput( attrs={ 'class': 'form-control' } ), }
python
import pytest import pandas as pd from hypper.data import ( read_banking, read_breast_cancer_data, read_churn, read_congressional_voting_records, read_german_data, read_hr, read_phishing, read_spect_heart, ) @pytest.mark.parametrize( "read_fun", [ read_banking, read_breast_cancer_data, read_churn, read_congressional_voting_records, read_german_data, read_hr, read_phishing, read_spect_heart, ], ) def test_reading_data_types(read_fun): df, label, cat_cols = read_fun() assert type(df) == pd.DataFrame assert type(label) == str assert type(cat_cols) == list
python
"""Base camera module This file contains the class definition for the Camera class on which all subsequent cameras should be based on. """ from __future__ import print_function, division import numpy.random as npr from .log import logger # from .ringbuffer import RingBuffer from .camprops import CameraProperties # from .exceptions import CameraError class CameraError(Exception): """Generic camera error""" class Camera(object): """Base class for all cameras. New camera implementations should subclass this and override all methods necessary for use. Attributes ---------- clib : WinDLL or CDLL A ctypes library reference roi : list The defined region of interest in the form [x1, y1, x2, y2]. t_ms : float Exposure time in ms. gain : int or float Gain setting. The type is dependent on the camera used. shape : tuple Number of pixels (x, y) bins : int Bin size to use. crop : list Crop specifications. Should be of the form:: [horiz start, horiz end, vert start, vert end] with indeces starting from 1. shutter_open : bool For cameras that are equipped with an integrated shutter: is the shutter open? cooler_active : bool True if the cooler is on. temperature_set_point : int Temperature set point for the cooler if present. acq_mode : str Camera acquisition mode. trigger_mode : int Camera triggering mode. These are obviously defined differently depending on the particular camera's SDK. rbuffer : RingBuffer The RingBuffer object for autosaving of images. props : CameraProperties A CameraProperties object defining several generic settings of the camera as well as flags indicating if certain functionality is available. """ def __init__(self, **kwargs): """Initialize a camera. Additional keyword arguments may also be passed and checked for the initialize function to be defined by child classes. Keyword arguments ----------------- bins : int Binning to use. buffer_dir : str Directory to store the ring buffer file to. Default: '.'. log_level : int Logging level to use. Default: ``logging.INFO``. """ self.clib = None self.roi = [1, 1, 10, 10] self.t_ms = 100. self.gain = 0 self.shape = (512, 512) self.bins = 1 self.crop = (1, self.shape[0], 1, self.shape[1]) self.shutter_open = False self.cooler_active = False self.temperature_set_point = 0 self.acq_mode = "single" self.trigger_mode = 0 self.rbuffer = None self.props = CameraProperties() # Get kwargs and set defaults bins = kwargs.get('bins', 1) buffer_dir = kwargs.get('buffer_dir', '.') recording = kwargs.get('recording', True) # Check kwarg types are correct assert isinstance(bins, int) assert isinstance(buffer_dir, str) # Configure logging logger.info("Connecting to camera") # Initialize try: # self.rbuffer = RingBuffer( # directory=buffer_dir, recording=recording, roi=self.roi) raise ValueError except ValueError: # logger.warn('Error opening the ring buffer. This is expected with a remote camera server.') self.rbuffer = None x0 = npr.randint(self.shape[0]/4, self.shape[0]/2) y0 = npr.randint(self.shape[1]/4, self.shape[1]/2) self.sim_img_center = (x0, y0) self.initialize(**kwargs) self.get_camera_properties() def initialize(self, **kwargs): """Any extra initialization required should be placed in this function for child camera classes. """ def get_camera_properties(self): """Code for getting camera properties should go here.""" logger.warning( "Properties not being set. 
" + "Did you forget to override get_camera_properties?") def __enter__(self): return self def __exit__(self, type_, value, traceback): logger.info("Shutting down camera.") if self.rbuffer is not None: self.rbuffer.close() self.close() def close(self): """Close the camera safely. Anything necessary for doing so should be defined here. """ raise NotImplementedError def set_acquisition_mode(self, mode): """Set the image acquisition mode.""" raise NotImplementedError def get_num_available_images(self, mode): """Get num of available images.""" raise NotImplementedError def get_image(self): """Acquire the current image from the camera. """ img = self.acquire_image_data() return img def acquire_image_data(self): """Code for getting image data from the camera should be placed here. This must return a numpy array. """ raise NotImplementedError def get_images_as_buffer(self, first, last): """Acquire the current image from the camera and write it to the ring buffer. This function should *not* be overwritten by child classes. Instead, everything necessary to acquire an image from the camera should be added to the :meth:`acquire_image_data` method. """ buffer, size = self.acquire_images() return buffer, size def acquire_images(self, first, last): """Code for getting image data from the camera should be placed here. This must return a numpy array. """ raise NotImplementedError def get_trigger_mode(self): """Query the current trigger mode.""" raise NotImplementedError def set_trigger_mode(self, mode): """Setup trigger mode.""" raise NotImplementedError def start(self): """Code needed for getting the camera to begin triggering should be placed here. """ raise NotImplementedError def stop(self): """Code needed to stop accepting triggering should be placed here. """ raise NotImplementedError # Not all cameras have builtin shutters, so the next few functions # should have no actual effect in that case. Child classes should # override the set_shutter function to set the shutter state. def open_shutter(self): """Open the shutter.""" self.shutter_open = True logger.info('Opening shutter.') self.set_shutter('open') def close_shutter(self): """Close the shutter.""" self.shutter_open = False logger.info('Closing shutter.') self.set_shutter('closed') def set_shutter(self, state): """This will set the shutter to the given state ('open' or 'closed'). Since not all cameras have a built in shutter, this will simply do nothing if not overridden. """ logger.debug("set_shutter not overridden") def toggle_shutter(self, state): """Toggle the shutter state from open to closed and vice versa.""" if self.shutter_open: self.close_shutter() else: self.open_shutter() def get_exposure_time(self): """Query for the current exposure time. Default is to just return what is stored in the instantiation. """ return self.t_ms def set_exposure_time(self, t): """Set the exposure time.""" self.t_ms = t timings = self.update_exposure_time(t) return timings def update_exposure_time(self, t): """Camera-specific code for setting the exposure time should go here. """ raise NotImplementedError def get_gain(self): """Query the current gain settings.""" raise NotImplementedError def set_gain(self, **kwargs): """Set the camera gain.""" raise NotImplementedError # Don't override :meth:`set_cooler`, but rather the # :meth:`cooler_on` and :meth:`cooler_off`. 
def cooler_on(self): """Turn on the TEC.""" def cooler_off(self): """Turn off the TEC.""" def set_cooler(self, mode): assert isinstance(mode, (bool, int)) self.cooler_active = mode if mode: self.cooler_on() else: self.cooler_off() def get_cooler_temperature(self): """Check the TEC temperature.""" logger.warn("No action: get_cooler_temperature not overriden.") def set_cooler_temperature(self, temp): """Set the cooler temperature to temp.""" logger.warn("No action: set_cooler_temperature not overriden.") raise NotImplementedError("No cooler?") def set_roi(self, roi): """Define the region of interest. Since ROI stuff is handled entirely in software, this function does not need to be implemented in inheriting classes. """ if len(roi) != 4: raise CameraError("roi must be a length 4 list.") if roi[0] >= roi[2] or roi[1] >= roi[3] or roi[0] < 0 or roi[1] < 0: logger.error( 'Invalid ROI: {0}. Keeping old ROI.'.format(roi)) return old = self.roi self.roi = roi if self.rbuffer is not None: self.rbuffer.roi = roi logger.info( 'Adjusting ROI: {0} --> {1}'.format(str(old), str(self.roi))) def get_crop(self): """Get the current CCD crop settings. If this function is not overloaded, it will simply return the value stored in the crop attribute. """ return self.crop def set_crop(self, crop): """Define the portion of the CCD to actually collect data from. Using a reduced sensor area typically allows for faster readout. Derived classes should define :meth:`update_crop` instead of overriding this one. """ assert crop[1] > crop[0] assert crop[3] > crop[2] if len(crop) != 4: raise CameraError("crop must be a length 4 array.") self.crop = crop self.update_crop(self.crop) def reset_crop(self): """Reset the crop to the maximum size.""" self.crop = [1, self.shape[0], 1, self.shape[1]] self.update_crop(self.crop) def update_crop(self, crop): """Camera-specific code for setting the crop should go here. """ logger.debug("update_crop not implemented.") def get_bins(self): """Query the current binning. If this function is not overloaded, it will simply return the value stored in the bins attribute. """ return self.bins def set_bins(self, bins): """Set binning to bins x bins.""" logger.debug("set_bins not implemented.")
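# --- Illustrative sketch (added for clarity, not part of the original module).
# A minimal simulated camera showing which hooks a subclass is expected to
# override. The class name and the noise-based frames are assumptions made for
# demonstration; a real driver would talk to an SDK through self.clib instead.
class SimulatedCamera(Camera):
    """Toy camera that returns Gaussian noise instead of hardware frames."""

    def initialize(self, **kwargs):
        # Nothing to open for the simulation; a real camera would connect here.
        logger.info("Simulated camera initialized.")

    def acquire_image_data(self):
        # Produce a frame with the configured sensor shape.
        return npr.normal(loc=100.0, scale=10.0, size=self.shape)

    def update_exposure_time(self, t):
        logger.debug("Simulated exposure time set to %s ms.", t)

    def close(self):
        logger.info("Simulated camera closed.")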
python
from .copy import files_copy from .delete import files_delete from .download import files_download from .history import files_history from .import_files import files_import from .list import files_list from .mkdir import files_mkdir from .move import files_move from .pems_delete import files_pems_delete from .pems_list import files_pems_list from .pems_update import files_pems_update from .upload import files_upload
python
import turtle def draw_piece(row, col, color): x = offset_x + 25 + col * 2 * (radius + gap) y = offset_y - 25 - row * 2 * (radius + gap) t.up() t.home() t.goto(x,y) t.down() t.color(color) t.begin_fill() t.circle(radius) t.end_fill() def draw(x, y): global board, rb, winner col = int((x - offset_x) // square_size) def check_winner(): pass def draw_board(): # TODO: use a for loop to simplify the code t.color("purple") t.begin_fill() t.up() t.goto(190, -180) t.down() t.left(90) t.forward(310) t.left(90) t.forward(380) t.left(90) t.forward(310) t.left(90) t.forward(380) t.end_fill() for row in range(6): for col in range(7): draw_piece(row,col,"white") radius = 23 gap = 2 square_size = 2 * (radius + gap) offset_x = -180 offset_y = 100 board = [ [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], ] winner = "" rb = "red" t = turtle.Turtle() t.ht() t.speed(200) draw_board() #draw_piece(0, 0, "blue") #draw_piece(0, 1, "red") #draw_piece(3, 5, "purple") t.up() t.home() t.down() wn = turtle.Screen() wn.onclick(draw) wn.mainloop()
python
from machine.tokenization import ZwspWordDetokenizer def test_detokenize_empty() -> None: detokenizer = ZwspWordDetokenizer() assert detokenizer.detokenize([]) == "" def test_detokenize_space() -> None: detokenizer = ZwspWordDetokenizer() assert ( detokenizer.detokenize(["គែស", "មាង់", " ", "អី", "នៃ", "ជេង", "នារ", "ត៝ល់", "ព្វាន់", "។"]) == "គែស\u200bមាង់ អី\u200bនៃ\u200bជេង\u200bនារ\u200bត៝ល់\u200bព្វាន់។" ) def test_detokenize_guillment() -> None: detokenizer = ZwspWordDetokenizer() assert detokenizer.detokenize(["ឞ្ក្នៃ", "រាញា", "«", "នារ", "»", "ជេសរី"]) == "ឞ្ក្នៃ\u200bរាញា «នារ» ជេសរី" def test_detokenize_punctuation() -> None: detokenizer = ZwspWordDetokenizer() assert ( detokenizer.detokenize(["ไป", "ไหน", "มา", "?", "เขา", "ถาม", "ผม", "."]) == "ไป\u200bไหน\u200bมา? เขา\u200bถาม\u200bผม." ) assert detokenizer.detokenize(["ช้าง", ",", "ม้า", ",", "วัว", ",", "กระบือ"]) == "ช้าง, ม้า, วัว, กระบือ" def test_detokenize_punctuation_inside_word() -> None: detokenizer = ZwspWordDetokenizer() assert ( detokenizer.detokenize(["เริ่ม", "ต้น", "ที่", " ", "7,999", " ", "บาท"]) == "เริ่ม\u200bต้น\u200bที่ 7,999 บาท" ) def test_detokenize_multiple_spaces() -> None: detokenizer = ZwspWordDetokenizer() assert ( detokenizer.detokenize(["គែស", "មាង់", " ", "អី", "នៃ", "ជេង", "នារ", "ត៝ល់", "ព្វាន់", "។"]) == "គែស\u200bមាង់ អី\u200bនៃ\u200bជេង\u200bនារ\u200bត៝ល់\u200bព្វាន់។" )
python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo # Copyright (C) 2017-2020 German Aerospace Center (DLR) and others. # This program and the accompanying materials are made available under the # terms of the Eclipse Public License 2.0 which is available at # https://www.eclipse.org/legal/epl-2.0/ # This Source Code may also be made available under the following Secondary # Licenses when the conditions for such availability set forth in the Eclipse # Public License 2.0 are satisfied: GNU General Public License, version 2 # or later which is available at # https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html # SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later # @file __init__.py # @author Leonhard Luecken # @date 2017-04-09 """ simpla - A simple platooning plugin for TraCI simpla is a configurable, simple platooning plugin for TraCI. A platooning configuration has to be created before using. Its possible elements are given in the example configuration file 'simpla_example.cfg.xml' Information about vType mappings between original and platooning vTypes has to be supplied. This can be done directly in the configuration xml-file by using 'vTypeMapLeader', 'vTypeMapFollower' and 'vTypeMapCatchup' elements or by reference to seperate files which define the mappings as 'originalVType : mappedVType' All specified vTypes should be available within the simulation, the "default" type is optional and used whenever information is missing for some original type if no default is specified, the original type remains unchanged within the platoon. For the definition of platooning vTypes for existing basic vTypes, and generating vTypeMapping-files see the script generateModifiedVTypes.py. Usage: 1) import simpla into your traci script. 2) After establishing a connection to SUMO with traci, call simpla.load(<configuration_filename>) 3) Only applies to SUMO version < 0.30: After starting simpla, call simpla.update() after each call to traci.simulationStep() Notes: 1) simpla changes the vehicle types, speedfactors, and lane changemodes of all connected vehicles. If your application does so as well, this might have unintended consequences. 2) Currently, steps of lengths other than DeltaT are not supported (i.e. if traci.simulationStep() is called with argument when simpla is running this may yield undesired behaviour). 3) simpla adds subscriptions to VAR_ROAD_ID, VAR_LANE_INDEX (and currently VAR_LANE_ID) and removes them when stopped """ import sys import os if 'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) else: sys.exit("please declare environment variable 'SUMO_HOME'") import traci # noqa from ._utils import openGap # noqa from ._utils import SimplaException # noqa import simpla._config # noqa import simpla._reporting as rp # noqa import simpla._platoonmanager # noqa warn = rp.Warner("simpla") _mgr = None _mgr_listenerID = None _useStepListener = 'addStepListener' in dir(traci) _emergencyDecelImplemented = 'VAR_EMERGENCY_DECEL' in dir(traci.constants) if not _emergencyDecelImplemented: # Old traci version. No emergency decel present. 
if rp.VERBOSITY >= 1: warn("Using old traci version assuming emergency decel == decel", True) # Set emergency decel to decel traci.constants.VAR_EMERGENCY_DECEL = 0x7b traci.vehicletype.getEmergencyDecel = traci.vehicletype.getDecel def load(config_filename): ''' Load the config from file and create a Platoon Manager ''' global _mgr, _mgr_listenerID simpla._config.load(config_filename) _mgr = simpla._platoonmanager.PlatoonManager() if _useStepListener: # For SUMO version >= 0.30 _mgr_listenerID = traci.addStepListener(_mgr) def stop(): ''' Stop the PlatoonManager ''' global _mgr, _mgr_listenerID if _mgr is not None: _mgr.stop() traci.removeStepListener(_mgr_listenerID) _mgr = None def update(): ''' Function called each simulation step. Only to be used for SUMO version < 1.0 ''' global _mgr, warn if _mgr is not None: _mgr.step() else: if rp.VERBOSITY >= 1: warn("call simpla.init(<config_file>) before simpla.update()!")
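# --- Illustrative usage sketch (added for clarity, not part of simpla). It
# follows the three steps from the module docstring; the SUMO command, the
# configuration file name, and the step count are placeholders.
#
#     import traci
#     import simpla
#
#     traci.start(["sumo", "-c", "scenario.sumocfg"])
#     simpla.load("simpla_example.cfg.xml")
#     for _ in range(3600):
#         traci.simulationStep()
#         # simpla.update() is only required for SUMO versions < 0.30
#     simpla.stop()
#     traci.close()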
python
#!/bin/python3 # name: vignette_testing.py # author: [email protected] # license: 2019, MIT # date: 2019-12-02 (YYYY-MM-DD) # edit: 2019-12-03 (YYYY-MM-DD) # """ Probe for gnuplot palettes' differences Script 'palette_decomposition.py' provides rapid access to visualize the channels of R, G, B of RGB color space subsequently deposit e.g. as a .png file. compare by ImageMagick allows the superposition of two .png files to identify differences between the two; this is then highlighted by red pixels. Because 'palette_decomposition.py' names the diagnostic files coherently, their inspection with ImageMagick may be be automated. Place the script in the same folder already containing the vignette / decomposition plots to scrutinize as .png. Launch without provision of parameters by python3 vignette_comparsion.py The script will point ImageMagick which files to check against each other. This for example allows to discern quickly palettes with a similar name, but from different repositories (e.g., magma). """ import fnmatch import os import shutil import subprocess as sub import sys def identify(): """ Learn about the vignette .png to process at all. """ global register register = [] for file in os.listdir("."): if fnmatch.fnmatch(file, "*_vig.png"): register.append(file) register.sort() def probe(): """ Call compare by ImageMagick to work on the data. """ print("\nProbing for differences in the palette files' plots.") while len(register) > 1: for entry in register[1:]: reference = register[0] probe = entry difference = str("diff_{}_{}".format(str(reference[:-4]), probe)) test = str("compare {} {} {}".format(reference, entry, difference)) try: sub.call(test, shell=True) except IOError: print("Possibly no callable instance of ImageMagick.") sys.exit(0) del register[0] print("Probing for differences in the palette files' plots complete.") def stamp(): """ Add an 'inner stamp' to ease the visual discern of the plots. """ print("\nProvision of 'inner stamps' in the difference plots.") diff_register = [] for file in os.listdir("."): if fnmatch.fnmatch(file, "diff*.png"): diff_register.append(file) diff_register.sort() for entry in diff_register: intermediate = str(entry)[:-4] + str("_intermediate.png") stamping = str( "convert {} label:'{}' -gravity Center -append {}".format( entry, entry, intermediate)) try: sub.call(stamping, shell=True) # Cleaning; retain only the stamped file filed by old name. os.remove(entry) shutil.move(intermediate, entry) except IOError: print("problem with {}".format(entry)) sys.exit(0) # action calls: identify() probe() stamp() sys.exit(0)
python
from learnml.metrics import mean_squared_error import numpy as np import unittest class Test(unittest.TestCase): def test_mean_squared_error(self): expected_results = [0, 1] for i, y_pred in enumerate(np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])): self.assertEqual(expected_results[i], mean_squared_error(np.array([1, 2, 3, 4, 5]), y_pred)) if __name__ == "__main__": unittest.main()
python
# -*- coding: utf-8 -*- import json from TM1py.Objects.User import User from TM1py.Services.ObjectService import ObjectService class SecurityService(ObjectService): """ Service to handle Security stuff """ def __init__(self, rest): super().__init__(rest) def create_user(self, user): """ Create a user on TM1 Server :param user: instance of TM1py.User :return: response """ request = '/api/v1/Users' self._rest.POST(request, user.body) def get_user(self, user_name): """ Get user from TM1 Server :param user_name: :return: instance of TM1py.User """ request = '/api/v1/Users(\'{}\')?$expand=Groups'.format(user_name) response = self._rest.GET(request) return User.from_json(response) def update_user(self, user): """ Update user on TM1 Server :param user: instance of TM1py.User :return: response """ for current_group in self.get_groups(user.name): if current_group not in user.groups: self.remove_user_from_group(current_group, user.name) request = '/api/v1/Users(\'{}\')'.format(user.name) return self._rest.PATCH(request, user.body) def delete_user(self, user_name): """ Delete user on TM1 Server :param user_name: :return: response """ request = '/api/v1/Users(\'{}\')'.format(user_name) return self._rest.DELETE(request) def get_all_users(self): """ Get all users from TM1 Server :return: List of TM1py.User instances """ request = '/api/v1/Users?$expand=Groups' response = self._rest.GET(request) response_as_dict = json.loads(response) users = [User.from_dict(user) for user in response_as_dict['value']] return users def get_users_from_group(self, group_name): """ Get all users from group :param group_name: :return: List of TM1py.User instances """ request = '/api/v1/Groups(\'{}\')?$expand=Users($expand=Groups)'.format(group_name) response = self._rest.GET(request) response_as_dict = json.loads(response) users = [User.from_dict(user) for user in response_as_dict['Users']] return users def get_groups(self, user_name): """ Get the groups of a user in TM1 Server :param user_name: :return: List of strings """ request = '/api/v1/Users(\'{}\')/Groups'.format(user_name) response = self._rest.GET(request) groups = json.loads(response)['value'] return [group['Name'] for group in groups] def remove_user_from_group(self, group_name, user_name): """ Remove user from group in TM1 Server :param group_name: :param user_name: :return: response """ request = '/api/v1/Users(\'{}\')/Groups?$id=Groups(\'{}\')'.format(user_name, group_name) return self._rest.DELETE(request) def get_all_groups(self): """ Get all groups from TM1 Server :return: List of strings """ request = '/api/v1/Groups?$select=Name' response = self._rest.GET(request) response_as_dict = json.loads(response) groups = [entry['Name'] for entry in response_as_dict['value']] return groups
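# --- Illustrative usage sketch (added for clarity, not part of the service).
# `tm1_rest` stands for an already configured TM1 REST session object, i.e. the
# same object that ObjectService expects; building it is outside this sketch,
# and the user and group names below are placeholders.
#
#     security = SecurityService(tm1_rest)
#     for group in security.get_groups("admin"):
#         print(group)
#     security.remove_user_from_group("Developers", "admin")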
python
from abc import abstractmethod from typing import Callable, Tuple import numpy as np from ._func import Func class OriFunc(Func): @abstractmethod def __call__(self, t: float) -> float: """ :param t: Time. :return: Orientation in degrees. """ pass class Tangential(OriFunc): def __init__( self, pos_func: Callable[[float], Tuple[float, float]], dt: float = 1e-2, init_ori: float = 0, ): """Orient the stimulus tangentially to its trajectory. :param pos_func: Position as function of time. :param dt: Approximate time between consecutive frames. """ self.__pos_func = pos_func self.__dt = dt self.__init_ori = init_ori self.__prev_ori = init_ori def __call__(self, t: float): old_value = np.array(self.__pos_func(t - self.__dt)) new_value = np.array(self.__pos_func(t)) if all(old_value == new_value): return self.__prev_ori ori = ( np.rad2deg(np.arctan2(*(new_value - old_value))) + self.__init_ori ) self.__prev_ori = ori return ori
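# --- Illustrative usage sketch (added for clarity, not part of the module).
# The circular trajectory is an assumption chosen to show how the returned
# orientation follows the tangent of the path.
#
#     import math
#
#     def circular_path(t: float):
#         return math.cos(t), math.sin(t)
#
#     tangential = Tangential(pos_func=circular_path, dt=1e-2)
#     for t in (0.5, 1.0, 1.5):
#         print(f"t={t}: {tangential(t):.1f} degrees")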
python
a = ["1", 1, "1", 2] # ex-14: Remove duplicates from list a a = list(set(a)) print(a) # ex-15: Create a dictionary that contains the keys a and b and their respec # tive values 1 and 2 . my_dict = {"a":1, "b":2} print(my_dict) print(type(my_dict)) # Add "c":3 to dictionary my_dict["c"] = 3 print(my_dict) my_dict2 = dict([("a",1), ("b",2)]) print(my_dict2) # ex-16: Please complete the script so that it prints out the value of key b . d = {"a": 1, "b": 2} print(d["b"]) # ex-17: Calculate the sum of the values of keys a and b . d = {"a": 1, "b": 2, "c": 3} sum = d["a"] + d["b"] print(sum) # ex-19: Add a new pair of key (e.g. c ) and value (e.g. 3 ) to the dictionary # and print out the new dictionary. d = {"a": 1, "b": 2} d["c"] = 3 print(d) # ex-20: Calculate the sum of all dictionary values. d = {"a": 1, "b": 2, "c": 3} sum = 0 for keys in d.keys(): sum += d[keys] print(sum) ## There is simple oneliner # print(sum(d.values())) # print(sum(d.values())) d = {'key1': 1, 'key2': 14, 'key3': 47} sum1 = [d[key] for key in d.keys()] print(sum1)
python
from task_grounding.task_grounding import TaskGrounding, TaskGroundingReturn, TaskErrorType from database_handler.database_handler import DatabaseHandler import unittest from unittest.mock import Mock from ner_lib.ner import EntityType from ner_lib.command_builder import Task, TaskType, ObjectEntity, SpatialType, SpatialDescription ################################# ISOLATED UNIT TESTS ----- BEGIN ########################################################## class SimpleSkillTest(unittest.TestCase): def setUp(self): self.db_mock = Mock() self.task_grounding = TaskGrounding(db=self.db_mock) self.entities = [ (EntityType.COLOUR, "blue"), (EntityType.OBJECT, "cover"), (EntityType.LOCATION, "next"), (EntityType.COLOUR, "black"), (EntityType.OBJECT, "bottom cover"), (EntityType.LOCATION, "above"), (EntityType.OBJECT, "bottom cover") ] def test_get_specific_task_from_task__task_is_pick_up__returns_task_with_pick_up(self): self.db_mock.get_task = Mock(return_value=(1, "pick up")) task = Task(name="pick up") task.objects_to_execute_on = [ObjectEntity()] returned = self.task_grounding.get_specific_task_from_task(task) self.assertEqual(TaskType.PICK, returned.task_info[0].task_type) def test_get_specific_task_from_task__task_is_move__returns_task_with_move(self): self.db_mock.get_task = Mock(return_value=(1, "move")) task = Task(name="move") task.objects_to_execute_on = [ObjectEntity()] returned = self.task_grounding.get_specific_task_from_task(task) self.assertEqual(TaskType.MOVE, returned.task_info[0].task_type) def test_get_specific_task_from_task__task_is_place__returns_task_with_place(self): self.db_mock.get_task = Mock(return_value=(1, "place")) task = Task(name="place") task.objects_to_execute_on = [ObjectEntity()] returned = self.task_grounding.get_specific_task_from_task(task) self.assertEqual(TaskType.PLACE, returned.task_info[0].task_type) def test_get_specific_task_from_task__task_is_find__returns_task_with_find(self): self.db_mock.get_task = Mock(return_value=(1, "find")) task = Task(name="find") task.objects_to_execute_on = [ObjectEntity()] returned = self.task_grounding.get_specific_task_from_task(task) self.assertEqual(TaskType.FIND, returned.task_info[0].task_type) def test_get_specific_task_from_task__task_is_unknown__returns_error_code_unknown(self): self.db_mock.get_task = Mock(return_value=(1, None)) task = Task(name="asdasd") returned = self.task_grounding.get_specific_task_from_task(task) self.assertFalse(returned.is_success) self.assertEqual(TaskErrorType.UNKNOWN, returned.error.error_code) def test_get_specific_task_from_task__task_has_no_object__returns_error_code_no_object(self): self.db_mock.get_task = Mock(return_value=(1, "pick up")) task = Task(name="pick up") returned = self.task_grounding.get_specific_task_from_task(task) self.assertFalse(returned.is_success) self.assertEqual(TaskErrorType.NO_OBJECT, returned.error.error_code) class AdvancedTaskTest(unittest.TestCase): def setUp(self): self.db_mock = Mock() self.task_grounding = TaskGrounding(db=self.db_mock) self.entities = [ (EntityType.COLOUR, "blue"), (EntityType.OBJECT, "cover"), (EntityType.LOCATION, "next"), (EntityType.COLOUR, "black"), (EntityType.OBJECT, "bottom cover"), (EntityType.LOCATION, "above"), (EntityType.OBJECT, "bottom cover") ] def test_get_specific_task_from_task__task_is_custom_task__returns_list_of_primary_skills(self): pick_up_task = Task("pick up") pick_up_task.task_type = TaskType.PICK pick_up_task.objects_to_execute_on = [ObjectEntity()] move_task = Task("pick up") move_task.task_type = 
TaskType.MOVE move_task.objects_to_execute_on = [ObjectEntity()] place_task = Task("pick up") place_task.task_type = TaskType.PICK place_task.objects_to_execute_on = [ObjectEntity()] sub_tasks = [[1, 2, 3], ["pick up", "move", "place"], [pick_up_task, move_task, place_task]] tasks = [TaskType.PICK, TaskType.MOVE, TaskType.PLACE] self.db_mock.get_task = Mock(return_value=(1, "clear table")) self.db_mock.get_sub_tasks = Mock(return_value=sub_tasks) task = Task("tidy") returned = self.task_grounding.get_specific_task_from_task(task) returned_tasks = [returned.task_info[0].task_type, returned.task_info[1].task_type, returned.task_info[2].task_type] self.assertEqual(tasks, returned_tasks) def test_get_specific_task_from_tasks__task_is_custom_task_without_sub_tasks__returns_error_code_no_sub_tasks(self): self.db_mock.get_task = Mock(return_value=(1, "clear table")) self.db_mock.get_sub_tasks = Mock(return_value=None) task = Task("tidy") returned = self.task_grounding.get_specific_task_from_task(task) self.assertFalse(returned.is_success) self.assertEqual(TaskErrorType.NO_SUBTASKS, returned.error.error_code) class TeachSystemTest(unittest.TestCase): def setUp(self): self.db_mock = Mock() self.task_grounding = TaskGrounding(db=self.db_mock) def test_teach_new_task__valid_input__returns_success(self): self.db_mock.add_task = Mock() self.db_mock.get_task = Mock() self.db_mock.get_task.side_effect = [(1, None), (2, None), (3, None)] self.db_mock.add_sub_task = Mock() returned = self.task_grounding.teach_new_task("nice task name", [Task("take"), Task("move"), Task("put")], "nice task keyword") self.assertTrue(returned.is_success) def test_teach_new_task__contains_unknown_task__returns_unknown_error_code(self): self.db_mock.add_task = Mock() self.db_mock.get_task = Mock() self.db_mock.get_task.side_effect = [(None, None)] self.db_mock.add_sub_task = Mock() returned = self.task_grounding.teach_new_task("nice task name", [Task("take"), Task("move"), Task("put")], "nice task keyword") self.assertFalse(returned.is_success) self.assertEqual(TaskErrorType.UNKNOWN, returned.error.error_code) def test_add_sub_task__valid_input__returns_success(self): self.db_mock.get_task = Mock() self.db_mock.add_sub_task = Mock() self.db_mock.get_task.side_effect = [(5, "clear table"), (1, "pick up")] returned = self.task_grounding.add_sub_task("tidy", ["get"]) self.assertTrue(returned.is_success) ################################# ISOLATED UNIT TESTS ----- END ########################################################## ################################# INTEGRATION TESTS ----- BEGIN ########################################################## class SimpleSkillIntegration(unittest.TestCase): def setUp(self): self.task_grounding = TaskGrounding(DatabaseHandler("test_grounding.db")) self.returned = TaskGroundingReturn() self.entities = [ (EntityType.COLOUR, "blue"), (EntityType.OBJECT, "cover"), (EntityType.LOCATION, "next"), (EntityType.COLOUR, "black"), (EntityType.OBJECT, "bottom cover"), (EntityType.LOCATION, "above"), (EntityType.OBJECT, "bottom cover") ] def test_Pick(self): self.returned = self.task_grounding.get_specific_task_from_task("take", self.entities) self.assertEqual(self.returned.task_info[0].get_name(), "PickUpTask") def test_Move(self): self.returned = self.task_grounding.get_specific_task_from_task("relocate", self.entities) self.assertEqual(self.returned.task_info[0].get_name(), "MoveTask") def test_Place(self): self.returned = self.task_grounding.get_specific_task_from_task("put", self.entities) 
self.assertEqual(self.returned.task_info[0].get_name(), "PlaceTask") def test_Find(self): self.returned = self.task_grounding.get_specific_task_from_task("locate", self.entities) self.assertEqual(self.returned.task_info[0].get_name(), "FindTask") def test_UnknownObject(self): self.returned = self.task_grounding.get_specific_task_from_task("asdasd") self.assertFalse(self.returned.is_success) self.assertEqual(self.returned.error_code, TaskErrorType.UNKNOWN) def test_NoObjectSpecified(self): self.returned = self.task_grounding.get_specific_task_from_task("take") self.assertFalse(self.returned.is_success) self.assertEqual(self.returned.error_code, TaskErrorType.NO_OBJECT) class AdvancedTaskIntegration(unittest.TestCase): def setUp(self): self.task_grounding = TaskGrounding(DatabaseHandler("test_grounding.db")) self.returned = TaskGroundingReturn() self.entities = [ (EntityType.COLOUR, "blue"), (EntityType.OBJECT, "cover"), (EntityType.LOCATION, "next"), (EntityType.COLOUR, "black"), (EntityType.OBJECT, "bottom cover"), (EntityType.LOCATION, "above"), (EntityType.OBJECT, "bottom cover") ] def test_MoveBlue(self): tasks = [TaskType.PICK, TaskType.PLACE] returned = self.task_grounding.get_specific_task_from_task(Task("blue1")) returned_tasks = [returned.task_info[0].task_type, returned.task_info[1].task_type] self.assertEqual(tasks, returned_tasks) def test_ClearTable(self): tasks = ["PickUpTask", "MoveTask", "PlaceTask"] self.returned = self.task_grounding.get_specific_task_from_task("tidy", self.entities) returned_tasks = [self.returned.task_info[0].get_name(), self.returned.task_info[1].get_name(), self.returned.task_info[2].get_name()] self.assertEqual(tasks, returned_tasks) class TeachSystemIntegration(unittest.TestCase): def setUp(self): self.db = DatabaseHandler("test_grounding.db") self.task_grounding = TaskGrounding(self.db) self.returned = TaskGroundingReturn() def test_TeachTask(self): returned = self.task_grounding.teach_new_task("test_task1", ["take", "move", "put"], ["test1-1", "test1-2"]) self.assertTrue(returned.is_success) self.clean_test_db("test_task1") def test_AddWord(self): returned = self.task_grounding.add_word_to_task("blue1", "blue2") self.assertTrue(returned.is_success) self.db.conn.execute("delete from TASK_WORDS where WORD='blue2';") self.db.conn.commit() def test_TeachTaskUnknownSubTask(self): returned = self.task_grounding.teach_new_task("test_task2", ["UNKNOWN TASK"], ["test1", "test2-1"]) self.assertFalse(returned.is_success) self.assertEqual(returned.error_code, TaskErrorType.UNKNOWN) self.clean_test_db("test_task2") def test_AddWordsToTask(self): #self.task_grounding.teach_new_task("test_task3", ["take", "move", "put"], ["test3-1", "test3-2"]) #returned = self.task_grounding.add_word_to_task("test_task3-1", "TEST WORD") #self.assertTrue(returned.is_success) self.clean_test_db("test_task3") def test_AddSubTask(self): self.task_grounding.teach_new_task("test_task4", ["take", "move", "put"], ["test4-1", "test4-2"]) returned = self.task_grounding.add_sub_task("test_task4", ["get"]) self.assertTrue(returned.is_success) self.clean_test_db("test_task4") def clean_test_db(self, task_name): task_id = self.db.get_task_id(task_name) self.db.conn.execute("delete from TASK_WORDS where TASK_ID=?;", (task_id,)) self.db.conn.execute("delete from TASK_INFO where TASK_NAME=?;", (task_name,)) self.db.conn.commit() ################################# INTEGRATION TESTS ----- END ##########################################################
python
# Generated by Django 3.2.5 on 2022-01-24 05:22 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('metrics', '0002_initial'), ] operations = [ migrations.CreateModel( name='FeedbackResponseKeyword', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=64)), ], options={ 'ordering': ['name'], }, ), migrations.CreateModel( name='ImportLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date', models.DateTimeField(auto_now_add=True, db_index=True)), ('responses_imported_count', models.PositiveIntegerField(blank=True, null=True)), ('projects_affected_count', models.PositiveIntegerField(blank=True, null=True)), ('run_time_seconds', models.FloatField()), ('import_type', models.CharField(choices=[('beeheard', 'BeeHeard'), ('usabilla', 'Usabilla')], max_length=12)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='usabilla_import_log_user', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-date'], }, ), migrations.AddField( model_name='campaign', name='feedback_response_count', field=models.PositiveIntegerField(default=0), ), migrations.AddField( model_name='campaign', name='latest_feedback_response_date', field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='campaign', name='latest_other_response_date', field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='campaign', name='other_response_count', field=models.PositiveIntegerField(default=0), ), migrations.AddField( model_name='campaign', name='vote_response_count', field=models.PositiveIntegerField(default=0), ), migrations.AddField( model_name='feedbackresponse', name='assignees', field=models.ManyToManyField(blank=True, related_name='feedback_response_assignees', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='feedbackresponse', name='notes', field=models.TextField(blank=True, max_length=3000), ), migrations.AlterField( model_name='domainyearsnapshot', name='year', field=models.PositiveIntegerField(default=2022), ), migrations.AlterField( model_name='project', name='admins', field=models.ManyToManyField(blank=True, help_text='Admins have full edit access to the project info, can add other admins and editors, and can see emails in responses', related_name='project_admins', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='project', name='contact', field=models.ForeignKey(blank=True, help_text='Contact is the owner and has full edit access to the project info, can add other admins and editors, and can see emails in responses', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='project_contact', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='project', name='editors', field=models.ManyToManyField(blank=True, help_text='Editors can only create/edit manual snapshots and can see emails in responses', related_name='project_editors', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='projectyearsetting', name='year', field=models.PositiveIntegerField(default=2022), ), migrations.DeleteModel( 
name='UsabillaImportLog', ), migrations.AddField( model_name='feedbackresponse', name='keywords', field=models.ManyToManyField(blank=True, related_name='feedback_response_keywords', to='metrics.FeedbackResponseKeyword'), ), ]
python
from itertools import count CARD_PUBLIC_KEY = 14205034 DOOR_PUBLIC_KEY = 18047856 def transform_one_step(value, subject_number): return (value * subject_number) % 20201227 def transform(loop_size, subject_number=7): value = 1 for _ in range(loop_size): value = transform_one_step(value, subject_number) return value def get_loop_size(target, subject_number=7): value = 1 for loop_size in count(1): value = transform_one_step(value, subject_number) if value == target: return loop_size card_loop_size = get_loop_size(CARD_PUBLIC_KEY) print(transform(card_loop_size, DOOR_PUBLIC_KEY))
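# --- Illustrative check (added for clarity, not part of the original script):
# with the sample keys from the puzzle description (5764801 and 17807724) the
# card loop size is 8 and both sides derive the encryption key 14897079.
#
#     assert get_loop_size(5764801) == 8
#     assert transform(8, 17807724) == 14897079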
python
# MIT License # Copyright (c) 2017 MassChallenge, Inc. from __future__ import unicode_literals import swapper from factory import ( Sequence, SubFactory, post_generation, ) from accelerator.tests.factories.core_profile_factory import CoreProfileFactory from accelerator.tests.factories.expert_category_factory import ( ExpertCategoryFactory ) from accelerator.tests.factories.industry_factory import IndustryFactory from accelerator.tests.factories.program_family_factory import ( ProgramFamilyFactory ) ExpertProfile = swapper.load_model('accelerator', 'ExpertProfile') class ExpertProfileFactory(CoreProfileFactory): class Meta: model = ExpertProfile salutation = Sequence(lambda x: "Expert Title %d" % x) title = Sequence(lambda x: "Expert title %d" % x) company = Sequence(lambda x: "Expert Company %d" % x) expert_category = SubFactory(ExpertCategoryFactory) primary_industry = SubFactory(IndustryFactory) privacy_email = "finalists and staff" privacy_phone = "finalists and staff" privacy_web = "finalists and staff" public_website_consent = True public_website_consent_checked = True judge_interest = False mentor_interest = False speaker_interest = False speaker_topics = "" office_hours_interest = False office_hours_topics = "" expert_group = "" reliability = None referred_by = "" other_potential_experts = "" internal_notes = "" bio = Sequence(lambda x: "Bio text %d" % x) home_program_family = SubFactory(ProgramFamilyFactory) @post_generation def functional_expertise(self, create, extracted, **kwargs): if not create: return if extracted: for expertise in extracted: self.functional_expertise.add(expertise) @post_generation def additional_industries(self, create, extracted, **kwargs): if not create: return if extracted: for industry in extracted: self.additional_industries.add(industry) @post_generation def mentoring_specialties(self, create, extracted, **kwargs): if not create: return if extracted: for specialty in extracted: self.mentoring_specialties.add(specialty)
python
from typing import Callable, Dict, Text

from recommenders.datasets import Dataset

import tensorflow as tf
import tensorflow_recommenders as tfrs

from pathlib import Path

SAVE_PATH = Path(__file__).resolve().parents[1] / "weights"


class RankingModel(tfrs.models.Model):
    def __init__(
        self,
        dataset: Dataset,
        network_fn: Callable,
        network_args: Dict = None
    ):
        super().__init__()
        self._name = f"{self.__class__.__name__}_{network_fn.__name__}"
        if network_args is None:
            network_args = {}

        self.ranking_model: tf.keras.Model = network_fn(
            unique_user_ids=dataset.unique_user_ids,
            unique_item_ids=dataset.unique_movie_ids,
            **network_args)

        self.task = tfrs.tasks.Ranking(
            loss=tf.keras.losses.MeanSquaredError(),
            metrics=[tf.keras.metrics.RootMeanSquaredError()]
        )

    def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
        prediction = self.ranking_model(**features)
        return self.task(prediction, features['rating'])

    def call(self, features: Dict[Text, tf.Tensor]):
        return self.ranking_model(**features)

    def print_summary(self):
        # tf.keras.Model exposes summary(), which prints the layer table itself.
        self.ranking_model.summary()

    def save_weights(self, save_dir):
        # Accept None (fall back to the default location) or a path-like value.
        save_dir = Path(save_dir) if save_dir is not None else SAVE_PATH
        save_dir.mkdir(parents=True, exist_ok=True)
        self.ranking_model.save_weights(str(save_dir / 'ranking'))
python
"""Convert Noorlib library html to OpenITI mARkdown. This script subclasses the generic MarkdownConverter class from the html2md module (based on python-markdownify, https://github.com/matthewwithanm/python-markdownify), which uses BeautifulSoup to create a flexible converter. The subclass in this module, NoorlibHtmlConverter, adds methods specifically for the conversion of books from the eShia library to OpenITI mARkdown: * Span, div and p conversion: span, div and p classes needed to be converted are defined in self.class_dict. Inheritance schema of the NoorlibHtmlConverter: ======================== ========================== MarkdownConverter NoorlibHtmlConverter ======================== ========================== Options (inherited) DefaultOptions (inherited) __init__ (inherited) __getattr__ (inherited) convert (inherited) process_tag (inherited) process_text (inherited) fill_out_columns (inherited) post_process_md (inherited) should_convert_tag (inherited) indent (inherited) underline (inherited) create_underline_line (inherited) convert_a (inherited) convert_b (inherited) convert_blockquote (inherited) convert_br (inherited) convert_em (inherited) convert_hn (inherited) convert_i (inherited) convert_img (inherited) convert_list (inherited) convert_li (inherited) convert_ol (inherited) convert_p convert_p convert_table (inherited) convert_tr (inherited) convert_ul (inherited) convert_strong (inherited) convert_span convert_div ======================== ========================== """ import re if __name__ == '__main__': from os import sys, path root_folder = path.dirname(path.dirname(path.abspath(__file__))) root_folder = path.dirname(path.dirname(path.dirname(root_folder))) sys.path.append(root_folder) from openiti.new_books.convert.helper import html2md from openiti.new_books.convert.helper.html2md import * # import all constants! class NoorlibHtmlConverter(html2md.MarkdownConverter): """Convert Noorlib library html to OpenITI mARkdown. Examples: >>> import html2md_noorlib >>> h = '<img class="libimages" src="/images/books/86596/01/cover.jpg">' >>> html2md_noorlib.markdownify(h) '![](img/86596/01/cover.jpg)' >>> import html2md_noorlib >>> h = 'abc <a href="www.example.com">def</a> ghi' >>> html2md_noorlib.markdownify(h) 'abc def ghi' """ def __init__(self, **options): super().__init__(**options) self.class_dict = dict() self.class_dict["rightpome"] = "\n# {} %~% " # <span class> self.class_dict["leftpome"] = "{}\n" # <span class> self.class_dict["footnote"] = "{}\n" # <div class> ## ##old: ## self.class_dict["Titr3"] = "\n\n### ||| {}\n\n" # <span class> ## self.class_dict["KalamateKhas2"] = "\n\n### || {}\n\n" # <p class> ## self.class_dict["KalamateKhas"] = "\n\n### ||| {}\n\n" # <p class> ## self.class_dict["TextsStyles3"] = "\n\n### ||| {}\n\n" # <p class> ## self.class_dict["TextsStyles1"] = "@QUR@ {}\n" # <span class> ## self.class_dict["Aye"] = "@QUR@ {}\n" # <span class> ## self.class_dict["tdfehrest2"] = "\t{}" # <td class> ## self.class_dict["list3"] = "\t{}" # <div class> ## self.class_dict["sher"] = "# {}\n" # <p class> ## self.class_dict["#6C3934"] = "\n\n# {}\n\n" # <span class> self.options["image_link_regex"] = "/?images/books" ## self.options["image_folder"] = "img" self.options["strip"] = ["a", "img"] def convert_span(self, el, text): """Converts html <span> tags, depending on their class attribute. 
Supported span classes should be stored in self.class_dict (key: span class (str); value: formatting string) E.g., {"quran": "@QUR@ {}\\n"} Example: >>> import html2md_noorlib >>> h = 'abc <span>def</span> ghi' >>> html2md_noorlib.markdownify(h) 'abc def ghi' >>> h = 'abc <span class="unknown_span_class">def</span> ghi' >>> html2md_noorlib.markdownify(h) 'abc def ghi' #>>> h = 'abc <span class="Aya">def ghi</span> jkl' #>>> html2md_noorlib.markdownify(h) #'abc @QUR02 def ghi jkl' # the @QUR@ example outputs are a result of post-processing; # the function itself will produce: # 'abc @QUR@ def ghi\\njkl' >>> h = '<span class="rightpome">abc def</span><span class="leftpome">ghi jkl</span>' >>> html2md_noorlib.markdownify(h) '\\n# abc def %~% ghi jkl' """ try: # will fail if el has no class attribute for c in el["class"]: #print(c) if c in self.class_dict: return self.class_dict[c].format(text) if text else '' if c == "ayah": try: sura = el["surah"] except: sura = "0" try: aya = el["ayah"] except: aya = "0" #print("@QUR{}.{}@ {}".format(sura, aya, text)) return "@QUR{}.{}@ {}\n".format(sura, aya, text) except Exception as e: pass return text def convert_div(self, el, text): """Converts html <div> tags, depending on their class attribute. Supported div classes should be stored in self.class_dict (key: div class (str); value: formatting string) Example: >>> import html2md_noorlib >>> h = 'abc <div>def</div> ghi' >>> html2md_noorlib.markdownify(h) 'abc def ghi' >>> h = 'abc <div class="unknown_div_class">def</div> ghi' >>> html2md_noorlib.markdownify(h) 'abc def ghi' >>> h = '<div class="ClssDivMeesage">Page Is Empty</div>' >>> html2md_noorlib.markdownify(h) '' """ try: # will fail if el has no class attribute for c in el["class"]: if c in self.class_dict: return self.class_dict[c].format(text) if text else '' if c == "ClssDivMeesage": return "" except Exception as e: pass return text def convert_p(self, el, text): """Converts <p> tags according to their class. Supported p classes should be stored in self.class_dict (key: span class (str); value: formatting string) E.g., {"quran": "@QUR@ {}\\n"} <p> tags without class attribute, or unsupported class, will be converted according to the markdown style as defined in the self.options["md_style"] value (from super().DefaultOptions) Examples: >>> import html2md_noorlib >>> h = "<p>abc</p>" >>> html2md_noorlib.markdownify(h) '\\n\\n# abc\\n\\n' >>> h = "<p>abc</p>" >>> html2md_noorlib.markdownify(h, md_style=ATX) '\\n\\nabc\\n\\n' >>> h = "<p></p>" >>> html2md_noorlib.markdownify(h, md_style=ATX) '' """ if self.options['md_style'] == OPENITI: return '\n\n# %s\n\n' % text if text else '' else: return '\n\n%s\n\n' % text if text else '' def convert_sup(self, el, text): """Converts <sup> tags (used for footnote markers).""" return "({})".format(text.strip()) def markdownify(html, **options): """Shortcut to the convert method of the HindawiConverter class.""" return NoorlibHtmlConverter(**options).convert(html) if __name__ == "__main__": import doctest doctest.testmod()
python
import pytest

from bot.haiku.models import HaikuMetadata


@pytest.fixture()
def haiku_metadata(data_connection):
    """Provide the HaikuMetadata model wired to the test data connection."""
    HaikuMetadata.client = data_connection
    return HaikuMetadata
python
#!/usr/bin/env python3 import argparse import os import re import sys from itertools import product import h5py import numpy as np if __name__ == "__main__": ORIG_WIDTH = 512 ORIG_NUM_PARAMS = 4 parser = argparse.ArgumentParser() parser.add_argument("hdf5_files", nargs="*", help="Path to a CosmoFlow HDF5 file.") parser.add_argument("--out_dir", type=str, default="dataset", help="An optional value.") parser.add_argument("--width", type=int, default=128, help="The output spatial width.") parser.add_argument("--datatype", type=str, default="float32", help="The data type for universe data.") args = parser.parse_args() if not os.path.exists(args.out_dir) or not os.path.isdir(args.out_dir): sys.stderr.write("The output directory does not exist: {}\n" .format(args.out_dir)) exit(1) if (ORIG_WIDTH % args.width) != 0: sys.stderr.write("The output width is not a divisor of the original width({}): {}\n" .format(ORIG_WIDTH, args.width)) exit(1) if args.datatype not in ["float", "float32", "int16"]: sys.stderr.write("Unrecognized data type: {}\n".format(args.datatype)) data_type = getattr(np, args.datatype) sub_cube_count = ORIG_WIDTH // args.width for hdf5_file in args.hdf5_files: m = re.compile("(.*)\\.hdf5$").match(os.path.basename(hdf5_file)) if m is None: sys.stderr.write("Unrecognized file name: {}\n".format(hdf5_file)) exit(1) hdf5_file_wo_ext = m.group(1) h = h5py.File(hdf5_file, "r") full = h["full"] unitPar = h["unitPar"] assert full.value.shape == tuple([ORIG_WIDTH]*3+[ORIG_NUM_PARAMS]) assert unitPar.value.shape == (ORIG_NUM_PARAMS,) full_transposed = full.value.transpose().astype(data_type) for ix, iy, iz in product(range(sub_cube_count), range(sub_cube_count), range(sub_cube_count)): cube = full_transposed[ :, (args.width*ix):(args.width*(ix+1)), (args.width*iy):(args.width*(iy+1)), (args.width*iz):(args.width*(iz+1)), ] assert cube.shape == tuple([ORIG_NUM_PARAMS]+[args.width]*3) out_path = os.path.join( args.out_dir, "{}_{}_{}_{}.hdf5".format(hdf5_file_wo_ext, ix, iy, iz)) with h5py.File(out_path, "w-") as hw: hw["full"] = cube hw["unitPar"] = unitPar.value
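# --- Illustrative invocation (added for clarity, not part of the script). The
# script name and file paths are placeholders:
#
#     python3 split_universes.py univ_000.hdf5 --out_dir dataset --width 128 --datatype int16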
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author  : EINDEX Li
@File    : __init__.py.py
@Created : 26/12/2017
"""
from aiospider.tools.singleton import OnlySingleton


class AIOSpider(metaclass=OnlySingleton):
    """Process-wide spider context holding the shared config and event loop."""

    def __init__(self, loop=None):
        self.config = dict()
        self.loop = loop

    def __eq__(self, other):
        # Two instances compare equal when they share the same config object.
        return id(self.config) == id(other.config)

    def __repr__(self):
        return '<AIOSpider config_keys={} loop={!r}>'.format(
            sorted(self.config), self.loop)
python
# @author: Michael Vorotyntsev # @email: [email protected] # @github: unaxfromsibiria import logging import string from enum import Enum from hashlib import sha256, md5 from random import SystemRandom _cr_methods = { 'sha256': sha256, 'md5': md5, } class ServiceGroup(Enum): service = 1 server = 2 web = 3 class CommandTargetType(Enum): exit = 0 auth_request = 1 auth = 2 client_data = 3 wait_command = 4 server_status = 5 methods_registration = 6 call_method = 7 wait_free = 8 problem = 9 ok = 10 server_call = 11 ping = 12 get_result = 13 class Protocol(object): _handlers = {} _options = {} _logger = None def __init__(self, **options): self._options.update(**options) @classmethod def add_handler(cls, target, handler): assert callable(handler) cls._handlers[target] = handler def processing(self, command, manager): if not self._logger: self._logger = logging.getLogger( self._options.get('logger_name')) handler = self._handlers.get(command.target) if not callable(handler): raise NotImplementedError( 'Unknown target {}!'.format(command.target)) return handler(command, manager, self._options, self._logger) # # handlers # # def auth_request(command, manager, options, logger): key = command.data variants = string.digits + string.ascii_letters rand = SystemRandom() size = len(key) client_solt = ''.join(rand.choice(variants) for _ in range(size)) content = '{}{}{}'.format(options.get('secret'), client_solt, key) _hash = _cr_methods.get(options.get('hash_method')) if hash: content = _hash(bytes(content, 'utf-8')).hexdigest() else: content = 'no method' return command.create( target=CommandTargetType.auth, data='{}:{}'.format(content, client_solt)) def send_client_data(command, manager, options, logger): manager.setup_cid(command.cid) return command.create( target=CommandTargetType.client_data, data={ 'workers': options.get('workers') or 1, 'group': ServiceGroup.server.value, }) def send_api_methods(command, manager, options, logger): return command.create( target=CommandTargetType.methods_registration, data={ 'methods': manager.get_public_methods(), }) def start_info(command, manager, options, logger): return None def send_status(command, manager, options, logger): return command.create( target=CommandTargetType.server_status, data={ 'status': manager.get_status().value, }) # # link # # Protocol.add_handler(CommandTargetType.auth_request, auth_request) Protocol.add_handler(CommandTargetType.client_data, send_client_data) Protocol.add_handler(CommandTargetType.methods_registration, send_api_methods) Protocol.add_handler(CommandTargetType.wait_command, start_info) Protocol.add_handler(CommandTargetType.server_status, send_status)
python
N = int(input())
# N*(N-1) is always even, so exact integer floor division avoids the float round-trip.
print(N * (N - 1) // 2)
python
# -*- coding: utf-8 eval: (yapf-mode 1) -*- # # January 13 2019, Christian E. Hopps <[email protected]> # # Copyright (c) 2019, LabN Consulting, L.L.C. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import absolute_import, division, unicode_literals, print_function, nested_scopes import argparse import fcntl import logging import io import os import socket import struct import sys import threading from . import iptfs TUNSETIFF = 0x400454ca IFF_TUN = 0x0001 IFF_TAP = 0x0002 IFF_NO_PI = 0x1000 logger = logging.getLogger(__file__) def usage(): print("usage: {} [-c|--connect server] [-p|--port service]\n", sys.argv[0]) sys.exit(1) def tun_alloc(devname): fd = os.open("/dev/net/tun", os.O_RDWR) rfd = io.open(fd, "rb", buffering=0) wfd = io.open(fd, "wb", buffering=0) # ff = io.open(fd, "rb") # f = io.open("/dev/net/tun", "rb", buffering=0) ifs = fcntl.ioctl(fd, TUNSETIFF, struct.pack("16sH", devname.encode(), IFF_TUN | IFF_NO_PI)) devname = ifs[:16] devname = devname.strip(b"\x00") return rfd, wfd, devname def connect(sname, service, isudp): # stype = socket.SOCK_DGRAM if isudp else socket.SOCK_STREAM proto = socket.IPPROTO_UDP if isudp else socket.IPPROTO_TCP for hent in socket.getaddrinfo(sname, service, 0, 0, proto): try: s = socket.socket(*hent[0:3]) if isudp: # Save the peer address iptfs.peeraddr = hent[4] s.connect(hent[4]) return s except socket.error: continue return None def accept(sname, service, isudp): # stype = socket.SOCK_DGRAM if isudp else socket.SOCK_STREAM proto = socket.IPPROTO_UDP if isudp else socket.IPPROTO_TCP for hent in socket.getaddrinfo(sname, service, 0, 0, proto): try: logger.info("Get socket") s = socket.socket(*hent[0:3]) logger.info("Set socketopt") s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) logger.info("Try to bind to: %s", str(hent[4])) s.bind(hent[4]) break except socket.error as e: logger.info("Got exception for %s: %s", str(hent), str(e)) continue else: logger.info("Can't bind to %s:%s", sname, service) return None if isudp: # Do PEEK to get first UDP address from client. 
logger.info("Server: waiting on initial UDP packet %s:%s:%s", sname, service, str(hent)) # pylint: disable=W0631 b = bytearray(9170) (n, iptfs.peeraddr) = s.recvfrom_into(b, 0, socket.MSG_PEEK) logger.info("Server: Got UDP packet from %s of len %d", iptfs.peeraddr, n) s.connect(iptfs.peeraddr) return (s, iptfs.peeraddr) logger.info("Listen 5 on %s", str(iptfs.peeraddr)) s.listen(5) logger.info("Doing accept.") return s.accept() def checked_main(*margs): parser = argparse.ArgumentParser() parser.add_argument( "-a", "--ack-rate", type=float, default=1.0, help="Rate in float seconds to send ACK info") parser.add_argument("-c", "--connect", help="Connect to server") parser.add_argument( "--congest-rate", type=float, default=0, help="Forced maximum egress rate in Kilobits") parser.add_argument("-d", "--dev", default="vtun%d", help="Name of tun interface.") parser.add_argument("--debug", action="store_true", help="Debug logging and checks.") parser.add_argument( "--no-egress", action="store_true", help="Do not create tunnel egress endpoint") parser.add_argument( "--no-ingress", action="store_true", help="Do not create tunnel ingress endpoint") parser.add_argument("-l", "--listen", default="::", help="Server listen on this address") parser.add_argument("-p", "--port", default="8001", help="TCP port to use.") # parser.add_argument("-u", "--udp", action="store_true", help="Use UDP instead of TCP") parser.add_argument("-r", "--rate", type=float, default=0, help="Tunnel rate in Kilobits") parser.add_argument("--trace", action="store_true", help="Trace logging.") parser.add_argument("-v", "--verbose", action="store_true", help="Verbose logging.") args = parser.parse_args(*margs) FORMAT = '%(asctime)-15s %(threadName)s %(message)s' if args.trace: iptfs.TRACE = True iptfs.DEBUG = True logging.basicConfig(format=FORMAT, level=logging.DEBUG) elif args.debug: iptfs.DEBUG = True logging.basicConfig(format=FORMAT, level=logging.DEBUG) elif args.verbose: logging.basicConfig(format=FORMAT, level=logging.DEBUG) else: logging.basicConfig(format=FORMAT, level=logging.INFO) riffd, wiffd, devname = tun_alloc(args.dev) logger.info("Opened tun device: %s", devname) if not args.connect: s, _ = accept(args.listen, args.port, True) logger.info("Accepted from client: %s", str(s)) else: s = connect(args.connect, args.port, True) logger.info("Connected to server: %s", str(s)) send_lock = threading.Lock() threads = [] if not args.no_ingress: threads.extend(iptfs.tunnel_ingress(riffd, s, send_lock, int(args.rate * 1000))) if not args.no_egress: threads.extend( iptfs.tunnel_egress(s, send_lock, wiffd, args.ack_rate, int(args.congest_rate * 1000))) for thread in threads: thread.join() return 0 def main(*margs): try: return checked_main(*margs) except Exception as e: # pylint: disable=W0703 logger.critical("Unexpected exception: %s", str(e)) sys.exit(1) __author__ = "Christian E. Hopps" __date__ = "January 13 2019" __version__ = "1.0" __docformat__ = "restructuredtext en"
python
from pyrete.settings import settings from . import ( get_attr_name, ParserLiterals, ) class DataLayer(object): """ The DataLayer is responsible for fetching data from the database. It parses the provided rules and fetches only the data required for running the rules. Example: .. code-block:: python from pyrete.core.nodes import ReteGraph from pyrete.core.data_layer import DataLayer rule = { 'key': 'some_rule', 'description': 'Some awesome description', ... } graph = ReteGraph() graph.load_rule(rule) # ---------------------- Fetch data from DB data = DataLayer().get_data( rules=[rule], filter={}, limit=10) """ def get_all_collections(self): """ Gets list of all collections in the database. """ return settings.DB.collection_names() def _get_keys(self, doc, parent=None): """ Gets list of all the keys in a dict, including nested dicts and dicts inside a list. Example: demo_dict = { "subtotal_price": "51.00", "billing_address": { "province" : "North Carolina", "city" : "Franklinton" }, "note_attributes": [ { "name": "address-type", "value": "residential", }, { ""name": "transit-time", "value": "1", } ], "token" : "384779c27a35e8fcc0c948ad87f0ac35" } Converts above into: ['subtotal_price', 'billing_address', 'billing_address.province', 'billing_address.city', 'note_attributes.name', 'note_attributes.value', 'token'] """ key_list = [] for key in list(doc.keys()): # Add parent.key if parent is present if parent: key_list.append(parent + '.' + key) else: key_list.append(key) if isinstance(doc[key], dict): # If nested dict, call this method again new_parent = parent + '.' + key if parent else key key_list.extend( self._get_keys(doc[key], new_parent)) elif isinstance(doc[key], list): if len(doc[key]) > 0 and isinstance(doc[key][0], dict): # If nested dict inside a list, call this method again new_parent = parent + '.' + key if parent else key key_list.extend( self._get_keys(doc[key][0], new_parent)) return key_list def get_collection_fields(self, collection_name): """ Gets list of all collections in the database. **Args:** * **collection_name** *(str)*: The name of the collection for which field names are to be fetched. **Returns:** Returns the list of field names of the given **collection_name**. """ if settings.DB[collection_name].find_one(): doc = settings.DB[collection_name].find_one() return self._get_keys(doc) else: return {} def _get_collection_data(self, rule, collection_name, filter={}, skip=0, limit=0): """ Gets only required data attributes from the database collection by evaluating projection for the given **collection_name**. **Args:** * **rule** *(dict)*: The rule dictionary. * **collection_name** *(str)*: The Collection Name for which projection needs to be evaluated. * **filter** *(dict)*: Optional. Dictionary of filter for querying filtered data. * **skip** *(int)*: Optional. The number of documents to be skipped while fetching the data. * **limit** *(int)*: Optional. The maximum number of records to be fetched. **Returns:** Data dictionary of the provided **collection_name**, fetched from the database. 
""" projection = [] for variable in rule['variables']: # Getting field names from "variables" coll_name, attr_name, fn_name, fn_type = get_attr_name(variable['value']) if attr_name and coll_name == collection_name: projection.append(attr_name) for condition in rule['when']['any']: # Getting field names from "any" coll_name, attr_name, fn_name, fn_type = get_attr_name(condition['name']) if attr_name and coll_name == collection_name: projection.append(attr_name) for condition in rule['when']['any']: # Getting field names from "value" if it is a "join condition" condition_value = condition['value'] if isinstance( condition_value, str) and condition_value.startswith( ParserLiterals.OBJECT_VALUE_IDENTIFIER): condition_value = condition['value'].replace(ParserLiterals.OBJECT_VALUE_IDENTIFIER, '') coll_name, attr_name, fn_name, fn_type = get_attr_name(condition_value) if attr_name and coll_name == collection_name: projection.append(attr_name) for condition in rule['when']['all']: # Getting field names from "all" coll_name, attr_name, fn_name, fn_type = get_attr_name(condition['name']) if attr_name and coll_name == collection_name: projection.append(attr_name) for condition in rule['when']['all']: # Getting field names from "value" if it is a "join condition" condition_value = condition['value'] if isinstance( condition_value, str) and condition_value.startswith( ParserLiterals.OBJECT_VALUE_IDENTIFIER): condition_value = condition['value'].replace(ParserLiterals.OBJECT_VALUE_IDENTIFIER, '') coll_name, attr_name, fn_name, fn_type = get_attr_name(condition_value) if attr_name and coll_name == collection_name: projection.append(attr_name) for action in rule['then']: # Getting field names from "then" for param in action['params']: coll_name, attr_name, fn_name, fn_type = get_attr_name(param['value']) if attr_name and coll_name == collection_name: projection.append(attr_name) projection.append('email') cursor = settings.DB[collection_name].find( filter=filter, projection=projection, skip=skip, limit=limit) # Return data instead of the cursor data = [] for datum in cursor: data.append(datum) return data def get_data(self, rules, filter={}, skip=0, limit=0): """ Gets the required data from the database. All the collections listed in the **collections** key of the rule. **Args:** * **rules** *(list of dict)*: The list of rules. * **filter** *(dict)*: Optional. Dictionary of filter for querying filtered data. * **skip** *(int)*: Optional. The number of documents to be skipped while fetching the data. * **limit** *(int)*: Optional. The maximum number of records to be fetched. **Returns:** Data dictionary of the provided **collection_name**, fetched from the database. """ data = {} for rule in rules: for collection_name in rule['collections']: data[collection_name] = self._get_collection_data( rule, collection_name, filter={}, skip=skip, limit=limit) return data
python
#!/usr/bin/env python
"""
Raven-django
============

Raven-Django is a Raven extension that provides full out-of-the-box support
for the `Django <https://www.djangoproject.com>`_ framework.

Raven itself is a Python client for `Sentry <http://www.getsentry.com/>`_.
"""

# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for m in ('multiprocessing', 'billiard'):
    try:
        __import__(m)
    except ImportError:
        pass

from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys

dev_requires = [
    'flake8>=2.0,<2.1',
]

tests_require = [
    'Django>=1.4',
    'mock',
    'pep8',
    'pytz',
    'pytest',
    'pytest-cov>=1.4',
    'pytest-django',
    'python-coveralls',
]


class PyTest(TestCommand):
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_suite = True

    def run_tests(self):
        # Import here, because outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)


setup(
    name='raven-django',
    version='0.0.0',
    author='Xavier Ordoquy',
    author_email='[email protected]',
    url='http://github.com/getsentry/raven-django',
    description='Raven-django is a Django extension for Raven (https://www.getsentry.com)',
    long_description=__doc__,
    packages=find_packages(exclude=("tests", "tests.*",)),
    zip_safe=False,
    install_requires=['raven'],
    extras_require={
        'tests': tests_require,
        'dev': dev_requires,
    },
    license='BSD',
    tests_require=tests_require,
    cmdclass={'test': PyTest},
    include_package_data=True,
    entry_points={},
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
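
# Illustrative usage only (not part of the original file): with the metadata
# above, a local checkout would typically be installed and tested with
# commands along these lines; the installable distribution name follows the
# `name` argument passed to setup() above.
#
#   pip install -e .            # editable install of the package itself
#   pip install -e ".[tests]"   # also pulls in the 'tests' extras defined above
#   python setup.py test        # runs pytest via the PyTest command class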
python
from typing import List

import allure
from markupsafe import Markup

from overhave.entities import OverhaveDescriptionManagerSettings


class DescriptionManager:
    """ Class for managing custom test-suite descriptions and applying them to the Allure report. """

    def __init__(self, settings: OverhaveDescriptionManagerSettings):
        self._settings = settings
        self._description: List[str] = []

    def apply_description(self) -> None:
        if self._description:
            joined_description = self._settings.blocks_delimiter.join(self._description)
            if not self._settings.html:
                allure.dynamic.description(joined_description)
                return
            allure.dynamic.description_html(Markup(joined_description))

    def add_description(self, value: str) -> None:
        self._description.append(value)

    def add_description_above(self, value: str) -> None:
        self._description.insert(0, value)
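

# A minimal usage sketch, not part of the original module. It assumes that
# OverhaveDescriptionManagerSettings exposes `blocks_delimiter` and `html`
# (the only attributes used above) and accepts them as keyword arguments; in
# a real run this would execute inside an allure-pytest test so that
# allure.dynamic has a test context to attach the description to.
def _example_usage() -> None:
    settings = OverhaveDescriptionManagerSettings(blocks_delimiter="\n\n", html=False)
    manager = DescriptionManager(settings=settings)
    manager.add_description("Checks that a registered user can log in.")
    manager.add_description_above("Feature: authentication")
    manager.apply_description()  # joins the blocks and sets the test description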
python
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

_RTOS_NONE = "//pw_build/constraints/rtos:none"

# Common select for tagging a target as only compatible with host OSes. This
# select implements the logic '(Windows or macOS or Linux) and not RTOS'.
# Example usage:
#
#   load("//pw_build:selects.bzl", "TARGET_COMPATIBLE_WITH_HOST_SELECT")
#
#   pw_cc_library(
#       name = "some_host_only_lib",
#       hdrs = ["host.h"],
#       target_compatible_with = select(TARGET_COMPATIBLE_WITH_HOST_SELECT),
#   )
TARGET_COMPATIBLE_WITH_HOST_SELECT = {
    "@platforms//os:windows": [_RTOS_NONE],
    "@platforms//os:macos": [_RTOS_NONE],
    "@platforms//os:linux": [_RTOS_NONE],
    "//conditions:default": ["@platforms//:incompatible"],
}
python