content: string (lengths 0 to 894k)
type: string (2 classes)
from simple_rl.amdp.AMDPPolicyGeneratorClass import AMDPPolicyGenerator
# from simple_rl.amdp.abstr_domains.grid_world.AbstractGridWorldStateMapperClass import AbstractGridWorldL1StateMapper
from simple_rl.apmdp.AP_MDP.cleanup.CleanupQMDPClass import CleanupQMDP
from simple_rl.apmdp.AP_MDP.cleanup.CleanupQStateClass import CleanupQState
from simple_rl.apmdp.AP_MDP.cleanup.AbstractCleanupMDPClass import CleanupL1MDP, CleanupL2MDP
from simple_rl.apmdp.AP_MDP.cleanup.AbstractCleanupStateMapperClass import AbstractCleanupL1StateMapper, AbstractCleanupL2StateMapper


class CleanupL2PolicyGenerator(AMDPPolicyGenerator):
    def __init__(self, l1MDP, state_mapper, verbose=False, env_file=[], constraints={}, ap_maps={}):
        '''
        Args:
            l1MDP (CleanupMDP): lower domain
            state_mapper (AbstractGridWorldL1StateMapper): to map l0 states to l1 domain
            verbose (bool): debug mode
        '''
        self.domain = l1MDP
        self.verbose = verbose
        self.state_mapper = state_mapper
        self.env_file = env_file

    def generate_policy(self, l2_state, grounded_action):
        '''
        Args:
            l1_state (CleanupL1State): generate policy in l1 domain starting from l1_state
            grounded_action (CleanupRootGroundedAction): TaskNode above defining the subgoal for current MDP
        '''
        mdp = CleanupL2MDP(init_state=l2_state, env_file=self.env_file,
                           constraints=grounded_action.goal_constraints,
                           ap_maps=grounded_action.ap_maps)
        return self.get_policy(mdp, verbose=self.verbose, max_iterations=50, horizon=50)

    def generate_abstract_state(self, l1_state):
        return self.state_mapper.map_state(l1_state)


class CleanupL1PolicyGenerator(AMDPPolicyGenerator):
    def __init__(self, l0MDP, state_mapper, verbose=False, env_file=[], constraints={}, ap_maps={}):
        '''
        Args:
            l0MDP (FourRoomMDP): lower domain
            state_mapper (AbstractGridWorldL1StateMapper): to map l0 states to l1 domain
            verbose (bool): debug mode
        '''
        self.domain = l0MDP
        self.verbose = verbose
        self.state_mapper = state_mapper
        self.env_file = env_file
        self.constraints = constraints
        self.ap_maps = ap_maps

    def generate_policy(self, l1_state, grounded_action):
        '''
        Args:
            l1_state (FourRoomL1State): generate policy in l1 domain starting from l1_state
            grounded_action (FourRoomRootGroundedAction): TaskNode above defining the subgoal for current MDP
        '''
        # destination_locations = self.grounded_action.l1_domain.
        # .floor_to_rooms[grounded_action.goal_state.agent_on_floor_number]
        mdp = CleanupL1MDP(l1_state, env_file=self.env_file,
                           constraints=grounded_action.goal_constraints,
                           ap_maps=grounded_action.ap_maps)
        return self.get_policy(mdp, verbose=self.verbose, max_iterations=50, horizon=50)

    def generate_abstract_state(self, l0_state):
        return self.state_mapper.map_state(l0_state)


class CleanupL0PolicyGenerator(AMDPPolicyGenerator):
    def __init__(self, l0_domain, verbose=False, env_file=[]):
        self.domain = l0_domain
        self.verbose = verbose
        self.env_file = env_file

    def generate_policy(self, state, grounded_task):
        '''
        Args:
            state (): plan in L0 starting from state
            grounded_task (FourRoomL1GroundedAction): L1 TaskNode defining L0 subgoal
        '''
        # destination_locations = self.domain.room_to_locs[grounded_task.goal_state.agent_in_room_number]
        # init_location = (state.x, state.y, state.obj_id)
        # print("generate_policy-CleanupL0PolicyGenerator is called:{}", grounded_task)
        mdp = CleanupQMDP(init_state=state, env_file=self.env_file,
                          constraints=grounded_task.goal_constraints,
                          ap_maps=grounded_task.ap_maps)
        return self.get_policy(mdp, verbose=self.verbose, max_iterations=50, horizon=100)  # 500, 100
python
from osbot_aws.apis.shell.Lambda_Shell import lambda_shell


@lambda_shell
def run(event, context):
    return 'testing lambda layer ... '
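# Added note (not from the original snippet): invoked locally, the decorated
# handler is expected to behave like a plain Lambda handler, e.g.
#   run({}, None)  # -> 'testing lambda layer ... '
# Whether the lambda_shell decorator passes ordinary events straight through to
# the wrapped function is an assumption about osbot_aws, not something the
# snippet above states.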
python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for deleting resources."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from googlecloudsdk.api_lib.composer import operations_util as operations_api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import util as command_util
from googlecloudsdk.core import log

import six


class EnvironmentDeletionWaiter(object):
  """Class for waiting for synchronous deletion of one or more Environments."""

  def __init__(self, release_track=base.ReleaseTrack.GA):
    self.pending_deletes = []
    self.release_track = release_track

  def AddPendingDelete(self, environment_name, operation):
    """Adds an environment whose deletion to track.

    Args:
      environment_name: str, the relative resource name of the environment
        being deleted
      operation: Operation, the longrunning operation object returned by the
        API when the deletion was initiated
    """
    self.pending_deletes.append(
        _PendingEnvironmentDelete(environment_name, operation))

  def Wait(self):
    """Polls pending deletions and returns when they are complete."""
    encountered_errors = False
    for pending_delete in self.pending_deletes:
      try:
        operations_api_util.WaitForOperation(
            pending_delete.operation,
            'Waiting for [{}] to be deleted'.format(
                pending_delete.environment_name),
            release_track=self.release_track)
      except command_util.OperationError as e:
        encountered_errors = True
        log.DeletedResource(
            pending_delete.environment_name,
            kind='environment',
            is_async=False,
            failed=six.text_type(e))
    return encountered_errors


class _PendingEnvironmentDelete(object):
  """Data class holding information about a pending environment deletion."""

  def __init__(self, environment_name, operation):
    self.environment_name = environment_name
    self.operation = operation
python
from imutils.video import VideoStream
from datetime import datetime
import imutils
import cv2
import numpy as np
import sys
import json
import os
import time
import inspect

# Configuration from MMM
CONFIG = json.loads(sys.argv[1])

# Computer vision lib files needed by OpenCV
path_to_file = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
facePath = path_to_file + '/haarcascade_frontalface_default.xml'
smilePath = path_to_file + '/haarcascade_smile.xml'
faceCascade = cv2.CascadeClassifier(facePath)
smileCascade = cv2.CascadeClassifier(smilePath)

log_path = path_to_file + '/../log/'
if not os.path.exists(log_path):
    os.makedirs(log_path)


def to_node(type, message):
    # Send message to MMM
    # convert to json and print (node helper will read from stdout)
    try:
        print(json.dumps({type: message}))
    except Exception:
        pass
    # stdout has to be flushed manually to prevent delays in the node helper
    # communication
    sys.stdout.flush()


# *************************************************************
# Main function
# *************************************************************

# Start video stream
vs = VideoStream(usePiCamera=CONFIG['usePiCam']).start()

# allow the camera sensor to warmup
time.sleep(2)
to_node('camera_ready', True)

# track smile time
smileTime = 0
endtime = time.time() + CONFIG['testRunTime']

while True:
    # take a frame every second
    time.sleep(1)

    # use VS instead of cv2.VideoCapture
    frame = vs.read()

    try:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    except:
        to_node('error', sys.exc_info()[0])
        break

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=8,
        minSize=(55, 55),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]

        smile = smileCascade.detectMultiScale(
            roi_gray,
            scaleFactor=1.2,
            minNeighbors=10,
            minSize=(20, 20),
            flags=cv2.CASCADE_SCALE_IMAGE
        )

        if (len(smile) > 0):
            smileTime += 1
            to_node('result', smileTime)

            # log the smile test with a selfie
            if smileTime == (CONFIG['smileLength'] / 2):
                for (x, y, w, h) in smile:
                    cv2.rectangle(roi_color, (x, y), (x + w, y + h), (255, 0, 0), 1)
                cv2.imwrite(log_path + datetime.now().isoformat("T") + '.jpg', frame)

    # cv2.imshow('Smile Detector', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    if smileTime >= CONFIG['smileLength']:
        smileTime = 0
        break

    if time.time() >= endtime:
        to_node('result', -1)
        break

vs.stop()
cv2.destroyAllWindows()
python
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by David J Turner ([email protected]) 10/01/2021, 16:51. Copyright (c) David J Turner

import numpy as np
from astropy.units import Quantity

from ...models.misc import power_law
from ...products.relation import ScalingRelation

xcs_sdss_r500_52 = ScalingRelation(np.array([1.67, 0.96]), np.array([0.13, 0.08]), power_law, Quantity(60),
                                   Quantity(0.8e+44, 'erg / s'), r"$\lambda$",
                                   r"E(z)$^{-1}$L$_{\rm{x},500,0.5-2.0}$",
                                   relation_author='Giles et al.', relation_year='In Prep', relation_doi='',
                                   relation_name='SDSSRM-XCS$_{T_{x},vol}$ 0.5-2.0keV',
                                   x_lims=Quantity([20, 220]))
python
def insertion_sort(A):
    for i in range(len(A) - 1):
        while i >= 0 and A[i + 1] < A[i]:
            A[i], A[i + 1] = A[i + 1], A[i]
            i -= 1
    return A


if __name__ == '__main__':
    import random
    arr = [random.randint(1, 10) for _ in range(10)]
    assert insertion_sort(arr) == sorted(arr)
    assert insertion_sort([3, 0, 4, -1]) == [-1, 0, 3, 4]
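# Added note (not part of the original snippet): insertion_sort mutates A in
# place and returns the same list object, so in the first assert arr is already
# sorted by the time sorted(arr) is evaluated; the check still holds because
# sorting an already-sorted list is idempotent.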
python
# # The MIT License (MIT) # # Copyright 2018 AT&T Intellectual Property. All other rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software # and associated documentation files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # """ .. module: lycan.serializations :platform: Unix .. version:: $$VERSION$$ .. moduleauthor:: Michael Stair <[email protected]> """ import six, json from lycan.message import * class OpenC2MessageEncoder(json.JSONEncoder): def _encode_message(self, obj, message): if obj.header: message["header"] = {} header = obj.header message["header"]["version"] = header.version if header.id: message["header"]["id"] = header.id if header.created: message["header"]["created"] = header.created if header.sender: message["header"]["sender"] = header.sender message["header"]["content_type"] = header.content_type if obj.body: body = obj.body if isinstance(body, OpenC2Command): message["command"] = {} self._encode_command(body, message["command"]) elif isinstance(body, OpenC2Response): message["response"] = {} self._encode_response(body, message["response"]) else: raise ValueError("Invalid OpenC2 message") else: raise ValueError("Invalid OpenC2 message") def _encode_command(self, obj, message): message["action"] = obj.action if isinstance(obj.target.specifiers, six.string_types): message["target"] = {str(obj.target): str(obj.target.specifiers)} else: target = str(obj.target) message["target"] = {target: {}} if obj.target.specifiers: for (k, v) in six.iteritems(obj.target.specifiers): message["target"][target][k] = v if obj.actuator: actuator = str(obj.actuator) message["actuator"] = {actuator: {}} if obj.actuator.specifiers: for (k, v) in six.iteritems(obj.actuator.specifiers): message["actuator"][actuator][k] = v if obj.id: message["id"] = str(obj.id) if obj.args: message["args"] = obj.args def _encode_response(self, obj, message): message["id"] = str(obj.id) message["id_ref"] = str(obj.id_ref) message["status"] = obj.status if obj.status_text: message["status_text"] = obj.status_text if obj.results: message["results"] = obj.results def default(self, obj): message = {} if isinstance(obj, OpenC2Message): self._encode_message(obj, message) if isinstance(obj, OpenC2Command): self._encode_command(obj, message) if isinstance(obj, OpenC2Response): self._encode_response(obj, message) return message class OpenC2MessageDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def _decode_message(self, obj): header = self._decode_header(obj["header"]) if "command" in obj: body = obj["command"] elif 
"response" in obj: body = obj["response"] else: raise ValueError("Invalid OpenC2 message") return OpenC2Message(header, body) def _decode_header(self, obj): if "version" not in obj: raise ValueError("Invalid OpenC2 header: version required") if "content_type" not in obj: raise ValueError("Invalid OpenC2 header: content_type required") return OpenC2Header(obj["version"], obj["command_id"] if "command_id" in obj else None, obj["created"] if "created" in obj else None, obj["sender"] if "sender" in obj else None, obj["content_type"] if "content_type" in obj else None) def _decode_command(self, obj): if "target" not in obj: raise ValueError("Invalid OpenC2 command: target required") target_name = list(obj["target"].keys())[0] target_specifiers = list(obj["target"].values())[0] if isinstance(target_specifiers, dict): target = OpenC2Target(target_name, **target_specifiers) elif isinstance(target_specifiers, six.string_types): target = OpenC2Target(target_name, target_specifiers) else: raise ValueError("Invalid OpenC2 command target") actuator = None if "actuator" in obj: actuator_name = list(obj["actuator"].keys())[0] actuator_specifiers = list(obj["actuator"].values())[0] actuator = OpenC2Actuator(actuator_name, **actuator_specifiers) return OpenC2Command(obj["action"], target, obj["id"] if "id" in obj else None, actuator, OpenC2Args(obj["args"]) if "args" in obj else None) def _decode_response(self, obj): if "id" not in obj: raise ValueError("Invalid OpenC2 response: id required") if "id_ref" not in obj: raise ValueError("Invalid OpenC2 response: id_ref required") if "status" not in obj: raise ValueError("Invalid OpenC2 response: status required") return OpenC2Response(obj["id"], obj["id_ref"], obj["status"], obj["status_text"] if "status_text" in obj else None, obj["results"] if "results" in obj else None) def object_hook(self, obj): if "header" in obj: message = self._decode_message(obj) elif "action" in obj: message = self._decode_command(obj) elif "id_ref" in obj: message = self._decode_response(obj) else: message = obj return message
python
# https://www.acmicpc.net/problem/2775
testCase = int(input())
for _ in range(testCase):
    # floor 0: room i houses i people
    list_base = [x for x in range(1, 15)]
    k = int(input())
    n = int(input())
    for j in range(k):
        list_new = []
        for l in range(n):
            if l - 1 >= 0:
                # room l on this floor = room l-1 on this floor + room l one floor below
                list_new.append(list_new[l - 1] + list_base[l])
            else:
                list_new.append(1)
        # this floor becomes the base for the next floor up
        list_base = list_new
    print(list_base[n - 1])
python
import tornado.ioloop, tornado.web, tornado.websocket, tornado.template import logging, uuid, subprocess, pykka from datetime import datetime from tornado.escape import json_encode, json_decode logger = logging.getLogger(__name__) # container for all current pusher connections connections = {} frontend = {} ## # Send a message to an individual connection # # @param recipient_connection_ids = array # @param type = string (type of event, ie connection_opened) # @param action = string (action method of this message) # @param message_id = string (used for callbacks) # @param data = array (any data required to include in our message) ## def send_message( recipient_connection_id, type, action, message_id, data ): message = { 'type': type, 'action': action, 'message_id': message_id, 'data': data } connections[recipient_connection_id]['connection'].write_message( json_encode(message) ) ## # Broadcast a message to all recipients # # @param action = string # @param data = array (the body of our message to send) ## def broadcast( action, data ): for connection in connections.itervalues(): message = { 'type': 'broadcast', 'action': action, 'data': data } connection['connection'].write_message( json_encode(message) ) # digest a protocol header into it's id/name parts def digest_protocol( protocol ): # if we're a string, split into list # this handles the different ways we get this passed (select_subprotocols gives string, headers.get gives list) if isinstance(protocol, basestring): # make sure we strip any spaces (IE gives "element,element", proper browsers give "element, element") protocol = [i.strip() for i in protocol.split(',')] # if we've been given a valid array try: clientid = protocol[0] connectionid = protocol[1] username = protocol[2] generated = False # invalid, so just create a default connection, and auto-generate an ID except: clientid = str(uuid.uuid4().hex) connectionid = str(uuid.uuid4().hex) username = str(uuid.uuid4().hex) generated = True # construct our protocol object, and return return {"clientid": clientid, "connectionid": connectionid, "username": username, "generated": generated} ## # Websocket server # # This is the actual websocket thread that accepts, digests and emits messages. # TODO: Figure out how to merge this into the main Mopidy websocket to avoid needing two websocket servers ## class PusherWebsocketHandler(tornado.websocket.WebSocketHandler): def initialize(self, frontend): self.frontend = frontend def check_origin(self, origin): return True # when a new connection is opened def open(self): # decode our connection protocol value (which is a payload of id/name from javascript) protocolElements = digest_protocol(self.request.headers.get('Sec-Websocket-Protocol', [])) connectionid = protocolElements['connectionid'] clientid = protocolElements['clientid'] self.connectionid = connectionid username = protocolElements['username'] created = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S') # construct our client object, and add to our list of connections client = { 'clientid': clientid, 'connectionid': connectionid, 'username': username, 'ip': self.request.remote_ip, 'created': created } connections[connectionid] = { 'client': client, 'connection': self } logger.info( 'Pusher connection established: '+ connectionid +' ('+ clientid +'/'+ username +')' ) # broadcast to all connections that a new user has connected broadcast( 'client_connected', client ) def select_subprotocol(self, subprotocols): # select one of our subprotocol elements and return it. 
This confirms the connection has been accepted. protocols = digest_protocol( subprotocols ) # if we've auto-generated some ids, the provided subprotocols was a string, so just return it right back # this allows a connection to be completed if protocols['generated']: return subprotocols[0] # otherwise, just return one of the supplied subprotocols else: return protocols['clientid'] # server received a message def on_message(self, message): messageJson = json_decode(message) # construct the origin client info messageJson['origin'] = { 'connectionid' : self.connectionid, 'clientid': connections[self.connectionid]['client']['clientid'], 'ip': self.request.remote_ip, 'username': connections[self.connectionid]['client']['username'] } logger.debug('Pusher message received: '+message) # query-based message that is expecting a response if messageJson['type'] == 'query': # fetch our pusher connections if messageJson['action'] == 'get_connections': connectionsDetailsList = [] for connection in connections.itervalues(): connectionsDetailsList.append(connection['client']) send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'connections': connectionsDetailsList } ) # change connection's client username elif messageJson['action'] == 'change_username': # username is the only value we allow clients to change connections[messageJson['origin']['connectionid']]['client']['username'] = messageJson['data'] # respond to request send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'connection': connections[messageJson['origin']['connectionid']]['client'] } ) # notify all clients of this change broadcast( 'connection_updated', { 'connections': connections[messageJson['origin']['connectionid']]['client'] }) # start radio elif messageJson['action'] == 'start_radio': # pull out just the radio data (we don't want all the message_id guff) radio = { 'enabled': 1, 'seed_artists': messageJson['seed_artists'], 'seed_genres': messageJson['seed_genres'], 'seed_tracks': messageJson['seed_tracks'] } radio = self.frontend.start_radio( radio ) send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'radio': radio } ) # stop radio elif messageJson['action'] == 'stop_radio': radio = self.frontend.stop_radio() send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'radio': self.frontend.radio } ) # fetch our current radio state elif messageJson['action'] == 'get_radio': send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'radio': self.frontend.radio } ) # get our spotify authentication token elif messageJson['action'] == 'get_spotify_token': send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'token': self.frontend.spotify_token } ) # refresh our spotify authentication token elif messageJson['action'] == 'refresh_spotify_token': token = self.frontend.refresh_spotify_token() send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'token': token } ) # get system version and check for upgrade elif messageJson['action'] == 'get_version': version = self.frontend.get_version() send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'version': version } ) # get system version and check for upgrade elif messageJson['action'] == 'perform_upgrade': version = self.frontend.get_version() 
version['upgrade_successful'] = self.frontend.perform_upgrade() send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'version': version } ) # notify all clients of this change broadcast( 'upgraded', { 'version': version }) # restart mopidy elif messageJson['action'] == 'restart': self.frontend.restart() # not an action we recognise! else: send_message( self.connectionid, 'response', messageJson['action'], messageJson['message_id'], { 'error': 'Unhandled action' } ) # point-and-shoot one-way broadcast elif messageJson['type'] == 'broadcast': # recipients array has items, so only send to specific clients if messageJson.has_key('recipients'): for connectionid in messageJson['recipients']: connectionid = connectionid.encode("utf-8") # make sure we actually have a connection matching the provided connectionid if connectionid in connections: connections[connectionid]['connection'].write_message(messageJson) else: logger.warn('Pusher: Tried to broadcast to connectionid '+connectionid+' but it doesn\'t exist!'); # empty, so send to all clients else: for connection in connections.itervalues(): # if we've set ignore_self, then don't send message to originating connection if messageJson.has_key('ignore_self'): if connection['client']['connectionid'] != messageJson['origin']['connectionid']: connection['connection'].write_message(messageJson) # send it to everyone else: connection['connection'].write_message(messageJson) logger.debug( 'Pusher: Message received from '+ self.connectionid ) # connection closed def on_close(self): if self.connectionid in connections: clientRemoved = connections[self.connectionid]['client'] logger.debug( 'Spotmop Pusher connection to '+ self.connectionid +' closed' ) # now actually remove it try: del connections[self.connectionid] except: logger.info( 'Failed to close connection to '+ self.connectionid ) broadcast( 'client_disconnected', clientRemoved )
python
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-03 13:32
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('democracy', '0004_lengthen_type_field'),
    ]

    operations = [
        migrations.AlterField(
            model_name='hearingcomment',
            name='author_name',
            field=models.CharField(blank=True, editable=False, max_length=255, null=True, verbose_name='author name'),
        ),
        migrations.AlterField(
            model_name='hearingcomment',
            name='title',
            field=models.CharField(blank=True, max_length=255, verbose_name='title'),
        ),
        migrations.AlterField(
            model_name='sectioncomment',
            name='author_name',
            field=models.CharField(blank=True, editable=False, max_length=255, null=True, verbose_name='author name'),
        ),
        migrations.AlterField(
            model_name='sectioncomment',
            name='title',
            field=models.CharField(blank=True, max_length=255, verbose_name='title'),
        ),
    ]
python
#encoding=utf-8 # bankfile_psr2000.py # This file is part of PSR Registration Shuffler # # Copyright (C) 2008 - Dennis Schulmeister <dennis -at- ncc-1701a.homelinux.net> # # This is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # It is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this file. If not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA ''' PURPOSE ======= This module contains the BankFile sub-class for dealing with registration bank files from the YAMAHA PSR-2000 keyboard. ''' # Public export of module content __all__ = [ "BankFile_PSR2000" ] # Import global modules import struct # Import application modules import bankfile from .. import const from .. import util # Define class class BankFile_PSR2000(bankfile.BankFile): ''' This class provides support for YAMAHA PSR-2000 bank files. ''' # Short names of supported keyboard models keyboardNames = [const.YAMAHA_PSR2000] # User-information shown on the keyboard information page groupName = _("Yamaha PSR-2000 and alike") information = _("Released in 2001 the Yamaha PSR-2000 marks the end of the highly successful PSR-9000 line. It shares many features of its big brothers the PSR-9000 and 9000pro, among them most sounds, styles and a very similar albeit updated operating system. Updates include a largely re-designed main screen, notation display as well as icons next to each object name (with the icon descriptor being a suffix to the name).") # Maximum amount of registrations maxReg = 8 # File extension fileExt = "reg" # Magic file header fileHeader = "\x52\x45\x47\x2D\x31\x30\x30\x2D" \ + "\x31\x30\x30\x2D\x31\x30\x30\x30" \ + "\x50\x53\x52\x32\x30\x30\x30\x78" \ + "\x00\x08\x00\x40" # Special padding between header and data blocks specialPadding = "\x24\xFF\xFF\xFF\xFF\xFF\xFF\xFF" \ + "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" \ + "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" \ + "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" \ + "\xFF\xFF\xFF\xFF\xFF\x00\x00\x00" \ + "\x00\x00\x00\x00\x00\x00\x00\x00" # Object initialization.................................................... def __init__(self, filename="", file=None, keyboardName=""): ''' Constructor. If neither a filename nor a file object is given a new bank file will be created in memory. If at least one is given the existing file will be used. If both are given the file object will be ignored. ''' # Initialize object from super-constructor bankfile.BankFile.__init__(self, filename=filename, file=file, keyboardName=keyboardName) def initEmptyFile(self): ''' This method gets called by the default constructor. It's meant to be overwritten by sub-classes in order to initialize a new object as being an empty bank file. ''' # Nothing to do pass def initFromExistingFile(self, file): ''' This method gets called by the default constructor. It's meant to be overwritten by sub-classes in order to initialize a new object from an existing bank file whise file object gets passed as argument. 
The most important taske to be carried out here is to extract all registrations from the given file, nicely pack them into Registration objects and to line them up in a list called self.regList. ''' # Slice out registrations into self.regList # NOTE: 0xffffffff marks non-existing registration. Other values # give absolute byte pointer within file. for i in range(self.__class__.maxReg): # Read location of registration file.seek(32 + (4 * i)) startPos = file.read(4) # Skip empty registrations if startPos == "\xff\xff\xff\xff": continue # Read length of registration block start = struct.unpack(">I", startPos)[0] file.seek(start + 6) # RGST01.. blockLength = file.read(2) length = struct.unpack(">H", blockLength)[0] # Slice out binary data of registration file.seek(start) binary = file.read(length) # Create Registration object and put it into the list self.regList[i] = self.createRegistrationObject(binary) # Static helper methods.................................................... def stripName(cls, name=""): ''' This method needs to be reimplemented by subclasses. It's meant to remove file extions and other non-name data (like icons) from name strings. ''' return util.stripNameYamaha( fileExt = cls.fileExt, name = name ) stripName = classmethod(stripName) # File access.............................................................. def storeBankFile(self, filename): ''' This method stores the contents of self to a keyboard readable bank file. File format is as follows: ========= ======= ===================================================== Position Length Description ========= ======= ===================================================== 0 28 File header 28 4 Amount of registrations 32 32 Access list with location of registration (8x) 64 48 Special padding 112 .. Registration blocks (up to 8x) ========= ======= ===================================================== All numbers are stored as BigEndian, 4-Byte, Unsigned Integer. ''' # Prepare access list and large data block nRegs = 0 startPosi = 112 accessList = "" dataBlock = "" for reg in self.regList: # Skip empty registrations if not reg: accessList += "\xFF\xFF\xFF\xFF" continue # Determine effective amount of registrations nRegs += 1 # Write access list and update location for next registration posi = startPosi + len(dataBlock) accessList += struct.pack(">I", posi) # BE, UInt, 4 Bytes dataBlock += reg.getBinaryContent() # Write file contents file = open(filename, "wb+") file.write(self.__class__.fileHeader) # File header file.write(struct.pack("<I", nRegs)) # Amount of registrations (LE???) file.write(accessList) # Location pointers file.write(self.__class__.specialPadding) # Special padding file.write(dataBlock) # Registration block file.close() def canUnderstandFile(cls, file=None): ''' A class method which checks whether the class can be used for accessing the given file's contents. A file object which can be read from gets passed to the method. Method must return either True or False. ''' # Compare file header headerSize = len(cls.fileHeader) file.seek(0) fileHeader = file.read(headerSize) return fileHeader == cls.fileHeader canUnderstandFile = classmethod(canUnderstandFile) def getKeyboardNameFromFile(cls, file=None, filename=""): ''' A class method which determines the keyboard model of a give file. If the model can't be guessed an appexceptions.UnknownKeyboardModel exception gets raised. The file can be given either by its filename or by a file object. If both are given the file object will be ignored. 
''' # Make sure to have a file object at hand file = util.getFileObject(filename, file) if cls.canUnderstandFile(file=file): return const.YAMAHA_PSR2000 else: raise appexceptions.UnknownKeyboardModel(cls) getKeyboardNameFromFile = classmethod(getKeyboardNameFromFile)
python
from rest_framework import serializers
from .models import EnrollmentSecret, MetaBusinessUnit, Tag


class MetaBusinessUnitSerializer(serializers.ModelSerializer):
    api_enrollment_enabled = serializers.BooleanField(required=False)

    class Meta:
        model = MetaBusinessUnit
        fields = ("id", "name", "api_enrollment_enabled")
        read_only_fields = ("api_enrollment_enabled",)

    def validate_api_enrollment_enabled(self, value):
        if self.instance and self.instance.api_enrollment_enabled() and not value:
            raise serializers.ValidationError("Cannot disable API enrollment")
        return value

    def create(self, validated_data):
        api_enrollment_enabled = validated_data.pop("api_enrollment_enabled", False)
        mbu = super().create(validated_data)
        if api_enrollment_enabled:
            mbu.create_enrollment_business_unit()
        return mbu

    def update(self, instance, validated_data):
        api_enrollment_enabled = validated_data.pop("api_enrollment_enabled", False)
        mbu = super().update(instance, validated_data)
        if not mbu.api_enrollment_enabled() and api_enrollment_enabled:
            mbu.create_enrollment_business_unit()
        # TODO: switch off api_enrollment_enabled
        return mbu


class TagSerializer(serializers.ModelSerializer):
    class Meta:
        model = Tag
        fields = ("id", "meta_business_unit", "name", "slug", "color")
        # TODO: Taxonomy


class EnrollmentSecretSerializer(serializers.ModelSerializer):
    class Meta:
        model = EnrollmentSecret
        fields = ("id", "secret", "meta_business_unit", "tags",
                  "serial_numbers", "udids", "quota", "request_count")
python
from discord.ext import commands from discord.utils import get import discord from datetime import datetime from bot import Shiro from util import strfdelta from apis.anilist_api import find_anime_by_id import asyncio class ModsCog(commands.Cog): def __init__(self, bot: Shiro): self.bot = bot @commands.command() async def uptime(self, ctx): if self.bot.is_mod(ctx.author): timedif = strfdelta(datetime.utcnow() - self.bot.start_time) await ctx.send(content=f"I have been up for **{timedif}**!") @commands.command() async def purge(self, ctx, amount): if self.bot.is_mod(ctx.author): amount = int(amount) purge_list = [] async for i in ctx.channel.history(limit=amount): purge_list.append(i) amount_deleted = purge_list.__len__() cycles = (amount_deleted // 100) + 1 for i in range(cycles): delete_from = i * 100 delete_to = (i + 1) * 100 await ctx.channel.delete_messages(purge_list[delete_from:delete_to]) embed = self.bot.basic_embed(True, "**{}** messages have been deleted!".format(amount_deleted)) message = await ctx.send(embed=embed) self.bot.send_log( "Msg Purge", f"{ctx.message.author}: Purged {amount_deleted} messages in {ctx.message.channel} - " f"See list of purged messages below:\n") self.bot.send_log("Msg Purge", "====================================================================") for message in purge_list: user_name = f"{message.author}".ljust(18, " ") print(f"[{message.created_at}] {user_name}: {message.content}") self.bot.send_log("Msg Purge", "====================================================================") await asyncio.sleep(10) await message.delete() @commands.command() async def echo(self, ctx, destination, *args): if self.bot.is_mod(ctx.author): message = "" for string in args: message += (string + " ") message = message.strip() dest_channel_id = destination.replace("<", "").replace(">", "").replace("#", "") try: dest_channel_id = int(dest_channel_id) dest_channel = get(ctx.guild.channels, id=int(dest_channel_id)) except ValueError: dest_channel = get(ctx.guild.channels, name=dest_channel_id) if not dest_channel: dest_channel = get(ctx.guild.channels, name=destination) if isinstance(dest_channel, discord.TextChannel): self.bot.send_log("Mod Echo", "{} sent a message via echo to #{}".format( ctx.message.author, dest_channel.name, message)) await dest_channel.send(content=message) embed = self.bot.basic_embed(True, "Message **sent**!") await ctx.send(embed=embed) else: self.bot.send_log("Mod Echo", "{} tried to send a message to {} (Failed)".format( ctx.message.author, dest_channel, message)) embed = self.bot.basic_embed(False, "Channel **not found**!") await ctx.send(embed=embed) @commands.command() async def mute(self, ctx, user_id): if self.bot.is_mod(ctx.author): user_id = self.bot.mention_cleanup(user_id) muted_user = self.bot.senko_guild.get_member(int(user_id)) if muted_user: self.bot.send_log("Mute", "{}: Mute pending user {}({}) found: Applying mute.".format( ctx.message.author, user_id, muted_user)) await muted_user.add_roles(self.bot.roles.muted) embed = self.bot.basic_embed(True, "User **Muted**!") await ctx.send(embed=embed) else: self.bot.send_log("Mute", "{}: Mute pending user {}({}) not found.".format( ctx.message.author, user_id, muted_user)) embed = self.bot.basic_embed(False, "User **not found**!") await ctx.send(embed=embed) @commands.command() async def unmute(self, ctx, user_id): if self.bot.is_mod(ctx.author): user_id = self.bot.mention_cleanup(user_id) try: unmuted_user = self.bot.senko_guild.get_member(int(user_id)) ismuted = get(unmuted_user.roles, 
id=self.bot.roles.muted.id) if unmuted_user: if ismuted: self.bot.send_log("Unmute", "{}: Unmute pending user {}({}) found: Removing mute.".format( ctx.author, user_id, unmuted_user)) await unmuted_user.remove_roles(self.bot.roles.muted) embed = self.bot.basic_embed(True, "User **Unmuted**!") else: self.bot.send_log("Unmute", "{}: Unmute pending user {}({}) found: ERROR! " "User is not muted.".format( ctx.message.author, user_id, unmuted_user)) embed = self.bot.basic_embed(False, "User is **not muted**!") else: self.bot.send_log("Unmute", "{}: Unmute pending user {}({}) not found.".format( ctx.message.author, user_id, unmuted_user)) embed = self.bot.basic_embed(False, "User **not found**!") except AttributeError: self.bot.send_log("Unmute", "{}: Unmute pending user {} not found.".format(ctx.message.author, user_id)) embed = self.bot.basic_embed(False, "User **not found**!") await ctx.send(embed=embed) @commands.command() async def ban(self, ctx, user_id): if self.bot.is_mod(ctx.author): user_id = self.bot.mention_cleanup(user_id) try: ban_user = self.bot.senko_guild.get_member(int(user_id)) if ban_user: self.bot.send_log("Ban", "{}: Ban pending user {}({}) found: Banning.".format( ctx.message.author, user_id, ban_user)) await self.bot.senko_guild.ban(ban_user) else: fake_member = discord.Object(id=int(user_id)) await self.bot.senko_guild.ban(fake_member) self.bot.send_log("Ban", "{}: Ban pending user {}({}) not found in server: Fake Banning.".format( ctx.message.author, user_id, ban_user)) embed = self.bot.basic_embed(True, "User **banned**!") await ctx.send(embed=embed) except (discord.NotFound, TypeError): self.bot.send_log("Ban", "{}: Ban pending user {} not found.".format(ctx.message.author, user_id)) embed = self.bot.basic_embed(False, "User **not found**!") await ctx.send(embed=embed) @commands.command() async def unban(self, ctx, user_id): if self.bot.is_mod(ctx.author): user_id = self.bot.mention_cleanup(user_id) if get(self.bot.senko_guild.members, id=int(user_id)): embed = self.bot.basic_embed(False, "User is **not banned**!") await ctx.send(embed=embed) else: try: fake_member = discord.Object(id=int(user_id)) await self.bot.senko_guild.unban(fake_member) self.bot.send_log("Unban", "{}: Unban pending user {}({}) not found in server: Unbanning.".format( ctx.message.author, user_id, fake_member)) embed = self.bot.basic_embed(True, "User **unbanned**!") await ctx.send(embed=embed) except discord.NotFound: self.bot.send_log("Unban", "{}: Unban pending user {} not found.".format( ctx.message.author, user_id)) embed = self.bot.basic_embed(False, "User **not found**!") await ctx.send(embed=embed) @commands.command() async def pingrole(self, ctx, role): if self.bot.is_mod(ctx.author): role = role.lower() if role == "server" or role == "s": role = self.bot.roles.news_server elif role == "anime" or role == "a": role = self.bot.roles.news_anime else: embed = self.bot.basic_embed(False, "Role {} **not found**!".format(role)) await ctx.send(embed=embed) return await ctx.message.delete() await role.edit(mentionable=True) await ctx.send(content=role.mention) await role.edit(mentionable=False) await ctx.send("** **") @commands.command() async def mrf(self, ctx): if self.bot.is_mod(ctx.author): loading = discord.Embed( title=":hourglass: **Refreshing** embeds for *#24h* channel...", timestamp=ctx.message.created_at, color=0xffa749 ) msg = await ctx.send(embed=loading) await msg.edit(embed=loading) await self.bot.refresh_24h() embed = discord.Embed( title=":white_check_mark: **Refreshed** embeds 
for *#24h* channel!", timestamp=ctx.message.created_at, color=0x89af5b ) await msg.edit(embed=embed) @commands.command() async def track(self, ctx, aid): if self.bot.is_mod(ctx.author): if aid == "-l": desc = "─────────────────" for i in self.bot.tracking: title_name = None animeid = None for key, value in i.items(): animeid = key for k, v in value.items(): title_name = v if len(title_name) >= 41: title_name = title_name[:40].strip() + "..." str_to_add = "\n`{}` - {}".format(str(animeid).rjust(6, "0"), title_name) desc += str_to_add embed = discord.Embed( title=":notepad_spiral: **Currently tracking anime:**", description=desc, color=0xcdd4db, timestamp=datetime.utcnow() ) await ctx.send(embed=embed) else: data = await find_anime_by_id(aid) if data: duplicate = False if self.bot.tracking: for i in self.bot.tracking: for key, value in i.items(): if str(key) == str(data["id"]): duplicate = True title = data["title"]["romaji"] if not duplicate: is_releasing = False status = data["status"] if status.lower() == "releasing": is_releasing = True else: try: x = data["airingSchedule"]["edges"][0]["node"]["episode"] if x: is_releasing = True except (IndexError, KeyError): embed = self.bot.basic_embed( False, "__{}__ **not currently releasing**!".format(data["title"]["romaji"])) await ctx.send(embed=embed) return if is_releasing: to_append = { str(data["id"]): { "title": title } } await self.bot.append_tracking(to_append) self.bot.send_log("Tracking", "Started tracking {} ({}) by {}".format( title, data["id"], ctx.author)) embed = self.bot.basic_embed(True, "Started tracking **{}**!".format(title)) else: embed = self.bot.basic_embed( False, "__{}__ **not currently releasing**!".format(data["title"]["romaji"])) else: embed = self.bot.basic_embed(False, "Already tracking **{}**!".format(title)) else: embed = self.bot.basic_embed(False, "No anime with ID **{}** found!".format(aid)) embed.set_footer(text="Use !anime <name> -id to get the anilist ID.") await ctx.send(embed=embed) def setup(bot): bot.add_cog(ModsCog(bot))
python
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.

##
## Test weakref
##
## * Since the IronPython GC heavily differs from CPython GC (absence of reference counting),
##   the CPython unit tests cannot fully be made pass on IronPython without modification
##
## * Comments below in double quotes are from the Python standard library documentation.
##
## * Issues of the current implementation of _weakref.cs:
##
##   - weakref finalization callbacks are run in the CLR finalizer thread.
##     This is likely to cause data races in user code.
##   - WeakRefTracker.cs code and internal state handling most likely is not
##     implemented in a thread-safe way.
##

import gc
import weakref

from iptest import IronPythonTestCase, run_test


class C(object):
    def __init__(self, value=0):
        self.value = value

    def __hash__(self):
        return hash(self.value)

    def __eq__(self, other):
        return isinstance(other, C) and self.value == other.value

    def __ne__(self, other):
        return not self.__eq__(other)


class WeakrefTest(IronPythonTestCase):

    def _create_weakrefs(self, o, count, cb=None):
        # force creation of different instances for the same target
        if not cb and count > 1:
            cb = lambda r: None

        if count == 1:
            return weakref.ref(o, cb)
        elif count == 2:
            r1, r2 = weakref.ref(o, cb), weakref.ref(o, cb)
            self.assertTrue(r1 is not r2)
            return r1, r2
        else:
            raise Exception("not implemented")

    def test_ref_callable(self):
        # "if the referent is no longer alive, calling the reference object will cause None to
        # be returned"
        o = C("a")
        r = self._create_weakrefs(o, 1)
        # for reasons stated in create_weakrefs(), we cannot test on instance equality
        self.assertTrue(r().value == "a")

        del o
        gc.collect()
        self.assertTrue(r() is None)

    def test_ref_hashable(self):
        # "Weak references are hashable if the object is hashable. They will maintain their hash value
        # even after the object was deleted. If hash() is called the first time only after the object
        # was deleted, the call will raise TypeError."
        o = C("a")
        r1, r2 = self._create_weakrefs(o, 2)
        self.assertTrue(hash(r1) == hash("a"))

        del o
        gc.collect()
        self.assertTrue(r1() is None)
        self.assertTrue(r2() is None)
        self.assertTrue(hash(r1) == hash("a"))
        self.assertRaises(TypeError, lambda: hash(r2))

    def test_ref_equality(self):
        # "If the referents are still alive, two references have the same equality relationship as
        # their referents (regardless of the callback). If either referent has been deleted, the
        # references are equal only if the reference objects are the same object."
        o, o2 = C("a"), C("a")
        r1, r2 = self._create_weakrefs(o, 2)
        r3 = self._create_weakrefs(o2, 1)
        self.assertTrue(r1 == r2)
        self.assertTrue(r1 == r3)

        del o, o2
        gc.collect()
        self.assertTrue(r1() is None)
        self.assertTrue(r3() is None)
        self.assertTrue(r1 != r2)
        self.assertTrue(r1 != r3)


run_test(__name__)
python
# Done by Lauro Ribeiro (12/02/2021)
# Tutorial 7 - Use the Where Clause

import sqlite3

# Connect to database
conn = sqlite3.connect('customer.db')

# Create a cursor
c = conn.cursor()

# Query the database
c.execute("SELECT * FROM customers WHERE email LIKE '%gmail.com'")

items = c.fetchall()
for item in items:
    print(item)

# Commit our command
conn.commit()

# Close our connection
conn.close()
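# Added illustration (not part of the original tutorial): the same filter
# written as a parameterized query, so sqlite3 handles escaping instead of the
# value being embedded in the SQL string.
conn = sqlite3.connect('customer.db')
c = conn.cursor()
domain = '%gmail.com'
c.execute("SELECT * FROM customers WHERE email LIKE ?", (domain,))
for item in c.fetchall():
    print(item)
conn.close()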
python
import os, sys

# Kiny was here XD


def restart():
    python = sys.executable
    os.execl(python, python, *sys.argv)  # fixed: os.excl does not exist


try:
    import colorama, requests
except:
    os.system('pip install -r requirements.txt')
    restart()

try:
    from data import ui, numero, cpf, nome, rg, email
except Exception as e:
    print('ARQUIVO CORROMPIDO! ' + str(e))  # "corrupted file"
    exit()

C = "\033[97;1m"
G = "\033[92;1m"
P = "\033[1;35m"

Sair = False  # "Sair" = exit flag
while (Sair == False):
    try:
        op = int(ui.menu(ms0=f'\n{C}[{G}1{C}] Numero\n{C}[{G}2{C}] CPF\n{C}[{G}3{C}] Nome\n{C}[{G}4{C}] RG\n{C}[{G}5{C}] EMAIL\n{C}\n[{P}0{C}] Sair'))
        if op == 1:
            numero.consultar()
        elif op == 2:
            cpf.consultar()
        elif op == 3:
            nome.consultar()
        elif op == 4:
            rg.consultar()
        elif op == 5:
            email.consultar()
        elif op == 0:
            ui.clear()
            Sair = True
    except:
        ui.error()
python
# -*- coding: utf-8 -*-

import pytest

from wemake_python_styleguide.violations.best_practices import (
    YieldInComprehensionViolation,
)
from wemake_python_styleguide.visitors.ast.loops import (
    WrongComprehensionVisitor,
)

list_comprehension = """
def container():
    nodes = [{0} for xy in "abc"]
"""

generator_expression = """
def container():
    nodes = ({0} for xy in "abc")
"""

set_comprehension = """
def container():
    nodes = {{{0} for xy in "abc"}}
"""


# We ignore `DeprecationWarning: 'yield' inside generator expression` here
@pytest.mark.filterwarnings('ignore:DeprecationWarning')
@pytest.mark.parametrize('code', [
    list_comprehension,
    generator_expression,
    set_comprehension,
])
def test_yield_keyword_in_comprehension(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
    mode,
):
    """Testing that using `yield` keyword is not allowed."""
    tree = parse_ast_tree(mode(code.format('(yield xy)')))

    visitor = WrongComprehensionVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [YieldInComprehensionViolation])


@pytest.mark.parametrize('code', [
    list_comprehension,
    generator_expression,
    set_comprehension,
])
def test_comprehension_without_yield(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
    mode,
):
    """Testing that regular comprehensions are allowed."""
    tree = parse_ast_tree(mode(code.format('xy')))

    visitor = WrongComprehensionVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
python
# ##################################################################
# SAMPLE USAGE
# ##################################################################

if __name__ == '__main__':

    # ####################
    # IMPORT
    # ####################

    import json
    import cProfile

    from .client import deltaClient  # facade

    # ####################
    # SOME API KEYS
    # ####################

    with open("/etc/config/delta.json") as config_file:
        config = json.load(config_file)

    key = config["KEY"]
    secret = config["SECRET"]

    # ####################
    # INSTANTIATE CLIENT
    # ####################

    delta = deltaClient(key=key, secret=secret)

    # Test with low expectations
    server_time = delta.send_request.server_time()
    print(server_time)

    account_summary = delta.send_request.account_summary()
    print(account_summary)

    pos_btc = delta.send_request.all_positions()

    # ####################
    # TEST : REQUESTS
    # ####################

    print("Requesting server_time.")
    server_time = delta.send_request.server_time()
    print(server_time)

    print("Requesting index_level.")
    index_level = delta.send_request.index_level("BTC")
    print(index_level)

    print("Requesting BTC index_level.")
    BTC_index_level = delta.send_request.btc_index()
    print(BTC_index_level)

    print("Requesting ETH index_level.")
    ETH_index_level = delta.send_request.eth_index()
    print(ETH_index_level)

    print("Requesting delta instruments.")
    instruments = delta.send_request.instruments("BTC")
    print(instruments)

    print("Requesting delta trades (from the last, going back up to last - count).")
    some_trades = delta.send_request.trades(["BTC-PERPETUAL", "ETH-PERPETUAL"])
    print(some_trades)

    print("Requesting all delta trades for given instruments.")
    futures = ["ETH-PERPETUAL", "BTC-PERPETUAL"]
    options = ["BTC-29NOV19-8000-C"]
    books = delta.send_request.orderbooks(futures)
    print(books)
python
from .currency import *
from .profile import *
from .account import *
from .base import *
from .transaction import *
from .budget import *
python
from kratos import * import kratos as kts def create_port_pkt(data_width, consumer_ports): return PackedStruct(f"port_pkt_{data_width}_{consumer_ports}", [("data", data_width, False), ("port", consumer_ports, False), ("valid", 1, False)]) class RWArbiter(Generator): ''' The read_write arbiter dynamically schedules reads and writes to a single bank of SRAM ''' def __init__(self, fetch_width=16, data_width=16, memory_depth=32, num_tiles=1, int_in_ports=2, int_out_ports=2, strg_wr_ports=2, strg_rd_ports=2, read_delay=0, rw_same_cycle=True, separate_addresses=True): assert not (memory_depth & (memory_depth - 1)), "Memory depth needs to be a power of 2" super().__init__("rw_arbiter") # Absorb inputs self.fetch_width = fetch_width self.data_width = data_width self.fw_int = int(self.fetch_width / self.data_width) self.int_in_ports = int_in_ports self.int_out_ports = int_out_ports self.strg_wr_ports = strg_wr_ports self.strg_rd_ports = strg_rd_ports self.memory_depth = memory_depth self.num_tiles = num_tiles self.mem_addr_width = clog2(self.num_tiles * self.memory_depth) self.read_delay = read_delay self.rw_same_cycle = rw_same_cycle self.separate_addresses = separate_addresses # Clock and Reset self._clk = self.clock("clk") self._rst_n = self.reset("rst_n") # Generate the packed struct of the right size port_pkt_struct = create_port_pkt(self.fetch_width, self.int_out_ports) # Inputs self._wen_in = self.input("wen_in", self.strg_wr_ports) # self._wen_en = self.input("wen_en", self.strg_wr_ports) self._w_data = self.input("w_data", self.data_width, size=(self.strg_wr_ports, self.fw_int), explicit_array=True, packed=True) self._w_addr = self.input("w_addr", self.mem_addr_width, size=self.strg_wr_ports, explicit_array=True, packed=True) self._data_from_mem = self.input("data_from_mem", self.data_width, size=(self.strg_rd_ports, self.fw_int), explicit_array=True, packed=True) self._mem_valid_data = self.input("mem_valid_data", self.strg_rd_ports) self._out_mem_valid_data = self.output("out_mem_valid_data", self.strg_rd_ports) self._ren_in = self.input("ren_in", self.int_out_ports) self._ren_en = self.input("ren_en", self.int_out_ports) self._rd_addr = self.input("rd_addr", self.mem_addr_width, size=self.int_out_ports, explicit_array=True, packed=True) self._rd_addr_sel = self.var("rd_addr_sel", self.mem_addr_width, size=self.strg_rd_ports, explicit_array=True, packed=True) # Outputs self._out_data = self.output("out_data", self.data_width, size=(self.strg_rd_ports, self.fw_int), explicit_array=True, packed=True) self._out_port = self.output("out_port", self.int_out_ports, size=self.strg_rd_ports, explicit_array=True, packed=True) self._out_valid = self.output("out_valid", self.strg_rd_ports) self._cen_mem = self.output("cen_mem", self.strg_rd_ports) self._wen_mem = self.output("wen_mem", self.strg_wr_ports) self._data_to_mem = self.output("data_to_mem", self.data_width, size=(self.strg_wr_ports, self.fw_int), explicit_array=True, packed=True) # In this case, need separate addresses if self.separate_addresses: self._wr_addr_to_mem = self.output("wr_addr_to_mem", self.mem_addr_width, size=self.strg_wr_ports, explicit_array=True, packed=True) self._rd_addr_to_mem = self.output("rd_addr_to_mem", self.mem_addr_width, size=self.strg_rd_ports, explicit_array=True, packed=True) # If the addresses are combined, we better have in==out else: assert self.strg_rd_ports == self.strg_wr_ports, \ "Cannot have coalesced address with mismatch port count" assert not self.rw_same_cycle, \ "Cannot read and write with a 
shared address...set rw_same_cycle to false" self._addr_to_mem = self.output("addr_to_mem", self.mem_addr_width, size=self.strg_rd_ports, explicit_array=True, packed=True) self._out_ack = self.output("out_ack", self.int_out_ports) # Local # self._rd_data = self.var("rd_data", self.fetch_width) self._wen_int = self.var("wen_int", self.strg_wr_ports) self._ren_int = self.var("ren_int", self.int_out_ports) self.wire(self._ren_int, self._ren_in & self._ren_en) self.wire(self._wen_int, self._wen_in) # & self._wen_en) self._rd_valid = self.var("rd_valid", self.strg_rd_ports) self._rd_port = self.var("rd_port", self.int_out_ports, size=self.strg_rd_ports, explicit_array=True, packed=True) self._next_rd_port = self.var("next_rd_port", self.int_out_ports, size=self.strg_rd_ports, explicit_array=True, packed=True) # For demuxing the read ports self._done = self.var("done", self.strg_rd_ports) self.add_code(self.set_next_read_port_lowest) if(self.strg_rd_ports > 1): self._idx_cnt = self.var("idx_cnt", 5, size=self.strg_rd_ports - 1, explicit_array=True, packed=True) for i in range(self.strg_rd_ports - 1): self.add_code(self.set_next_read_port_alt, index=i + 1) # If we have more than one read port, we need to use slightly different logic # to set the other reads... self._next_rd_port_red = self.var("next_rd_port_red", self.int_out_ports) for i in range(self.int_out_ports): temp_port = self._next_rd_port[0][i] for j in range(self.strg_rd_ports - 1): temp_port = kts.concat(temp_port, self._next_rd_port[j + 1][i]) self.wire(self._next_rd_port_red[i], temp_port.r_or()) # The next read port can be used to acknowledge reads # We do not need to gate the ack if we can read and write in the same cycle if self.rw_same_cycle: self.wire(self._out_ack, self._next_rd_port_red) else: self.wire(self._out_ack, self._next_rd_port_red & kts.concat(*([~self._wen_int] * self._out_ack.width))) # self.add_code(self.mem_controls) if self.separate_addresses: for i in range(self.strg_wr_ports): self.add_code(self.mem_controls_wr, idx=i) for i in range(self.strg_rd_ports): self.add_code(self.mem_controls_rd, idx=i) else: for i in range(self.strg_rd_ports): self.add_code(self.mem_controls_combined, idx=i) if self.read_delay == 1: for i in range(self.strg_rd_ports): self.add_code(self.next_read_valid, idx=i) else: for i in range(self.strg_rd_ports): self.add_code(self.zero_delay_read, idx=i) self.add_code(self.output_stage) @always_comb def mem_controls_wr(self, idx): self._wen_mem[idx] = self._wen_int[idx] self._data_to_mem[idx] = self._w_data[idx] self._wr_addr_to_mem[idx] = self._w_addr[idx] @always_comb def mem_controls_rd(self, idx): # cen_mem acts as ren_mem when the ports are separated self._cen_mem[idx] = self._next_rd_port[idx].r_or() self._rd_addr_to_mem[idx] = self._rd_addr_sel[idx] @always_comb # Prioritizes writes over reads def mem_controls_combined(self, idx): self._wen_mem[idx] = self._wen_int[idx] self._cen_mem[idx] = (self._wen_int[idx] | (self._next_rd_port[idx].r_or())) self._data_to_mem[idx] = self._w_data[idx] # Consume wr over read if(self._wen_int[idx]): self._addr_to_mem[idx] = self._w_addr[idx] else: self._addr_to_mem[idx] = self._rd_addr_sel[idx] @always_comb # Find lowest ready def set_next_read_port_lowest(self): self._next_rd_port[0] = 0 self._rd_addr_sel[0] = 0 self._done[0] = 0 for i in range(self.int_out_ports): if ~self._done[0]: if self._ren_int[i]: self._rd_addr_sel[0] = self._rd_addr[i] self._next_rd_port[0][i] = 1 self._done[0] = 1 # Find lowest ready @always_comb def 
set_next_read_port_alt(self, index): self._next_rd_port[index] = 0 self._idx_cnt[index - 1] = 0 self._rd_addr_sel[index] = 0 self._done[index] = 0 for i in range(self.int_out_ports): if ~self._done[index]: if self._ren_int[i] & (self._idx_cnt[index - 1] == index): self._done[index] = 1 self._rd_addr_sel[index] = self._rd_addr[i] self._next_rd_port[index][i] = 1 self._idx_cnt[index - 1] = self._idx_cnt[index - 1] + 1 @always_ff((posedge, "clk"), (negedge, "rst_n")) def next_read_valid(self, idx): if ~self._rst_n: self._rd_port[idx] = 0 self._rd_valid[idx] = 0 else: self._rd_valid[idx] = ((~self._wen_int[idx] | (self.rw_same_cycle)) & (self._next_rd_port[idx].r_or())) self._rd_port[idx] = self._next_rd_port[idx] @always_comb def zero_delay_read(self, idx): self._rd_valid[idx] = (~self._wen_int[idx] | (self.rw_same_cycle)) & (self._next_rd_port[idx].r_or()) self._rd_port[idx] = self._next_rd_port[idx] @always_comb def output_stage(self): self._out_data = self._data_from_mem self._out_port = self._rd_port self._out_valid = self._rd_valid self._out_mem_valid_data = self._mem_valid_data if __name__ == "__main__": db_dut = RWArbiter() verilog(db_dut, filename="rw_arbiter.sv")
python
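# A minimal sketch of driving the RWArbiter generator above in its combined-address
# mode. It only reuses the constructor parameters and the kratos verilog() call
# already present in the file's __main__ block; the port counts are chosen to
# satisfy the class's own assertions (equal read/write port counts and
# rw_same_cycle=False when separate_addresses is disabled).

if __name__ == "__main__":
    shared_dut = RWArbiter(strg_wr_ports=1, strg_rd_ports=1,
                           rw_same_cycle=False, separate_addresses=False)
    verilog(shared_dut, filename="rw_arbiter_shared.sv")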
from __future__ import division from __future__ import print_function def elink_module(elink_intf, emesh_intf): """ The Adapteva ELink off-chip communication channel. Interfaces: elink_intf: The external link signals emesh_intf: The internal EMesh packet interface """ # keep track of all the myhdl generators mod_inst = [] # clock and reset config # g = ecfg_elink() # mod_inst.append(g) # receiver # g = erx(elink, emesh_e) # mod_inst.append(g) # transmitter # g = etx(elink, emesh_e) # mod_inst.append(g) # CDC FIFO # g = ecfg_fifo(emesh, emesh_e) # mod_inst.append(g) # Vendor specific IO SERDES # g = io_serdes() # mod_inst.append(g) return mod_inst
python
import os import re import sys from functools import partial from datetime import datetime from jinja2 import Template from traitlets.config.configurable import Configurable from traitlets import Integer, CBool, Unicode, Float, Set, Dict, Unicode from jupyterhub.traitlets import Callable from wtforms import BooleanField, DecimalField, SelectField, StringField, Form, RadioField from wtforms.form import BaseForm from wtforms.validators import InputRequired, NumberRange, AnyOf from wtforms.fields.html5 import IntegerField from wtforms.widgets.html5 import NumberInput from . traitlets import NumericRangeWidget, SelectWidget class FakeMultiDict(dict): getlist = dict.__getitem__ def resolve(value, *args, **kargs): if callable(value): return value(*args, **kargs) else: return value class SbatchForm(Configurable): runtime = NumericRangeWidget( { 'min' : 0.25, 'def' : 1.0, 'step': 0.25, 'lock': False, }, help="Define parameters of runtime numeric range widget" ).tag(config=True) memory = NumericRangeWidget( { 'min' : 1024, 'step': 1, 'lock': False, 'def': lambda api, user: int(max(api.get_mems()) / max(api.get_cpus())), 'max': lambda api, user: max(api.get_mems()) }, help="Define parameters of memory numeric range widget in MB" ).tag(config=True) nprocs = NumericRangeWidget( { 'min' : 1, 'step': 1, 'lock': False, 'def': 1, 'max' : lambda api, user: max(api.get_cpus()) }, help="Define parameters of core numeric range widget" ).tag(config=True) oversubscribe = Dict({'def' : False, 'lock' : True}).tag(config=True) gpus = SelectWidget( { 'def' : 'gpu:0', 'choices' : lambda api, user: api.get_gres(), 'lock' : False }, help="Define the list of available gpu configurations." ).tag(config=True) account = SelectWidget( { 'choices' : lambda api, user: api.get_accounts(user), 'lock' : False }, help="Define the list of available accounts." ).tag(config=True) reservation = SelectWidget( { 'def' : '', 'choices' : lambda api, user: api.get_active_reservations(user, api.get_accounts(user)), 'lock' : False }, help="Define the list of available reservations." ).tag(config=True) ui = SelectWidget( { 'lock' : False, 'def' : 'lab', 'choices' : ['notebook', 'lab', 'terminal'] }, help="Define the list of available user interface." 
).tag(config=True) form_template_path = Unicode( os.path.join(sys.prefix, 'share', 'slurmformspawner', 'templates', 'form.html'), help="Path to the Jinja2 template of the form" ).tag(config=True) def __init__(self, username, slurm_api, ui_args, user_options = {}, config=None): super().__init__(config=config) fields = { 'account' : SelectField("Account", validators=[AnyOf([])]), 'runtime' : DecimalField('Time (hours)', validators=[InputRequired(), NumberRange()], widget=NumberInput()), 'ui' : SelectField('User interface', validators=[AnyOf([])]), 'nprocs' : IntegerField('Number of cores', validators=[InputRequired(), NumberRange()], widget=NumberInput()), 'memory' : IntegerField('Memory (MB)', validators=[InputRequired(), NumberRange()], widget=NumberInput()), 'gpus' : SelectField('GPU configuration', validators=[AnyOf([])]), 'oversubscribe' : BooleanField('Enable core oversubscription?'), 'reservation' : SelectField("Reservation", validators=[AnyOf([])]) } self.form = BaseForm(fields) self.form['runtime'].filters = [float] self.resolve = partial(resolve, api=slurm_api, user=username) self.ui_args = ui_args with open(self.form_template_path, 'r') as template_file: self.template = template_file.read() for key in fields: dict_ = getattr(self, key) if dict_.get('lock') is True: if dict_.get('def') is None: raise Exception(f'You need to define a default value for {key} because it is locked.') if key in user_options: self.form[key].process(formdata=FakeMultiDict({key : [user_options[key]]})) else: self.form[key].process(formdata=FakeMultiDict({key : [self.resolve(getattr(self, key).get('def'))]})) @property def data(self): return self.form.data @property def errors(self): return self.form.errors def process(self, formdata): for key in self.form._fields.keys(): lock = self.resolve(getattr(self, key).get('lock')) value = formdata.get(key) if not lock and value is not None: self.form[key].process(formdata=FakeMultiDict({key : value})) def validate(self): valid = True for key in self.form._fields.keys(): lock = self.resolve(getattr(self, key).get('lock')) if not lock: valid = self.form[key].validate(self.form) and valid return valid def render(self): self.config_runtime() self.config_nprocs() self.config_memory() self.config_oversubscribe() self.config_ui() self.config_gpus() self.config_reservations() self.config_account() return Template(self.template).render(form=self.form) def config_runtime(self): lock = self.resolve(self.runtime.get('lock')) if lock: def_ = self.resolve(self.runtime.get('def')) self.form['runtime'].render_kw = {'disabled': 'disabled'} self.form['runtime'].widget.min = def_ self.form['runtime'].widget.max = def_ self.form['runtime'].validators[-1].min = def_ self.form['runtime'].validators[-1].max = def_ self.form['runtime'].validators[-1].message = f'Runtime can only be {def_}' else: min_ = self.resolve(self.runtime.get('min')) max_ = self.resolve(self.runtime.get('max')) step = self.resolve(self.runtime.get('step')) self.form['runtime'].widget.min = min_ self.form['runtime'].widget.max = max_ self.form['runtime'].widget.step = step if min_ is not None: self.form['runtime'].validators[-1].min = min_ if max_ is not None: self.form['runtime'].validators[-1].max = max_ self.form['runtime'].validators[-1].message = f'Runtime outside of allowed range [{min_}, {max_}]' def config_nprocs(self): lock = self.resolve(self.nprocs.get('lock')) if lock: def_ = self.resolve(self.nprocs.get('def')) self.form['nprocs'].render_kw = {'disabled': 'disabled'} self.form['nprocs'].widget.min = def_ 
self.form['nprocs'].widget.max = def_ self.form['nprocs'].validators[-1].min = def_ self.form['nprocs'].validators[-1].max = def_ else: min_ = self.resolve(self.nprocs.get('min')) max_ = self.resolve(self.nprocs.get('max')) step = self.resolve(self.nprocs.get('step')) self.form['nprocs'].widget.min = min_ self.form['nprocs'].widget.max = max_ self.form['nprocs'].widget.step = step self.form['nprocs'].validators[-1].min = min_ self.form['nprocs'].validators[-1].max = max_ def config_memory(self): lock = self.resolve(self.memory.get('lock')) if lock: def_ = self.resolve(self.memory.get('def')) self.form['memory'].render_kw = {'disabled': 'disabled'} self.form['memory'].widget.min = def_ self.form['memory'].widget.max = def_ self.form['memory'].validators[-1].min = def_ self.form['memory'].validators[-1].max = def_ else: min_ = self.resolve(self.memory.get('min')) max_ = self.resolve(self.memory.get('max')) step = self.resolve(self.memory.get('step')) self.form['memory'].widget.min = min_ self.form['memory'].widget.max = max_ self.form['memory'].widget.step = step self.form['memory'].validators[-1].min = min_ self.form['memory'].validators[-1].max = max_ def config_oversubscribe(self): if self.oversubscribe['lock']: self.form['oversubscribe'].render_kw = {'disabled': 'disabled'} def config_account(self): keys = self.resolve(self.account.get('choices')) if keys: choices = list(zip(keys, keys)) else: keys = [""] choices = [("", "None")] self.form['account'].choices = choices self.form['account'].validators[-1].values = keys if self.resolve(self.account.get('lock')): self.form['account'].render_kw = {'disabled': 'disabled'} def config_gpus(self): choices = self.resolve(self.gpus.get('choices')) lock = self.resolve(self.gpus.get('lock')) gpu_choice_map = {} for gres in choices: if gres == 'gpu:0': gpu_choice_map['gpu:0'] = 'None' continue match = re.match(r"(gpu:[\w:]+)", gres) if match: gres = match.group(1).split(':') number = int(gres[-1]) if len(gres) == 2: strings = ('gpu:{}', '{} x GPU') elif len(gres) > 2: strings = ('gpu:{}:{{}}'.format(gres[1]), '{{}} x {}'.format(gres[1].upper())) for i in range(1, number + 1): gpu_choice_map[strings[0].format(i)] = strings[1].format(i) self.form['gpus'].choices = list(gpu_choice_map.items()) if lock: self.form['gpus'].render_kw = {'disabled': 'disabled'} self.form['gpus'].validators[-1].values = [key for key, value in self.form['gpus'].choices] def config_ui(self): choices = self.resolve(self.ui.get('choices')) lock = self.resolve(self.ui.get('lock')) self.form['ui'].validators[-1].values = [key for key in choices] self.form['ui'].choices = [(key, self.ui_args[key]['name']) for key in choices] if lock: self.form['ui'].render_kw = {'disabled': 'disabled'} def config_reservations(self): choices = self.resolve(self.reservation.get('choices')) lock = self.resolve(self.reservation.get('lock')) prev = self.form['reservation'].data if choices is None: choices = [] now = datetime.now() prev_is_valid = False self.form['reservation'].choices = [("", "None")] for rsv in choices: name = rsv['ReservationName'] duration = rsv['EndTime'] - now string = '{} - time left: {}'.format(name, duration) self.form['reservation'].choices.append((name, string)) if prev == name: prev_is_valid = True if lock: self.form['reservation'].render_kw = {'disabled': 'disabled'} self.form['reservation'].validators[-1].values = [key for key, value in self.form['reservation'].choices]
python
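# In the SbatchForm above every widget option ('def', 'max', 'choices', ...) may be
# either a literal value or a callable taking (api, user), and resolve() evaluates
# it lazily for the current user. A small self-contained sketch of that pattern;
# FakeApi and the sample config are hypothetical stand-ins, not part of
# slurmformspawner:

def resolve(value, *args, **kwargs):
    """Return value as-is, or call it with the given arguments if it is callable."""
    return value(*args, **kwargs) if callable(value) else value


class FakeApi:
    def get_cpus(self):
        return [8, 16, 32]

    def get_mems(self):
        return [64000, 128000]


nprocs_cfg = {
    'min': 1,
    'def': 1,
    'max': lambda api, user: max(api.get_cpus()),  # resolved per request
}

api = FakeApi()
print(resolve(nprocs_cfg['min'], api, 'alice'))   # 1   (plain value passes through)
print(resolve(nprocs_cfg['max'], api, 'alice'))   # 32  (callable evaluated lazily)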
import yaml from boardgamegeek import BGGClient def main(user, member_data_file): bgg = BGGClient() with open(member_data_file, "r") as data_file: member_data = yaml.load(data_file) user_data = member_data[user] del member_data[user] user_collection_size = len(user_data) member_scores = list() for user, ratings in member_data.items(): score = 0 games_in_common = 0 for game, rating in user_data.items(): if game in ratings: diff = (rating - ratings[game])**2 score += diff games_in_common += 1 member_scores.append( {"user": user, "score": score, "common": games_in_common}) member_scores = [x for x in member_scores if x[ "common"] >= 0.5 * user_collection_size] member_scores.sort(key=lambda x: x["score"]) filename = user + "_followers.yml" with open(filename, "w") as fo: yaml.dump(member_scores, fo) for i in range(5): member = member_scores[i] print(member["user"], member["score"], member["common"]) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--user") parser.add_argument("--member-data") args = parser.parse_args() main(args.user, args.member_data)
python
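# The script above ranks other members by the sum of squared rating differences over
# the games they share with the target user, then drops anyone sharing fewer than
# half of the target's collection. A tiny worked example of that scoring step on
# made-up ratings (the names and numbers are illustrative only):

user_data = {"Carcassonne": 8, "Azul": 7, "Root": 9, "Wingspan": 6}
member_data = {
    "alice": {"Carcassonne": 7, "Azul": 7, "Root": 10},
    "bob": {"Wingspan": 9},
}

member_scores = []
for member, ratings in member_data.items():
    score = 0
    games_in_common = 0
    for game, rating in user_data.items():
        if game in ratings:
            score += (rating - ratings[game]) ** 2
            games_in_common += 1
    member_scores.append({"user": member, "score": score, "common": games_in_common})

# keep members sharing at least half of the user's collection, lowest score first
member_scores = [m for m in member_scores if m["common"] >= 0.5 * len(user_data)]
member_scores.sort(key=lambda m: m["score"])
print(member_scores)  # alice kept with score 2 over 3 shared games; bob (1 < 2) dropped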
# Generated by Django 2.2 on 2020-10-20 18:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('library', '0003_librarysubscription_nightshift'), ] operations = [ migrations.AlterField( model_name='librarybranch', name='address', field=models.TextField(blank=True, null=True), ), migrations.AlterField( model_name='librarybranch', name='admission_fees', field=models.PositiveSmallIntegerField(blank=True, null=True), ), migrations.AlterField( model_name='librarybranch', name='ammenities', field=models.ManyToManyField(blank=True, null=True, to='core.Ammenity'), ), migrations.AlterField( model_name='librarybranch', name='beginning_of_summer_season', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'), (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], null=True), ), migrations.AlterField( model_name='librarybranch', name='beginning_of_winter_season', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'), (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], null=True), ), migrations.AlterField( model_name='librarybranch', name='closing_time', field=models.TimeField(blank=True, null=True), ), migrations.AlterField( model_name='librarybranch', name='description', field=models.TextField(blank=True, null=True), ), migrations.AlterField( model_name='librarybranch', name='gst', field=models.FloatField(blank=True, null=True), ), migrations.AlterField( model_name='librarybranch', name='locker_fees', field=models.PositiveSmallIntegerField(blank=True, null=True), ), migrations.AlterField( model_name='librarybranch', name='night_shift_from', field=models.TimeField(blank=True, null=True), ), migrations.AlterField( model_name='librarybranch', name='night_shift_to', field=models.TimeField(blank=True, null=True), ), migrations.AlterField( model_name='librarybranch', name='opening_days', field=models.ManyToManyField(blank=True, null=True, to='core.OpeningDays'), ), migrations.AlterField( model_name='librarybranch', name='opening_time', field=models.TimeField(blank=True, null=True), ), ]
python
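# The migration above only relaxes existing columns: every listed field gains
# blank=True/null=True. A sketch of what the corresponding LibraryBranch field
# declarations would look like after the migration; this is inferred from the
# AlterField operations, belongs inside the library app's models.py, and is not
# the project's actual models module:

from django.db import models


class LibraryBranch(models.Model):
    address = models.TextField(blank=True, null=True)
    admission_fees = models.PositiveSmallIntegerField(blank=True, null=True)
    opening_time = models.TimeField(blank=True, null=True)
    closing_time = models.TimeField(blank=True, null=True)
    # note: null=True has no effect on ManyToManyField; blank=True is what lets
    # the relation be left empty in forms
    ammenities = models.ManyToManyField('core.Ammenity', blank=True)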
import mock from util.factory import channel_factory from util.factory import new_podcast_factory from util.factory import requested_podcast_factory from podcast.download import _download_from_url from podcast.download import download_channel from podcast.models import NewStatus from podcast.models import RadioDirectory def get_types(channel): return [ type(podcast.status).__name__ for podcast in channel.known_podcasts ] def test_download_channel_none_requested(): channel = channel_factory(known_podcasts=[ new_podcast_factory(), new_podcast_factory(), new_podcast_factory(), ]) with mock.patch( 'podcast.download.download_podcast') as mock_download_podcast: new_channel = download_channel( RadioDirectory('tmp'), channel) assert len(mock_download_podcast.mock_calls) == 0 assert channel == new_channel assert get_types(channel) == get_types(new_channel) def test_download_channel_success(): channel = channel_factory(known_podcasts=[requested_podcast_factory()]) with mock.patch( 'podcast.download._download_from_url', return_value=True) as mock_download_podcast: new_channel = download_channel( RadioDirectory('tmp'), channel) assert len(mock_download_podcast.mock_calls) == 1 expected = channel._replace( known_podcasts=[ channel.known_podcasts[0]._replace(status=NewStatus()) ]) assert channel == expected assert get_types(new_channel) == get_types(expected) # Let's test the tests assert get_types(new_channel) != get_types(channel) def test_download_channel_fail(): channel = channel_factory(known_podcasts=[requested_podcast_factory()]) with mock.patch( 'podcast.download._download_from_url', return_value=False) as mock_download_podcast: new_channel = download_channel( RadioDirectory('tmp'), channel) assert len(mock_download_podcast.mock_calls) == 1 assert channel == new_channel assert get_types(channel) == get_types(new_channel) def test_download_from_url_success(): with mock.patch('urllib.request.urlretrieve'): assert _download_from_url( 'http://jessicastringham.com/something', 'nope') def test_download_from_url_fail(): with mock.patch('urllib.request.urlretrieve', side_effect=IOError): assert not _download_from_url( 'http://jessicastringham.com/something', 'nope')
python
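# The assertions above rely on the channel/podcast models being immutable
# namedtuple-style records, so "updating" a podcast's status means building a new
# channel with _replace(). A minimal stand-alone illustration of that pattern; the
# Channel and Podcast shapes here are simplified stand-ins, not the real models:

from collections import namedtuple

Podcast = namedtuple('Podcast', ['title', 'status'])
Channel = namedtuple('Channel', ['name', 'known_podcasts'])

channel = Channel('radio', [Podcast('ep1', 'requested')])

# a successful download does not mutate anything; it yields a new channel whose
# podcast carries the new status
new_channel = channel._replace(
    known_podcasts=[channel.known_podcasts[0]._replace(status='new')]
)

print(channel.known_podcasts[0].status)      # requested  (original untouched)
print(new_channel.known_podcasts[0].status)  # new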
import mne import os import numpy as np import pandas as pd #from .kcmodel import scoring_algorithm_kc from ..features.spectral_features import compute_absol_pow_freq_bands from .base import BaseMethods import sys from scipy.signal import find_peaks import pywt import joblib try: wd = sys._MEIPASS except AttributeError: wd = os.path.dirname(__file__) try: import torch import torch.jit #torch.jit.script_method = script_method #torch.jit.script = script except ImportError: print(ImportError) try: import gpytorch from gpytorch.variational import CholeskyVariationalDistribution from gpytorch.variational import WhitenedVariationalStrategy except ImportError: print(ImportError) class KC(BaseMethods): """ ... one line comment ... Parameters ---------- raw : mne.Base.io.raw object hypnogram : hypnogram class Methods ------- """ def __init__(self, include_stages = 'all', **kwargs): super().__init__() self._include_stages = include_stages if include_stages =='all': self._include_stages = [-1,0,1,2,3,4,5,9] self._epochs_data = {} self._metadata = {} self._scoring = {} def fit(self, raw, hypnogram, picks=None, events=None,**kwargs): """ Parameters ---------- raw hypnogram path picks events kwargs Returns ------- """ self._check_raw(raw) self._check_hypno(hypnogram) if kwargs: self.set_params(parameters_dict=kwargs, check_has_key=True) if picks is not None: raw = raw.pick_channels(ch_names=picks) self._raw = raw.filter(l_freq=0.3,h_freq=None, verbose = 'error') self._hypno = hypnogram def score(self): """ Score K-complexes and calculate characteristics K-complexes parameters. More informations about the scoring algorithm can be found in [1] and in :py:func:`~SHAI.EEG.KCscoring.model`. Results (scoring + metrics) is stored in kc_scoring dict. Some metrics are scored according to [2]. Parameters ---------- Returns ------- output : ndarray, shape (n_channels * n_edge,) With: `n_edge = 1` if `edge` is None or `n_edge = len(edge)` otherwise. Notes ----- The following parameters are calculated for each K-complexes: KC_onset : onset, in seconds from the beginning of the recordings, of the KC KC_probas : probability of the K-complex KC_stage : sleep stage of the K-complex N550 : Amplitude of the N550 components, in uv P900 : Amplitude of the P900 components, in uv PTP : Peak to peak amplitude of the KC, in uv Slope : K-complex slope, define as (P900-N550)/(tP900-tN550), in uv/sec dt_P9_N5 : Time tP900-tN550, in seconds baseline_delta: absoulte delta power in the 3 seconds preceeding the k-complex, in uv^2/Hz baseline_alpha : absoulte alpha power in the 3 seconds preceeding the k-complex, in uv^2/Hz after_delta : absoulte delta power in the 3 seconds after the k-complex, in uv^2/Hz after_alpha : absoulte alpha power in the 3 seconds after the k-complex, in uv^2/Hz ratio_delta : after_delta/baseline_delta, ratio_alpha : after_alpha/baseline_alpha [1] Lechat, B., et al. (2020). "Beyond K-complex binary scoring during sleep: Probabilistic classification using deep learning." Sleep. [2] Parekh A, et al. (2019) "Slow-wave activity surrounding stage N2 K-complexes and daytime function measured by psychomotor vigilance test in obstructive sleep apnea." Sleep. 
""" self._scoring = {} self._epochs_data = {} hypno = self._hypno raw = self._raw include_stages = self._include_stages Stages = hypno sfreq = raw.info['sfreq'] for channel in raw.info['ch_names']: ################################### ###### Scoring of K-complexes ##### kc_onsets, kc_probas, kc_stages = scoring_algorithm_kc(raw, channel, Stages, score_on_stages=include_stages, amplitude_threshold=20e-6, distance=2, reject_epoch=400e-6, probability_threshold=0.5) # print('Detected {} K-complexes on '.format(len(kc_onsets)) + channel) ################################### #### Calulate features #### # organize event matrix for mne onsets_int = np.array(kc_onsets * raw.info['sfreq'], dtype='int')\ + self._raw.first_samp events = np.vstack((onsets_int, np.ones_like(onsets_int), np.ones_like(onsets_int))).T # get epochs data epochs = mne.Epochs(raw, events, picks=channel, event_id=None, tmin=-6, tmax=6, baseline=(None, -0.5), reject=None, reject_by_annotation=False, verbose='critical', flat=None) times = epochs.times kc_matrix = epochs.get_data().squeeze() *-1 * 10 ** 6 ################################### ###### Time-Feature calculations t_P900_N550, P900_timing, KC_900, KC_550, ptp_amp, slope = _temporal_features_kcs( kc_matrix, sfreq) ################################### ###### Frequency-Feature calculations delta_before, alpha_before, delta_after, alpha_after = _kc_frequency_features( kc_matrix, times, sfreq) scg = { 'KC_onset': kc_onsets, 'KC_probas': kc_probas, 'KC_stage': kc_stages, 'N550': KC_550, 'P900': KC_900, 'PTP': ptp_amp, 'Slope': slope, 'dt_P9_N5': t_P900_N550, 'baseline_delta': delta_before, 'baseline_alpha': alpha_before, 'after_delta': delta_after, 'after_alpha': alpha_after, 'ratio_delta': (delta_after - delta_before) / delta_before, 'ratio_alpha': (alpha_after - alpha_before) / alpha_before } self._scoring[channel] = scg self._epochs_data[channel] = (kc_matrix, times, kc_probas) return self._scoring, self._epochs_data def score_from_events(self, events): event_onset = events.onset.values scoring = self._scoring for channel in list(scoring.keys()): sc = [] d = pd.DataFrame.from_dict(scoring[channel]) kcs_onset = d['KC_onset'].values for event_count, single_event_onset in enumerate(event_onset): args = np.argwhere(kcs_onset>single_event_onset) if len(args) !=0: dkc = d.loc[args[0],:] dkc['noise_count'] = event_count dkc['delta_t'] = dkc['KC_onset'] - single_event_onset sc.append(dkc) dch = pd.concat(sc) dch = dch.set_index('noise_count') dch.columns = [col+'_'+channel for col in dch.columns] events = events.merge(dch, how='left',left_index=True, right_index=True) return events def overnight_metrics(self,probability_thresholds = 0.5): """ Calculate summary k-complex metrics Summary K-complexes metrics (see Notes for a detailed list) are calculated for each channels and individual sleep stages. Notes ----- Parameters are calculated for each channels. Furthermore, parameters are calculated for stage 2, 3 and NREM. 
For example, K-complexes densities (dKC) are returned as follows: dKC : KC density (#/min) in NREM sleep dKC_N1 : KC density (#/min) in N2 dKC_N2 : KC density (#/min) in N2 dKC_N3 : KC density (#/min) in N3 Full list of parameters: dKC : KC density (#/min) in NREM sleep N550 : Amplitude of the N550 components, in uv P900 : Amplitude of the P900 components, in uv PTP : Peak to peak amplitude of the KC, in uv Slope : K-complex slope, define as (P900-N550)/(tP900-tN550), in uv/sec dt_P9_N5 : Time tP900-tN550, in seconds baseline_delta: absoulte delta power in the 3 seconds preceeding the k-complex, in uv^2/Hz baseline_alpha : absoulte alpha power in the 3 seconds preceeding the k-complex, in uv^2/Hz after_delta : absoulte delta power in the 3 seconds after the k-complex, in uv^2/Hz after_alpha : absoulte alpha power in the 3 seconds after the k-complex, in uv^2/Hz ratio_delta : after_delta/baseline_delta, ratio_alpha : after_alpha/baseline_alpha density_function markers ? [1] Lechat, B., et al. (2020). "Beyond K-complex binary scoring during sleep: Probabilistic classification using deep learning." Sleep. [2] Parekh A, et al. (2019) "Slow-wave activity surrounding stage N2 K-complexes and daytime function measured by psychomotor vigilance test in obstructive sleep apnea." Sleep. """ if not self._scoring: scoring, metadata = self.score() else: scoring, metadata = self._scoring, self._epochs_data if any([probability_thresholds < 0, probability_thresholds > 1]): raise ValueError('K-complex ``probability_thresholds`` must be a float between 0 and 1.') """ Calculate KCs metrics""" metrics = {} for channel, kc_dict in scoring.items(): m = kc_metrics_by_sleep_stage(kc_dict, hypnogram=self._hypno, pth = probability_thresholds) m = m.to_dict(orient='list') for key, val in m.items(): metrics[channel + key] = float(val[0]) return metrics def _plot_average(self): pass #if not self._scoring: # raise RuntimeError('You need to score K-complex before plotting') #from .plotting import KC_from_probas, KC_from_electrodes, # KC_from_electrodes_all #KC_from_probas(self._epochs_data, np.asarray(self._scoring[ # self._raw.info[ # 'ch_names'][ # 0]]['KC_probas'])) #KC_from_electrodes(self._epochs_data) #KC_from_electrodes_all(self._epochs_data) def kc_metrics_by_sleep_stage(kc_dict, hypnogram, pth): df = pd.DataFrame.from_dict(kc_dict) df = df.loc[df.KC_probas > pth, :] ## NREM nrem = df.mean().to_frame().T nrem.columns = [x + '_mean' for x in nrem.columns] nrem = nrem.drop(['KC_onset_mean'], axis=1) t = nrem kc_stage = df['KC_stage'].values if -1 in np.unique(hypnogram.label.values): # Hypnogram is unscored t['n_KC'] = len(kc_stage) t['dKC'] = float( len(kc_stage) * 2 / len(hypnogram.label.values)) else: t['n_KC'] = float(len(kc_stage)) t['dKC'] = float( np.sum(np.isin(kc_stage, [1, 2, 3, 4])) * 2 / np.sum( np.isin(hypnogram.label.values, [1, 2, 3, 4]))) t['dKC_N1'] = float(np.sum(np.isin(kc_stage, [1])) * 2 / np.sum( np.isin(hypnogram.label.values, [1]))) t['dKC_N2'] = float(np.sum(np.isin(kc_stage, [2])) * 2 / np.sum( np.isin(hypnogram.label.values, [2]))) t['dKC_N3'] = float(np.sum(np.isin(kc_stage, [3])) * 2 / np.sum( np.isin(hypnogram.label.values, [3]))) return t def _temporal_features_kcs(time_data,Fs): """ Calculate characteristics time points of K-complexes TODO: I'm sure it's possible to do a function that can handle 1d and 2d arrays """ if time_data.ndim == 1: return _kc_temporal_features_1d(time_data, Fs) else: return _kc_temporal_features_2d(time_data,Fs) def _kc_frequency_features(time_data, times, 
sfreq): """ Calculate absolute power of delta and alpha band before (on a 3 seconds windows) and after K-complexes""" exp = [('before', -2.5, -0.5), ('after', 1, 3)] res = {} for m in exp: kc_matrix_temp = time_data[:, np.bitwise_and(times > m[1], times < m[2])] absol_power = compute_absol_pow_freq_bands(sfreq, kc_matrix_temp, psd_method='multitaper', psd_params={'mt_adaptive': True, 'mt_bandwidth': 3, 'mt_low_bias': True}, freq_bands=[0.5, 4, 8, 12]) delta = absol_power[:, 0] alpha = absol_power[:, 2] res[m[0]] = (delta, alpha) delta_before, alpha_before, delta_after, alpha_after = res['before'][0], res['before'][1],\ res['after'][0], res['after'][1] return delta_before, alpha_before, delta_after, alpha_after def _kc_temporal_features_1d(time_data, Fs): """Calculate kc features for 1d array""" half_index = int(len(time_data) / 2) #epochs are centered around N550 components N550_index = np.argmax(time_data[half_index - int(0.2 * Fs):half_index + int(0.2 * Fs)]) + \ half_index - int(0.2 * Fs) P900_index = np.argmax(-1 * time_data[half_index + int(0.2 * Fs):half_index + int(0.750 * Fs)]) + \ half_index + int(0.2 * Fs) t_P900_N550 = (P900_index - N550_index) / Fs P900_timing = (P900_index - half_index) / Fs KC_900 = -1 * time_data[P900_index] KC_550 = time_data[N550_index] ptp_amp = abs(KC_900) + KC_550 slope = ptp_amp / t_P900_N550 return t_P900_N550, P900_timing, KC_900, KC_550, ptp_amp, slope def _kc_temporal_features_2d(time_data, Fs): """Calculate kc features for 2d array""" half_index = int(np.shape(time_data)[1] / 2) N550_index = np.argmax(-1* time_data[np.arange(np.shape(time_data)[0]), half_index - int(0.2 * Fs):half_index + int(0.2 * Fs)], axis=1) + half_index - int( 0.2 * Fs) P900_index = np.argmax( time_data[np.arange(np.shape(time_data)[0]), half_index + int(0.2 * Fs):half_index + int(0.750 * Fs)], axis=1) + half_index + int( 0.2 * Fs) t_P900_N550 = (P900_index - N550_index) / Fs P900_timing = (P900_index - half_index) / Fs KC_900 = time_data[np.arange(np.shape(time_data)[0]), P900_index] KC_550 = -1*time_data[np.arange(np.shape(time_data)[0]), N550_index] ptp_amp = abs(KC_900) + KC_550 slope = ptp_amp / t_P900_N550 return t_P900_N550, P900_timing, KC_900, KC_550, ptp_amp, slope ########################################################################## ## K-complex scoring functions ## ########################################################################## def scoring_algorithm_kc(raw, channel, stages, score_on_stages = [1,2,3], amplitude_threshold = 20e-6, distance = 2, reject_epoch = 500e-6, probability_threshold = 0.5): """ Score K-complexes according to [1]. Briefly, peaks superior to "amplitude_threshold" in the raw EEG are found, and then classified using deep kernel learning. Deep kernel learning is a mix between neural network and gaussian processes; and it attributes each waveform a "probability" (probability threshold) of being a K-complex. The higher the probability, the more "confident" is the algorithm; which is generally seen in very large and well defined K-complexes. Parameters ---------- raw : :py:class:`mne.io.BaseRaw` Raw data channel : str Channel on which socre K-complexes stages : pd.DataFrame Dataframe containing the following keys: "onset" (sleep stage scoring onset), "dur" (duration of the scored stage) and "label" (sleep stage label) score_on_stages : list Valid sleep stages to score K-complexes. 
amplitude_threshold : float or int Minimum amplitude for a peak to be considered as possible K-complexes distance: float or int Minimum between two consecutive peaks to be classified as K-complexes reject_epoch: float or int Reject candidate K-complexes if their maximum values (positive or negative) is superior to this value probability_threshold: float Reject waveform scored as K-complexes if their probability is inferior to this threshold. Returns ------- onsets: K-complexes onsets (in seconds) probas: Probability of the K-complex stage_peaks: sleep stage of the k-complex Notes ----- Lechat, B., et al. (2020). "Beyond K-complex binary scoring during sleep: Probabilistic classification using deep learning." Sleep. """ C3 = np.asarray( [raw[count, :][0] for count, k in enumerate(raw.info['ch_names']) if k == channel]).ravel() Fs = raw.info['sfreq'] st = stages.loc[stages['label'].isin(score_on_stages),:] length_of_stages = int(st['duration'].values[0]*Fs) onset_of_stages = np.round(st['onset'].values[1:-1]* Fs).astype('int') stage_label = st['label'].values[1:-1] ###### preprocessing ########### peaks,stage_peaks = _find_peaks_staged(C3, Fs, sonset=onset_of_stages,sdur=length_of_stages, slabel=stage_label, min = amplitude_threshold, distance=distance) d, args = Epochdata(C3, Fs, peaks, detrend=True, reject_max = reject_epoch) peaks = peaks[args] stage_peaks = stage_peaks[args] d_pad = pad_nextpow2(d) ######## Wavelet decomposition ######### wavelet = pywt.Wavelet('sym3') coefs = pywt.wavedec(d_pad, wavelet=wavelet, mode='periodization', level=pywt.dwt_max_level(d.shape[-1], wavelet.dec_len)) X = np.hstack(coefs[:5]) ########### Model prediction ############# model, likelihood = get_model() data_scaled = scale_input(X) probas, _ = predict(model, likelihood, torch.from_numpy(data_scaled)) ####################################################################### stage_peaks = stage_peaks[probas > probability_threshold] onsets = peaks[probas > probability_threshold] / Fs probas = probas[probas > probability_threshold] return onsets, probas, stage_peaks ########################################################################## ## pre-processing functions ## ########################################################################## def scale_input(X, scaler = True): scaler_filename = os.path.join(wd, 'model/scaler_final_A2.save') scaler = joblib.load(scaler_filename) X_scaled = scaler.transform(X) return X_scaled def pad_nextpow2(dat): """ return an array pad with zero to the next power of 2 of the input """ g = np.ceil(np.log2(np.shape(dat)[1])) ze = np.zeros((np.shape(dat)[0],np.array(np.power(2, g) - np.shape(dat)[1], dtype='int'))) data = np.hstack([dat, ze]) return data def _find_peaks_staged(data, Fs, sonset,sdur, slabel, min, distance): """Find peaks of at least "min" amplitude the given sleep stages """ p = [] stages = [] for j,(low,up,sstage) in enumerate(zip(sonset, sonset+sdur,slabel)): data_for_peak = data[low:up] - np.mean(data[low:up]) temp, _ = find_peaks(data_for_peak, height=min, distance=distance * Fs) p.append(temp + low) stages.append(np.ones(len(temp))*sstage) return np.hstack(p), np.hstack(stages) def Epochdata(data, Fs, peaks, post_peak=3, pre_peak=3, detrend=True, reject_max = None): """ Epochs raw data for each peak in peaks. 
""" max_peaks_locs = len(data) - int(post_peak*Fs) min_peaks_locs = int(pre_peak*Fs) peaks = peaks[np.bitwise_and(peaks>min_peaks_locs,peaks<max_peaks_locs)] epochs = np.vstack([data[up:low] for up,low in zip(peaks-int(pre_peak * Fs), peaks+int(post_peak * Fs))]) if detrend: epochs = epochs - np.mean(epochs,axis=1, keepdims=True) if reject_max is not None: args = np.argwhere(~(np.max(np.abs(epochs),axis=1)>reject_max)).squeeze() #print(np.max(np.abs(epochs),axis=1)) epochs = epochs[args,:] return epochs, args else: return epochs ########################################################################## ## Predictions models/functions ## ########################################################################## class LargeFeatureExtractor(torch.nn.Sequential): """ Neural network used for feature extraction""" def __init__(self, input_dim, output_dim,drop_out =0.5): super(LargeFeatureExtractor, self).__init__() self.add_module('linear1', torch.nn.Linear(input_dim, 1000, bias=False)) self.add_module('bn1', torch.nn.BatchNorm1d(1000)) self.add_module('relu1', torch.nn.ReLU()) self.add_module('dropout1', torch.nn.Dropout(p=drop_out, inplace=False)) self.add_module('linear2', torch.nn.Linear(1000, 1000,bias=False)) self.add_module('bn2', torch.nn.BatchNorm1d(1000)) self.add_module('relu2', torch.nn.ReLU()) self.add_module('dropout2', torch.nn.Dropout(p=drop_out, inplace=False)) self.add_module('linear3', torch.nn.Linear(1000, 500,bias=False)) self.add_module('bn3', torch.nn.BatchNorm1d(500)) self.add_module('relu3', torch.nn.ReLU()) self.add_module('dropout3', torch.nn.Dropout(p=drop_out, inplace=False)) self.add_module('linear4', torch.nn.Linear(500, 256,bias=False)) self.add_module('bn4', torch.nn.BatchNorm1d(256)) self.add_module('relu4', torch.nn.ReLU()) self.add_module('dropout4', torch.nn.Dropout(p=drop_out, inplace=False)) self.add_module('linear6', torch.nn.Linear(256, output_dim,bias=False)) class GaussianProcessLayer(gpytorch.models.AbstractVariationalGP): def __init__(self, inducing_points): variational_distribution = CholeskyVariationalDistribution(inducing_points.size(0)) variational_strategy = WhitenedVariationalStrategy(self, inducing_points, variational_distribution, learn_inducing_locations=True) super(GaussianProcessLayer, self).__init__(variational_strategy) self.mean_module = gpytorch.means.ConstantMean() self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return gpytorch.distributions.MultivariateNormal(mean_x, covar_x) class DKLModel(gpytorch.Module): """ Deep kernel learning model as gaussian processes on top of neural network""" def __init__(self, inducing_points, feature_extractor, num_features): super(DKLModel, self).__init__() self.feature_extractor = feature_extractor self.gp_layer = GaussianProcessLayer(inducing_points) self.num_features = num_features def forward(self, x): #print(x.type()) projected_x = self.feature_extractor(x.float()) res = self.gp_layer(projected_x) return res def predict(model, likelihood, X): """prediction """ model.eval() likelihood.eval() correct = 0 with torch.no_grad(): output = likelihood(model(X)) # pred_labels = output.mean.ge(0.5).float().cpu().numpy() probas = output.mean.cpu().numpy() return probas, pred_labels def get_model(): """ convenience function to load the model with its parameters """ inducing_filename = os.path.join(wd, 'model/inducing_points_A2.npy') model_file = os.path.join(wd, 
'model/finaldkl_final_model_epoch50.dat') data_dim = 128 num_features = 16 drop_out_rate = 0.8 feature_extractor = LargeFeatureExtractor(input_dim=data_dim, output_dim=num_features, drop_out=drop_out_rate) X_induced = torch.from_numpy(np.load(inducing_filename)) model = DKLModel(inducing_points=X_induced, feature_extractor=feature_extractor, num_features=num_features) # Bernouilli likelihood because only 2 classes likelihood = gpytorch.likelihoods.BernoulliLikelihood() model.load_state_dict(torch.load(model_file,map_location=torch.device('cpu'))['model']) likelihood.load_state_dict(torch.load(model_file,map_location=torch.device('cpu'))['likelihood']) return model, likelihood
python
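# scoring_algorithm_kc() above pads each candidate epoch to the next power of two,
# runs a multi-level 'sym3' wavelet decomposition, and keeps the five coarsest
# coefficient arrays as the classifier input. A small sketch of that feature step
# on synthetic epochs; the epoch length is illustrative (the real pipeline uses
# 6 s windows at the recording's sampling rate), but the resulting 128 features
# per epoch line up with the data_dim=128 expected by get_model():

import numpy as np
import pywt


def pad_nextpow2(dat):
    """Zero-pad 2-D (n_epochs, n_samples) data to the next power of two."""
    g = np.ceil(np.log2(dat.shape[1]))
    pad = int(2 ** g - dat.shape[1])
    return np.hstack([dat, np.zeros((dat.shape[0], pad))])


epochs = np.random.randn(4, 750)          # 4 fake candidate K-complex epochs
padded = pad_nextpow2(epochs)             # (4, 1024)

wavelet = pywt.Wavelet('sym3')
level = pywt.dwt_max_level(epochs.shape[-1], wavelet.dec_len)
coefs = pywt.wavedec(padded, wavelet=wavelet, mode='periodization', level=level)
features = np.hstack(coefs[:5])           # coarsest 5 bands, one row per epoch

print(padded.shape, len(coefs), features.shape)   # (4, 1024) 8 (4, 128)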
import abc import logging import os import re import shutil import subprocess from pathlib import Path import git from halo import Halo from utils.ExecutionContext import TestRunInfo, get_context, get_timeout, has_bonus, is_strict, set_bonus, set_timeout from utils.TerminalColors import TC from utils.Utils import intersection, show_banner logger = logging.getLogger("base") norm_func_regex = re.compile(r"^([\w\\/]+\.(?:c|h)): Error!") def run_command(command: str, spinner: Halo): to_execute = command.split(" ") process = subprocess.run(to_execute, capture_output=True, text=True) logger.info(process) if process.returncode != 0: spinner.fail() print(process.stderr) raise Exception("Problem creating the library") return process class BaseTester: name = "base" testers = [] timeout = 10 def __init__(self, info: TestRunInfo) -> None: self.info = info self.temp_dir = info.base_dir / "temp" / self.name self.tests_dir = info.base_dir / "tests" / self.name self.source_dir = info.source_dir set_timeout(self.timeout) @staticmethod @abc.abstractmethod def is_project(current_path): pass def execute_testers(self): show_banner(self.name) testers = self.test_selector() with Halo(TC.CYAN + "Preparing framework" + TC.NC) as spinner: self.prepare_ex_files() spinner.succeed() norm_res = "" if not self.info.args.ignore_norm: norm_res = self.check_norminette() srcs_path = Path(self.temp_dir, "__my_srcs") logger.info(f"copying {self.source_dir} to {srcs_path}") shutil.copytree(self.source_dir, srcs_path) all_funcs = self.select_tests_to_execute() present = self.get_functions_present() to_execute = intersection(all_funcs, present) if self.info.ex_to_execute: to_execute = self.info.ex_to_execute missing = [test for test in all_funcs if test not in to_execute] logger.info(f"To execute: {to_execute}") logger.info(f"Missing: {missing}") self.compile_source() funcs_error = [] for tester in testers: funcs_error.append(self.test_using(to_execute, missing, tester)) if not self.info.ex_to_execute: self.show_summary(norm_res, missing, funcs_error, to_execute) def test_selector(self): selected_testers = self.info.args.testers if (selected_testers == None): if is_strict() and self.my_tester: return [self.my_tester] return self.testers # TODO: check valid tester if (selected_testers == []): print(f"Please select one or more of the available testers:") for i, tester in enumerate(self.testers): print(f"{TC.B_BLUE} {i + 1}) {TC.B_WHITE}{tester.name}{TC.NC} ({tester.git_url})") print(f"You can pass the numbers as arguments to {TC.B_WHITE}--testers{TC.NC} to not see this prompt") selected_testers = [char for char in input()] selected_testers = [test for test in ''.join(selected_testers) if test != ' '] result = [self.testers[int(i) - 1] for i in selected_testers] if is_strict() and self.my_tester in result: return [self.my_tester] return result def prepare_ex_files(self): def check_and_delete(repo, file): if os.path.isfile(file) and repo.ignored(file): logger.info(f"removing ignored file: {file}") os.remove(file) if os.path.exists(self.temp_dir): logger.info(f"Removing already present directory {self.temp_dir}") shutil.rmtree(self.temp_dir) logger.info(f"copying {self.source_dir} to {self.temp_dir}") shutil.copytree(self.source_dir, self.temp_dir) try: repo = git.Repo(self.temp_dir) for path in Path(self.temp_dir).glob("*"): if not path.match(".git") and path.is_dir(): for file in path.rglob("*"): check_and_delete(repo, file) if path.is_file(): check_and_delete(repo, path) logger.info(f"removing {self.temp_dir / '.git'}") 
shutil.rmtree(self.temp_dir / ".git") except Exception as ex: logger.exception(ex) def check_norminette(self): os.chdir(os.path.join(self.temp_dir)) logger.info(f"On directory {os.getcwd()}") norm_exec = ["norminette"] text = f"{TC.CYAN}Executing: {TC.B_WHITE}{' '.join(norm_exec)}{TC.NC}" with Halo(text=text) as spinner: result = subprocess.run(norm_exec, capture_output=True, text=True) logger.info(result) if result.returncode != 0: spinner.fail() print(f"{TC.YELLOW}{result.stdout}{TC.NC}") else: spinner.succeed() return result.stdout def select_tests_to_execute(self): if self.has_bonus(): set_bonus(True) return [] def get_functions_present(self): return [] def has_bonus(self): makefile = Path(self.temp_dir, "Makefile") if not makefile.exists(): return with open(makefile, "r") as m_file: bonus = [line for line in m_file.readlines() if re.match(r"^\s*bonus\s*:.*", line)] logger.info(f"bonus investigation: {bonus}") return len(bonus) != 0 def compile_source(self): os.chdir(os.path.join(self.temp_dir)) makefile = Path(self.temp_dir, "Makefile") if not makefile.exists(): return command = "make fclean " + ("bonus" if has_bonus() else "all") logger.info(f"Calling '{command}' on directory {os.getcwd()}") text = f"{TC.CYAN}Executing: {TC.B_WHITE}{command}{TC.NC} " + ("" if has_bonus() else "(no bonus)") with Halo(text=text) as spinner: run_command(command, spinner) spinner.succeed() def test_using(self, to_execute, missing, tester): try: self.prepare_tests(tester) tx = tester(self.tests_dir, self.temp_dir, to_execute, missing) return (tester.name, tx.execute()) except Exception as ex: print(ex) if 'fraaaa' in str(get_context().base_dir): raise ex else: logger.exception(ex) return (tester.name, [tester.name]) def prepare_tests(self, tester): # delete destination folder if already present temp_dir = os.path.join(self.temp_dir, tester.folder) if os.path.exists(temp_dir): logger.info(f"Removing already present directory {temp_dir}") shutil.rmtree(temp_dir) # copy test framework tester_dir = os.path.join(self.tests_dir, tester.folder) logger.info(f"Copying from {tester_dir} to {temp_dir}") shutil.copytree(tester_dir, temp_dir) def show_summary(self, norm: str, missing, errors, to_execute): def get_norm_errors(): def get_fname(line): return norm_func_regex.match(line).group(1) def is_file(line): return norm_func_regex.match(line) return [get_fname(line) for line in norm.splitlines() if is_file(line)] norm_errors = get_norm_errors() error_funcs = set() for results in errors: error_funcs = error_funcs.union(results[1]) has_errors = missing or norm_errors or error_funcs if (not has_errors): print() print(f"{TC.CYAN}╔══════════════════════════════════════════════════════════════════════════════╗") print(f"{TC.CYAN}║ 🎉🥳 {TC.B_GREEN}All tests passed! 
Congratulations!{TC.CYAN} 🥳🎉 ║") print(f"{TC.CYAN}╚══════════════════════════════════════════════════════════════════════════════╝") print(TC.NC) logger.info("All tests ok!") return True print(f"\n{TC.B_CYAN}Summary{TC.NC}: {'' if has_bonus() else 'no bonus'}") logger.warn(f"norminette errors: {norm_errors}") if norm_errors: print(f"\n{TC.B_YELLOW}Norminette Errors{TC.NC}:", ', '.join(norm_errors)) logger.warn(f"missing functions: {missing}") if missing: print(f"\n{TC.B_RED}Missing functions{TC.NC}: {', '.join(missing)}") logger.warn(f"errors in functions: {errors}") if error_funcs: print(f"\n{TC.B_RED}Failed tests{TC.NC}: {', '.join(error_funcs)}") tests_ok = [test for test in to_execute if test not in errors] if tests_ok: print(f"\n{TC.B_GREEN}Passed tests{TC.NC}: {', '.join(tests_ok)}") exit(0)
python
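# show_summary() above extracts the offending file names from norminette's output
# with norm_func_regex. A quick stand-alone check of that parsing step; the sample
# output lines below are invented for illustration, not real norminette output:

import re

norm_func_regex = re.compile(r"^([\w\\/]+\.(?:c|h)): Error!")

sample_output = """\
ft_strlen.c: Error!
Line:   12   TOO_MANY_TABS
ft_substr.c: OK!
libft.h: Error!
Line:    3   SPACE_BEFORE_FUNC
"""

norm_errors = [norm_func_regex.match(line).group(1)
               for line in sample_output.splitlines()
               if norm_func_regex.match(line)]
print(norm_errors)  # ['ft_strlen.c', 'libft.h']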
# flake8: noqa
from .random_word import RandomWord, NoWordsToChoseFrom, Defaults
from .random_sentence import RandomSentence

__author__ = "Maxim R."
__copyright__ = "Copyright 2020, Wonderwords"
__credits__ = ["Maxim R."]
__license__ = "MIT"
__version__ = "2.2.0"
__maintainer__ = "Maxim R."
__email__ = "[email protected]"
__status__ = "Production"
python
# Wesley Dias (1st Semester ADS-B), List XI

# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

# Extra exercises


# G. verbing
# Given a string, if its length is at least 3,
# append 'ing' to the end.
# If the string already ends in 'ing', append 'ly' instead.
def verbing(s):
    if len(s) >= 3:
        if s[-3:] == 'ing':
            s += 'ly'
        else:
            s += 'ing'
    return s


# H. not_bad
# Given a string, find the first occurrence of 'not' and of 'bad'.
# If 'bad' appears after 'not', replace the whole 'not' ... 'bad' span with 'good'.
# So 'This dinner is not that bad!' returns 'This dinner is good!'
def not_bad(s):
    if s.count('not') > 0 and s.count('bad') > 0:
        if s.index('not') < s.index('bad'):
            s = s.replace(s[s.index('not'):s.index('bad') + 3], 'good')
    return s


# I. inicio_final
# Split each string into two pieces.
# If the string has an odd number of characters,
# the first piece gets the extra character.
# Example: 'abcde' splits into 'abc' and 'de'.
# Given 2 strings, a and b, return the string
# a-front + b-front + a-back + b-back
def inicio_final(a, b):
    if len(a) % 2 == 0 and len(b) % 2 == 0:
        final = a[:len(a) // 2] + b[:len(b) // 2] + a[len(a) // 2:] + b[len(b) // 2:]
    elif len(a) % 2 != 0 and len(b) % 2 == 0:
        final = a[:len(a) // 2 + 1] + b[:len(b) // 2] + a[len(a) // 2 + 1:] + b[len(b) // 2:]
    elif len(a) % 2 == 0 and len(b) % 2 != 0:
        final = a[:len(a) // 2] + b[:len(b) // 2 + 1] + a[len(a) // 2:] + b[len(b) // 2 + 1:]
    else:
        final = a[:len(a) // 2 + 1] + b[:len(b) // 2 + 1] + a[len(a) // 2 + 1:] + b[len(b) // 2 + 1:]
    return final


# J. trailing zeros
# Count how many zeros a positive integer ends with.
# Example: 10010 has 1 trailing zero and 908007000 has three.
def zf(n):
    cont = 0
    for num in str(n)[::-1]:
        if num == '0':
            cont += 1
        else:
            break
    return cont


# K. count 2
# Count how many times the digit 2 appears between 0 and n-1.
# Example: for n = 20 the digit 2 appears twice between 0 and 19.
def conta2(n):
    cont = 0
    for num in range(n):
        cont += str(num).count(str(2))
    return cont


# L. leading power of 2
# Given a positive integer n, return the exponent of the first power of 2
# whose decimal representation starts with n.
# Example: for n = 65 it returns 16 because 2**16 = 65536.
def inip2(n):
    cont = 0
    while True:
        cont += 1
        if str(n) == str(2**cont)[:len(str(n))]:
            break
    return cont


def test(obtido, esperado):
    if obtido == esperado:
        prefixo = ' Well done!'
    else:
        prefixo = ' Not yet'
    print('%s got: %s expected: %s' % (prefixo, repr(obtido), repr(esperado)))


def main():
    print('verbing')
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print()
    print('not_bad')
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print()
    print('inicio_final')
    test(inicio_final('abcd', 'xy'), 'abxcdy')
    test(inicio_final('abcde', 'xyz'), 'abcxydez')
    test(inicio_final('Kitten', 'Donut'), 'KitDontenut')
    print()
    print('trailing zeros')
    test(zf(10100100010000), 4)
    test(zf(90000000000000000010), 1)
    print()
    print('count 2')
    test(conta2(20), 2)
    test(conta2(999), 300)
    test(conta2(555), 216)
    print()
    print('leading power of 2')
    test(inip2(7), 46)
    test(inip2(133), 316)
    test(inip2(1024), 10)


if __name__ == '__main__':
    main()
python
#!/usr/bin/env python from setuptools import setup, find_packages __VERSION__ = '5.0.1' setup( name='sanetime_py3', version=__VERSION__, author='prior', author_email='[email protected]', maintainer='finkernagel', maintainer_email='[email protected]', packages=find_packages(), url='http://github.com/TyberiusPrime/sanetime', download_url='https://github.com/TyberiusPrime/sanetime/tarball/v%s'%__VERSION__, license="MIT License", description='A sane date/time python interface: better epoch time, timezones, and deltas -- django support as well. Now with python3 support', long_description=open('README.md').read(), long_description_content_type="text/markdown", install_requires=[ 'pytz', 'python-dateutil', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3.5', 'Topic :: Database', 'Topic :: Internet', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Internet :: WWW/HTTP :: WSGI', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Information Analysis', 'Topic :: Scientific/Engineering :: Physics', 'Topic :: Software Development', 'Topic :: Software Development :: Internationalization', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Software Development :: Localization', 'Topic :: Utilities', ], include_package_data=True, test_suite='sanetime.test', platforms=['any'] )
python
""" @Author: huuuuusy @GitHub: https://github.com/huuuuusy 系统: Ubuntu 18.04 IDE: VS Code 1.36 工具: python == 3.7.3 """ """ 思路: 换个消除长度差的方式:拼接两链表。 设长-短链表为 C ,短-长链表为 D ,则当 C 走到长短链表交接处时, D 走在长链表中,且与长链表头距离为 长度差; 链接: https://leetcode-cn.com/problems/two-sum/solution/intersection-of-two-linked-lists-shuang-zhi-zhen-l/ 结果: 执行用时 : 240 ms, 在所有 Python3 提交中击败了81.03%的用户 内存消耗 : 41.8 MB, 在所有 Python3 提交中击败了30.4%的用户 """ # Definition for singly-linked list. # class ListNode(object): # def __init__(self, x): # self.val = x # self.next = None class Solution(object): def getIntersectionNode(self, headA, headB): """ :type head1, head1: ListNode :rtype: ListNode """ # 最开始ha,hb在两个链表的头节点 ha = headA hb = headB # 如果两个节点不同 while ha != hb: # ha将继续向后进行,直到链表A被遍历完,然后ha开始遍历链表B # hb同理 # 这一操作类似于进行拼接 ha = ha.next if ha else headB hb = hb.next if hb else headA # 最终返回ha(此时ha=hb,是相交点) return ha
python
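# A small driver for the two-pointer solution above: build two lists that share a
# tail and check that the first shared node is returned. ListNode is the usual
# LeetCode scaffold, reproduced here so the snippet runs on its own:

class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


# shared tail: 8 -> 4 -> 5
shared = ListNode(8)
shared.next = ListNode(4)
shared.next.next = ListNode(5)

# list A: 4 -> 1 -> 8 -> 4 -> 5
headA = ListNode(4)
headA.next = ListNode(1)
headA.next.next = shared

# list B: 5 -> 6 -> 1 -> 8 -> 4 -> 5
headB = ListNode(5)
headB.next = ListNode(6)
headB.next.next = ListNode(1)
headB.next.next.next = shared

node = Solution().getIntersectionNode(headA, headB)
print(node.val)  # 8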
import asyncio
import aiopg
import psycopg2
from aiopg.transaction import Transaction, IsolationLevel

dsn = 'dbname=aiopg user=aiopg password=passwd host=127.0.0.1'


async def transaction(cur, isolation_level, readonly=False, deferrable=False):
    transaction = Transaction(cur, isolation_level, readonly, deferrable)

    await transaction.begin()
    try:
        await cur.execute('insert into tbl values (1)')

        await transaction.savepoint()
        try:
            await cur.execute('insert into tbl values (3)')
            await transaction.release_savepoint()
        except psycopg2.Error:
            await transaction.rollback_savepoint()

        await cur.execute('insert into tbl values (4)')
        await transaction.commit()
    except psycopg2.Error:
        await transaction.rollback()


async def main():
    pool = await aiopg.create_pool(dsn)
    async with pool.cursor() as cur:
        await transaction(cur, IsolationLevel.repeatable_read)
        await transaction(cur, IsolationLevel.read_committed)
        await transaction(cur, IsolationLevel.serializable)

        # cursor.execute is a coroutine and must be awaited
        await cur.execute('select * from tbl')

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
python
for c in input():print(c,(min((abs(ord(c)-ord(v)),v)for v in'aeiou')[1]+((chr(ord(c)+1)if chr(ord(c)+1)not in'aeiou'else chr(ord(c)+2))if c!='z'else'z'))if c not in('aeiou')else'',sep='',end='')
python
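# A readable equivalent of the golfed one-liner above, kept behaviour-for-behaviour
# the same as far as the code reveals (this is an interpretation, not the original
# author's reference solution): every character is echoed, and each non-vowel is
# followed by its nearest vowel (alphabet distance, ties going to the earlier
# vowel) and then the next non-vowel character, with 'z' mapping to itself.

VOWELS = 'aeiou'


def expand(text):
    out = []
    for c in text:
        out.append(c)
        if c not in VOWELS:
            # closest vowel by code-point distance; min() breaks ties on the vowel itself
            nearest_vowel = min((abs(ord(c) - ord(v)), v) for v in VOWELS)[1]
            if c == 'z':
                next_consonant = 'z'
            else:
                next_consonant = chr(ord(c) + 1)
                if next_consonant in VOWELS:
                    next_consonant = chr(ord(c) + 2)
            out.append(nearest_vowel + next_consonant)
    return ''.join(out)


print(expand('banana'), end='')  # 'b' -> 'bac', 'n' -> 'nop', vowels pass through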
__all__ = ( "class_definition", "class_prefixes", "class_specifier", "long_class_specifier", "short_class_specifier", "der_class_specifier", "base_prefix", "enum_list", "enumeration_literal", "composition", "language_specification", "external_function_call", "element_list", "element", "import_clause", "import_list", ) from arpeggio import ( Optional, ZeroOrMore, OneOrMore, ) from .. import syntax def class_definition(): # type: ignore """ class_definition = ENCAPSULATED? class_prefixes class_specifier """ return ( Optional(syntax.ENCAPSULATED), class_prefixes, class_specifier, ) def class_prefixes(): # type: ignore """ class_prefixes = PARTIAL? ( CLASS / MODEL / OPERATOR? RECORD / BLOCK / EXPANDABLE? CONNECTOR / TYPE / PACKAGE / (PURE / IMPURE)? OPERATOR? FUNCTION / OPERATOR ) """ return ( Optional(syntax.PARTIAL), [ syntax.CLASS, syntax.MODEL, (Optional(syntax.OPERATOR), syntax.RECORD), syntax.BLOCK, (Optional(syntax.EXPANDABLE), syntax.CONNECTOR), syntax.TYPE, syntax.PACKAGE, ( Optional([syntax.PURE, syntax.IMPURE]), Optional(syntax.OPERATOR), syntax.FUNCTION, ), syntax.OPERATOR, ], ) def class_specifier(): # type: ignore """ class_specifier = long_class_specifier / short_class_specifier / der_class_specifier """ return [ long_class_specifier, short_class_specifier, der_class_specifier, ] def long_class_specifier(): # type: ignore """ long_class_specifier = EXTENDS IDENT class_modification? string_comment composition END IDENT / IDENT string_comment composition END IDENT """ return [ ( syntax.EXTENDS, syntax.IDENT, Optional(syntax.class_modification), syntax.string_comment, syntax.composition, syntax.END, syntax.IDENT, ), ( syntax.IDENT, syntax.string_comment, syntax.composition, syntax.END, syntax.IDENT, ), ] def short_class_specifier(): # type: ignore """ short_class_specifier = IDENT "=" ENUMERATION "(" (":" / enum_list?) ")" comment / IDENT "=" base_prefix type_specifier array_subscripts? class_modification? comment """ return [ ( syntax.IDENT, "=", syntax.ENUMERATION, "(", [":", Optional(syntax.enum_list)], ")", syntax.comment, ), ( syntax.IDENT, "=", syntax.base_prefix, syntax.type_specifier, Optional(syntax.array_subscripts), Optional(syntax.class_modification), syntax.comment, ), ] def der_class_specifier(): # type: ignore """ der_class_specifer = IDENT "=" DER "(" type_specifier "," IDENT ("," IDENT)* ")" comment """ return ( syntax.IDENT, "=", syntax.DER, "(", syntax.type_specifier, ",", OneOrMore(syntax.IDENT, sep=","), ")", syntax.comment, ) def base_prefix(): # type: ignore """ base_prefix = (INPUT / OUTPUT)? """ return Optional([syntax.INPUT, syntax.OUTPUT]) def enum_list(): # type: ignore """ enum_list = enumeration_literal ("," enumeration_literal)* """ return OneOrMore(syntax.enumeration_literal, sep=",") def enumeration_literal(): # type: ignore """ enumeration_literal = IDENT comment """ return syntax.IDENT, syntax.comment def composition(): # type: ignore """ composition = element_list ( PUBLIC element_list / PROTECTED element_list / equation_section / algorithm_section )* ( EXTERNAL language_specification? external_function_call? annotation? ";" )? (annotation ";")? 
""" return ( syntax.element_list, ZeroOrMore( [ (syntax.PUBLIC, syntax.element_list), # type: ignore (syntax.PROTECTED, syntax.element_list), # type: ignore syntax.equation_section, syntax.algorithm_section, ] ), Optional( syntax.EXTERNAL, Optional(syntax.language_specification), Optional(syntax.external_function_call), Optional(syntax.annotation), ";", ), Optional(syntax.annotation, ";"), ) def language_specification(): # type: ignore """ language_specification = STRING """ return syntax.STRING def external_function_call(): # type: ignore """ external_function_call = (component_reference "=")? IDENT "(" expression_list? ")" """ return ( Optional(syntax.component_reference, "="), syntax.IDENT, "(", Optional(syntax.expression_list), ")", ) def element_list(): # type: ignore """ element_list = (element ";")* """ return ZeroOrMore(syntax.element, ";") def element(): # type: ignore """ element = import_clause extends_clause / REDECLARE? FINAL? INNER? OUTER? ( REPLACEABLE (class_definition / component_clause) (constraining_clause comment)? / (class_definition / component_clause) ) """ return [ syntax.import_clause, syntax.extends_clause, ( Optional(syntax.REDECLARE), Optional(syntax.FINAL), Optional(syntax.INNER), Optional(syntax.OUTER), [ ( syntax.REPLACEABLE, [syntax.class_definition, syntax.component_clause], Optional(syntax.constraining_clause, syntax.comment), ), [syntax.class_definition, syntax.component_clause], ], ), ] def import_clause(): # type: ignore """ import_clause = import ( IDENT "=" name / name ("." ("*" / "{" import_list "}") )? ) comment """ return ( syntax.IMPORT, [ (syntax.IDENT, "=", syntax.name), ( syntax.name, Optional( ".", [ "*", ("{", syntax.import_list, "}"), # type: ignore ], ), ), ], syntax.comment, ) def import_list(): # type: ignore """ import_list = IDENT ("," IDENT)* """ return OneOrMore(syntax.IDENT, sep=",")
python
import numpy as np from pyyolo import BBox from collections import OrderedDict class TrackedObject: def __init__(self, timestamp: int, bbox: BBox): self.initial_timestamp = timestamp self.max_timestamp = timestamp self.nframes = 1 self.max_bbox = bbox self.curr_bbox = bbox self.centroid = self._find_centroid(bbox) def update(self, timestamp:int, bbox: BBox) -> None: self.centroid = self._find_centroid(bbox) self.nframes += 1 self.curr_bbox = bbox if self._area(bbox) > self._area(self.max_bbox): self.max_bbox = bbox self.max_timestamp = timestamp def _find_centroid(self, bbox: BBox) -> np.array: xmin, ymin, xmax, ymax = bbox.to_xyxy() return np.array([np.mean([xmin, xmax]), np.mean([ymin, ymax])]) def _area(self, bbox: BBox): return bbox.w * bbox.h def __str__(self): return 'intial_timestamp: ' + str(self.initial_timestamp) + '\nmax_timestamp: ' + str(self.max_timestamp) + '\nnframes: ' + str(self.nframes) + \ '\nmax_bbox: ' + str(self.max_bbox) + '\ncentroid: ' + str(self.centroid) + '\ncurr_bbox: ' + str(self.curr_bbox) class CentroidTracker: def __init__(self, max_disappeared=15, max_distance=5): self.next_id = 0 self.output_log = OrderedDict() self.registered = OrderedDict() self.disappeared = OrderedDict() self.max_disappeared = max_disappeared self.max_distance = max_distance def update(self, detected: list, timestamp: int) -> None: # Take in a list of detected bounding boxes from our yolo detector # update the registered centroids we're keeping track of if len(self.registered) == 0: # initial case, register all detected objects for bbox in detected: self.registered[self.next_id] = TrackedObject(timestamp, bbox) self.next_id += 1 else: # Try to match detected objects to what we have registered unmatched = set(self.registered.keys()) new_objects = [] for i, bbox in enumerate(detected): nn = self._find_neighbor(bbox) if nn in unmatched: unmatched.remove(nn) self.disappeared[nn] = 0 self.registered[nn].update(timestamp, bbox) elif nn == -1: new_objects.append(i) # register a new object for i in new_objects: self.registered[self.next_id] = TrackedObject(timestamp, detected[i]) self.next_id += 1 # deregister an old object which has been gone for too long for id in unmatched: if id not in self.disappeared.keys(): self.disappeared[id] = 0 self.disappeared[id] += 1 if self.disappeared[id] > self.max_disappeared: self._deregister(id) return def signal_end(self) -> OrderedDict: for id in list(self.registered.keys()): self._deregister(id) return self.output_log def get_registered_objects(self) -> OrderedDict: return self.registered def _deregister(self, id: int): self.output_log[id] = self.registered.pop(id) def _find_neighbor(self, bbox: BBox) -> int: min_idx = -1 min_dist = 100000 c = self._find_centroid(bbox) for idx in self.registered.keys(): obj = self.registered[idx] dist = np.linalg.norm(c - obj.centroid) if dist < min_dist and dist < self.max_distance: min_idx = idx min_dist = dist return min_idx def _find_centroid(self, bbox: BBox) -> np.array: xmin, ymin, xmax, ymax = bbox.to_xyxy() return np.array([np.mean([xmin, xmax]), np.mean([ymin, ymax])])
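# Usage sketch (illustrative only): drive CentroidTracker with a minimal
# stand-in for pyyolo.BBox. The tracker above only relies on a box exposing
# .w, .h and .to_xyxy(), which the hypothetical _DemoBBox provides.
class _DemoBBox:
    def __init__(self, x, y, w, h):
        self.x, self.y, self.w, self.h = x, y, w, h

    def to_xyxy(self):
        return self.x, self.y, self.x + self.w, self.y + self.h


if __name__ == '__main__':
    tracker = CentroidTracker(max_disappeared=15, max_distance=5)
    tracker.update([_DemoBBox(10, 10, 4, 4)], timestamp=0)  # registers id 0
    tracker.update([_DemoBBox(11, 11, 4, 4)], timestamp=1)  # matched to id 0 (centroid moved ~1.4 px)
    log = tracker.signal_end()                               # deregisters everything
    print(log[0])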
python
import pytest, torch, fastai from fastai.gen_doc.doctest import this_tests from fastai.torch_core import * from fastai.layers import * from math import isclose a=[1,2,3] exp=torch.tensor(a) b=[3,6,6] def test_tensor_with_list(): this_tests(tensor) r = tensor(a) assert torch.all(r==exp) def test_tensor_with_ndarray(): this_tests(tensor) b=np.array(a, dtype=np.int64) r = tensor(b) assert np_address(r.numpy()) == np_address(b) assert torch.all(r==exp) def test_tensor_with_tensor(): this_tests(tensor) c=torch.tensor(a) r = tensor(c) assert r.data_ptr()==c.data_ptr() assert torch.all(r==exp) def test_requires_grad(): this_tests(requires_grad) m = simple_cnn(b) assert requires_grad(m) == True def test_requires_grad_set(): this_tests(requires_grad) m = simple_cnn(b) requires_grad(m,False) allF = np.all([not p.requires_grad for p in m.parameters()]) assert allF, "requires_grad(m,False) did not set all parameters to False" requires_grad(m,True) allT = np.all([p.requires_grad for p in m.parameters()]) assert allT, "requires_grad(m,True) did not set all parameters to True" def test_apply_init(): this_tests(apply_leaf, apply_init) m = simple_cnn(b,bn=True) all2 = lambda m: nn.init.constant_(m.weight,0.2) if hasattr(m, 'weight') else m all7 = lambda m: nn.init.constant_(m,0.7) apply_leaf(m,all2) apply_init(m,all7) conv1_w = torch.full([6,3,3,3],0.7) bn1_w = torch.full([6],0.2) assert conv1_w.equal(m[0][0].weight), "Expected first colvulition layer's weights to be %r" % conv1_w assert bn1_w.equal(m[0][2].weight), "Expected first batch norm layers weights to be %r" % bn1_w def test_in_channels(): this_tests(in_channels) m = simple_cnn(b) assert in_channels(m) == 3 def test_in_channels_no_weights(): this_tests(in_channels) with pytest.raises(Exception) as e_info: in_channels(nn.Sequential()) assert e_info.value.args[0] == 'No weight layer' def test_range_children(): this_tests(range_children) m = simple_cnn(b) assert len(range_children(m)) == 3 def test_split_model(): this_tests(split_model) m = simple_cnn(b) pool = split_model(m,[m[2][0]])[1][0] assert pool == m[2][0], "Did not properly split at adaptive pooling layer" def test_split_no_wd_params(): this_tests(split_no_wd_params) groups = split_no_wd_params(simple_cnn((1, 1, 1), bn=True)) assert len(groups[0]) == 1 assert len(groups[1]) == 2 def test_set_bn_eval(): this_tests(set_bn_eval) m = simple_cnn(b,bn=True) requires_grad(m,False) set_bn_eval(m) assert m[0][2].training == False, "Batch norm layer not properly set to eval mode" def test_np2model_tensor(): this_tests(np2model_tensor) a = np.ones([2,2]) t = np2model_tensor(a) assert isinstance(t,torch.FloatTensor) def test_calc_loss(): this_tests(calc_loss) y_pred = torch.ones([3,8], requires_grad=True) y_true = torch.zeros([3],dtype=torch.long) loss = nn.CrossEntropyLoss() loss = calc_loss(y_pred,y_true,loss) assert isclose(loss.sum(),6.23,abs_tol=1e-2), "final loss does not seem to be correct" loss = F.cross_entropy loss = calc_loss(y_pred,y_true,loss) assert isclose(loss.sum(),6.23,abs_tol=1e-2), "final loss without reduction does not seem to be correct" def test_tensor_array_monkey_patch(): this_tests('na') t = torch.ones(a) t = np.array(t) assert np.all(t == t), "Tensors did not properly convert to numpy arrays" t = torch.ones(a) t = np.array(t,dtype=float) assert np.all(t == t), "Tensors did not properly convert to numpy arrays with a dtype set" def test_keep_parameter(): sa = SelfAttention(128) this_tests(SelfAttention) flat = nn.Sequential(*flatten_model(sa)) for p in sa.parameters(): assert 
id(p) in [id(a) for a in flat.parameters()]
python
#!/usr/bin/env python2 ########################################################## # # Script: txt2float.py # # Description: Convert GMT text grid files into float # ########################################################## # Basic modules import os import sys import struct from ParseHeader import * class txt2float: def __init__(self, hdr, infile, outfile): self.valid = False self.hdr = hdr self.infile = infile self.outfile = outfile self.valid = True def isValid(self): return self.valid def cleanup(self): return def _parseHdr(self): fp = open(self.hdr, 'r') data = fp.readlines() fp.close() p = ParseConfig(data) p.showDict() config = p.getDict() self.ncols = int(config['ncols']) self.nrows = int(config['nrows']) return(0) def main(self): # Parse header print "Parsing data header" self._parseHdr() ifp = open(self.infile, 'rb') ofp = open(self.outfile, 'wb') for j in xrange(0, self.nrows): for i in xrange(0, self.ncols): buf = ifp.readline() val = float(buf.split()[2]) buf = struct.pack('f', val) ofp.write(buf) ifp.close() ofp.close() return 0 def usage(): print "usage: %s <hdr> <infile> <outfile>" % (sys.argv[0]) return if __name__ == '__main__': if (len(sys.argv) != 4): usage() sys.exit(1) hdr = sys.argv[1] infile = sys.argv[2] outfile = sys.argv[3] prog = txt2float(hdr, infile, outfile) sys.exit(prog.main())
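# Example inputs (hypothetical, for illustration): the header is parsed by
# ParseHeader for its grid dimensions (the exact key/value syntax is whatever
# ParseConfig accepts), and every line of the GMT text grid must carry at
# least three whitespace-separated columns, with the value read from the
# third one (x y z).
#
#   example.hdr:
#       ncols 3
#       nrows 2
#
#   example.txt (one "x y z" line per grid node, row by row):
#       -118.0 34.0 125.5
#       -117.9 34.0 126.0
#       ...
#
#   python2 txt2float.py example.hdr example.txt example.flt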
python
# HDM - Heading - Magnetic
# Vessel heading in degrees with respect to magnetic north produced by any
# device or system producing magnetic heading.
# $--HDM,x.x,M*hh<CR><LF>
#   Heading Degrees, magnetic
#   M = magnetic
#   Checksum
class hdm():

    # Constructor
    def __init__(self):
        # Switch this on for verbose processing
        self.debug = 1

    @staticmethod
    def parse(sentence):
        # Minimal parse: return the magnetic heading in degrees, or None if
        # the sentence is not a well-formed HDM sentence.
        try:
            body = sentence.strip().lstrip('$').split('*')[0]
            fields = body.split(',')
            if not fields[0].endswith('HDM') or fields[2] != 'M':
                return None
            return float(fields[1])
        except (IndexError, ValueError):
            return None

    @staticmethod
    def create(heading, talker='HC'):
        # Minimal create: build "$--HDM,x.x,M*hh" with the standard NMEA
        # checksum (XOR of every character between '$' and '*').
        body = '%sHDM,%.1f,M' % (talker, heading)
        checksum = 0
        for char in body:
            checksum ^= ord(char)
        return '$%s*%02X' % (body, checksum)
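# Quick check of the minimal parse/create pair above; the heading value and
# checksum shown are consistent with the XOR checksum rule.
if __name__ == '__main__':
    sentence = hdm.create(98.3)    # -> '$HCHDM,98.3,M*1B'
    print(sentence)
    print(hdm.parse(sentence))     # -> 98.3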
python
import os from conans import ConanFile, tools class CppnanomsgConan(ConanFile): name = "cppnanomsg" version = "20181216" _commit_id = "a36d44db1827a36bbd3868825c1b82d23f10e491" description = "C++ binding for nanomsg" topics = ("conan", "cppnanomsg", "nanomsg", "binding") url = "https://github.com/bincrafters/conan-cppnanomsg" homepage = "https://github.com/nanomsg/cppnanomsg" license = "MIT" requires = ("nanomsg/1.1.2@bincrafters/stable") _source_subfolder = "source_subfolder" def source(self): tools.get("{0}/archive/{1}.zip".format(self.homepage, self._commit_id), sha256="a857c0d4698cb68128071711fc9c3e7aaa7751f4d6f20d9ba2e86d94ce6695d7") extracted_dir = self.name + "-" + self._commit_id os.rename(extracted_dir, self._source_subfolder) def package(self): self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder) self.copy("nn.hpp", dst="include/cppnanomsg", src=self._source_subfolder) def package_id(self): self.info.header_only()
python
# Definition for binary tree with next pointer.
# class TreeLinkNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
#         self.next = None

from collections import deque


class Solution:
    # O(n) space
    # @param root, a tree link node
    # @return nothing
    def connect(self, root):
        if not root:
            return
        q = deque([root])
        levelsize = 1
        while q:
            lastnode = None
            for i in xrange(levelsize):
                node = q.popleft()
                if lastnode is not None:
                    lastnode.next = node
                lastnode = node
                if node.left:
                    q.append(node.left)
                    q.append(node.right)
            levelsize <<= 1


class Solution:
    # O(1) space
    # @param root, a tree link node
    # @return nothing
    def connect(self, root):
        while root and root.left:
            p = root
            while p:
                p.left.next = p.right
                p.right.next = p.next and p.next.left
                p = p.next
            root = root.left
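# Worked example (illustrative): a 3-node perfect tree run through the O(1)
# space version above. TreeLinkNode is only sketched in the comment header,
# so a minimal stand-in is defined here.
class TreeLinkNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
        self.next = None


if __name__ == '__main__':
    root = TreeLinkNode(1)
    root.left, root.right = TreeLinkNode(2), TreeLinkNode(3)
    Solution().connect(root)
    print root.left.next.val   # 3 -- left child now points at its sibling
    print root.right.next      # None -- last node in its level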
python
# -*- coding: utf-8 -*- # _mod1.py # Module providing the mod1 function # Copyright 2013 Giuseppe Venturini # This file is part of python-deltasigma. # # python-deltasigma is a 1:1 Python replacement of Richard Schreier's # MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based. # The delta sigma toolbox is (c) 2009, Richard Schreier. # # python-deltasigma is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # LICENSE file for the licensing terms. """Module providing the mod1() utility function """ import numpy as np from ._calculateTF import calculateTF def mod1(): """A description of the first-order modulator. **Returns:** ABCD, NTF, STF : a tuple of (ndarray, lti, lti) The elements are the ABCD matrix (ndarray), the NTF (LTI object), the STF (LTI object). .. note:: If a version of the ``scipy`` library equal to 0.16.x or greater is in use, the NTF and STF objects will be ``ZeroPolesGain`` objects, a subclass of the scipy LTI object (``scipy.signal.lti``). """ A = np.array([[1.]]) B = np.array([[1., -1.]]) C = np.array([[1.]]) D = np.array([[0., 0.]]) ABCD = np.vstack((np.hstack((A, B)), np.hstack((C, D)))) H, G = calculateTF(ABCD) return ABCD, H, G
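# Usage sketch: callers typically do
#
#     from deltasigma import mod1
#     ABCD, ntf, stf = mod1()
#
# where, from the A, B, C, D blocks assembled above, ABCD evaluates to
#
#     [[ 1.,  1., -1.],
#      [ 1.,  0.,  0.]]
#
# and ntf/stf are the LTI objects returned by calculateTF for that ABCD.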
python
from wordsearch.trie import TrieNode import unittest, re def recursive_equal(first, second): """ Return True if the tree rooted by "first" is identical to the tree rooted by "second", i.e. all the nodes and edges are identical. """ first_queue = [first] second_queue = [second] while first_queue and second_queue: first_item = first_queue.pop() second_item = second_queue.pop() if first_item != second_item: return False first_queue.extend(sorted(first_item.children.values(), key=lambda x: x.letter)) second_queue.extend(sorted(second_item.children.values(), key=lambda x: x.letter)) if len(first_queue) != len(second_queue): return False return True class TestRecursiveEqual(unittest.TestCase): def test_equal(self): self.assertTrue(recursive_equal( TrieNode(words=['amp', 'ack', 'bus']), TrieNode(words=['amp', 'ack', 'bus']) )) def test_not_equal(self): self.assertFalse(recursive_equal( TrieNode(words=['amp', 'ack', 'bus']), TrieNode(words=['amm', 'ack', 'bus']) )) self.assertFalse(recursive_equal( TrieNode(words=['am', 'ac', 'bus']), TrieNode(words=['amm', 'ack', 'bus']) )) self.assertFalse(recursive_equal( TrieNode(words=['am', 'ac', 'bus']), TrieNode(words=['am', 'ack', 'bus', 'bar']) )) class TestTrie(unittest.TestCase): def setUp(self): self.reference_root = TrieNode(children=[ TrieNode('a', children=[ TrieNode('m', children=[ TrieNode('p', word_end=True) ]), TrieNode('c', children=[ TrieNode('k', word_end=True) ]) ]), TrieNode('b', children=[ TrieNode('u', children=[ TrieNode('s', word_end=True) ]) ]) ]) def test_root(self): root = TrieNode() self.assertEqual(root.children, {}) self.assertEqual(root.letter, None) def test_equals(self): self.assertEqual(TrieNode(), TrieNode()) self.assertEqual(TrieNode('a'), TrieNode('a')) self.assertEqual(TrieNode(children=[TrieNode('a')]), TrieNode(children=[TrieNode('a')])) self.assertEqual(TrieNode('a', children=[TrieNode('b')]), TrieNode('a', children=[TrieNode('b')])) self.assertEqual(TrieNode('a', word_end=True), TrieNode('a', word_end=True)) def test_not_equals(self): self.assertNotEqual(TrieNode(), TrieNode('a')) self.assertNotEqual(TrieNode(), TrieNode(children=[TrieNode('a')])) self.assertNotEqual(TrieNode('a'), TrieNode('b')) self.assertNotEqual(TrieNode(children=[TrieNode('a')]), TrieNode(children=[TrieNode('b')])) self.assertNotEqual(TrieNode('c', children=[TrieNode('a')]), TrieNode('d', children=[TrieNode('a')])) self.assertNotEqual(TrieNode('c', children=[TrieNode('a')]), TrieNode('c', children=[TrieNode('b')])) self.assertNotEqual(TrieNode('a'), TrieNode('a', word_end=True)) def test_construct_with_children(self): root = TrieNode(children=[TrieNode('a'), TrieNode('b')]) self.assertEqual(root.letter, None) self.assertTrue('a' in root.children) self.assertEqual(root.children['a'], TrieNode('a')) def test_construct_with_children_other_iterator(self): root = TrieNode(children=(TrieNode('a'), TrieNode('b'))) self.assertEqual(root.letter, None) self.assertTrue('a' in root.children) self.assertEqual(root.children['a'], TrieNode('a')) def test_none_in_children(self): self.assertRaises(ValueError, lambda: TrieNode(children=[TrieNode()])) def test_lowers_letter(self): self.assertEqual(TrieNode('A'), TrieNode('a')) def test_only_one_letter(self): self.assertRaises(ValueError, lambda: TrieNode('ab')) def test_init_children_or_words(self): self.assertRaises(ValueError, lambda: TrieNode(children=[TrieNode('a')], words=['b'])) try: TrieNode(children=[TrieNode('a')]) except ValueError: self.fail("Should not get a ValueError when building TrieNode with only 
children.") try: TrieNode(words=['foo']) except ValueError: self.fail("Should not get a ValueError when building TrieNode with only words") try: TrieNode() except ValueError: self.fail("Should not get a ValueError when building TrieNode with no children or words") def test_index(self): root = TrieNode() root.index('amp', 'ack', 'bus') self.assertTrue(recursive_equal(root, self.reference_root)) def test_index_on_child(self): self.assertRaises(ValueError, lambda: self.reference_root.children['a'].index('foo')) def test_construct_with_words(self): root = TrieNode(words=['amp', 'ack', 'bus']) self.assertEqual(root, self.reference_root) def test_construct_with_words_other_iterator(self): root = TrieNode(words={'amp', 'ack', 'bus'}) self.assertEqual(root, self.reference_root) def test_construct_empty_wordlist(self): self.assertEqual(TrieNode(words=[]), TrieNode()) def test_full_does_contain(self): self.assertTrue(self.reference_root.contains('amp')) self.assertTrue(self.reference_root.contains('ack')) self.assertTrue(self.reference_root.contains('bus')) def test_partial_does_contain(self): self.assertFalse(self.reference_root.contains('a')) self.assertFalse(self.reference_root.contains('ac')) self.assertFalse(self.reference_root.contains('bu')) def test_partial_does_contain_prefix(self): self.assertTrue(self.reference_root.contains('a', prefix=True)) self.assertTrue(self.reference_root.contains('ac', prefix=True)) self.assertTrue(self.reference_root.contains('bu', prefix=True)) def test_does_not_contain(self): self.assertFalse(self.reference_root.contains('car')) self.assertFalse(self.reference_root.contains('candy')) self.assertFalse(self.reference_root.contains('amd')) self.assertFalse(self.reference_root.contains('adc')) self.assertFalse(self.reference_root.contains('bur')) self.assertFalse(self.reference_root.contains('apple')) def test_dunder_contains(self): self.assertTrue('amp' in self.reference_root) self.assertFalse('a' in self.reference_root) self.assertFalse('car' in self.reference_root) def test_repr(self): node = TrieNode('a', children=[TrieNode('b'), TrieNode('c')]) regex = re.match( r"^TrieNode\(letter=a, children=\{([bc], [bc])\}, word_end=False\)$", repr(node) ) self.assertFalse(regex == None) # Compare a set of the children so order doesn't matter self.assertEqual({w.strip() for w in regex.group(1).split(',')}, set(node.children)) def test_add_chilren(self): root = TrieNode() root._add_children(TrieNode('a')) self.assertTrue('a' in root.children) self.assertEqual(root.children['a'], TrieNode('a'))
python
#!/usr/bin/env python import sys import random import importlib def baseline(a): a.sort() return a def test(a): print SORTFUNCSTR, ": ", print a, a = SORTFUNC(a) # check invariant for i in range(1, len(a)): assert a[i] >= a[i-1] print " --> ", print a SORTFUNC = baseline SORTFUNCSTR = "baseline" if len(sys.argv) > 1: SORTFUNCSTR = sys.argv[1] SORTMODULE = importlib.import_module(SORTFUNCSTR) SORTFUNC = SORTMODULE.sort test([0,1,2,3,4,5,6,7,8,9]) test([9,8,7,6,5,4,3,2,1,0]) test([1,1,1,1,1,1,1,1,1,1]) test([1,2,3,4,3,2,1,4,3,2]) test([int(10*random.random()) for i in xrange(10)]) try: test(SORTMODULE.testlist) except: pass
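# Example of a pluggable sort module for the harness above (hypothetical file
# name "insertion.py"): it only has to expose sort(a), and may optionally
# define a module-level `testlist` that the harness will also run.
#
#   # insertion.py
#   def sort(a):
#       for i in range(1, len(a)):
#           key = a[i]
#           j = i - 1
#           while j >= 0 and a[j] > key:
#               a[j + 1] = a[j]
#               j -= 1
#           a[j + 1] = key
#       return a
#
#   testlist = [5, 3, 8, 1]
#
# Invoked as: python2 <this_script>.py insertion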
python
import folium

my_map = folium.Map(location=[40.4059954, 49.8661496], zoom_start=15)
folium.TileLayer('mapquestopen', attr='dasd').add_to(my_map)
folium.LayerControl().add_to(my_map)
my_map.save('templates/map.html')
python
""" -------------------------------------------------------------- Copyright (c) 2017, AIT Austrian Institute of Technology GmbH. All rights reserved. See file PESTO _LICENSE for details. -------------------------------------------------------------- PESTO-client\createClients\createClients.py for 1 user: enables communication on ports creates working directory if needed executes PESTO-client\Instance\Instance.py as Administrator or as a specified user deletes working directory closes ports for more users: creates working directory with subdirectory for all users enables communication on ports creates windows users executes PESTO-client\Instance\Instance.py as the created users deletes windows users deletes workingdirectory closes ports """ import subprocess import sys import os import shutil import time def createWorkingDirectory(workingdir): """ creates working directory if it doesnt exist. """ print('Creating: ' + workingdir, flush=True) if not os.path.isdir(workingdir): try: os.makedirs(workingdir) except Exception as e: print('Error creating the working directory\n', flush=True) print(e, flush=True) return 1 return 0 def createWorkingDirectories(workingdir, number_of_users): """ creates working directories for all users """ for i in range(number_of_users): newpath = workingdir+'\\MyUser'+str(i) print('Creating: ' + newpath, flush=True) if not os.path.isdir(newpath): try: os.makedirs(newpath) except Exception as e: print('Error creating the directory\n', flush=True) print(e, flush=True) return 1 return 0 def deleteWorkingDirectory(workingdir): """ deletes working directory """ if os.path.isdir(workingdir): print('Deleting: ' + workingdir, flush=True) try: shutil.rmtree(workingdir, ignore_errors=False) except Exception as e: print(workingdir + ' couldnt be deleted.\n', flush=True) print(e, flush=True) return 1 def executeOneInstance(PESTO_client, workingdir, resourcesdir, resultsdir, startingport, numberoftheVM, shareddrive, adminpassword, loglevel, username, userpassword): """ executes an Instance waits till it terminates and returns """ port = startingport + numberoftheVM INSTANCE = os.path.join(PESTO_client, 'PESTO-client\\Instance\\Instance.py') try: if username == "None": print('Executing Instance', flush=True) p = subprocess.Popen(['python', INSTANCE, workingdir, workingdir, resultsdir, resourcesdir, str(port), shareddrive, adminpassword, PESTO_client, loglevel]) else: print('Executing Instance as '+ username + ' with password: '+ userpassword, flush=True) p = subprocess.Popen(['psexec.exe', '-n', '60', '-h', '/accepteula', '-u', username, '-p', userpassword, 'python', INSTANCE, workingdir, workingdir, resultsdir, resourcesdir, str(port), shareddrive, adminpassword, PESTO_client, loglevel],stdout=subprocess.PIPE, stderr=subprocess.PIPE ) except Exception as e: print('Error while executing instance. /returned/', flush=True) print(e, flush=True) return 1 # wait process to terminate p.wait() print('Process returned: ', p.returncode, flush=True) return p.returncode def executeInstances(PESTO_client, number_of_users,workingdir, resourcesdir, resultsdir, startingport, numberoftheVM, shareddrive, password, loglevel): """ executes Instances wait till all terminates wihout h doesnt work from remote VM gives the connection 60 sec timeout. 
""" Process = [] for i in range(number_of_users): userworkingdir = workingdir + '\\MyUser'+str(i) port = startingport + (number_of_users*numberoftheVM) + i moreINSTANCEs = os.path.join(PESTO_client, 'PESTO-client\\Instance\\Instance.py') print('Executing instance as MyUser' + str(i), flush=True) try: P = subprocess.Popen(['psexec.exe','-n','60','-h','/accepteula','-u', "MyUser"+str(i) , '-p', 'redhat', 'python', moreINSTANCEs, workingdir, userworkingdir, resultsdir, resourcesdir, str(port),shareddrive,password, PESTO_client, loglevel], stdout=subprocess.PIPE, stderr= subprocess.PIPE) time.sleep(1) except Exception as e: print('Error while executing instance. /returned/',flush=True) print(e,flush=True) return 1 Process.append(P) #wait processes to terminate for p in Process: p.wait() flag = 0 for p in Process: print('Process on MyUser' + str(Process.index(p)) +' returned: ', p.returncode, flush=True) if p.returncode != 0: flag = 1 print('All terminated\n',flush=True) return flag def Create_User_Accounts(number_of_users): """ creates windows users and adds them administrator rights """ print('\n', flush=True) for i in range(number_of_users): print('Creating MyUser'+str(i)+' and giving it administrator rights.', flush=True) try: p = subprocess.Popen(['net', 'user', 'MyUser' + str(i), 'redhat', '/add'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if stdout != b'': print(stdout.decode('utf-8'), flush=True) if stderr != b'': print(stderr.decode('utf-8'), flush=True) except Exception as e: print('Error creating user.\n', flush=True) print(e, flush=True) return 1 try: p = subprocess.Popen(['net', 'localgroup','administrators', 'MyUser' + str(i), '/add'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if stdout != b'': print(stdout.decode('utf-8'), flush=True) if stderr != b'': print(stderr.decode('utf-8'), flush=True) except Exception as e: print('Error giving administrator rights.\n', flush=True) print(e, flush=True) return 1 return 0 def Delete_User_Accounts(number_of_users): """ deletes the created users """ for i in range(number_of_users): print('Deleting MyUser' + str(i) + '.', flush=True) try: p = subprocess.Popen(['net', 'user', 'MyUser' + str(i),'/delete'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if stdout != b'': print(stdout.decode('utf-8') + '\n', flush=True) if stderr != b'': print(stderr.decode('utf-8') + '\n', flush=True) except Exception as e: print('Error occured while deleting the user /process continued/.\n', flush=True) print(e, flush=True) return 1 return def allowPorts(startingPort, numberofUsers, numberoftheVM): """ creates new rules on the firewall for all ports """ firstport = startingPort + (numberoftheVM * numberofUsers) lastport = firstport + numberofUsers - 1 if numberofUsers == 1: ports = str(firstport) else: ports = str(firstport) + '-' + str(lastport) print('Enabling ports: ' + ports, flush=True) command = 'netsh advfirewall firewall add rule name="PESTO" dir=in action=allow protocol=TCP localport=' + ports try: p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if stdout != b'': print(stdout.decode('utf-8') + '\n', flush=True) if stderr != b'': print(stderr.decode('utf-8') + '\n', flush=True) except Exception as e: print('Error occured while enabling ports.\n', flush=True) print(e, flush=True) return 1 return 0 def deletePorts(startingPort, numberofUsers, numberoftheVM): """ deletes ports """ 
firstport = startingPort + (numberoftheVM * numberofUsers) lastport = firstport + numberofUsers - 1 if numberofUsers == 1: ports = str(firstport) else: ports = str(firstport) + '-' + str(lastport) print('Deleting ports: ' + ports, flush=True) command = 'netsh advfirewall firewall delete rule name="PESTO" protocol=tcp localport=' + ports try: p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if stdout != b'': print(stdout.decode('utf-8') + '\n', flush=True) if stderr != b'': print(stderr.decode('utf-8') + '\n', flush=True) except Exception as e: print(e, flush=True) return 0 def runCreateClients(PESTO_client, number_of_users, sharedDrive, resultsdir, workingdir, resourcesdir, numberoftheVM, startingport, password, loglevel, username, userpassword): if number_of_users == 1: #enabling ports retval = allowPorts(startingport, number_of_users, numberoftheVM) if retval == 1: input('Press Enter to continue..') return 1 #creates working directory retval = createWorkingDirectory(workingdir) if retval != 0: deletePorts(startingport, number_of_users, numberoftheVM) input('Press Enter to continue..') return 1 #executing the instance retval = executeOneInstance(PESTO_client, workingdir, resourcesdir, resultsdir, startingport, numberoftheVM, sharedDrive, password, loglevel, username, userpassword) if retval != 0: deleteWorkingDirectory(workingdir) deletePorts(startingport, number_of_users, numberoftheVM) input('Press Enter to continue..') return 1 #deletes working directory retval = deleteWorkingDirectory(workingdir) if retval == 1: deletePorts(startingport, number_of_users, numberoftheVM) input('Press Enter to continue..') return 1 #deletes ports deletePorts(startingport, number_of_users, numberoftheVM) input('ENTER') return 0 else: #creates working directories for all users (workingdir\MyUserX) retval = createWorkingDirectories(workingdir, number_of_users) if retval == 1: input('Press Enter to continue..') return 1 #enabling ports retval = allowPorts(startingport, number_of_users, numberoftheVM) if retval == 1: deleteWorkingDirectory(workingdir) deletePorts(startingport, number_of_users, numberoftheVM) input('Press Enter to continue..') return 1 #creates windows users with admin rights retval = Create_User_Accounts(number_of_users) if retval != 0: deleteWorkingDirectory(workingdir) deletePorts(startingport, number_of_users, numberoftheVM) input('Press Enter to continue..') return 1 retval = executeInstances(PESTO_client, number_of_users, workingdir, resourcesdir, resultsdir, startingPort, numberoftheVM, sharedDrive, password, loglevel) if retval != 0: Delete_User_Accounts(number_of_users) deleteWorkingDirectory(workingdir) deletePorts(startingport, number_of_users, numberoftheVM) input('Press Enter to continue..') return 1 #deletes the created users retval = Delete_User_Accounts(number_of_users) if retval == 1: deleteWorkingDirectory(workingdir) deletePorts(startingport, number_of_users, numberoftheVM) input('Press Enter to continue..') return 1 #deletes working directory retval = deleteWorkingDirectory(workingdir) if retval == 1: deletePorts(startingport, number_of_users, numberoftheVM) input('Press Enter to continue..') return 1 # deletes ports deletePorts(startingport, number_of_users, numberoftheVM) input('ENTER') return 0 if __name__ == '__main__': number_of_users = int(sys.argv[1]) ResultsDir = sys.argv[2] ResourcesDir = sys.argv[3] WorkingDir = sys.argv[4] numberoftheVM = int(sys.argv[5]) sharedDrive = sys.argv[6] startingPort = 
int(sys.argv[7]) password = sys.argv[8] loglevel = sys.argv[9] username = sys.argv[10] userpassword = sys.argv[11] PESTO_client = str.replace(sys.argv[0], r'PESTO-client\createClients\createClients.py', '') runCreateClients(PESTO_client, number_of_users, sharedDrive, ResultsDir, WorkingDir, ResourcesDir, numberoftheVM, startingPort, password, loglevel, username, userpassword)
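# Example invocation (hypothetical values): the script expects exactly eleven
# positional arguments, consumed in the order parsed under __main__ above,
# i.e. number_of_users, ResultsDir, ResourcesDir, WorkingDir, numberoftheVM,
# sharedDrive, startingPort, password, loglevel, username, userpassword.
#
#   python PESTO-client\createClients\createClients.py ^
#       1 D:\results D:\resources D:\work 0 S: 6000 adminpw INFO None None
#
# Passing "None" for username makes executeOneInstance run Instance.py
# directly instead of through psexec.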
python
# At : Thu Apr 30 21:04:44 WIB 2020 import os, sys, time print '\x1b[36m ____ _ _ ' print '\x1b[36m | \\ ___ ___ | |_ ___ ___ | |_ ' print '\x1b[36m | | || -_|| _|| . || . ||_ -|| |' print '\x1b[37m |____/ |___||___||___||__,||___||_|_|\x1b[33m v2.0\n \x1b[34m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97\n \x1b[34m\xe2\x95\x91\x1b[31m[\x1b[37m-\x1b[31m]\x1b[37mAuthor : Zen Ezz \x1b[34m\xe2\x95\x91\n \x1b[34m\xe2\x95\x91\x1b[31m[\x1b[37m-\x1b[31m]\x1b[37mYoutube : Zen s \x1b[34m\xe2\x95\x91\n \x1b[34m\xe2\x95\x91\x1b[31m[\x1b[37m-\x1b[31m]\x1b[37mTools : Deobfuscated Bash Shell \x1b[34m\xe2\x95\x91\n \x1b[34m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d' def main(): try: bx = raw_input(' \x1b[31m[\x1b[37m!\x1b[31m] \x1b[36mInput File Address :\x1b[37m ') ob_ = open(bx).read().replace('eval', 'echo') _res = open('un.sh', 'w') _res.write(ob_) _res.close() reb_ = bx.replace('.sh', '_dec.sh') os.system('sh un.sh > ' + reb_) _vew = open(reb_).read() _edAu = open(reb_, 'w') _edAu.write('#Decrypt By Zen clay\n#https://github.com/zen-clay\n' + _vew) _edAu.close() os.system('rm un.sh') print ' \x1b[31m[\x1b[37m!\x1b[31m] \x1b[36mDone ...! \x1b[37mFile Saved > ' + reb_ main() except IOError: print ' \x1b[31m[\x1b[37m!\x1b[31m] \x1b[36mFile Not Found ' main() except: print ' \x1b[31m[\x1b[37m!\x1b[31m] \x1b[36mExit...... ' if __name__ == '__main__': main()
python
#!/usr/bin/env python3 """ Copyright 2018 Brocade Communications Systems LLC. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may also obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import getpass import getopt import sys import os import atexit import inspect from pyfos import pyfos_auth from pyfos import pyfos_util from pyfos.utils import brcd_cli from pyfos.pyfos_auth_token import auth_token_manager from pyfos import pyfos_rest_util # pylint: disable=W0603 session = None full_usage_infra_short_1 = "<-i IPADDR> <-L LOGIN> <-P PASSWORD>" full_usage_infra_short_2 = "[-f VFID] [-v]" def full_usage(usage, valid_options, sessionless=True): o_str = "" for v_op in valid_options: o_str = o_str + " <--" + v_op + "=" + v_op.upper() + ">" print(os.path.basename(sys.argv[0]) + " " + full_usage_infra_short_1 + o_str + " " + full_usage_infra_short_2) print("") print("Usage:") print("") print(" Infrastructure options:") print("") print(" -i, --ipaddr=IPADDR IP address of FOS switch") print(" -L, --login=LOGIN login name") print(" -P, --password=PASSWORD password") print( " -f, --vfid=VFID VFID to which" " the request is directed to. [OPTIONAL]") print( " -s, --secured=MODE HTTPS mode \"self\" or" " \"CA\". [OPTIONAL]") print(" -v, --verbose verbose mode. [OPTIONAL]") if sessionless: print(" -a, --authtoken AuthToken value string" + " or AuthTokenManager config file. [OPTIONAL]") print(" -z, --nosession Session less Authentication.", " [OPTIONAL]") print(" --nocredential No credential ", "Authentication. 
[OPTIONAL]") print("") usage() def exit_handler(): global session if session is not None: pyfos_auth.logout(session) def exit_register(local_session): global session session = local_session atexit.register(exit_handler) def base_generic_input(argv, usage, valid_options, sessionless): ret_dict = dict() # default value that should be added here ret_dict["secured"] = None ret_dict["verbose"] = 0 ret_dict['utilusage'] = "" try: opts, args = getopt.getopt( argv, "hi:f:s:L:P:avz", [ "activate", "allaccess=", "authtoken=", "acceptEULA", "compare=", "device=", "disable", "displayEULA", "enable", "filename=", "help", "hbaid=", "hostname=", "banner=", "hostport=", "ipaddr=", "login=", "members=", "name=", "password=", "pmembers=", "portid=", "protocol=", "messageid=", "reffcport=", "secured=", "speed=", "stage", "template=", "targetname=", "targetport=", "type=", "usepeer=", "username=", "userpassword=", "verbose", "vfid=", "xlsapply=", "xlscheck=", "json", "nosession", "nocredential", ] ) except getopt.GetoptError as err: print("getopt error", str(err)) full_usage(usage, valid_options, sessionless) sys.exit(2) if len(args) > 0: print("*** Contains invalid options:", args[0]) full_usage(usage, valid_options, sessionless) sys.exit(3) for opt, arg in opts: if opt in ("-h", "--help"): full_usage(usage, valid_options, sessionless) sys.exit() elif opt == "--activate": ret_dict["activate"] = True elif opt == "--allaccess": if not pyfos_util.isInt(arg): print("*** Invalid allacess:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["allaccess"] = int(arg) elif opt == "--acceptEULA": ret_dict["acceptEULA"] = "accept-eula" elif opt in "--compare": ret_dict["compare"] = arg elif opt in "--disable": ret_dict["disable"] = True elif opt in "--displayEULA": ret_dict["displayEULA"] = "display-eula" elif opt in "--device": if not pyfos_util.isWWN(arg): print("*** Invalid device:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["device"] = arg elif opt in "--enable": ret_dict["enable"] = True elif opt in ("-f", "--vfid"): if not pyfos_util.isInt(arg): print("*** Invalid vfid:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["vfid"] = int(arg) elif opt in "--filename": ret_dict["filename"] = arg elif opt in "--hbaid": ret_dict["hbaid"] = arg elif opt in "--hostname": ret_dict["hostname"] = arg elif opt in "--banner": ret_dict["banner"] = arg elif opt in "--hostport": if not pyfos_util.isWWN(arg): print("*** Invalid hostport:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["hostport"] = arg elif opt in ("-i", "--ipaddr"): if not pyfos_util.isIPAddr(arg): print("*** Invalid ipaddr:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["ipaddr"] = arg elif opt in "--json": ret_dict["json"] = True elif opt in ("-L", "--login"): ret_dict["login"] = arg elif opt in "--members": ret_dict["members"] = arg.split(";") elif opt in "--name": ret_dict["name"] = arg elif opt in "--pmembers": ret_dict["pmembers"] = arg.split(";") elif opt in ("-P", "--password"): ret_dict["password"] = arg elif opt in "--portid": ret_dict["portid"] = arg elif opt in "--protocol": ret_dict["protocol"] = arg elif opt in "--messageid": ret_dict["messageid"] = arg elif opt in "--reffcport": if not pyfos_util.isSlotPort(arg): print("*** Invalid reffcport:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["reffcport"] = arg elif opt in ("-s", "--secured"): if arg == "self": ret_dict["secured"] = "self" elif arg == 
"CA": ret_dict["secured"] = "CA" else: print("defaults to CA") ret_dict["secured"] = "CA" elif opt in "--show": ret_dict["show"] = 1 elif opt in "--speed": if not pyfos_util.isInt(arg): print("*** Invalid speed:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["speed"] = int(arg) elif opt in "--stage": ret_dict["stage"] = True elif opt in "--template": ret_dict["template"] = arg elif opt in "--targetname": ret_dict["targetname"] = arg elif opt in "--targetport": if not pyfos_util.isWWN(arg): print("*** Invalid targetport:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["targetport"] = arg elif opt in "--type": ret_dict["type"] = arg elif opt in "--username": ret_dict["username"] = arg elif opt in "--userpassword": ret_dict["userpassword"] = arg elif opt in "--usepeer": if arg not in ('WWN', ''): print("*** Invalid userpeer:", arg) full_usage(usage, valid_options, sessionless) sys.exit(5) ret_dict["usepeer"] = arg elif opt in ("-v", "--verbose"): ret_dict["verbose"] = 1 elif opt in ("-z", "--nosession"): ret_dict["sessionless"] = True elif opt in "--nocredential": ret_dict["nocredential"] = True elif opt in ("-a", "--authtoken"): if len(arg) == 0: ret_dict['authtoken'] = None else: ret_dict['authtoken'] = arg elif opt in "--xlscheck": ret_dict["xlscheck"] = arg elif opt in "--xlsapply": ret_dict["xlsapply"] = arg else: print("unknown", opt) full_usage(usage, valid_options, sessionless) sys.exit(5) if "ipaddr" not in ret_dict: print("Missing IP address input") print("") full_usage(usage, valid_options, sessionless) sys.exit(6) if "login" not in ret_dict.keys(): login = input("Login:") ret_dict["login"] = login if "password" not in ret_dict.keys(): if 'authtoken' not in ret_dict.keys() and\ 'nocredential' not in ret_dict.keys(): password = getpass.getpass() ret_dict["password"] = password if valid_options is not None: # pylint: disable=W0612 for k, v in ret_dict.items(): if k not in ('login', 'password', 'ipaddr', 'secured', 'vfid', 'verbose', 'authtoken', 'sessionless', 'utilusage', 'nocredential'): found = False for valid_option in valid_options: if valid_option == k: found = True break if not found: print("*** Invalid option given:", k) full_usage(usage, valid_options, sessionless) sys.exit(4) return ret_dict def generic_input(argv, cls_usage, filters=None, validate=None, sessionless=True): inputs = dict() if isinstance(cls_usage, str): mydict = brcd_cli.pseudorestcli(cls_usage) if inspect.isclass(cls_usage): custom_cli = brcd_cli.getcustomcli(cls_usage().container) restobject = cls_usage.parse(argv, inputs, filters, custom_cli, validate) if restobject is None: sys.exit(4) else: inputs.update({'utilobject': restobject}) inputs.update({'utilclass': cls_usage}) inputs.update({'utilfilters': filters}) inputs.update({'utilusage': restobject.showusage(filters)}) return inputs elif isinstance(cls_usage, str) and mydict is not None: restobject = pyfos_rest_util.rest_object.pseudodictrestobject(mydict) restobject = restobject.parse_commandline(argv, inputs, filters, None, validate) if restobject is None: sys.exit(4) else: inputs.update({'utilobject': restobject}) inputs.update({'utilclass': "runtime"}) inputs.update({'utilfilters': filters}) inputs.update({'utilusage': restobject.showusage(filters)}) return inputs else: # Check filters can be none as well based on the utils. 
inputs = base_generic_input(argv, cls_usage, filters, sessionless) return inputs def parse(argv, cls_usage, filters=None, validate=None): return generic_input(argv, cls_usage, filters, validate) def getsession(inputs): global session tokenManager = None ishttps = None if 'authtoken' in inputs.keys(): # Always need to use the Default Token Manager config # if user wants to use a different configuration then user # config store should be set as default store using the set # default store option. if inputs['authtoken'] is None or \ auth_token_manager.isvalidconfig(inputs['authtoken']): tokenManager = auth_token_manager(inputs['authtoken']) else: tokenManager = inputs['authtoken'] # tokenManager.show() # Get the password or else None ip = inputs.get("ipaddr", None) user = inputs.get("login", None) password = inputs.get("password", None) sessionless = inputs.get('sessionless', False) nocred = inputs.get('nocredential', False) if 'secured' in inputs.keys(): ishttps = inputs['secured'] # Default DEFAULT_THROTTLE_DELAY 1.1 session = pyfos_auth.login(user, password, ip, ishttps, 1.1, 0, tokenManager, sessionless, nocred) if pyfos_auth.is_failed_login(session): print("login failed because", session.get(pyfos_auth.CREDENTIAL_KEY) [pyfos_auth.LOGIN_ERROR_KEY]) print(inputs['utilusage']) sys.exit(3) exit_register(session) if 'vfid' in inputs: pyfos_auth.vfid_set(session, inputs['vfid']) if 'verbose' in inputs and inputs['verbose'] != 0: pyfos_auth.debug_set(session, 1) inputs['session'] = session return session def clean(inputs): restobject = None filters = None if 'utilobject' in inputs.keys(): restobject = inputs['utilobject'] if 'utilfilters' in inputs.keys(): filters = inputs['utilfilters'] if restobject is not None: restobject.clean(filters) inputs['utilobject'] = restobject def pseudodictrestobject(mydictkey): mydict = brcd_cli.pseudorestcli(mydictkey) if mydict is not None and isinstance(mydict, dict): restobject = pyfos_rest_util.rest_object.pseudodictrestobject(mydict) return restobject return None def defaultclioptions(cls): retdict = dict() if inspect.isclass(cls): retdict.update(cls().displaycustomcli()) elif isinstance(cls, list): for i in range(len(cls)): retdict.update(cls[i]().displaycustomcli()) return retdict
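# Usage sketch (hypothetical stand-alone utility, following the stock pyfos
# util pattern built on the helpers above; the import path for this module is
# assumed to be pyfos.utils.brcd_util):
#
#     import sys
#     from pyfos import pyfos_auth
#     from pyfos.utils import brcd_util
#
#     def usage():
#         print("  Util-specific options: none")
#
#     def main(argv):
#         inputs = brcd_util.parse(argv, usage, [])   # infra options only
#         session = brcd_util.getsession(inputs)
#         print("vfid:", inputs.get("vfid"))
#         pyfos_auth.logout(session)
#
#     if __name__ == "__main__":
#         main(sys.argv[1:])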
python
global register_count register_count = 0 global register_refs register_refs = {} global register_dims register_dims = {} # a data structure such that: # identity unique upon initialization # can be merged with other registers # can can be an array or not: can specify dimension and slots # TODO: should the Register perhaps know its user-given name, to make the final qaasm more readable? # how does that gel with register merging? It'll make sense for a symmetrized merge, but not a block-algebra merge. def Register(): def __init__(self, dim, slots=None): assert int(dim) == dim assert dim > 1 register_count += 1 self.id = register_count register_refs[self.id] = None register_dims[self.id] = (dim,slots) def trace(self): out = self.id while register_refs[out] != None: out = register_refs[out] return out @property def dim(self): return register_dims[self.trace()][0] @property def slots(self): return register_dims[self.trace()][1] def __eq__(self,other): if not isinstance(other,Register): return False return self.trace() == other.trace() def substitute(self,other): assert isinstance(other,Register) assert self.dim == other.dim assert self.slots == other.slots target = other.trace() if target == self.trace(): return key = self.id while register_refs[key] != None: key = register_refs[key] register_refs[key] = target ################################### # Where do the responsibilities of this class end and those of the runtime begin? # Runtime should do: # parsing the ast. # pre-evaluation of expns # distinguish between block expns and value expns # QAASM should do: # circuit synthesis # managing the scope # scope ops: # key is removed because it was consumed # value is swapped out because of a relabeling # block is symmetrized: inputs must equal outputs (need to know scope before and after) # two blocks are matched: inputs=inputs, outputs=outputs (need to know scope before and after) # The whole {"x":1,...},["x"] system worked pretty well for that. # QAASM data structure keeps track of both reg objects and their names in the scope. # Blq objects just keep track of the reg objects. class QAASM(): def __init__(self,parent=None): self.scale = 1 self.instrs = [] # Key idea: I can lazily swap/relabel registers by manipulating the self.outputs dictionary. # Only when I need to symmetrize or align do I need to actually implement a permutation using qaasm. self.inputs = {} self.outputs = {} # Both None if unshaped. Both are lists if shaped. Check via self.shaped. # Needs to be unshaped in order to add instructions. # Needs to be shaped in order to do algebra. self.lshape = None # an ordered subset of self.inputs.keys(), those not in ordering are the consumed registers self.rshape = None # an ordering on self.outputs.keys() # There seems to be a difference in the needs of the methods: # if, repeat, increment, scalar, init, assign # all only really care about IF a variable is in scope, not about scope order of target block # add, multiply, adjoint, tensorproduct # do care about scope order # When is scope order determined? # types of blocks # blocks in if and repeat statements: dont care about scope order at all # ket() expn, consume expn, block cast, Blq's: can just make scope order correct upon init assert isinstance(parent,QAASM) self.parent = parent # Expressions can refer to keys in parent scope. Only if a register is declared/discarded/zero'd # or permuted in scope must ot be an output. # Can make something an explicit output by promoting it. # Should promotion make it a consume or an input? 
@property def shaped(self): if self.lshape is None: assert self.rshape is None return False assert isinstance(self.lshape,list) assert isinstance(self.rshape,list) return True # get key from parent scope def parentGet(self,key): if self.parent is None: raise KeyError() if key in self.parent.outputs: return self.parent.outputs[key] return self.parent[key] # check if parent has key def parentHas(self,key): if self.parent is None: return False if key in self.parent.outputs: return True return key in self.parent def promote(self, name): assert self.lshape is None and self.rshape is None assert self.parentHas(name) # check that 'name' was never in scope assert name not in self.inputs for instr in self.instrs: if instr["kind"] == "nqaasm_declare": assert instr["name"] != name assert name not in self.outputs prvreg = self.parentGet(name) reg = Register(prvreg.dim, slots=prvreg.slots) self.inputs[name] = reg self.outputs[name] = reg # named-qaasm aka nqaasm # its unclear to me that this is really that different # uses string register names rather than reg objects # except for declare which includes both. Regobj can be an array. # {"kind":"nqaasm_declare", "reg":<regobj>, "name":<name>} # {"kind":"nqaasm_discard", "name":<name>} # {"kind":"nqaasm_zero", "name":<name>} # {"kind":"nqaasm_increment", "name":<name>, "expn":<expn>} # {"kind":"nqaasm_unitary", "name":<name>, "mat":<matrix>} # {"kind":"nqaasm_phase", "value":<complexnr>} # {"kind":"nqaasm_swap", "name1":<name>, "name2":<name>} # {"kind":"nqaasm_if", "name":<register>, "instructions":[<instrs>] } def declare(self, name, dim, slots=None): assert self.lshape is None and self.rshape is None assert name not in self.outputs reg = Register(dim,slots=slots) self.instrs.append({"kind":"nqaasm_declare", "name":name, "reg":reg}) def discard(self, name): assert self.lshape is None and self.rshape is None if name not in self.outputs: self.promote(name) assert name in self.outputs self.instrs.append({"kind":"qaasm_discard", "name":name}) del self.outputs[name] # zero # Boring: # unitary # phase def increment(self, reg, expn): # if reg is not in scope, it has to be in parent scope, and needs to be promoted. # assert expn's regs are either in parent scope or in current scope and have the right shape # perhaps all the array decompiling does is make all indexes integers rather than variables def process_expn(expn): if expn["kind"] == "register_expn": if expn["key"] is None: pass if isinstance(expn["key"],int): pass if isinstance(expn["key"],str): pass # recurse pass process_expn(expn) pass def symmetrize(self): # assert qaasm.input and qaasm.scope need to have the same set of keys for key in qaasm.input.keys(): if qaasm.input[key] == qaasm.scope[key]: continue # check if there is any point in time when both qaasm.input[key] and qaasm.output[key] # are in scope. If yes, need to do a swap. pass def if(self, reg, qaasm): # reg is either in scope or in parent scope. # assert qaasm.lshape == qaasm.rshape == [] assert qaasm.parent = self # qaasm.inputs need to be in self.scope. Promote if needed. qaasm.symmetrize() for key in qaasm.input.keys(): if key not in self.scope: self.promote(key) self.scope[key].substitute(qaasm.input[key]) def repeat(self, qaasm, count): # same as if, basically. pass ################### def scalar_instr(self, qaasm): # how to tell the runtime how the scope changed? 
# qaasm.rshape == qaasm.lshape == [] assert qaasm.parent = self # promote any qaasm.inputs if needed, and wire them up for key in qaasm.input.keys(): if key not in self.scope: self.promote(key) self.scope[key].substitute(qaasm.input[key]) # delete any consumed variables for key in self.scope.keys(): if key in qaasm.input and key not in qaasm.scope: del qaasm.scope[key] assert len(qaasm.scope.keys()) == 0 self.scale *= qaasm.scale for instr in qaasm.instrs: self.instrs.append(instr) def init_instr(self, targets, qaasm): assert qaasm.parent = self assert len(qaasm.rshape) == 0 for key in qaasm.scope: assert key in qaasm.lshape # is this always true anyway? # for key in qaasm.lshape: assert key in qaasm.scope # this should be true anyway assert len(targets) = len(qaasm.lshape) # promote any qaasm.inputs if needed, and wire them up for key in qaasm.input.keys(): if key not in self.scope: self.promote(key) self.scope[key].substitute(qaasm.input[key]) # delete any consumed variables for key in self.scope.keys(): if key in qaasm.input and key not in qaasm.scope: del qaasm.scope[key] for i in range(len(targets)): target = targets[i] key = qaasm.lshape[i] reg = qaasm.scope[key] assert pass def assign_instr(self, reg, expn): pass def assign_undo_instr(self, reg, expn, undo): pass def assign_array_instr(self, key, regs, expn): pass def assign_array_undo_instr(self, key, regs, expn, undo): pass ############################ # The difference between nqaasm and regular qaasm: # - nqaasm knows what names the user has given to the variables. # - nqaasm can implement user-level permutations and relabelings without actually generating instructions # - nqaasm can't really be obtained from matrix literals or create expressions. (this is a problem!) # - If nqaasm is serialized all the labeling information is lost. It can't be deserialized. # - Need support for temporary names in nqaasm, which is the very problem registers are supposed to solve. # "nqaasm_unnamed_declare"? # have register objects hold on to their user-level names? That merges nqaasm with qaasm, but gets rid of permutation facility. # if swap is a qaasm instruction, then can't the swap overhead be reduced in post? # Idea: instructions are inherently named. algebra is inherently unnamed # consume, create, cast are sort-of the boundary between named and unnamed. # three types: referenced / scoped / unnamed # blocks as inputs to if statements can't have any unnamed registers. # algebraic blocks can't have any scoped registers as output. # user-level blocks can't have any scoped registers and referenced registers # what registers are what is determined by their presence in the bookkeeping dictionaries # not by qaasm. Qaasm only knows about registers. # should qaasm support arrays, just with fixed indices? ################### # Proposal # QAASM blocks are unshaped, and instructions can be appended to them # Blq blocks are shaped and instruction immutable - can only be manipulated via block algebra # problems with this proposal: # Blq objects still need to be able to refer to things in scope, and are thus still nqaasm. Different from userspace blqs. # # Three types blocks: # Unshaped QAASM. Basically a bag of instructions. Can add instructions, can't do algebra. # Shaped QAASM. Knows how to refer to parent scope. Only mutable through algebra. # Userspace Blocks. Doesn't know anything about scope. # Question: why do userspace blocks and shaped qaasm need to be different? # It still seems userspace blocks are just a restricted version of shaped qaasm. 
# Especially if I need to convert back and forth between the two in order to do anything. # Similarities and differences: # They both use reg objects. # Named qaasm vs regular qaasm. Named qaasm is optimized for a named scope. # The whole register/scope system is still somewhat unprincipled. # Userspace blocks don't know about parent scope, or scope at all. # Open: can userspace blocks permute through relabeling? # should userspace blocks use reg objects? Yes. # if no: need to interconvert a lot # if yes: lots of spare reg objects floating around. # Motivation for yes: blocks exist to be manipulated. are usually not static. # no such things as arrays in userspace blocks # userspace blocks can't refer to things in scope, shaped QAASM can # userspace blocks can't consume, shaped QAASM can # Choice: # userspace rshape,lshape are [<reg>,<reg>] # userspace block shape should match declaration order. # declare x: 2 # declare y: 3 # -> should have lshape [2,3] # Userspace block: # rshape = [<reg>,<reg>], lshape is by declaration order. # I believe this prevents swapping by relabeling. Is that what I want? # If userspace blocks have swapping by relabeling, then permutations automatically cancel. # example: # rshape = [<reg1>,<reg2>] # lshape = [<reg1>,<reg2>] # declare <reg3> # lshape = [<reg1>,<reg2>,<reg3>] # <reg3> += <reg1> # <reg1> -= <reg3> # zero <reg1> # lshape = [<reg2>,<reg3>] # Question: make swapping a primitive? # yes, can do this via algebra, but is more inefficient. # Helps give hints to any future qaasm compilers. # these should all return shaped QAASM blocks def block_create(parent, expn, dim): pass def block_consume(parent, name): pass def block_cast(parent, name): pass def block_userspace(parent, blq): pass ############################### def block_add(*blocks): scales = [b.scale for b in blocks] out = QAASM() out.scale = sum(scales) col1 = [(s/out.scale)**(1/2) for s in scales] mat = [] # make matrix with col1 as the first column # substitution business def block_mul(b1,b2): pass def block_tensor(b1,b2): pass def block_adjoint(b1,b2): pass
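# Self-contained sketch of the Register bookkeeping drafted above: it keeps
# the trace()/substitute() behaviour (union-find style merging of registers)
# but wires the module-level tables up explicitly so the snippet runs as-is.
# Names with a leading underscore and the RegisterSketch class itself are
# illustrative stand-ins, not part of the draft API.
_register_count = 0
_register_refs = {}   # id -> id of the register it was merged into, or None
_register_dims = {}   # id -> (dim, slots)


class RegisterSketch:
    def __init__(self, dim, slots=None):
        global _register_count
        assert int(dim) == dim and dim > 1
        _register_count += 1
        self.id = _register_count
        _register_refs[self.id] = None
        _register_dims[self.id] = (dim, slots)

    def trace(self):
        # Follow merge links to the representative register.
        out = self.id
        while _register_refs[out] is not None:
            out = _register_refs[out]
        return out

    @property
    def dim(self):
        return _register_dims[self.trace()][0]

    @property
    def slots(self):
        return _register_dims[self.trace()][1]

    def __eq__(self, other):
        return isinstance(other, RegisterSketch) and self.trace() == other.trace()

    def substitute(self, other):
        # Merge self into other: afterwards both trace to the same register.
        assert isinstance(other, RegisterSketch)
        assert self.dim == other.dim and self.slots == other.slots
        target = other.trace()
        key = self.trace()
        if key != target:
            _register_refs[key] = target


if __name__ == "__main__":
    a, b = RegisterSketch(2), RegisterSketch(2)
    print(a == b)    # False: distinct registers
    a.substitute(b)
    print(a == b)    # True: they now trace to the same representative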
python
import numpy as np from scipy import integrate, interpolate import healpy as hp import subprocess import TheoryCL from .. import utils from .. import bessel class SphericalBesselISW(TheoryCL.CosmoLinearGrowth): """Class for computing the ISW using spherical Bessel Transforms from maps of the density contrast given in redshift slices. """ def __init__(self): """Initialises the class. Parameters ---------- CosmoLinearGrowth : class Parent class for calculating Cosmological linear growth functions. """ TheoryCL.CosmoLinearGrowth.__init__(self) self.Tcmb = 2.7255 self.C = 3e8 self.temp_path = None self.sbt_zmin = None self.sbt_zmax = None self.sbt_zedge_min = None self.sbt_zedge_max = None self.slice_in_range = None self.sbt_rmin = None self.sbt_rmax = None self.sbt_kmin = None self.sbt_kmax = None self.sbt_lmax = None self.sbt_nmax = None self.sbt_redge_min = None self.sbt_redge_max = None self.uselightcone = None self.temp_path = None self.boundary_conditions = None self.sim_dens = None def setup(self, zmin, zmax, zedge_min, zedge_max, kmin=None, kmax=0.1, lmax=None, nmax=None, uselightcone=True, temp_path='temp/', boundary_conditions='derivative'): """Finds the slices that are required to compute the SBT coefficients from. Parameters ---------- zmin : float Minimum redshift for spherical Bessel transform. zmax : float Maximum redshift for spherical Bessel transform. zedge_min : array Minimum redshift edge for each slice. zedge_max : array Maximum redshift edge for each slice. kmin : float Minium Fourier mode to consider. kmax : float Maximum Fourier mode to consider. lmax : int Maximum l mode to compute to, if None will be computed based on kmax. nmax : int Maximum n mode to comput to, if None will be computed based on kmax. uselightcone : bool True if density contrast maps are given as a lightcone and not all at redshift 0. boundary_conditions : str - normal : boundaries where spherical bessel function is zero. - derivative : boundaries where the derivative of the spherical Bessel function is zero. """ if zedge_min.min() > zmin: print('zmin given,', zmin, 'is smaller than the zmin of the redshift slices. Converting zmin to zmin_edges.zmin().') self.sbt_zmin = zedge_min.min() else: self.sbt_zmin = zmin if zedge_max.max() < zmax: print('zmax given,', zmax, 'is larger than the zmax of the redshift slices. 
Converting zmax to zmax_edges.zmax().') self.sbt_zmax = zedge_max.max() else: self.sbt_zmax = zmax self.sbt_zedge_min = zedge_min self.sbt_zedge_max = zedge_max self.slice_in_range = np.where((self.sbt_zedge_min <= self.sbt_zmax))[0] self.sbt_rmin = TheoryCL.growth.get_r(self.sbt_zmin, self.omega_m, self.omega_l, self.omega_r) self.sbt_rmax = TheoryCL.growth.get_r(self.sbt_zmax, self.omega_m, self.omega_l, self.omega_r) self.sbt_kmin = kmin self.sbt_kmax = kmax if lmax is None: self.sbt_lmax = int(self.sbt_rmax*self.sbt_kmax) + 1 else: self.sbt_lmax = lmax if nmax is None: self.sbt_nmax = int(self.sbt_rmax*self.sbt_kmax/np.pi) + 1 else: self.sbt_nmax = nmax self.sbt_redge_min = TheoryCL.growth.get_r(self.sbt_zedge_min, self.omega_m, self.omega_l, self.omega_r) self.sbt_redge_max = TheoryCL.growth.get_r(self.sbt_zedge_max, self.omega_m, self.omega_l, self.omega_r) self.uselightcone = uselightcone self.temp_path = temp_path utils.create_folder(self.temp_path) if boundary_conditions == 'normal' or boundary_conditions == 'derivative': self.boundary_conditions = boundary_conditions else: print("boundary_conditions can only be 'normal' or 'derivative', not", boundary_conditions) def slice2alm(self, map_slice, index): """Given a density contrast map and its corresponding index (for its zedges minimum and maximum) slice2alm will convert the map to its spherical harmonics and save the files. Parameters ---------- map_slice : array Healpix density contrast map. index : int Index of the slice for its zedges. """ if index in self.slice_in_range: map_ = map_slice wl = hp.sphtfunc.pixwin(hp.get_nside(map_), lmax=self.sbt_lmax) alm = hp.map2alm(map_, lmax=self.sbt_lmax, verbose=False) alm = hp.almxfl(alm, 1./wl) condition = np.where(self.slice_in_range == index)[0] np.savetxt(self.temp_path+'map_alm_'+str(condition[0])+'.txt', np.dstack((alm.real, alm.imag))[0]) else: print('Slice not in zmin and zmax range.') def alm2sbt(self): """Converts spherical harmonic coefficients in redshift slices to spherical Bessel coefficients. Stored as delta_lmn in units of (Mpc/h)^(1.5). """ l = np.arange(self.sbt_lmax+1)[2:] n = np.arange(self.sbt_nmax+1)[1:] l_grid, n_grid = np.meshgrid(l, n, indexing='ij') self.l_grid = l_grid self.n_grid = n_grid qln_grid = np.zeros(np.shape(self.l_grid)) print('Finding zeros for Bessel function up to n = '+str(self.sbt_nmax)) for i in range(0, len(self.l_grid)): l_val = self.l_grid[i][0] if i < 10: if self.boundary_conditions == 'normal': qln_grid[i] = bessel.get_qln(l_val, self.sbt_nmax, nstop=100) elif self.boundary_conditions == 'derivative': qln_grid[i] = bessel.get_der_qln(l_val, self.sbt_nmax, nstop=100) else: if self.boundary_conditions == 'normal': qln_grid[i] = bessel.get_qln(l_val, self.sbt_nmax, nstop=100, zerolminus1=qln_grid[i-1], zerolminus2=qln_grid[i-2]) elif self.boundary_conditions == 'derivative': qln_grid[i] = bessel.get_der_qln(l_val, self.sbt_nmax, nstop=100, zerolminus1=qln_grid[i-1], zerolminus2=qln_grid[i-2]) TheoryCL.utils.progress_bar(i, len(self.l_grid)) self.kln_grid = qln_grid/self.sbt_rmax print('Constructing l and n value grid') if self.boundary_conditions == 'normal': self.Nln_grid = ((self.sbt_rmax**3.)/2.)*bessel.get_jl(self.kln_grid*self.sbt_rmax, self.l_grid+1)**2. elif self.boundary_conditions == 'derivative': self.Nln_grid = ((self.sbt_rmax**3.)/2.)*(1. - self.l_grid*(self.l_grid+1.)/((self.kln_grid*self.sbt_rmax)**2.)) self.Nln_grid *= bessel.get_jl(self.kln_grid*self.sbt_rmax, self.l_grid)**2. 
if self.sbt_kmin is None and self.sbt_kmax is None: l_grid_masked = self.l_grid n_grid_masked = self.n_grid kln_grid_masked = self.kln_grid Nln_grid_masked = self.Nln_grid else: l_grid_masked = [] n_grid_masked = [] kln_grid_masked = [] Nln_grid_masked = [] for i in range(0, len(self.l_grid)): if self.sbt_kmin is None and self.sbt_kmax is None: condition = np.arange(len(self.kln_grid[i])) elif self.sbt_kmin is None: condition = np.where(self.kln_grid[i] <= self.sbt_kmax)[0] elif self.sbt_kmax is None: condition = np.where(self.kln_grid[i] >= self.sbt_kmin)[0] else: condition = np.where((self.kln_grid[i] >= self.sbt_kmin) & (self.kln_grid[i] <= self.sbt_kmax))[0] if len(condition) != 0: l_grid_masked.append(self.l_grid[i, condition]) n_grid_masked.append(self.n_grid[i, condition]) kln_grid_masked.append(self.kln_grid[i, condition]) Nln_grid_masked.append(self.Nln_grid[i, condition]) l_grid_masked = np.array(l_grid_masked, dtype=object) n_grid_masked = np.array(n_grid_masked, dtype=object) kln_grid_masked = np.array(kln_grid_masked, dtype=object) Nln_grid_masked = np.array(Nln_grid_masked, dtype=object) self.l_grid_masked = l_grid_masked self.n_grid_masked = n_grid_masked self.kln_grid_masked = kln_grid_masked self.Nln_grid_masked = Nln_grid_masked # New part print('Pre-compute spherical Bessel integrals') _interpolate_jl_int = [] for i in range(0, len(self.l_grid_masked)): _xmin = 0. _xmax = (self.kln_grid_masked[i]*self.sbt_rmax).max() + 1. _x = np.linspace(_xmin, _xmax, 10000) _jl_int = np.zeros(len(_x)) _jl_int[1:] = integrate.cumtrapz((_x**2.)*bessel.get_jl(_x, l_grid[i][0]), _x) _interpolate_jl_int.append(interpolate.interp1d(_x, _jl_int, kind='cubic', bounds_error=False, fill_value=0.)) TheoryCL.utils.progress_bar(i, len(self.l_grid_masked)) print('Computing spherical Bessel Transform from spherical harmonics') for which_slice in range(0, len(self.slice_in_range)): index = self.slice_in_range[which_slice] r_eff = (3./4.)*(self.sbt_redge_max[index]**4. - self.sbt_redge_min[index]**4.)/(self.sbt_redge_max[index]**3. - self.sbt_redge_min[index]**3.) 
Dz_eff = self.get_Dr(r_eff) Sln = np.zeros(np.shape(self.kln_grid)) for i in range(0, len(l_grid)): if self.sbt_kmin is None and self.sbt_kmax is None: condition = np.arange(len(self.kln_grid[i])) elif self.sbt_kmin is None: condition = np.where(self.kln_grid[i] <= self.sbt_kmax)[0] elif self.sbt_kmax is None: condition = np.where(self.kln_grid[i] >= self.sbt_kmin)[0] else: condition = np.where((self.kln_grid[i] >= self.sbt_kmin) & (self.kln_grid[i] <= self.sbt_kmax))[0] if len(condition) != 0: Sln[i, condition] += np.array([(1./(np.sqrt(self.Nln_grid_masked[i][j])*self.kln_grid_masked[i][j]**3.))*(_interpolate_jl_int[i](self.kln_grid_masked[i][j]*self.sbt_redge_max[index]) - _interpolate_jl_int[i](self.kln_grid_masked[i][j]*self.sbt_redge_min[index])) for j in range(0, len(self.l_grid_masked[i]))]) data = np.loadtxt(self.temp_path + 'map_alm_'+str(which_slice)+'.txt', unpack=True) delta_lm_real = data[0] delta_lm_imag = data[1] delta_lm = delta_lm_real + 1j*delta_lm_imag if self.uselightcone == True: delta_lm /= Dz_eff if which_slice == 0: l_map, m_map = hp.Alm.getlm(hp.Alm.getlmax(len(delta_lm))) delta_lmn = np.zeros((self.sbt_nmax, len(delta_lm)), dtype='complex') conditions1 = [] conditions2 = [] for i in range(0, len(Sln[0])): if self.sbt_kmin is None and self.sbt_kmax is None: condition = np.arange(len(self.kln_grid[:, i])) elif self.sbt_kmin is None: condition = np.where(self.kln_grid[:, i] <= self.sbt_kmax)[0] elif self.sbt_kmax is None: condition = np.where(self.kln_grid[:, i] >= self.sbt_kmin)[0] else: condition = np.where((self.kln_grid[:, i] >= self.sbt_kmin) & (self.kln_grid[:, i] <= self.sbt_kmax))[0] if len(condition) == 0: lmax = 0 else: lmax = self.l_grid[condition, i].max() condition1 = np.where(self.l_grid[:, i] <= lmax)[0] condition2 = np.where(l_map <= lmax)[0] conditions1.append(condition1) conditions2.append(condition2) conditions1 = np.array(conditions1, dtype=object) conditions2 = np.array(conditions2, dtype=object) for i in range(0, len(Sln[0])): _delta_lmn = np.zeros(len(delta_lm), dtype='complex') _delta_lmn[conditions2[i].astype('int')] = hp.almxfl(delta_lm[conditions2[i].astype('int')], np.concatenate([np.zeros(2), Sln[conditions1[i].astype('int'), i]])) delta_lmn[i] += _delta_lmn TheoryCL.utils.progress_bar(which_slice, len(self.slice_in_range), indexing=True, num_refresh=len(self.slice_in_range)) self.delta_lmn = delta_lmn def save_sbt(self, prefix=None): """Saves spherical Bessel transform coefficients. Parameters ---------- prefix : str Prefix for file containing spherical Bessel transform. """ if prefix is None: fname = 'sbt_zmin_'+str(self.sbt_zmin)+'_zmax_'+str(self.sbt_zmax)+'_lmax_'+str(self.sbt_lmax)+'_nmax_'+str(self.sbt_nmax) else: fname = prefix + '_sbt_zmin_'+str(self.sbt_zmin)+'_zmax_'+str(self.sbt_zmax)+'_lmax_'+str(self.sbt_lmax)+'_nmax_'+str(self.sbt_nmax) if self.boundary_conditions == 'normal': fname += '_normal.npz' elif self.boundary_conditions == 'derivative': fname += '_derivative.npz' np.savez(fname, kln_grid=self.kln_grid, kln_grid_masked=self.kln_grid_masked, l_grid_masked=self.l_grid_masked, Nln_grid_masked=self.Nln_grid_masked, delta_lmn=self.delta_lmn) def sbt2isw_alm(self, zmin=None, zmax=None): """Returns the ISW spherical harmonics between zmin and zmax from the computed spherical Bessel Transform. Parameters ---------- zmin : float Minimum redshift for ISW computation. zmax : float Maximum redshift for ISW computation. 
""" if zmin is None: zmin = self.sbt_zmin if zmax is None: zmax = self.sbt_zmax r = np.linspace(self.get_rz(zmin), self.get_rz(zmax), 1000) Dz = self.get_Dr(r) Hz = self.get_Hr(r) fz = self.get_fr(r) DHF = Dz*Hz*(1.-fz) Iln = np.zeros(np.shape(self.kln_grid)) for i in range(0, len(self.kln_grid)): if self.sbt_kmin is None and self.sbt_kmax is None: condition = np.arange(len(self.kln_grid[i])) elif self.sbt_kmin is None: condition = np.where(self.kln_grid[i] <= self.sbt_kmax)[0] elif self.sbt_kmax is None: condition = np.where(self.kln_grid[i] >= self.sbt_kmin)[0] else: condition = np.where((self.kln_grid[i] >= self.sbt_kmin) & (self.kln_grid[i] <= self.sbt_kmax))[0] if len(condition) != 0: Iln[i, condition] += np.array([(1./np.sqrt(self.Nln_grid_masked[i][j]))*integrate.simps(DHF*bessel.get_jl(self.kln_grid_masked[i][j]*r, self.l_grid_masked[i][j]), r) for j in range(0, len(self.l_grid_masked[i]))]) TheoryCL.utils.progress_bar(i, len(self.kln_grid)) alm_isw = np.zeros(len(self.delta_lmn[0]), dtype='complex') for i in range(0, len(self.delta_lmn)): alm_isw += hp.almxfl(self.delta_lmn[i], np.concatenate([np.zeros(2), Iln[:, i]/(self.kln_grid[:, i]**2.)])) alm_isw *= 3.*self.omega_m*((100.*self.h0)**2.)/(self.C**3.) alm_isw *= 1e9/(self.h0**3.) return alm_isw def sbt2isw_map(self, zmin, zmax, nside=256): """Returns a healpix map of the ISW between zmin and zmax computed from the spherical Bessel Transform. Parameters ---------- zmin : float Minimum redshift for ISW computation. zmax : float Maximum redshift for ISW computation. nside : int Nside for healpix map. """ alm_isw = self.sbt2isw_alm(zmin, zmax) map_isw = hp.alm2map(alm_isw, nside)*self.Tcmb return map_isw def clean_temp(self): """Removes temporary spherical harmonic files.""" if self.slice_in_range is not None: for i in range(0, len(self.slice_in_range)): subprocess.call('rm -r ' + self.temp_path, shell=True)
python
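# A hedged usage sketch for SphericalBesselISW above. It assumes the cosmology
# (omega_m, omega_l, omega_r, h0) and the growth/distance tables of the
# TheoryCL.CosmoLinearGrowth parent are already configured, and the slice map
# file names below are placeholders.
import numpy as np
import healpy as hp

zedges = np.linspace(0.0, 1.4, 15)          # hypothetical redshift slicing
isw = SphericalBesselISW()
isw.setup(zmin=0.0, zmax=1.4, zedge_min=zedges[:-1], zedge_max=zedges[1:],
          kmax=0.1, boundary_conditions='derivative')

for i in range(len(zedges) - 1):
    isw.slice2alm(hp.read_map('delta_slice_%02d.fits' % i), i)

isw.alm2sbt()
isw_map = isw.sbt2isw_map(0.0, 1.4, nside=256)
isw.clean_temp()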
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: geoip.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='geoip.proto', package='', syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x0bgeoip.proto\"\x1a\n\x0cGeoIpRequest\x12\n\n\x02ip\x18\x01 \x01(\t\",\n\rGeoIpResponse\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0f\n\x07\x63ountry\x18\x02 \x01(\t2=\n\x0cGeoIpService\x12-\n\ngetCountry\x12\r.GeoIpRequest\x1a\x0e.GeoIpResponse\"\x00\x62\x06proto3' ) _GEOIPREQUEST = _descriptor.Descriptor( name='GeoIpRequest', full_name='GeoIpRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='ip', full_name='GeoIpRequest.ip', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=15, serialized_end=41, ) _GEOIPRESPONSE = _descriptor.Descriptor( name='GeoIpResponse', full_name='GeoIpResponse', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='ip', full_name='GeoIpResponse.ip', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='country', full_name='GeoIpResponse.country', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=43, serialized_end=87, ) DESCRIPTOR.message_types_by_name['GeoIpRequest'] = _GEOIPREQUEST DESCRIPTOR.message_types_by_name['GeoIpResponse'] = _GEOIPRESPONSE _sym_db.RegisterFileDescriptor(DESCRIPTOR) GeoIpRequest = _reflection.GeneratedProtocolMessageType('GeoIpRequest', (_message.Message,), { 'DESCRIPTOR' : _GEOIPREQUEST, '__module__' : 'geoip_pb2' # @@protoc_insertion_point(class_scope:GeoIpRequest) }) _sym_db.RegisterMessage(GeoIpRequest) GeoIpResponse = _reflection.GeneratedProtocolMessageType('GeoIpResponse', (_message.Message,), { 'DESCRIPTOR' : _GEOIPRESPONSE, '__module__' : 'geoip_pb2' # @@protoc_insertion_point(class_scope:GeoIpResponse) }) _sym_db.RegisterMessage(GeoIpResponse) _GEOIPSERVICE = _descriptor.ServiceDescriptor( name='GeoIpService', full_name='GeoIpService', file=DESCRIPTOR, index=0, 
serialized_options=None, create_key=_descriptor._internal_create_key, serialized_start=89, serialized_end=150, methods=[ _descriptor.MethodDescriptor( name='getCountry', full_name='GeoIpService.getCountry', index=0, containing_service=None, input_type=_GEOIPREQUEST, output_type=_GEOIPRESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, ), ]) _sym_db.RegisterServiceDescriptor(_GEOIPSERVICE) DESCRIPTOR.services_by_name['GeoIpService'] = _GEOIPSERVICE # @@protoc_insertion_point(module_scope)
python
import requests import json import clipboard import time def main(): temp = None try: import tkinter temp = 1 except: temp = 0 if temp == 0: print("No Valid Tkinter installation found. Either tkinter is not installed or tkinter is not supported on this platform.") if temp == 1: try: from tkinter import Tk from tkinter.filedialog import askopenfilename Tk().withdraw() filename = askopenfilename() myfiles = {'file': open(filename ,'rb')} url = "https://file.io" postrequest = requests.post(url, files = myfiles) jsonloadedresponse = json.loads(postrequest.text) downloadlink = jsonloadedresponse["link"] print(downloadlink) clipboard.copy(downloadlink) time.sleep(1) except: print("Error") if __name__ == "__main__": main()
python
from typing import * import numpy as np from terminaltables import AsciiTable __all__ = ['format_labels_grid'] def format_labels_grid(labels: Sequence[str], n_cols: Optional[int] = None) -> str: labels = list(labels) if not labels: raise ValueError(f'`labels` must not be empty.') if n_cols is None: n_cols = int(np.ceil(np.sqrt(len(labels)))) n_rows = (len(labels) + n_cols - 1) // n_cols tbl = [] for i in range(n_rows): row = [] for j in range(n_cols): idx = i * n_cols + j if idx < len(labels): row.append(labels[idx]) else: row.append('') tbl.append(row) tbl = AsciiTable(tbl) tbl.inner_heading_row_border = 0 return tbl.table
python
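# A quick usage sketch for format_labels_grid above (hypothetical labels):
# with five labels it builds a 3-column grid, pads the last row with an empty
# cell, and returns the rendered ASCII table as a string.
print(format_labels_grid(['cat', 'dog', 'bird', 'fish', 'frog']))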
from .driver import Driver from .mindriver import MinDriver from .timedriver import TimeDriver from .hysteresisdriver import HysteresisDriver
python
#Biblioteca para gerar itens aleatórios import random #Função para achar o menos valor de um vetor desconsiderando o primeiro item que é o nome do produto def menor_valor(v): menor = v[1] loja_indice = 0 for i in range(len(v)-1): if menor > v[i+1]: menor = v[i+1] loja_indice = i menor_loja = [loja_indice, menor] return menor_loja l = int(input()) p = int(input()) #Lista de lojas lojas = [] #Lista de Produtos produtos = [] #Ínicio e final do intervalo para gerar um número aleatório produtos_limites = [] #Lista com o produtos e os preçcs das lojas [[nome prod 1, preço loja 1, preço loja 2, ...], [nome prod 2, preço loja 1, preço loja 2, ...]] produtos_precos = [] #Matriz geral com loja e os preços [[loja 1, preço prod 1, preço prod 2, ...],[loja 1, preço prod 1, preço prod 2,...]] lojas_geral = [] #Variável usada para calculo do valor total gasto total = 0 #Gera a lista de lojas for k in range(l): lojas.append(input()) #Gera a lista de produtos e a de produtos com os preços for i in range(p): produto_precos = [] texto = input().split() produto_precos.append(texto[0]) produtos.append(texto[0]) x = round(random.uniform(int(texto[1]),int(texto[2])),2) y = round(random.uniform(int(texto[1]),int(texto[2])),2) produto_precos.append(x) produto_precos.append(y) produtos_precos.append(produto_precos) #Gera a lista com a loja e os preços for n in range(len(lojas)): loja_produtos_precos = [lojas[n]] for m in range(p): loja_produtos_precos.append(produtos_precos[m][n+1]) lojas_geral.append(loja_produtos_precos) print("--------------------------") print("Resultado da pesquisa:") #O :15s fixa o tamanho usado como 15 caracteres independente de quantos a palavra tenha, isso faz com que a matriz seja impressa com os mesmos espaços #O end=" " faz com que o próximo item printado seja colocado ao lado do item anterios ao invés de quebrar a linha print('{:15s}'.format(""), end=" ") for produto in produtos: print('{:15s}'.format(produto), end=" ") #Quebra de linha print() for loja in lojas_geral: for i in range(len(loja)): print('{:15s}'.format(str(loja[i])), end=" ") print() print() print("Menores preços: ") for g in produtos_precos: x = menor_valor(g) total = total + x[1] print(g[0], " ", lojas[x[0]]) print() print("Valor total: ") print("R$ ",round(total, 2)) print("--------------------------")
python
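# Quick illustration of menor_valor above (hypothetical prices): the first
# element is the product name, the remaining entries are one price per store,
# and the function returns [cheapest_store_index, lowest_price]:
#     menor_valor(["arroz", 3.49, 2.99])  ->  [1, 2.99]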
import cv2 import numpy as np def resize(filename,width,height): image = cv2.imread(filename) cv2.imshow('Original image',image) cv2.waitKey(0) org_height , org_width = image.shape[0:2] print("width: ",org_width) print("height: ",org_height) if org_width >= org_height: new_image = cv2.resize(image,(width,height)) else: new_image = cv2.resize(image,(height,width)) return filename,new_image filename,new_image = resize('bird.jpg',600,400) def sharpen(image): kernel = np.array([[0,-1,0],[-1,5,-1],[0,-1,0]]) new_img = cv2.filter2D(image,-1,kernel) cv2.imshow('sharpened_img',new_img) cv2.waitKey() return new_img final_img = sharpen(new_image)
python
try: import simplejson as json except ImportError: import json from .base_file import BaseFilePlugin from ..xnodes import create_xnode, XNode, XDict, XFileError class PluginJson(BaseFilePlugin): def def_extensions(self) -> set: return {'json'} def load(self, content) -> XNode: #elf.full_name) if content.strip() == '': result = XDict(_file=self.file) else: try: result = create_xnode(None, json.loads(content), _file=self.file) except json.JSONDecodeError as ex: result = XFileError(name=ex, _file=self.file) return result
python
import datetime

from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from rest_framework import viewsets, generics, status
from rest_framework.response import Response
from . import models
from . import serializers
from rest_framework.permissions import BasePermission, IsAuthenticated

# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))


class ResendConfirmView(generics.GenericAPIView):
    serializer_class = serializers.ResendConfirmSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response({'detail': "Email confirmation sent"})


class ReviewViewSet(viewsets.ModelViewSet):
    queryset = models.Review.objects.all()
    serializer_class = serializers.ReviewSerializer


class ProfileViewSet(viewsets.ModelViewSet):
    class HisOwnProfile(BasePermission):
        def has_object_permission(self, request, view, obj):
            return obj.is_owner(request.user)

    permission_classes = (IsAuthenticated, HisOwnProfile)
    queryset = models.Profile.objects.all()
    serializer_class = serializers.ProfileSerializer

    def update(self, request, pk):
        # Partially update the profile identified by pk.
        profile = self.get_queryset().get(pk=pk)
        serializer = serializers.ProfileSerializer(profile, data=request.data, partial=True)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)


class HitupViewSet(viewsets.ModelViewSet):
    permission_classes = (IsAuthenticated, )
    queryset = models.Hitup.objects.all()

    def get_serializer_class(self):
        if self.action == 'list':
            return serializers.HitupSerializer
        elif self.action == 'create':
            return serializers.NewHitupSerializer

    def get_queryset(self):
        #return models.Hitup.objects.all()
        return models.Hitup.objects.filter(hangee__user_id=self.request.user,
                                           expiration__gt=datetime.datetime.now()).all()

    def create(self, request, *args, **kwargs):
        serializer = serializers.NewHitupSerializer(data=request.data, context={'request': request})
        serializer.is_valid(raise_exception=True)
        result = serializer.save()
        response = Response(status=status.HTTP_201_CREATED)
        return response
python
import os import pathlib import matplotlib.pyplot as plt import tensorflow as tf import numpy as np import sys import time import random from tensorflow.keras.preprocessing.image import load_img,img_to_array from tensorflow.keras import layers from multiprocessing.dummy import Pool as ThreadPool print('Python version: %s' % sys.version) print('TensorFlow version: %s' % tf.__version__) print('Keras version: %s' % tf.keras.__version__) #################### ### LOADING DATA ### #################### print("Loading and preprocessing data...") inps = os.listdir("./training_data_inputs") labels = os.listdir("./training_data_labels") data = set(inps) & set(labels) xdim = 180 ydim = 90 padding = 9 dd = 1 + padding * 2 koppens = np.array([ [255, 255, 255], [0, 0, 255], [0, 120, 255], [70, 170, 250], [255, 0, 0], [255, 150, 150], [245, 165, 0], [255, 220, 100], [255, 255, 0], [200, 200, 0], [150, 150, 0], [150, 255, 150], [100, 200, 100], [50, 150, 50], [200, 255, 80], [100, 255, 80], [50, 200, 0], [255, 0, 255], [200, 0, 200], [150, 50, 150], [170, 175, 255], [89, 120, 220], [75, 80, 179], [0, 255, 255], [55, 200, 255], [0, 125, 125], [178, 178, 178], [102, 102, 102] ]) koppens_weights = { 0: 1., # water 1: 1., # jungle 2: 1., # monsoon 3: 1., # savannah 4: 1., 5: 1., 6: 1., 7: 1., 8: 1., 9: 1., 10: 1., 11: 1., 12: 1., 13: 1., 14: 1., 15: 1., 16: 1., 17: 1., 18: 1., 19: 1., 20: 1., 21: 1., 22: 1., 23: 1., 24: 1., 25: 1., 26: 1., 27: 1., } x_train = [] y_train = [] for a in data: start_time = time.time() img_input = img_to_array(load_img("./training_data_inputs/" + a, color_mode='rgb')) img_label = img_to_array(load_img("./training_data_labels/" + a, color_mode='rgb')) input_data = np.zeros((img_input.shape[0], img_input.shape[1], 6)) label_data = np.zeros((img_input.shape[0], img_input.shape[1], 28)) for y in range(img_input.shape[0]): for x in range(img_input.shape[1]): # Process input p = img_input[y, x] if all(p == [0, 0, 255]): input_data[y, x, 0] = 1 # sea elif all(p == [177, 216, 230]): input_data[y, x, 1] = 1 # shelf elif all(p == [0, 0, 139]): input_data[y, x, 2] # trench elif all(p == [0, 255, 0]): input_data[y, x, 3] # plains elif all(p == [150, 75, 0]): input_data[y, x, 4] # mountains elif all(p == [112, 128, 144]): input_data[y, x, 5] # tall mountains else: raise Exception("UNKNOWN INPUT COLOR IN : " + a) # unknown # Process label l = img_label[y, x] min_dist = 255 * 4 index = 0 for n in range(len(koppens)): h = koppens[n] dist = abs(h[0] - l[0]) + abs(h[1] - l[1]) + abs(h[2] - l[2]) if dist < min_dist: min_dist = dist index = n if dist < 5: break if min_dist > 5: raise Exception("NO PIXEL SEEMS TO BE A CLOSE FIT FOR PIXEL: " + str(x) + ", " + str(y) + " IN: " + str(a) + " WITH COLOR: " + str(l)) label_data[y, x, index] = 1 input_data = np.pad(input_data, ((padding, padding), (0, 0), (0, 0)), 'constant', constant_values=(0, 0)) input_data=np.pad(input_data, ((0, 0), (padding, padding), (0, 0)), 'wrap') x_train.append(input_data) y_train.append(label_data) end_time = time.time() print(str(a) + ": " + str(end_time - start_time) + "s") """ # Calculate weights total = 28.0 for i in y_train[0]: for j in i: koppens_weights[np.argmax(j)] = koppens_weights[np.argmax(j)] + 1 total = total + 1.0 for i in range(28): koppens_weights[i] = total / koppens_weights[i] """ print("Image loaded!") x_train = np.array(x_train) y_train = np.array(y_train) print(x_train[0].shape) print(y_train[0].shape) print(y_train) def get_sub_array(ni, xin, yin, slices_of_data): return slices_of_data[ni, 
yin:yin+2*padding+1, xin:xin+2*padding+1, :] # For training class DataGenerator(tf.keras.utils.Sequence): def __init__(self, batch_size, x_s, y_s, *args, **kwargs): self.batch_size = batch_size self.x_data = x_s self.y_data = y_s def __len__(self): return 5000 def __getitem__(self, index): x = np.array([np.zeros((dd, dd, 6)) for o in range(self.batch_size)]) y = np.array([np.zeros((len(koppens))) for o in range(self.batch_size)]) for o in range(self.batch_size): ni = random.randint(0, self.x_data.shape[0] - 1) # index of the image from which we're copying data xin = random.randint(0, xdim - 1) # x of the pixel we're looking at, -1 is here because of inclusivity of randint yin = random.randint(0, ydim - 1) # y of the pixel we're looking at, -1 is here because of inclusivity of randint # Reroll water tiles while self.y_data[ni, yin, xin, 0] == 1 or self.x_data[ni, padding + yin, padding + xin, 0] == 1 or self.x_data[ni, padding + yin, padding + xin, 1] == 1 or self.x_data[ni, padding + yin, padding + xin, 2] == 1: ni = random.randint(0, self.x_data.shape[0] - 1) # index of the image from which we're copying data xin = random.randint(0, xdim - 1) # x of the pixel we're looking at, -1 is here because of inclusivity of randint yin = random.randint(0, ydim - 1) # y of the pixel we're looking at, -1 is here because of inclusivity of randint ooo = get_sub_array(ni, xin, yin, self.x_data) x[o] = ooo for i in range(len(koppens)): y[o, i] = self.y_data[ni, yin, xin, i] return x, y # For predicting class DataProvider(tf.keras.utils.Sequence): def __init__(self, x_s, ni, batch_size, *args, **kwargs): self.x_data = x_s self.ni = ni self.batch_size = batch_size def __len__(self): return xdim * ydim def __getitem__(self, index): index_int = int(index) xin = index_int % xdim yin = index_int // xdim x = np.array([np.zeros((dd, dd, 6)) for o in range(self.batch_size)]) for o in range(self.batch_size): x[o] = get_sub_array(self.ni, xin, yin, self.x_data) return x def on_epoch_end(self): pass model = tf.keras.models.Sequential() model.add(tf.keras.Input(shape=(dd, dd, 6))) model.add(layers.Flatten()) model.add(layers.Dense(30, activation="relu")) model.add(layers.Dropout(0.2)) model.add(layers.Dense(30, activation="relu")) model.add(layers.Dropout(0.2)) model.add(layers.Dense(len(koppens), activation='softmax')) print("--- compiling the model ---") model.compile( optimizer='adam',#tf.keras.optimizers.SGD(learning_rate=0.0001), loss='categorical_crossentropy', metrics=["mean_squared_error", "categorical_accuracy", "accuracy"] ) model.summary() print("--- model fit ---") gen = DataGenerator(50, x_train, y_train) history = model.fit( gen, epochs=25, workers=10, class_weight=koppens_weights ) print("--- model predict ---") # ID of the image in x_train that we want to export. 0 stands for Earth image_id = 0 img_to_save = np.zeros((ydim, xdim, 3)) gen = DataProvider(x_train, image_id, 80) results = model.predict(gen, workers=10, verbose=1) ii = 0 for x in range(xdim): for y in range(ydim): # Skip water tiles, assing water to them by default if x_train[image_id, padding + y, padding + x, 0] == 1 or x_train[image_id, padding + y, padding + x, 1] == 1 or x_train[image_id, padding + y, padding + x, 2] == 1: img_to_save[y, x] = koppens[0] / 255.0 else: img_to_save[y, x] = koppens[np.argmax(results[ii])] / 255.0 ii = ii + 1 plt.imsave("export.png", img_to_save) print("--- all done ---")
python
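# The input-encoding loop above indexes input_data[y, x, 2..5] for the trench,
# plains, mountain and tall-mountain colours without assigning, so those
# channels stay zero. A hedged, self-contained sketch of the one-hot encoding
# presumably intended (same palette, vectorised with NumPy):
import numpy as np

TERRAIN_COLOURS = {
    (0, 0, 255): 0,       # sea
    (177, 216, 230): 1,   # continental shelf
    (0, 0, 139): 2,       # trench
    (0, 255, 0): 3,       # plains
    (150, 75, 0): 4,      # mountains
    (112, 128, 144): 5,   # tall mountains
}

def encode_terrain(img):
    """Map an (H, W, 3) RGB terrain image to an (H, W, 6) one-hot channel stack."""
    out = np.zeros(img.shape[:2] + (6,))
    for colour, channel in TERRAIN_COLOURS.items():
        mask = np.all(img == np.array(colour), axis=-1)
        out[mask, channel] = 1
    return out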
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2019/9/28 12:39 # @Author : Meta_Chen # @File : sendip.py # @Software: PyCharm # @Target: 以邮件形式发送ip import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.header import Header from utils.getip import GetIP from apscheduler.schedulers.blocking import BlockingScheduler import os import logging import configparser logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(name)s %(levelname)s %(message)s", datefmt = '%Y-%m-%d %H:%M:%S %a') #注意月份和天数不要搞乱了,这里的格式化符与time模块相同 class SendEmail: # 设置smtplib所需的参数 # 下面的发件人,收件人是用于邮件传输的。 smtpserver = 'smtp.163.com' username = '[email protected]' sender = '[email protected]' # receiver='[email protected]' # 收件人为多个收件人 receiver = ['[email protected]'] # subject = 'Python email test' # 通过Header对象编码的文本,包含utf-8编码信息和Base64编码信息。以下中文名测试ok subject = 'IP地址变更' subject=Header(subject, 'utf-8').encode() # 构造邮件对象MIMEMultipart对象 # 下面的主题,发件人,收件人,日期是显示在邮件页面上的。 msg = MIMEMultipart('mixed') msg['Subject'] = subject msg['From'] = 'meta_chen <{}>'.format(sender) # msg['To'] = '[email protected]' # 收件人为多个收件人,通过join将列表转换为以;为间隔的字符串 msg['To'] = ";".join(receiver) # msg['Date']='2012-3-16' config = configparser.ConfigParser() config.read("./conf.ini", encoding="utf-8") def __init__(self,password): self.password = password def mailsender(self): ''' 构造文字内容,2小时检测一次 :return: ''' logging.info("Start Check IP") checkip = GetIP() myip = checkip.getip() oldip = self.config.get('ip','oldip') if myip != oldip: logging.info('IP has Changed to : {} from {}'.format(myip,oldip)) self.config.set('ip','oldip',str(myip)) self.config.write(open("./conf.ini", "w")) else: logging.info("Nothing changed") return False text = 'Host Ip has Changed :{}'.format(myip) text_plain = MIMEText(text, 'plain', 'utf-8') self.msg.attach(text_plain) # 发送邮件 smtp = smtplib.SMTP() smtp.connect('smtp.163.com') # 我们用set_debuglevel(1)就可以打印出和SMTP服务器交互的所有信息。 smtp.set_debuglevel(1) smtp.login(self.username, self.password) smtp.sendmail(self.sender, self.receiver, self.msg.as_string()) smtp.quit() return True def timeJob(self): ''' 定时检查ip :return: ''' scheduler = BlockingScheduler() # 每2小时触发 scheduler.add_job(self.mailsender, 'interval', days=1) scheduler.start() def main(): emailPassword = os.getenv('163AUTHCODE') sender = SendEmail(emailPassword) sender.timeJob() if __name__ == '__main__': main()
python
from types import SimpleNamespace import re options_render = { "START_FROM": 0, "PATH_MODEL": 'models/hotdog.blend', "DATASET_NAME": 'hotdog_random_exr', "DATAMODEL_NAME": '', # dataset used for training; == %DATASET_NAME% if empty "RESOLUTION": 512, # resolution of resulting renders "ARCH": 'mlnrf_base', # nsvf_base/mlnrf_base/mlnrfiva_base/mlnrfex_base/mlnrfnrf_base/mlnrfexbf_base/mlnrfexva_base "RENDERING_NAME": 'random3', # "POOLS": '', "POOLS": '../pool/u4109/checkpoint/', "COLOR_DEPTH": 16, "FORMAT": 'OPEN_EXR', "CYCLES_SAMPLES": 500,#7000, "CYCLES_MAX_BOUNCES": 20,#20, "OUTPUT_DIR": '%DATASET_NAME%_random3true', "PRESET_VIEWS_FOLDER": 'checkpoints/%POOLS%%DATASET_NAME%/%ARCH%/%RENDERING_NAME%', # "PRESET_VIEWS_FOLDER": 'checkpoints/%POOLS%lego_coloc_exr/%ARCH%/%RENDERING_NAME%', "VIEWS_PATH": '%PRESET_VIEWS_FOLDER%/pose', "LIGHTS_PATH": '%PRESET_VIEWS_FOLDER%/pose_pl', "VOXEL_NUMS": 64, # 512, # can still be later overridden using argument 'VOXEL_NUM' # "CAM_DISTANCE": 1.0, "CAM_HEMISPHERE_ANGLES": [-10, 80], # in degrees "RANDOM_VIEWS": False, # VIEWS_PATH & LIGHTS_PATH must be specified if RANDOM_VIEWS == False "DEBUG": False, "DEPTH_SCALE": 1.4, "RESULTS_PATH": 'target', "PERCENTILE_MIN": 0.5, "PERCENTILE_MAX": 99.5, } if options_render['DATAMODEL_NAME']: options_render['PRESET_VIEWS_FOLDER'] = options_render['PRESET_VIEWS_FOLDER'].replace('%DATASET_NAME%', options_render['DATAMODEL_NAME']) options = options_render; print('\n'.join([''.join(['=']*10)]*3), '>>>>> RENDER <<<<<'); # Substitute vars for key in options: if not isinstance(options[key], str): continue for match in re.finditer('%[A-Z_]+%', options[key]): matchKey = match.group()[1:-1] if matchKey in options: # options[key] = options[key][:match.start()] + options[matchKey] + options[key][match.end():] options[key] = options[key].replace(match.group(), options[matchKey]) opts = SimpleNamespace(**options)
python
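# With the defaults above, the substitution loop expands values in dict order,
# so the already-expanded PRESET_VIEWS_FOLDER feeds VIEWS_PATH and LIGHTS_PATH:
#   OUTPUT_DIR -> 'hotdog_random_exr_random3true'
#   VIEWS_PATH -> 'checkpoints/../pool/u4109/checkpoint/hotdog_random_exr/mlnrf_base/random3/pose'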
# -*- coding: utf-8 -*- from django.conf import settings from django import forms from django.contrib import admin from photologue.models import Photo, Gallery, PhotoEffect, PhotoSize, Watermark from photologue.admin import PhotoAdmin as PhotoAdminDefault from photologue.admin import GalleryAdmin as GalleryAdminDefault from models import PhotoExtended, Agency, Photographer class AgencyAdmin(admin.ModelAdmin): pass class PhotoExtendedModelForm(forms.ModelForm): date_taken = forms.DateField(label=u'Tomada el', widget=admin.widgets.AdminDateWidget(), required=False) def __init__(self, *args, **kwargs): super(PhotoExtendedModelForm, self).__init__(*args, **kwargs) if self.instance.id: self.initial['date_taken'] = self.instance.image.date_taken def save(self, commit=True): instance = super(PhotoExtendedModelForm, self).save(commit=commit) instance.image.date_taken = self.cleaned_data['date_taken'] if not instance.image._old_image: # this is a new image, we need to "fake" the old image to avoid photologue.Photo attemp to rm a "None" file instance.image._old_image = instance.image.image instance.image.save() return instance class Meta: model = PhotoExtended fields = ('date_taken', ) class PhotoExtendedInline(admin.StackedInline): model = PhotoExtended form = PhotoExtendedModelForm can_delete = False fieldsets = ( ('Metadatos', {'fields': ('date_taken', 'type', 'photographer', 'agency')}), (u'Recorte para versión cuadrada', { 'fields': ('focuspoint_x', 'focuspoint_y', 'radius_length'), 'classes': ('collapse', )})) class Media: # jquery loaded again (admin uses custom js namespaces) js = ('admin/js/jquery%s.js' % ('' if settings.DEBUG else '.min'), 'js/jquery.cropbox.js') class PhotoGalleryInline(admin.TabularInline): model = Gallery.photos.through raw_id_fields = ('photo', ) extra = 0 verbose_name = u'foto' verbose_name_plural = u'fotos' readonly_fields = ['photo_admin_thumbnail', 'photo_date_taken', 'photo_date_added'] def photo_admin_thumbnail(self, instance): return instance.photo.admin_thumbnail() photo_admin_thumbnail.short_description = u'thumbnail' photo_admin_thumbnail.allow_tags = True def photo_date_taken(self, instance): return instance.photo.date_taken photo_date_taken.short_description = u'tomada el' def photo_date_added(self, instance): return instance.photo.date_added photo_date_added.short_description = u'fecha de creación' class GalleryAdmin(GalleryAdminDefault): list_display = ('title', 'date_added', 'photo_count', 'is_public') list_filter = ['date_added', 'is_public'] date_hierarchy = 'date_added' prepopulated_fields = {'slug': ('title',)} filter_horizontal = ('photos',) inlines = [PhotoGalleryInline] exclude = ('photos', ) class PhotographerAdmin(admin.ModelAdmin): search_fields = ('name', ) class PhotoEffectAdmin(admin.ModelAdmin): list_display = ( 'name', 'description', 'color', 'brightness', 'contrast', 'sharpness', 'filters', 'admin_sample') fieldsets = ( (None, { 'fields': ('name', 'description') }), ('Adjustments', { 'fields': ('color', 'brightness', 'contrast', 'sharpness') }), ('Filters', { 'fields': ('filters',) }), ('Reflection', { 'fields': ( 'reflection_size', 'reflection_strength', 'background_color') }), ('Transpose', { 'fields': ('transpose_method',) }), ) class PhotoSizeAdmin(admin.ModelAdmin): list_display = ( 'name', 'width', 'height', 'crop', 'pre_cache', 'effect', 'increment_count') fieldsets = ( (None, { 'fields': ('name', 'width', 'height', 'quality') }), ('Options', { 'fields': ('upscale', 'crop', 'pre_cache', 'increment_count') }), ('Enhancements', { 
'fields': ('effect', 'watermark',) }), ) class WatermarkAdmin(admin.ModelAdmin): list_display = ('name', 'opacity', 'style') class AgencyFilter(admin.SimpleListFilter): title = 'agency' parameter_name = 'agency' def lookups(self, request, model_admin): return [(a.id, a.name) for a in Agency.objects.all() if a.photos.exists()] def queryset(self, request, queryset): agency = self.value() return queryset.filter( id__in=PhotoExtended.objects.filter(agency=agency).values_list('image', flat=True) ) if agency else queryset class PhotographerFilter(admin.SimpleListFilter): title = 'photographer' parameter_name = 'photographer' def lookups(self, request, model_admin): return [(p.id, p.name) for p in Photographer.objects.all() if p.photos.exists()] def queryset(self, request, queryset): photographer = self.value() return queryset.filter( id__in=PhotoExtended.objects.filter(photographer=photographer).values_list('image', flat=True) ) if photographer else queryset class PhotoAdmin(PhotoAdminDefault): list_display = ('title', 'admin_thumbnail', 'date_taken', 'date_added', 'is_public', 'view_count') list_filter = tuple(PhotoAdminDefault.list_filter) + (AgencyFilter, PhotographerFilter) fieldsets = ( (None, {'fields': ('title', 'image', 'caption')}), ('Avanzado', {'fields': ('slug', 'crop_from', 'is_public'), 'classes': ('collapse', )})) inlines = [PhotoExtendedInline] admin.site.unregister(Photo) admin.site.register(Photo, PhotoAdmin) admin.site.unregister(Gallery) admin.site.register(Gallery, GalleryAdmin) admin.site.register(Agency, AgencyAdmin) admin.site.register(Photographer, PhotographerAdmin) admin.site.unregister(PhotoEffect) admin.site.register(PhotoEffect, PhotoEffectAdmin) admin.site.unregister(PhotoSize) admin.site.register(PhotoSize, PhotoSizeAdmin) admin.site.unregister(Watermark) admin.site.register(Watermark, WatermarkAdmin)
python
#!/usr/bin/env python # coding: utf-8 # In[1]: import requests import numpy as np from bs4 import BeautifulSoup import itertools import warnings warnings.filterwarnings("ignore") import pandas as pd import re from lxml import html import math import time import sys # In[50]: def inside_get_year(url_): #url = "https://www.ptt.cc/bbs/Beauty/M.1568553917.A.175.html" time.sleep(0.1) payload = { "from": "/bbs/Gossiping/index.html", "yes": "yes" } rs = requests.session() res = rs.post("https://www.ptt.cc/ask/over18", verify = False, data = payload) res = rs.get(url_, verify = False, headers={'Connection':'close'}) tree = html.fromstring(res.content) a = tree.xpath("//*[@id=\"main-content\"]/div[4]/span[2]")[0] return( a.text[-4:] ) # In[3]: def url_get_date(int_): head = "https://www.ptt.cc/bbs/Beauty/index" end = ".html" url_ = head + str(int_) + end payload = { "from": "/bbs/Gossiping/index.html", "yes": "yes" } rs = requests.session() res = rs.post("https://www.ptt.cc/ask/over18", verify = False, data = payload) res = rs.get(url_, verify = False, headers={'Connection':'close'}) soup = BeautifulSoup(res.text) block_l = soup.select(".r-ent") for i in block_l: try: date = i.select(".date")[0].text[1:] date = date.replace("/", "") #print(date) URL = i.select(".title")[0].select("a")[0]["href"] head2 = "https://www.ptt.cc" year = inside_get_year(head2 + URL) #print(year) if( len(year + date)==7 ): return( int( year + "0" + date) ) else: return( int( year + date) ) break except: pass # In[4]: #start_time_glo = time.time() def binary_search(date_, start_, end_ , time_): pivot = int((start_ + end_ )/2) date = url_get_date( pivot ) #print(date) #print(date_) if( time.time() - time_ > 15): return(pivot) if( date_ < date): #print("date_ < date\n") return( binary_search(date_, start_, pivot, time_) ) if( date_ > date): #print("date_ > date\n") return( binary_search(date_, pivot, end_, time_) ) if(date_ == date): return(pivot) # In[5]: def find_start_end(): start_time_glo = time.time() start = binary_search(20171231, 0, 3000, time.time()) start_time_glo = time.time() end = binary_search(20190101, 0, 3000, time.time()) return( (start, end)) # In[6]: def num_make_URL(int_): head = "https://www.ptt.cc/bbs/Beauty/index" end = ".html" return(head + str(int_) + end) # In[7]: def url_find_block(url_): #url = "https://www.ptt.cc/bbs/Beauty/index3057.html" try: time.sleep(0.1) while(True): payload = { "from": "/bbs/Gossiping/index.html", "yes": "yes" } print(url_) rs = requests.session() res = rs.post("https://www.ptt.cc/ask/over18", verify = False, data = payload) res = rs.get(url_, verify = False, headers={'Connection':'close'}) soup = BeautifulSoup(res.text) block_l = soup.select(".r-ent") print(url_) return(block_l) break except: print("url_find_block: error") print(url_) print("\n") # In[8]: def block_find_data(block_): date, title, URL, bao, except_, annoucement = None, None, None, None, False, False try: date = block_.select(".date")[0].text title = block_.select(".title")[0].text annoucement = title.startswith('\n[公告]') URL = block_.select(".title")[0].select("a")[0]["href"] bao = block_.select(".nrec")[0].text except: except_ = True return( (date, title, URL, bao, except_, annoucement)) # In[9]: def data_to_df(block_l_): df = pd.DataFrame(list(map(block_find_data, block_l_))) df.columns = ["date","title", "URL", "bao", "except", "annoucement"] return(df) # In[10]: def date_adj(str_): return(str_.replace("/", "")) # In[11]: def title_adj(str_): try: str_ = re.match(u"^\\n(.*)\\n$", str_).groups()[0] 
except: print("title_adj: error") return( str_) # In[12]: def df_adjust(df): df = df[df["except"] == False] df = df[df["annoucement"] == False] df["URL"] = "https://www.ptt.cc" + df["URL"] df["date"] = list(map(date_adj, df["date"].tolist())) while( df["date"].tolist()[0] == "1231" ): df = df.drop(df.index[0]) while( df["date"].tolist()[-1] == " 101" ): df = df.drop(df.index[-1]) df["title"] = list(map(title_adj, df["title"].tolist())) return(df) # In[1]: def remove_blank(str_): return(str_.replace(" ", "")) # In[13]: def all_articles(df): buf = df[["date", "title", "URL"]].astype(str) buf.date = list(map(remove_blank, buf.date.tolist())) try: buf.to_csv('all_articles.txt', sep=',', index = False, header = False) print("all_articles: success") except: print("all_articles: fail") # In[14]: def all_popular(df): df = df[df["bao"]=="爆"] buf = df[["date", "title", "URL"]].astype(str) buf.date = list(map(remove_blank, buf.date.tolist())) try: buf.to_csv('all_popular.txt', sep=',', index = False, header = False) print("all_popular: success") except: print("all_popular: fail") # In[52]: def crawl(): print("crawl start") page_tuple = find_start_end() print(page_tuple) URL_list = list(map(num_make_URL, np.arange(page_tuple[0], page_tuple[1]))) block_list = list(map(url_find_block, URL_list)) block_list = list(itertools.chain(*block_list)) df = data_to_df(block_list) df = df_adjust(df) all_articles(df) all_popular(df) df.to_csv("HW1-1_3.0.csv") return("problem 1 down") # In[16]: if( sys.argv[1] == "crawl"): crawl() # In[20]: def url_find_soup(url_): #url = "https://www.ptt.cc/bbs/Beauty/index3057.html" time.sleep(0.1) try: while(True): payload = { "from": "/bbs/Gossiping/index.html", "yes": "yes" } rs = requests.session() res = rs.post("https://www.ptt.cc/ask/over18", verify = False, data = payload) res = rs.get(url_, verify = False, headers={'Connection':'close'}) soup = BeautifulSoup(res.text) #block_l = soup.select(".push") return(soup) break except: print("url_find_block: error") print(url_) print("\n") # In[21]: def push_find_pushtag(push_): try: return( push_.select(".hl.push-tag")[0].text) except: print("push_find_pushtag: error:", push_) return(None) def push_find_pushID(push_): try: return( push_.select(".f3.hl.push-userid")[0].text) except: print("push_find_pushID: error", push_) return(None) # In[22]: def tag_to_text(tag_): return(tag_.text) def find_all_href(soup_): compare = "(.PNG|.JPEG|.GIF|.JPG|.png|.jpeg|.gif|.jpg)$" try: all_hreftag = soup_.find_all(href=re.compile(compare)) return( list(map(tag_to_text , all_hreftag)) ) except: print("find_all_href: error") return(None) def find_article_href(soup_): try: compare = "(.PNG|.JPEG|.GIF|.JPG|.png|.jpeg|.gif|.jpg)$" buf = soup_.select("#main-content")[0] article_hreftag = buf.find_all(href=re.compile(compare), recursive=False) return( list(map(tag_to_text ,article_hreftag)) ) except: print("find_article_href: error") return(None) # In[23]: def soup_find_article(soup): soup.select("#main-content")[0].text article = soup.select("#main-content")[0].text article = article.replace("\n", "") compare = r"(.*)--※ 發信站" buf = re.search( compare, article).groups()[0] return(buf) # In[24]: def url_find_data(url_): print(url_) push_tag_l, push_userid_l, all_hreftag = None, None, None article_hreftag, article = None, None try: soup = url_find_soup(url_) push = soup.select(".push") push_tag_l = list(map(push_find_pushtag, push)) push_userid_l = list(map(push_find_pushID, push)) all_hreftag = find_all_href(soup) article_hreftag = find_article_href(soup) 
article = soup_find_article(soup) except: print("url_find_data: error", url_) return( (push_tag_l, push_userid_l, all_hreftag, article_hreftag, article)) # In[25]: def get_data(): print("get_data start") df = pd.read_csv("HW1-1_3.0.csv") df = df.drop(df.columns[[0]], axis=1) #testdf = df.head(100) start_time = time.time() buf_np = list(map(url_find_data, df["URL"].tolist())) print(time.time() - start_time) np.save('url_data', buf_np) # In[ ]: #if( sys.argv[1] == "push"): # get_data() # # hw 2.5 # In[26]: def make_push_table(np_): df_push = pd.DataFrame() for i in np_: try: buf = pd.DataFrame({'push': i[0], 'ID': i[1]}) df_push = df_push.append(buf) except: print(i) return(df_push) # In[27]: def find_push_boo(df_): buf = df_.groupby(['push']).count() buf2 = buf.loc[ ['推 ' , '噓 '] ,:]["ID"].tolist() return(buf2) # In[28]: def create_like_str(int_): return("like #" + str(int_)) def create_boo_str(int_): return("boo #" + str(int_)) # In[51]: def push(start_date, end_date): #start_date = 101 #end_date = 202 print("push start") start_date = int(start_date) end_date = int(end_date) read_np = np.load('url_data.npy',allow_pickle = True ) df = pd.read_csv("HW1-1_3.0.csv") df = df.drop(df.columns[[0]], axis=1) buf1 = np.array(df.date) >= start_date buf2 = np.array(df.date) <= end_date legel_index = buf1 * buf2 legal_np = read_np[legel_index] df_push = make_push_table(legal_np) buf = df_push[df_push["push"] != "→ "] cross_df = pd.crosstab(buf.ID, buf.push, margins=True) cross_df["pushID"] = cross_df.index push_df = cross_df.sort_values(by = ["推 ", "pushID"], ascending= [False, True])[1:11] boo_df = cross_df.sort_values(by = ["噓 ", "pushID"], ascending = [False, True])[1:11] buf1 = list(map(create_like_str, np.arange(11)[1:])) buf2 = list(map(create_boo_str, np.arange(11)[1:])) col1 = ["all like", "all boo"] + buf1 + buf2 col2 = find_push_boo(df_push) + list(push_df.index) + list(boo_df.index) col3 = [" ", " "] + push_df["推 "].tolist() + boo_df["噓 "].tolist() col4 = [] for i in np.arange(len(col2)): col4 = col4 + [ " " + str(col2[i]) + " " + str(col3[i]) ] #col3 = list(map(str, col3)) output_df = pd.DataFrame({'name':col1, 'number': col4}) output_name = "push[%s-%s].txt" % (start_date, end_date) output_df.to_csv(output_name, sep = ":", index = False, header = False) # In[31]: if( sys.argv[1] == "push"): try: push(sys.argv[2], sys.argv[3]) except: get_data() push(sys.argv[2], sys.argv[3]) # In[32]: # HW3 # In[33]: def one_to_allhref(tuple_): return( tuple_[2]) # In[39]: def popular(start_date, end_date): start_date = int(start_date) end_date = int(end_date) print("popular excute") read_np = np.load('url_data.npy',allow_pickle = True ) df = pd.read_csv("HW1-1_3.0.csv") df = df.drop(df.columns[[0]], axis=1) bao_list = np.array(df["bao"])== ["爆"] buf1 = np.array(df.date) >= start_date buf2 = np.array(df.date) <= end_date legel_index = buf1 * buf2 * bao_list legal_np = read_np[legel_index] href_list = list(map(one_to_allhref, read_np)) buf = list(itertools.compress(href_list, legel_index)) merge_href = list(itertools.chain(*buf)) buf = "number of popular articles: %d" % sum(legel_index) output_df = pd.DataFrame({'col1': [buf] + merge_href }) output_name = "popular[%s-%s].txt" % (start_date, end_date) output_df.to_csv(output_name, sep = ",", index = False, header = False) # In[40]: if( sys.argv[1] == "popular"): popular(sys.argv[2], sys.argv[3]) # In[41]: # HW4 # In[45]: def one_to_article(tuple_): return( tuple_[4]) # In[42]: def article_if_keyword(str_): if( str_ == None ): return( False) else: return( 
keyword_glo in str_) # In[44]: def one_to_article_href(tuple_): return( tuple_[3]) # In[48]: def keyword_search(keyword, start_date, end_date): print("keyword_search" + " start") start_date = int(start_date) end_date = int(end_date) keyword_glo = str(keyword) read_np = np.load('url_data.npy',allow_pickle = True ) df = pd.read_csv("HW1-1_3.0.csv") df = df.drop(df.columns[[0]], axis=1) buf1 = np.array(df.date) >= start_date buf2 = np.array(df.date) <= end_date article_list = list(map(one_to_article, read_np)) keyword_list = list(map(article_if_keyword, article_list)) legel_index = buf1 * buf2 * keyword_list a_href_list = list(map(one_to_article_href, read_np)) buf = list(itertools.compress(a_href_list, legel_index)) merge_href = list(itertools.chain(*buf)) print("number of keyword articles: %d" % sum(legel_index)) output_df = pd.DataFrame({'col1': merge_href }) output_name = "keyword(%s)[%s-%s].txt" % (keyword_glo, start_date, end_date) output_df.to_csv(output_name, sep = ":", index = False, header = False) # In[49]: if( sys.argv[1] == "keyword"): keyword_glo = str(sys.argv[2]) keyword_search(sys.argv[2], sys.argv[3], sys.argv[4]) # In[ ]:
python
from urllib import quote_plus from celery.schedules import crontab class HardCoded(object): """Constants used throughout the application. All hard coded settings/data that are not actual/official configuration options for Flask, Celery, or their extensions goes here. """ ADMINS = ['[email protected]'] DB_MODELS_IMPORTS = ('pypi',) # Like CELERY_IMPORTS in CeleryConfig. ENVIRONMENT = property(lambda self: self.__class__.__name__) MAIL_EXCEPTION_THROTTLE = 24 * 60 * 60 _SQLALCHEMY_DATABASE_DATABASE = 'pypi_portal' _SQLALCHEMY_DATABASE_HOSTNAME = 'localhost' _SQLALCHEMY_DATABASE_PASSWORD = 'pypi_p@ssword' _SQLALCHEMY_DATABASE_USERNAME = 'pypi_service' class CeleryConfig(HardCoded): """Configurations used by Celery only.""" CELERYD_PREFETCH_MULTIPLIER = 1 CELERYD_TASK_SOFT_TIME_LIMIT = 20 * 60 # Raise exception if task takes too long. CELERYD_TASK_TIME_LIMIT = 30 * 60 # Kill worker if task takes way too long. CELERY_ACCEPT_CONTENT = ['json'] CELERY_ACKS_LATE = True CELERY_DISABLE_RATE_LIMITS = True CELERY_IMPORTS = ('pypi',) CELERY_RESULT_SERIALIZER = 'json' CELERY_TASK_RESULT_EXPIRES = 10 * 60 # Dispose of Celery Beat results after 10 minutes. CELERY_TASK_SERIALIZER = 'json' CELERY_TRACK_STARTED = True CELERYBEAT_SCHEDULE = { 'pypy-every-day': dict(task='pypi.update_package_list', schedule=crontab(hour='0')), } class Config(CeleryConfig): """Default Flask configuration inherited by all environments. Use this for development environments.""" DEBUG = True TESTING = False SECRET_KEY = "i_don't_want_my_cookies_expiring_while_developing" MAIL_SERVER = 'smtp.localhost.test' MAIL_DEFAULT_SENDER = '[email protected]' MAIL_SUPPRESS_SEND = True REDIS_URL = 'redis://localhost/0' SQLALCHEMY_DATABASE_URI = property(lambda self: 'mysql://{u}:{p}@{h}/{d}'.format( d=quote_plus(self._SQLALCHEMY_DATABASE_DATABASE), h=quote_plus(self._SQLALCHEMY_DATABASE_HOSTNAME), p=quote_plus(self._SQLALCHEMY_DATABASE_PASSWORD), u=quote_plus(self._SQLALCHEMY_DATABASE_USERNAME) )) class Testing(Config): TESTING = True CELERY_ALWAYS_EAGER = True REDIS_URL = 'redis://localhost/1' _SQLALCHEMY_DATABASE_DATABASE = 'pypi_portal_testing' class Production(Config): DEBUG = False SECRET_KEY = None # To be overwritten by a YAML file. ADMINS = ['[email protected]'] MAIL_SUPPRESS_SEND = False STATICS_MINIFY = True
python
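# For reference, the SQLALCHEMY_DATABASE_URI property above expands the
# development defaults (before any production override) to:
#   Config().SQLALCHEMY_DATABASE_URI
#   -> 'mysql://pypi_service:pypi_p%40ssword@localhost/pypi_portal'
# Testing only switches the database name to 'pypi_portal_testing'.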
import codecs import csv from django.contrib import admin from django.shortcuts import HttpResponse from django.utils.translation import gettext_lazy as _ from .models import Subscriber @admin.register(Subscriber) class SubscriberAdmin(admin.ModelAdmin): list_display = ('id', 'first_name', 'last_name', 'email', 'language', 'country', 'uploaded', 'created') list_filter = ('language', 'country', 'uploaded') search_fields = ('first_name', 'last_name', 'email') actions = ('export_selected', ) def export_selected(self, request, queryset): opts = self.model._meta response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment;filename={}.csv'.format( opts.verbose_name_plural.replace(' ', '_') ) response.write(codecs.BOM_UTF8) writer = csv.writer(response, delimiter=';') fields = [ _('id'), _('first name'), _('last name'), _('email'), _('language'), _('country'), _('date/time') ] # Write a first row with header information writer.writerow(fields) # Write data rows for obj in queryset: data_row = list() data_row.append(obj.id) data_row.append(obj.first_name) data_row.append(obj.last_name) data_row.append(obj.email) data_row.append(obj.language) data_row.append(obj.country.code) data_row.append(obj.created.strftime('%d/%m/%Y %H:%M:%S')) writer.writerow(data_row) return response export_selected.short_description = _('Export selected subscribers')
python
# -*- coding: utf-8 -*-
# @Date   : 2016-01-23 21:40
# @Author : leiyue ([email protected])
# @Link   : https://leiyue.wordpress.com/


def async_task(func):
    """Run the decorated function in a background thread and return the Thread.

    Renamed from ``async``, which became a reserved keyword in Python 3.7,
    so the original name no longer parses on current interpreters.
    """
    from threading import Thread
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        thr = Thread(target=func, args=args, kwargs=kwargs)
        thr.start()
        return thr

    return wrapper
python
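# A minimal usage sketch for the thread decorator above (hypothetical worker
# function): the call returns immediately with the started Thread handle.
@async_task
def send_report(path):
    print("processing", path)   # runs in a background thread

thread = send_report("/tmp/report.csv")
thread.join()                    # wait for the background work to finish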
import os from .utils import safe_makedirs from config import DATA_ROOT RAW = 'raw' PRODUCTS = 'products' CORRECTED = 'corrected' ALL = 'all' FILENAME = 'filename' class Resolver(object): def __init__(self, data_root=None): if data_root is None: data_root = DATA_ROOT self.data_root = data_root def raw_directory(self, data_type, cruise=ALL, check_exists=True): raw_dir = os.path.join(self.data_root, RAW, cruise, data_type) if check_exists and not os.path.exists(raw_dir): raise KeyError('{} directory not found for {}'.format(data_type, cruise)) return raw_dir def raw_file(self, data_type, name=None, check_exists=True, **kw): if name is None: # using None so name can be used as a keyword raise ValueError('file name must be provided') raw_dir = self.raw_directory(data_type, **kw) raw_path = os.path.join(raw_dir, name) if check_exists and not os.path.exists(raw_path): raise KeyError('file {} not found'.format(raw_path)) return raw_path def product_directory(self, data_type, cruise=ALL, makedirs=False): proc_dir = os.path.join(self.data_root, PRODUCTS, cruise, data_type) if makedirs: safe_makedirs(proc_dir) return proc_dir def product_file(self, data_type, cruise, name=None, extension='json', makedirs=False): proc_dir = self.product_directory(data_type, cruise, makedirs=makedirs) name_ext = '{}.{}'.format(name, extension) return os.path.join(proc_dir, name_ext) def corrected_directory(self, data_type, cruise=ALL, makedirs=False): corr_dir = os.path.join(self.data_root, CORRECTED, cruise, data_type) if makedirs: safe_makedirs(corr_dir) return corr_dir def directories(self, data_type, cruise, skip_raw=False): dirs = [] if not skip_raw: dirs.append(self.raw_directory(data_type, cruise)) dirs.append(self.corrected_directory(data_type, cruise)) dirs.append(self.product_directory(data_type, cruise)) return dirs def cruises(self): c = [] raw = os.path.join(self.data_root, RAW) for fn in sorted(os.listdir(raw)): if not os.path.isdir(os.path.join(raw, fn)): continue if fn != ALL: c.append(fn) return c def find_file(directories, filename, extension=None): for directory in directories: path = os.path.join(directory, filename) if extension is not None: path = '{}.{}'.format(path, extension) if os.path.exists(path): return path return None ENDEAVOR = 'Endeavor' ARMSTRONG = 'Armstrong' def cruise_to_vessel(cruise): lower = cruise.lower() if lower.startswith('en'): return ENDEAVOR elif lower.startswith('ar'): return ARMSTRONG else: raise KeyError('cannot determine vessel for {}'.format(cruise))
python
import dataclasses import itertools import time import typing import ratelimit import requests from loguru import logger GameID = typing.NewType("GameID", int) PatchVersion = typing.NewType("PatchVersion", tuple[str, str]) CALLS_PER_SECOND = 1 DEFAULT_RETRY_ATTEMPTS = (0, 1, 2, 5, 10, 30) @dataclasses.dataclass(frozen=True) class DownloadResult: game_id: GameID @dataclasses.dataclass(frozen=True) class DownloadedGame(DownloadResult): data: dict[str, typing.Any] response: requests.Response @dataclasses.dataclass(frozen=True) class FailedDownloadAttempt(DownloadResult): attempt_number: int response: requests.Response @dataclasses.dataclass(frozen=True) class SkippedDownloadAttempt(DownloadResult): pass @dataclasses.dataclass(frozen=True) class MismatchedPatchDownloadAttempt(DownloadResult): game_patch: typing.Optional[PatchVersion] expected_patch: PatchVersion response: requests.Response Downloader = typing.Callable[..., requests.Response] def get_patch(game_data: dict[str, typing.Any]) -> typing.Optional[PatchVersion]: first_player = game_data.get("userGames", [{}])[0] patch_version = first_player.get("versionMajor") hotfix_version = first_player.get("versionMinor") if patch_version is not None and hotfix_version is not None: return PatchVersion((patch_version, hotfix_version)) return None @ratelimit.sleep_and_retry @ratelimit.limits(calls=CALLS_PER_SECOND, period=1) def download_game( game_id: GameID, api_token: typing.Optional[str] = None, url: str = "https://open-api.bser.io/v1/games", ) -> requests.Response: """ Downloads the data of a given match, bounded by the API call request limit. """ return _download_game_unlimited(game_id, api_token, url) def _download_game_unlimited( game_id: GameID, api_token: typing.Optional[str] = None, url: str = "https://open-api.bser.io/v1/games", ) -> requests.Response: """ Downloads the data of a given match, IGNORING API call request limit. Only use in the test suite! """ if api_token is None: with open("key.secret", "r") as f: api_token = f.read() headers = {"x-api-key": api_token, "accept": "application/json"} complete_url = f"{url}/{game_id}" logger.debug(f"Requesting game_id=<{game_id}>") response = requests.get(complete_url, headers=headers) return response class PatchDownloader: def __init__( self, *, retry_time_in_seconds: tuple[float, ...] 
= DEFAULT_RETRY_ATTEMPTS, game_filter_predicate: typing.Callable[[GameID], bool] = (lambda _: True), downloader: Downloader = download_game, ): self.retry_time_in_seconds = retry_time_in_seconds self.game_filter_predicate = game_filter_predicate self.downloader = downloader def download_patch( self, starting_game_id: GameID ) -> typing.Iterable[DownloadResult]: # force download of starting game to get patch starting_game = self._attempt_download(starting_game_id, ignore_skip=True) if not isinstance(starting_game, DownloadedGame): raise ValueError() expected_patch = get_patch(starting_game.data) if expected_patch is None: raise ValueError() yield starting_game def yield_seq( game_ids: typing.Iterator[GameID], ) -> typing.Iterable[DownloadResult]: for gid in game_ids: result = self._attempt_download(gid, expected_patch) yield result if isinstance(result, MismatchedPatchDownloadAttempt): break backwards_ids = map( GameID, itertools.count(start=starting_game_id - 1, step=-1) ) forward_ids = map(GameID, itertools.count(start=starting_game_id + 1)) yield from yield_seq(backwards_ids) yield from yield_seq(forward_ids) def _attempt_download( self, game_id: GameID, expected_patch: typing.Optional[PatchVersion] = None, *, ignore_skip: bool = False, ) -> DownloadResult: if not ignore_skip and not self.game_filter_predicate(game_id): logger.info( f"Skipping download of game_id=<{game_id}>" ", reason=<Predicate filtered>" ) return SkippedDownloadAttempt(game_id) max_attempts = len(self.retry_time_in_seconds) attempt = 0 successful = False while not successful and attempt < max_attempts: game_resp = self.downloader(game_id) successful = ( game_resp.status_code == 200 and game_resp.json()["code"] == 200 ) if not successful: time.sleep(self.retry_time_in_seconds[attempt]) attempt += 1 if not successful: logger.info( f"Reached maximum attempts=<{attempt}>" f" for downloading game_id=<{game_id}>" ) return FailedDownloadAttempt(game_id, attempt, game_resp) game_data = game_resp.json() game_patch = get_patch(game_data) if game_patch is None: logger.warning(f"Unable to retrieve patch for game_id=<{game_id}>") if expected_patch is not None and expected_patch != game_patch: return MismatchedPatchDownloadAttempt( game_id, game_patch, expected_patch, game_resp ) return DownloadedGame(game_id, game_data, game_resp)
python
from django.core.management.base import BaseCommand import requests from datetime import date from dateutil.relativedelta import relativedelta from dateutil.rrule import rrule, DAILY from decimal import Decimal from currency import model_choices as mch from currency.models import Rate class Command(BaseCommand): help = 'privat_archive' def handle(self, *args, **options): b = date.today() a = date.today() - relativedelta(years=4) for dt in rrule(DAILY, dtstart=a, until=b): url = f'https://api.privatbank.ua/p24api/exchange_rates?json&date=' \ f'{dt.strftime("%d-%m-%Y").replace("-", ".")}' response = requests.get(url) r_json = response.json() for rate in r_json['exchangeRate']: if 'currency' in rate: if rate['currency'] in {'USD', 'EUR'}: if 'purchaseRate' in rate and 'saleRate' in rate: currency = mch.CURR_USD if rate['currency'] == 'USD' else mch.CURR_EUR rate_kwargs = { 'created': dt, 'currency': currency, 'buy': Decimal(rate['purchaseRate']).__round__(2), 'sale': Decimal(rate['saleRate']).__round__(2), 'source': mch.SR_PRIVAT, } new_rate = Rate(**rate_kwargs) last_rate = Rate.objects.filter(currency=currency, source=mch.SR_PRIVAT).last() if last_rate is None or (new_rate.buy != last_rate.buy or new_rate.sale != last_rate.sale): new_rate.save()
python
from django.db import models
from django.utils.translation import ugettext_lazy as _


class CustomerServiceReminderRel(models.Model):
    # Customer
    customer = models.ForeignKey('customers.Customer', verbose_name=_("Customer"))

    # Service
    service = models.ForeignKey('services.Service', verbose_name=_("Service"))

    # Reminder Fields
    upcoming_reminder_date = models.DateField(_("Upcoming Service Due Reminder Date"))
    past_reminder_date = models.DateField(_("Past Service Due Reminder Date"))

    class Meta:
        unique_together = ('customer', 'service')
        ordering = ('upcoming_reminder_date',)
        verbose_name = _("Customer Service Reminder Rel")
        verbose_name_plural = _("Customer Service Reminder Rels")

    def __str__(self):
        return _("{0} - {1} Reminder Rel").format(self.customer, self.service)


class CustomerProductReminderRel(models.Model):
    # Customer
    customer = models.ForeignKey('customers.Customer', verbose_name=_("Customer"))

    # Product
    product = models.ForeignKey('products.Product', verbose_name=_("Product"))

    # Reminder Fields
    upcoming_reminder_date = models.DateField(_("Upcoming Product Due Reminder Date"))
    past_reminder_date = models.DateField(_("Past Product Due Reminder Date"))

    class Meta:
        unique_together = ('customer', 'product')
        ordering = ('upcoming_reminder_date',)
        verbose_name = _("Customer Product Reminder Rel")
        verbose_name_plural = _("Customer Product Reminder Rels")

    def __str__(self):
        return _("{0} - {1} Reminder Rel").format(self.customer, self.product)
python
#!/usr/bin/env python3 # -*- coding: UTF-8 -*- import numpy as np import matplotlib.pyplot as plt from matplotlib import rcParams params = { 'grid.color': 'k', 'grid.linestyle': 'dashdot', 'grid.linewidth': 0.6, 'font.family': 'Linux Biolinum O', 'font.size': 15, 'axes.facecolor': 'white' } rcParams.update(params) def PlotSummary(alexnet_workload, alexnet_accuracy, halfnet_workload, halfnet_accuracy, alexnet_s1, halfnet_s1, resnet): plt.figure() #plt.scatter(alexnet_workload[0], alexnet_accuracy[0], color='g', marker='x', linewidth='2') plt.plot(alexnet_workload, alexnet_accuracy, 'go-') plt.plot(halfnet_workload, halfnet_accuracy, 'b^-') plt.scatter(resnet[0], resnet[1], color='c', marker='x', linewidth='2') plt.scatter(alexnet_s1[0], alexnet_s1[1], color='m', marker='o', linewidth='1.5') plt.scatter(halfnet_s1[0], halfnet_s1[1], color='r', marker='^', linewidth='1.5') plt.legend(['MV-AlexNet', 'MVA-AlexNet-Half', 'ResNet', 'MV-AlexNet3-pool1', 'MV-AlexNet3-Half-pool1']) # plt.legend(['AlexNet', 'MV-AlexNet', 'MVA-AlexNet-Half', 'ResNet', 'MV-AlexNet3-pool1', 'MV-AlexNet3-Half-pool1']) plt.grid() plt.xlabel('Computational Workload (GMAC)') plt.ylabel('Top1 Accuracy (%)') plt.show() #plt.savefig("MVCNN-Perf.pdf", bbox_inches ='tight') def PlotAcc(nb_views, alexnet_accuracy, halfnet_accuracy, alexnet_workload, halfnet_workload): width=0.4 fig1, ax1 = plt.subplots(figsize=(12, 5)) plt.grid() plt.xlabel('Number of views') plt.xticks(nb_views, ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']) plt.axis([0, 13, 81, 89]) ax1.bar(nb_views-0.5*width, alexnet_accuracy, width, alpha= 0.6, edgecolor='black', color='b') ax1.bar(nb_views-0.5*width, halfnet_accuracy, width, alpha= 0.6, edgecolor='black', color='g') ax1.set_ylabel('Top1 Accuracy (%)') ax2 = ax1.twinx() ax2.bar(nb_views+0.5*width, alexnet_workload, width, alpha= 0.6, hatch="//", edgecolor='black', color='b') ax2.bar(nb_views+0.5*width, halfnet_workload, width, alpha= 0.6, hatch="//", edgecolor='black', color='g') ax2.set_ylabel('Workload (GMAC)') ax1.legend(['MVA', 'MVA-Half']) # ax2.legend(['MVA-Half', 'MVA'], loc='lower right', bbox_to_anchor=(0, 0.5)) #plt.show() plt.savefig("NBView.pdf", bbox_inches ='tight') if __name__ == '__main__': nb_views = np.array([1, 2, 3, 4, 11, 12]) nb_views_all = np.linspace(1,12,12,endpoint=True) alexnet_accuracy = np.array([85.3, 87.4, 87.9, 88, 88.5, 88.6]) alexnet_workload = np.array([0.67, 1.33, 2, 2.66, 7.32, 7.99]) halfnet_accuracy = np.array([81.8, 84.55, 85.2, 85.55, 86.65, 86.65]) halfnet_workload = np.array([0.14, 0.39, 0.58, 0.77, 2.12, 2.31]) halfnet_s1 = np.array([0.30, 84.95]) alexnet_s1 = np.array([0.87, 87.3]) resnet = np.array([3.86, 87.1]) alexnet_accd = np.interp(nb_views_all, nb_views, alexnet_accuracy) halfnet_accd = np.interp(nb_views_all, nb_views, halfnet_accuracy) alexnet_word = 0.67 * nb_views_all halfnet_word = 0.14 * nb_views_all PlotAcc(nb_views_all, alexnet_accd, halfnet_accd, alexnet_word, halfnet_word) # PlotSummary(alexnet_workload = alexnet_workload, # halfnet_workload = halfnet_workload, # alexnet_accuracy = alexnet_accuracy, # halfnet_accuracy = halfnet_accuracy, # alexnet_s1 = alexnet_s1, # halfnet_s1 = halfnet_s1, # resnet = resnet)
python
def climbingLeaderboard(ranked, player):
    # Dense ranking: duplicate leaderboard scores share one rank, so keep only
    # the unique scores, sorted from highest to lowest.
    ranked = list(set(ranked))
    ranked.sort(reverse=True)
    result = list()
    # Start the pointer at the lowest unique score; the player's scores are
    # non-decreasing (as in the problem statement), so the pointer only ever
    # moves towards the top of the board.
    rank = len(ranked) - 1
    for score in player:
        while score > ranked[rank] and rank > 0:
            rank -= 1
        if score < ranked[rank]:
            # player sits just below the score at this position
            result.insert(0, rank + 2)
        else:
            # player ties or beats the score at this position
            result.insert(0, rank + 1)
    # result was built with insert(0, ...), so reverse it to restore player order
    for rank in result[::-1]:
        print(rank)


ranked_count = int(input().strip())
ranked = list(map(int, input().rstrip().split()))
player_count = int(input().strip())
player = list(map(int, input().rstrip().split()))
climbingLeaderboard(ranked, player)
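# Worked example (dense ranking, as in the HackerRank statement): with the
# leaderboard [100, 100, 50, 40, 40, 20, 10] and player scores [5, 25, 50, 120],
# the unique scores are [100, 50, 40, 20, 10] and the function prints the ranks
# 6, 4, 2 and 1. Shown as a comment because the module already consumes stdin:
#
#     climbingLeaderboard([100, 100, 50, 40, 40, 20, 10], [5, 25, 50, 120])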
python
# this file must exist for couchdbkit to sync our design doc # and it's a good place to import signals from . import signals
python
from unittest import TestCase from unittest.case import expectedFailure from gerrit_coverage.condense import condense class TestMissingLinesToComments(TestCase): def test_empty_list(self): self.assertEqual([], condense([])) def test_single_line(self): lines = [('file', 1)] self.assertEqual([('file', (1,1))], condense(lines)) def test_two_seperated_lines(self): lines = [ ('file', 9), ('file', 1), ] expected = [ ('file', (1,1)), ('file', (9,9)), ] self.assertEqual(expected, condense(lines)) def test_multiple_following_lines(self): lines = [ ('file', 3), ('file', 2), ('file', 4), ] expected = [ ('file', (2,4)), ] self.assertEqual(expected, condense(lines)) def test_one_line_one_group(self): lines = [ ('file', 3), ('file', 5), ('file', 4), ('file', 1) ] expected = [ ('file', (1,1)), ('file', (3,5)), ] self.assertEqual(expected, condense(lines)) def test_multiple_groups(self): lines = [ ('file', 8), ('file', 9), ('file', 2), ('file', 1) ] expected = [ ('file', (1,2)), ('file', (8,9)), ] self.assertEqual(expected, condense(lines)) def test_multiple_files(self): lines = [ ('file1', 2), ('file2', 4), ('file2', 3), ('file1', 1) ] expected = [ ('file1', (1,2)), ('file2', (3,4)), ] self.assertEqual(expected, condense(lines))
python
#!/usr/bin/python3 # -*- coding: utf-8 -*- """Application of easylearn """ def run(): from eslearn.GUI.easylearn_main_run import main main()# Build
python
import cv2

Complete = cv2.imread("Velocity2RGB.png")
# cv2.cvtColor returns a new image rather than converting in place; the result
# is not assigned here, so Complete stays in BGR order, which is what the
# b, g, r split below expects when the log line is written as (r, g, b).
cv2.cvtColor(Complete, cv2.COLOR_BGR2RGB)
b, g, r = cv2.split(Complete)
i = 0
v = 0
c = 0
f = open('VelRGBLog.txt','w')
while(True):
    # sample an 8 x 16 grid of pixels spaced 45 px apart and log the RGB value
    # of each one together with its running index
    while i <= 7:
        h = 0
        while h <= 15:
            if h >= 8:
                x = 82 + 45*h
            else:
                x = 20 + 45*h
            y = 20 + 45*i
            f.write(str(v)+' ('+str(r[y,x])+', '+str(g[y,x])+', '+str(b[y,x])+')\n')
            h += 1
            v += 1
        i += 1
    cv2.imshow("Final", Complete)
    k = cv2.waitKey(32)
    if k == 32:
        break
cv2.destroyAllWindows()
f.close()
python
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. import os import sys import glob import math import types import datetime import pickle import numpy as np from collections import OrderedDict import PIL.Image #---------------------------------------------------------------------------- # Convenience wrappers for pickle. def load_pkl(filename): with open(filename, 'rb') as file: return pickle.load(file) def save_pkl(obj, filename): with open(filename, 'wb') as file: pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL) #---------------------------------------------------------------------------- # Image save utils. def adjust_dynamic_range(data, drange_in, drange_out): if drange_in != drange_out: scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0])) bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale) data = data * scale + bias return data def create_image_grid(images, grid_size=None): assert images.ndim == 3 or images.ndim == 4 num, img_w, img_h = images.shape[0], images.shape[-2], images.shape[-3] if grid_size is not None: grid_w, grid_h = tuple(grid_size) else: grid_w = max(int(np.ceil(np.sqrt(num))), 1) grid_h = max((num - 1) / grid_w + 1, 1) #print("images.shape[1:-2]:",(images.shape[-1],)) grid = np.zeros( [grid_h * img_h, grid_w * img_w]+list((images.shape[-1],)), dtype=images.dtype) for idx in range(num): x = (idx % grid_w) * img_w y = (idx // grid_w) * img_h #print("x:",x) #print("y:",y) #print("grid.shape:",grid.shape) grid[y : y + img_h, x : x + img_w,...] = images[idx] return grid def convert_to_pil_image(image, drange=[0,1]): assert image.ndim == 2 or image.ndim == 3 if image.ndim == 3: if image.shape[0] == 1: image = image[0] # grayscale CHW => HW else: pass #image = image.transpose(1, 2, 0) # CHW -> HWC image = adjust_dynamic_range(image, drange, [0,255]) image = np.round(image).clip(0, 255).astype(np.uint8) format = 'RGB' if image.ndim == 3 else 'L' return PIL.Image.fromarray(image, format) def save_image(image, filename, drange=[0,1]): convert_to_pil_image(image, drange).save(filename) def save_image_grid(images, filename, drange=[0,1], grid_size=None): convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename) #---------------------------------------------------------------------------- # Training utils. def rampup(epoch, rampup_length): if epoch < rampup_length: p = max(0.0, float(epoch)) / float(rampup_length) p = 1.0 - p return math.exp(-p*p*5.0) else: return 1.0 def rampdown_linear(epoch, num_epochs, rampdown_length): if epoch >= num_epochs - rampdown_length: return float(num_epochs - epoch) / rampdown_length else: return 1.0 def format_time(seconds): s = int(np.round(seconds)) if s < 60: return '%ds' % (s) elif s < 60*60: return '%dm %02ds' % (s / 60, s % 60) elif s < 24*60*60: return '%dh %02dm %02ds' % (s / (60*60), (s / 60) % 60, s % 60) else: return '%dd %dh %02dm' % (s / (24*60*60), (s / (60*60)) % 24, (s / 60) % 60) #---------------------------------------------------------------------------- # Logging of stdout and stderr to a file. 
class OutputLogger(object): def __init__(self): self.file = None self.buffer = '' def set_log_file(self, filename): assert self.file is None self.file = open(filename, 'wt') if self.buffer is not None: self.file.write(self.buffer) self.buffer = None def write(self, data): if self.file is not None: self.file.write(data) if self.buffer is not None: self.buffer += data def flush(self): if self.file is not None: self.file.flush() class TeeOutputStream(object): def __init__(self, child_streams, autoflush=False): self.child_streams = child_streams self.autoflush = autoflush def write(self, data): for stream in self.child_streams: stream.write(data) if self.autoflush: self.flush() def flush(self): for stream in self.child_streams: stream.flush() output_logger = None def init_output_logging(): global output_logger if output_logger is None: output_logger = OutputLogger() sys.stdout = TeeOutputStream([sys.stdout, output_logger], autoflush=True) sys.stderr = TeeOutputStream([sys.stderr, output_logger], autoflush=True) def set_output_log_file(filename): if output_logger is not None: output_logger.set_log_file(filename) #---------------------------------------------------------------------------- # Reporting of results. def create_result_subdir(result_dir, run_desc): # Select run ID and create subdir. while True: run_id = 0 for fname in glob.glob(os.path.join(result_dir, '*')): try: fbase = os.path.basename(fname) ford = int(fbase[:fbase.find('-')]) run_id = max(run_id, ford + 1) except ValueError: pass result_subdir = os.path.join(result_dir, '%03d-%s' % (run_id, run_desc)) try: os.makedirs(result_subdir) break except OSError: if os.path.isdir(result_subdir): continue raise print("Saving results to", result_subdir) set_output_log_file(os.path.join(result_subdir, 'log.txt')) # Export run details. try: import config with open(os.path.join(result_subdir, 'run.txt'), 'wt') as f: f.write('%-16s%s\n' % ('Date', datetime.datetime.today())) f.write('%-16s%s\n' % ('Device', config.theano_flags['device'])) f.write('%-16s%s\n' % ('Working dir', os.getcwd())) f.write('%-16s%s\n' % ('Executable', sys.argv[0])) f.write('%-16s%s\n' % ('Arguments', ' '.join(sys.argv[1:]))) except: pass # Export config. try: import config with open(os.path.join(result_subdir, 'config.txt'), 'wt') as fout: for k, v in sorted(config.__dict__.items()): if not k.startswith('_'): fout.write("%s = %s\n" % (k, str(v))) except: pass return result_subdir #---------------------------------------------------------------------------- # Network topology info. 
#def print_network_topology_info(layers): # import lasagne # print() # print("%-16s%-28s%-10s%-20s%-20s%s" % ('LayerName', 'LayerType', 'Params', 'OutputShape', 'WeightShape', 'Activation')) # print("%-16s%-28s%-10s%-20s%-20s%s" % (('---',) * 6)) # total_params = 0 # for l in lasagne.layers.get_all_layers(layers): # type_str = type(l).__name__ # nparams = sum(np.prod(p.get_value().shape) for p in l.get_params(trainable=True)) # total_params += nparams # outshape = lasagne.layers.get_output_shape(l) # try: # weights = l.W.get_value() # except AttributeError: # try: # weights = l.W_param.get_value() # except AttributeError: # weights = np.zeros(()) # weight_str = shape_to_str(weights.shape) # act_str = '' if not hasattr(l, 'nonlinearity') else l.nonlinearity.__name__ if isinstance(l.nonlinearity, types.FunctionType) else type(l.nonlinearity).__name__ # print("%-16s%-28s%-10d%-20s%-20s%s" % (l.name, type_str, nparams, shape_to_str(outshape), weight_str, act_str)) # print("%-16s%-28s%-10s%-20s%-20s%s" % (('---',) * 6)) # print("%-16s%-28s%-10d%-20s%-20s%s" % ('Total', '', total_params, '', '', '')) # print() def shape_to_str(shape): str = ['%d' % v if v else '?' for v in shape] return ', '.join(str) if len(str) else '' #---------------------------------------------------------------------------- # Locating results. def locate_result_subdir(run_id): if isinstance(run_id, str) and os.path.isdir(run_id): return run_id searchdirs = [] searchdirs += ['.'] searchdirs += ['results'] searchdirs += ['networks'] import config for searchdir in searchdirs: dir = os.path.join(config.result_dir, searchdir, str(run_id)) if os.path.isdir(dir): return dir dirs = glob.glob(os.path.join(config.result_dir, searchdir, '%s-*' % str(run_id))) if len(dirs) == 1 and os.path.isdir(dirs[0]): return dirs[0] raise IOError('Cannot locate result subdir for run', run_id) def list_network_pkls(result_subdir): pkls = sorted(glob.glob(os.path.join(result_subdir, 'network-*.pkl'))) if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl': pkls.append(pkls[0]) del pkls[0] return pkls def locate_network_pkl(result_subdir, snapshot=None): if isinstance(snapshot, str) and os.path.isfile(snapshot): return snapshot pkls = list_network_pkls(result_subdir) if len(pkls) >= 1 and snapshot is None: return pkls[-1] for pkl in pkls: try: name = os.path.splitext(os.path.basename(pkl))[0] number = int(name.split('-')[-1]) if number == snapshot: return pkl except ValueError: pass except IndexError: pass raise IOError('Cannot locate network pkl for snapshot', snapshot) #----------------------------------------------------------------------------
python
# credits to @NotThatMF on telegram for chiaki fast api # well i also borrowed the base code from him from pyrogram import Client, filters from pyrogram.types import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, Message from .. import BOT_NAME, HELP_DICT, TRIGGERS as trg from ..utils.data_parser import get_wo, get_wols from ..utils.helper import check_user @Client.on_message(filters.command(["watch", f"watch{BOT_NAME}"], prefixes=trg)) async def get_watch_order(client, message: Message): """Get List of Scheduled Anime""" x = message.text.split(" ", 1)[1] user = message.from_user.id data = get_wols(x) msg = f"Found related animes for the query {x}" buttons = [] for i in data: buttons.append([InlineKeyboardButton(str(i[1]), callback_data=f"watch_{i[0]}_{x}_{user}")]) await message.reply_text(msg, reply_markup=InlineKeyboardMarkup(buttons)) @Client.on_callback_query(filters.regex(pattern=r"watch_(.*)")) @check_user async def watch_(client, cq: CallbackQuery): kek, id_, qry, user = cq.data.split("_") msg = get_wo(int(id_)) buttons = [[InlineKeyboardButton("Back", callback_data=f"wol_{qry}_{user}")]] await cq.edit_message_text(msg, reply_markup=InlineKeyboardMarkup(buttons)) @Client.on_callback_query(filters.regex(pattern=r"wol_(.*)")) @check_user async def wls(client, cq: CallbackQuery): kek, qry, user = cq.data.split("_") data = get_wols(qry) msg = f"Found related animes for the query {qry}" buttons = [] for i in data: buttons.append([InlineKeyboardButton(str(i[1]), callback_data=f"watch_{i[0]}_{qry}_{user}")]) await cq.edit_message_text(msg, reply_markup=InlineKeyboardMarkup(buttons)) HELP_DICT["watch"] = """Use /watch cmd to get watch order of searched anime **Usage:** `/watch Detective Conan` `!watch Naruto`"""
python
#!/usr/bin/env python #-*- coding: utf-8 -*- #This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software. #The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied. """ Defines a class of exceptions used to denote a false state of being for configuration. """ class ConfigurationError(Exception): """ This exception denotes that something went wrong in the configuration. It is mostly a marker class, but also provides the type of configuration in which something went wrong. """ def __init__(self, message, configuration_type): """ Creates a new ConfigurationError. :param message: The message describing the error that occurred. :param configuration_type: The configuration type with which the error occurred. """ #Prepend the configuration type before the error message. super(ConfigurationError, self).__init__("{configuration_type}: {message}".format(configuration_type=configuration_type, message=message)) self.configuration_type = configuration_type #Also store it here for debugging purposes.
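# Brief usage sketch ("preferences" is just an illustrative configuration type):
if __name__ == "__main__":
    try:
        raise ConfigurationError("missing required key 'resolution'", "preferences")
    except ConfigurationError as error:
        print(error)                    # preferences: missing required key 'resolution'
        print(error.configuration_type) # preferences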
python
class Solution(object): def solveNQueens(self, n): """ :type n: int :rtype: List[List[str]] """ if n<1: return [] self.result=[] self.cols=set() self.pie=set() self.na=set() self.DFS(n,0,[]) return self._generate_result(n) def DFS(self,n,row,cur_state): # recursion terminator if row>=n: self.result.append(cur_state) return for col in range(n): if col in self.cols or row+col in self.pie or row-col in self.na: # not existential continue # update the flags self.cols.add(col) self.pie.add(row+col) self.na.add(row-col) self.DFS(n,row+1,cur_state+[col]) self.cols.remove(col) self.pie.remove(row+col) self.na.remove(row-col) def _generate_result(self,n): board=[] for res in self.result: for i in res: board.append("."*i+'Q'+'.'*(n-i-1)) return [board[i:i+n] for i in range(0,len(board),n)]
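# Quick usage sketch appended for illustration: for n = 4 the solver yields the
# two classic 4-queens placements.
if __name__ == "__main__":
    for board in Solution().solveNQueens(4):
        print(board)
    # expected output:
    # ['.Q..', '...Q', 'Q...', '..Q.']
    # ['..Q.', 'Q...', '...Q', '.Q..']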
python
# Enter your code for "Degree Distribution" here.
import csv

degrees = []
students = []

for l in csv.DictReader(open("degrees.csv")):
    degrees.append(l)

for l in csv.DictReader(open("students.csv")):
    students.append(l)

# order students by score, highest first
students = sorted(students, key=lambda x: float(x["score"]))
students.reverse()

print(students)
python
''' Copyright 2022 Airbus SAS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' # coding: utf-8 from setuptools import setup, find_packages from datetime import date import os def __path(filename): ''''Build a full absolute path using the given filename :params filename : filename to ass to the path of this module :returns: full builded path ''' return os.path.join(os.path.dirname(__file__), filename) with open('README.md') as f: readme = f.read() with open('LICENSE') as f: license = f.read() # Manage module version using date today = date.today() # formating the date as yy.mm.dd version = today.strftime('%y.%m.%d') # check if the version.info file is existing (add a suffix to the version # in case of multiple release in a day) # it is intended that the version.info file contain only one line with the # suffix information suffix = '' if os.path.exists(__path('version.info')): suffix = open(__path('version.info')).read().strip() if len(suffix) > 0: version = f'{version}.{suffix}' setup( name='sostrades_authapi', version=version, description='Repository of SoS Trades authentication api', long_description=readme, author='Airbus SAS', url='https://idas661.eu.airbus.corp/sostrades/sostrades_authapi.git', license=license, packages=find_packages(exclude=('tests', 'docs')), include_package_data=True, python_requires='==3.7', install_requires=[ 'python-dotenv==0.12.0', 'Flask==1.1.1', 'Flask-Cors==3.0.8', 'itsdangerous==1.1.0', 'passlib==1.7.2', 'requests-toolbelt==0.9.1', 'python3-saml==1.9.0' ] )
python
import pytest from my_lib import add_elements def test_wrong_type(): with pytest.raises(TypeError): add_elements([1, 2], 6)
python
import FWCore.ParameterSet.Config as cms # Make one TrackCand for each seeder import FastSimulation.Tracking.TrackCandidateProducer_cfi hltL3TrackCandidateFromL2OIState = FastSimulation.Tracking.TrackCandidateProducer_cfi.trackCandidateProducer.clone( src = cms.InputTag("hltL3TrajSeedOIState"), SplitHits = cms.bool(False), maxSeedMatchEstimator = cms.untracked.double(200) ) hltL3TrackCandidateFromL2OIHit = hltL3TrackCandidateFromL2OIState.clone() hltL3TrackCandidateFromL2OIHit.src = "hltL3TrajSeedOIHit" hltL3TrackCandidateFromL2IOHit = hltL3TrackCandidateFromL2OIState.clone() hltL3TrackCandidateFromL2IOHit.src = "hltL3TrajSeedIOHit" # CKFTrackCandidateMaker hltMuCkfTrackCandidates = FastSimulation.Tracking.TrackCandidateProducer_cfi.trackCandidateProducer.clone() hltMuCkfTrackCandidates.src = cms.InputTag("hltMuTrackSeeds") hltMuCkfTrackCandidates.SplitHits = False # CTF track fit with material import RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cfi hltMuCtfTracks = RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cfi.ctfWithMaterialTracks.clone() hltMuCtfTracks.src = 'hltMuCkfTrackCandidates' hltMuCtfTracks.TTRHBuilder = 'WithoutRefit'
python
from y2015.day02 import * def test_part1(): assert part1("2x3x4") == 58 assert part1("1x1x10") == 43 def test_part2(): assert part2("2x3x4") == 34 assert part2("1x1x10") == 14
python
# namedtuple provides several useful attributes and methods for working with
# subclasses and instances. All of these built-in attributes have names prefixed
# with an underscore (_), which by convention marks private attributes in most
# Python programs. For namedtuple, however, the prefix exists to protect the
# names from colliding with user-supplied field names.
import collections

Person = collections.namedtuple('Person', 'name age')

bob = Person(name='Bob', age=30)
print('Representation:', bob)
print('Fields:', bob._fields)

"""
output:
Representation: Person(name='Bob', age=30)
Fields: ('name', 'age')
"""
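# Two more of the underscore-prefixed helpers mentioned above, added here as a
# small extension of the example: _asdict() returns a mapping of field names to
# values, and _replace() builds a modified copy (namedtuples are immutable).
print('As dict:', bob._asdict())
print('Older Bob:', bob._replace(age=31))   # Person(name='Bob', age=31)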
python
#!/usr/bin/python import numpy as np from math import atan2, sin, cos, pi class DiffDriveController(): """ Class used for controlling the robot linear and angular velocity """ def __init__(self, max_speed, max_omega): # TODO for Student: Specify these parameters self.kp= 0.5 #0.3 self.ka= 2.0 #4 self.kb= 0.001 #0.01 self.MAX_SPEED = max_speed self.MAX_OMEGA = max_omega self.target_rho = 1.0 def update_target_rho(self, new_rho): self.target_rho = new_rho def compute_vel(self, state, goal): """ Function that computes the desired outputs given the state and goal Inputs: state - a numpy vector of size 3 by 1 with components (x,y,theta) goal - a numpy vector of size 2 by 1 specifying the location of the goal Outputs: a tuple with 3 elements v - a number specifying the forward speed (in m/s) of the robot (should be no more than max_speed) omega - a number specifying the angular velocity (in rad/s) of the robot (should be no more than max_omega) done - a boolean value specifying if the robot has reached its goal (or is close enough """ # YOUR CODE HERE #print "goal: ", goal #print "state: ", state dx = goal[0] - state[0] dy = goal[1] - state[1] theta = state[2] rho = np.sqrt(dx**2 + dy**2) pos_beta = atan2(dy,dx) #NOTE, I CHANGED THE DEFINITION BETA TO BE +ATAN2, SO NOW kb > 0 alpha = pos_beta - theta if(alpha >= pi): alpha -= 2*pi elif(alpha < -pi): alpha += 2*pi v = self.kp * rho if(v < -self.MAX_SPEED): v = -self.MAX_SPEED elif(v > self.MAX_SPEED): v = self.MAX_SPEED w = self.ka*alpha + self.kb*pos_beta if(w < -self.MAX_OMEGA): w = -self.MAX_OMEGA elif(w > self.MAX_OMEGA): w = self.MAX_OMEGA #~ if(v < 0.15): #~ v = 0.15 #~ if(abs(w) < 0.5): #~ v = 0.15 #~ else: #~ v = 0.0 #~ if(w < 0): #~ w = -1.0 #~ else: #~ w = 1.0 done = False if(rho < self.target_rho): v = 0.0 w = 0.0 done = True return v,w,done, alpha, pos_beta
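# Minimal usage sketch (values are illustrative): drive from the origin, facing
# along +x, toward a goal one metre ahead and slightly to the left.
if __name__ == '__main__':
    controller = DiffDriveController(max_speed=0.3, max_omega=1.0)
    state = np.array([0.0, 0.0, 0.0])   # x, y, theta
    goal = np.array([1.0, 0.2])         # x, y of the target
    v, omega, done, alpha, beta = controller.compute_vel(state, goal)
    print("v =", v, "omega =", omega, "done =", done)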
python
import os import pathlib from glob import glob import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from tensorflow.keras import Sequential from tensorflow.keras.layers import * from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing.image import * from tensorflow_hub import KerasLayer SEED = 999 def build_network(base_model, classes): return Sequential([ base_model, Dense(classes), Softmax() ]) def load_images_and_labels(image_paths, target_size=(256, 256)): images = [] labels = [] for image_path in image_paths: image = load_img(image_path, target_size=target_size) image = img_to_array(image) label = image_path.split(os.path.sep)[-2] images.append(image) labels.append(label) return np.array(images), np.array(labels) dataset_path = (pathlib.Path.home() / '.keras' / 'datasets' / 'flowers17') files_pattern = (dataset_path / 'images' / '*' / '*.jpg') image_paths = [*glob(str(files_pattern))] CLASSES = {p.split(os.path.sep)[-2] for p in image_paths} X, y = load_images_and_labels(image_paths) X = X.astype('float') / 255.0 y = LabelBinarizer().fit_transform(y) (X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, random_state=SEED) model_url = ('https://tfhub.dev/google/imagenet/' 'resnet_v1_152/feature_vector/4') base_model = KerasLayer(model_url, input_shape=(256, 256, 3)) base_model.trainable = False model = build_network(base_model, len(CLASSES)) BATCH_SIZE = 32 augmenter = ImageDataGenerator(horizontal_flip=True, rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.2, zoom_range=0.2, fill_mode='nearest') train_generator = augmenter.flow(X_train, y_train, BATCH_SIZE) EPOCHS = 20 model.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=1e-3), metrics=['accuracy']) history = model.fit(train_generator, steps_per_epoch=len(X_train) // BATCH_SIZE, validation_data=(X_test, y_test), epochs=EPOCHS) result = model.evaluate(X_test, y_test) print(f'Test accuracy: {result[1]}')
python
#!/usr/bin/env python2 # -*- coding: utf-8 -*- # setting up libraries used in the program from __future__ import print_function from dronekit import connect import exceptions import socket import time import sys import os # clear screen os.system("clear") try: # print out the instruction print ("Take RC car's controller raw reading.") # connect to pixhawk print ("\nWith baudrate = %d." % 57600) print ("Connect to serial port = %s." % "/dev/ttyS0") vehicle = connect("/dev/ttyS0", heartbeat_timeout = 30, baud = 57600) # print out instruction print ("\nTo end the program press [CTRL] + [c].\n") # take 3 [s] break time.sleep(3) # infinity loop while True: # reading rc input from channel 1 to channel 12 a = vehicle.channels['1'] b = vehicle.channels['2'] c = vehicle.channels['3'] d = vehicle.channels['4'] e = vehicle.channels['5'] f = vehicle.channels['6'] g = vehicle.channels['7'] h = vehicle.channels['8'] # print out the input print ("CH1 %s, CH2 %s, CH3 %s, CH4 %s, CH5 %s, CH6 %s, CH7 %s, CH8 %s." % (a, b, c, d, e, f, g, h)) # sleep for 3 second time.sleep(3) except KeyboardInterrupt: print ("\n\n[CTRL] + [c] detected.") finally: print ("Program is terminated.") vehicle.close() quit()
python
"""Turn objects from the hyperbolic module into matplotlib figures. """ import copy import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle, Arc, PathPatch, Rectangle from matplotlib.collections import LineCollection, PolyCollection, EllipseCollection from matplotlib.transforms import Affine2D from matplotlib.path import Path from geometry_tools import hyperbolic, utils, projective from geometry_tools.hyperbolic import Model #I played around with this a bit, but it's an eyeball test #TBH. Determines the radius at which we start approximating circular #arcs with straight lines. RADIUS_THRESHOLD = 80 #how far apart points can be before we decide that we ordered the #polygon wrong DISTANCE_THRESHOLD = 1e-4 #the default amount of "room" we leave outside the boundary of our model DRAW_NEIGHBORHOOD = 0.1 #when drawing objects "to infinity", how far offscreen we draw them #(as a % of the width/height) OFFSCREEN_FACTOR = 0.1 #this is a bit unpythonic since these are meant to be constants def default_model_limits(model): if model == Model.POINCARE or model == Model.KLEIN: return ((-1 - DRAW_NEIGHBORHOOD, 1 + DRAW_NEIGHBORHOOD), (-1 - DRAW_NEIGHBORHOOD, 1 + DRAW_NEIGHBORHOOD)) if model == Model.HALFSPACE: return ((-6., 6.), (-1 * DRAW_NEIGHBORHOOD, 8.)) class DrawingError(Exception): """Thrown if we try and draw an object in a model which we haven't implemented yet. """ pass class ProjectiveDrawing: def __init__(self, figsize=8, ax=None, fig=None, xlim=(-5., 5.), ylim=(-5., 5.), transform=None): if ax is None or fig is None: fig, ax = plt.subplots(figsize=(figsize, figsize)) self.xlim, self.ylim = xlim, ylim self.width = self.xlim[1] - self.xlim[0] self.height = self.ylim[1] - self.ylim[0] self.ax, self.fig = ax, fig plt.tight_layout() self.ax.axis("off") self.ax.set_aspect("equal") self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) self.transform = projective.identity(2) if transform is not None: self.transform = transform def draw_point(self, point, **kwargs): pointlist = self.transform @ point.flatten_to_unit() default_kwargs = { "color" : "black", "marker": "o", "linestyle":"none" } for key, value in kwargs.items(): default_kwargs[key] = value x, y = pointlist.affine_coords().T plt.plot(x, y, **default_kwargs) def draw_proj_segment(self, segment, **kwargs): seglist = self.transform @ segment.flatten_to_unit() default_kwargs = { "color":"black", "linewidth":1 } for key, value in kwargs.items(): default_kwargs[key] = value lines = LineCollection(seglist.endpoint_affine_coords(), **default_kwargs) self.ax.add_collection(lines) def draw_polygon(self, polygon, **kwargs): default_kwargs = { "facecolor": "none", "edgecolor": "black" } for key, value in kwargs.items(): default_kwargs[key] = value polylist = self.transform @ polygon.flatten_to_unit() polys = PolyCollection(polylist.affine_coords(), **default_kwargs) self.ax.add_collection(polys) def set_transform(self, transform): self.transform = transform def add_transform(self, transform): self.transform = transform @ self.transform def precompose_transform(self, transform): self.transform = self.transform @ transform class HyperbolicDrawing(ProjectiveDrawing): def __init__(self, figsize=8, ax=None, fig=None, facecolor="aliceblue", edgecolor="lightgray", linewidth=1, model=Model.POINCARE, xlim=None, ylim=None, transform=None): if ax is None or fig is None: fig, ax = plt.subplots(figsize=(figsize, figsize)) default_x, default_y = default_model_limits(model) self.xlim, self.ylim = xlim, ylim if xlim is None: 
self.xlim = default_x if ylim is None: self.ylim = default_y self.width = self.xlim[1] - self.xlim[0] self.height = self.ylim[1] - self.ylim[0] self.left_infinity = self.xlim[0] - OFFSCREEN_FACTOR * self.width self.right_infinity = self.xlim[1] + OFFSCREEN_FACTOR * self.width self.up_infinity = self.ylim[1] + OFFSCREEN_FACTOR * self.height self.down_infinity = self.ylim[0] - OFFSCREEN_FACTOR * self.height self.h_infinity = self.right_infinity - self.left_infinity self.v_infinity = self.up_infinity - self.down_infinity self.ax, self.fig = ax, fig plt.tight_layout() self.ax.axis("off") self.ax.set_aspect("equal") self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) self.facecolor = facecolor self.edgecolor = edgecolor self.linewidth = linewidth self.model = model self.transform = hyperbolic.identity(2) if transform is not None: self.transform = transform def draw_plane(self, **kwargs): default_kwargs = { "facecolor": self.facecolor, "edgecolor": self.edgecolor, "linewidth": self.linewidth, "zorder": 0 } for key, value in kwargs.items(): default_kwargs[key] = value if self.model == Model.POINCARE or self.model == Model.KLEIN: plane = Circle((0., 0.), 1.0, **default_kwargs) self.ax.add_patch(plane) elif self.model == Model.HALFSPACE: xmin, xmax = self.xlim ymin, ymax = self.ylim plane = Rectangle((self.left_infinity, 0.), self.h_infinity, self.up_infinity, facecolor=self.facecolor, edgecolor=self.edgecolor, zorder=0, **kwargs) self.ax.add_patch(plane) else: raise DrawingError( "Drawing in model '{}' is not implemented".format(self.model) ) def get_vertical_segment(self, endpts): ordered_endpts = endpts[:] if (np.isnan(endpts[0,0]) or endpts[0, 0] < self.left_infinity or endpts[0, 0] > self.right_infinity): ordered_endpts = np.flip(endpts, axis=0) if (np.isnan(ordered_endpts[1, 0]) or ordered_endpts[1, 0] < self.left_infinity or ordered_endpts[1, 0] > self.right_infinity): ordered_endpts[1, 1] = self.up_infinity ordered_endpts[1, 0] = ordered_endpts[0, 0] return ordered_endpts def draw_geodesic(self, segment, radius_threshold=RADIUS_THRESHOLD, **kwargs): seglist = self.transform @ segment.flatten_to_unit() default_kwargs = { "color":"black", "linewidth":1 } for key, value in kwargs.items(): default_kwargs[key] = value if self.model not in [Model.KLEIN, Model.POINCARE, Model.HALFSPACE]: raise DrawingError( "Drawing geodesics in model '{}' is not implemented".format( self.model) ) if self.model == Model.KLEIN: lines = LineCollection(seglist.endpoint_coords(self.model), **default_kwargs) self.ax.add_collection(lines) return centers, radii, thetas = seglist.circle_parameters(model=self.model, degrees=True) for center, radius, theta, segment in zip(centers, radii, thetas, seglist): if np.isnan(radius) or radius > radius_threshold: arcpath = self.get_straight_arcpath(segment) arc = PathPatch(arcpath, **default_kwargs) self.ax.add_patch(arc) continue arc = Arc(center, radius * 2, radius * 2, theta1=theta[0], theta2=theta[1], **kwargs) self.ax.add_patch(arc) def draw_point(self, point, **kwargs): pointlist = self.transform @ point.flatten_to_unit() default_kwargs = { "color" : "black", "marker": "o", "linestyle":"none" } for key, value in kwargs.items(): default_kwargs[key] = value x, y = pointlist.coords(self.model).T plt.plot(x, y, **default_kwargs) def get_circle_arcpath(self, center, radius, theta): """Get a matplotlib path object for the circular arc representing this geometric object. 
""" cx, cy = center transform = Affine2D() transform.scale(radius) transform.translate(cx, cy) return transform.transform_path(Path.arc(theta[0], theta[1])) def get_straight_arcpath(self, segment): endpts = segment.endpoint_coords(self.model) if self.model == Model.POINCARE: return Path(endpts, [Path.MOVETO, Path.LINETO]) if self.model == Model.HALFSPACE: v_endpts = self.get_vertical_segment(endpts) return Path(v_endpts, [Path.MOVETO, Path.LINETO]) def get_polygon_arcpath(self, polygon, radius_threshold=RADIUS_THRESHOLD, distance_threshold=DISTANCE_THRESHOLD): vertices = np.zeros((0, 2)) codes = np.zeros((0,)) first_segment = True polysegs = polygon.get_edges() centers, radii, thetas = polysegs.circle_parameters(model=self.model) for center, radius, theta, segment in zip(centers, radii, thetas, polysegs): if not np.isnan(radius) and radius < radius_threshold: g_path = self.get_circle_arcpath(center, radius, theta) else: g_path = self.get_straight_arcpath(segment) g_verts = g_path.vertices p1, p2 = segment.get_end_pair(as_points=True) p1_opp_dist = np.linalg.norm(p1.coords(self.model) - g_verts[-1]) p2_opp_dist = np.linalg.norm(p2.coords(self.model) - g_verts[0]) if (p1_opp_dist < distance_threshold or p2_opp_dist < distance_threshold): g_verts = g_verts[::-1] g_codes = copy.deepcopy(g_path.codes) if not first_segment: g_codes[0] = Path.LINETO vertices = np.concatenate((vertices, g_verts), axis=-2) codes = np.concatenate((codes, g_codes)) first_segment = False return Path(vertices, codes) def draw_polygon(self, polygon, **kwargs): default_kwargs = { "facecolor": "none", "edgecolor": "black" } for key, value in kwargs.items(): default_kwargs[key] = value polylist = self.transform @ polygon.flatten_to_unit() if self.model == Model.KLEIN: polys = PolyCollection(polylist.coords("klein"), **default_kwargs) self.ax.add_collection(polys) elif self.model == Model.POINCARE or self.model == Model.HALFSPACE: for poly in polylist: path = self.get_polygon_arcpath(poly) self.ax.add_patch(PathPatch(path, **default_kwargs)) else: raise DrawingError( "Drawing polygons in model '{}' is not implemented".format( self.model) ) def draw_horosphere(self, horoball, **kwargs): default_kwargs = { "facecolor": "none", "edgecolor": "black" } for key, value in kwargs.items(): default_kwargs[key] = value horolist = self.transform @ horoball.flatten_to_unit() if self.model == Model.POINCARE or self.model == Model.HALFSPACE: center, radius = horolist.sphere_parameters(model=self.model) good_indices = ((radius < RADIUS_THRESHOLD) & ~np.isnan(radius) & ~(np.isnan(center).any(axis=-1))) circle_ctrs = center[good_indices] circle_radii = radius[good_indices] if len(circle_ctrs) > 0: self.ax.add_collection( EllipseCollection(circle_radii * 2, circle_radii * 2, 0, units="xy", offsets=circle_ctrs, transOffset=self.ax.transData, **default_kwargs) ) if self.model == Model.HALFSPACE: for horoball in horolist[~good_indices]: height = horoball.ref_coords(self.model)[1] h_rect = Rectangle((self.left_infinity, height), self.h_infinity, self.up_infinity - height, **default_kwargs) self.ax.add_patch(h_rect) else: raise DrawingError( "Drawing horospheres in model '{}' is not implemented.".format( self.model) ) def draw_horoarc(self, horoarc, **kwargs): default_kwargs = { "facecolor": "none", "edgecolor": "black" } for key, value in kwargs.items(): default_kwargs[key] = value if self.model != Model.POINCARE and self.model != Model.HALFSPACE: raise DrawingError( "Drawing horoarcs in model '{}' is not implemented.".format( self.model) ) 
horolist = self.transform @ horoarc.flatten_to_unit() endpts = horolist.endpoint_coords(model=self.model) centers, radii, thetas = horolist.circle_parameters(model=self.model) for center, radius, theta, endpt in zip(centers, radii, thetas, endpts): if np.isnan(radius) or radius > RADIUS_THRESHOLD: path = Path(endpt, [Path.MOVETO, Path.LINETO]) pathpatch = PathPatch(path, **default_kwargs) self.ax.add_patch(pathpatch) else: arc = Arc(center, radius * 2, radius * 2, theta1=theta[0], theta2=theta[1], **default_kwargs) self.ax.add_patch(arc) def draw_boundary_arc(self, boundary_arc, **kwargs): default_kwargs = { "edgecolor": "black", "linewidth": 3 } for key, value in kwargs.items(): default_kwargs[key] = value arclist = self.transform @ boundary_arc.flatten_to_unit() if self.model == Model.POINCARE or self.model == Model.KLEIN: centers, radii, thetas = arclist.circle_parameters(model=self.model) for center, radius, theta in zip(centers, radii, thetas): arc = Arc(center, radius * 2, radius * 2, theta1=theta[0], theta2=theta[1], **default_kwargs) self.ax.add_patch(arc) elif self.model == Model.HALFSPACE: endpoints = arclist.endpoint_coords(self.model, ordered=True) endpoints[..., 1] = 0. endpoints[np.isnan(endpoints)[..., 0], 0] = np.inf # first, draw all the lines where we go left to right leftright = (endpoints[..., 0, 0] < endpoints[..., 1, 0]) leftright_endpts = endpoints[leftright] leftright_arcs = LineCollection(leftright_endpts, **default_kwargs) self.ax.add_collection(leftright_arcs) # then, draw all the lines that wrap around infinity infty_right = np.array([self.right_infinity, 0.]) infty_left = np.array([self.left_infinity, 0.]) to_right = np.broadcast_to(infty_right, endpoints[~leftright, 0].shape) left_to = np.broadcast_to(infty_left, endpoints[~leftright, 1].shape) coords1 = np.stack([endpoints[~leftright, 0], to_right], axis=-2) coords2 = np.stack([endpoints[~leftright, 1], left_to], axis=-2) right_arcs = LineCollection(coords1, **default_kwargs) left_arcs = LineCollection(coords2, **default_kwargs) self.ax.add_collection(right_arcs) self.ax.add_collection(left_arcs) else: raise DrawingError( "Drawing boundary arcs in model '{}' is not implemented.".format( self.model) ) def show(self): plt.show()
python
""" Contains all function related to the genetic algorithm itself. E.g. selection, crossover, and mutation. This is called by the main.py module """ import copy import numpy as np import random from neural_network import apply_neuron_constraints def crossover(parents, gen_size): # If anything goes wrong, this function is complicated enough to warrant inspection new_gen = [] for i in range(gen_size): parent_1 = parents[np.random.randint(0,len(parents))] parent_2 = parents[np.random.randint(0,len(parents))] selected_parents = [parent_1, parent_2] template = copy.deepcopy(parent_1) # it's crucial that we copy parent 1 for this function to work for l in range(len(template["thetas"])): for n in range(len(template["thetas"][l].transpose())): choice = random.choice([0,1]) if choice: # Swap in parent 2 genes template["thetas"][l].transpose()[n] = selected_parents[choice]["thetas"][l].transpose()[n] template["thresholds"][l][n] = selected_parents[choice]["thresholds"][l][n] new_gen.append(template) return new_gen def mutate(networks, p_m): for i in range(len(networks)): for l in range(len(networks[i]["thetas"])): for n in range(len(networks[i]["thetas"][l].transpose())): for w in range(len(networks[i]["thetas"][l].transpose()[n])): if random.uniform(0,1) < p_m: new_value = random.randint(-2, 2) # todo verify this makes sense networks[i]["thetas"][l].transpose()[n][w] = new_value if random.uniform(0, 1) < p_m: if l < 3: networks[i]["thresholds"][l][n] = random.randint(-4, 3) else: networks[i]["thresholds"][l][n] = random.randint(-2, 1) apply_neuron_constraints(networks[i]) return networks def select_best(population, scores, num_parents): sort = sorted(range(len(scores)), key=lambda k: scores[k]) selected = [population[i] for i in sort[0:num_parents]] return selected
python
import argparse from spiderpy import SpiderApi def main(): """Main function.""" parser = argparse.ArgumentParser(description="Run some live tests against the API") parser.add_argument( 'username', type=str, help="Your email address") parser.add_argument( 'password', type=str, help="Your password") args = parser.parse_args() username = args.username password = args.password api = SpiderApi(username, password, 5) unique_id = None # Get thermostats print("Get thermostats") thermostats = api.get_thermostats() print("Number of thermostats: ", len(thermostats)) print("Listing thermostats:") for thermostat in thermostats: print(thermostat) temp_target_curr = thermostat.target_temperature temp_list = [(temp_target_curr - 1), temp_target_curr] for temp in temp_list: print("Set temperature to " + str(temp) + " degrees") thermostat.set_temperature(temp) assert (temp == thermostat.target_temperature), "Failed to set target temperature" if thermostat.has_operation_mode: operation_mode_list = thermostat.operation_values if operation_mode_list[-1] != thermostat.operation_mode: operation_mode_list.reverse() for operation_mode in operation_mode_list: print("Set to " + operation_mode) thermostat.set_operation_mode(operation_mode) assert thermostat.operation_mode == operation_mode, "Failed to set operation mode" if thermostat.has_fan_mode: fan_speed_curr = thermostat.current_fan_speed print("Current fan speed: " + str(fan_speed_curr)) speed_list = thermostat.fan_speed_values speed_list.reverse() for speed in speed_list: print("Set fan speed to " + speed) speed_set = thermostat.set_fan_speed(speed) assert speed_set & (thermostat.current_fan_speed == speed), "Failed to set fan speed" if fan_speed_curr is not None: print("Set fan speed back to " + str(fan_speed_curr)) thermostat.set_fan_speed(fan_speed_curr) if unique_id is not None: print("Retrieve by id") thermostat = api.get_thermostat(unique_id) print(thermostat) # Get power plugs unique_id = None print("Get power plugs") power_plugs = api.get_power_plugs() print("Number of power plugs: ", len(power_plugs)) print("Listing power plugs:") for power_plug in power_plugs: print(power_plug) print("Turn on the power plug") #power_plug.turn_on() if unique_id is not None: print("Retrieve by id") power_plug = api.get_power_plug(unique_id) print(power_plug) if __name__ == '__main__': main()
python
from django import forms from django.utils.translation import ugettext as _ from django.core.exceptions import ValidationError from django.contrib.auth.models import User from datetimewidget.widgets import DateTimeWidget from .models import Event, Proposal, Activity class CustomDateTimeWidget(DateTimeWidget): def format_output(self, *args, **kwargs): return super(CustomDateTimeWidget, self)\ .format_output(*args, **kwargs).replace( '<i class="icon-th"></i>', '<i class="icon-th hide"></i>') class CustomTimeInputWidget(forms.TimeInput): input_type = 'time' class EventForm(forms.ModelForm): class Meta: model = Event exclude = ['author', 'jury'] widgets = { 'title': forms.TextInput(attrs={'class': 'inline-input'}), 'closing_date': CustomDateTimeWidget(attrs={ 'id': 'id_closing_date', 'class': 'inline-input', 'placeholder': 'Closing Date' }), } class InviteForm(forms.ModelForm): email = forms.EmailField(label=_('User email')) class Meta: model = Event fields = [] def add_to_jury(self): email = self.cleaned_data.get('email') try: user = User.objects.get(email=email) except User.DoesNotExist: raise ValidationError( _(u'The "%s" are not a Speakerfight user. ' u'For now, we just allow already joined users.') % email) if self.instance.jury.users.filter(pk=user.pk).exists(): raise ValidationError( _(u'The "@%s" already is being part of this jury.') % user) self.instance.jury.users.add(user) class ProposalForm(forms.ModelForm): description = forms.CharField(required=True, widget=forms.Textarea()) class Meta: model = Proposal exclude = [ 'event', 'author', 'track', 'rate', 'is_approved', 'track_order', 'activity_type', 'start_timetable', 'end_timetable', ] widgets = { 'title': forms.TextInput(attrs={'class': 'inline-input'}), 'slides_url': forms.TextInput( attrs={ 'class': 'form-control', 'placeholder': 'jane_doe/talk', }), } class ActivityForm(forms.ModelForm): # Removing the Proposal type from the available options activity_type = forms.ChoiceField( choices=[actitvity_type for actitvity_type in Activity.ACTIVITY_TYPES if actitvity_type[0] != Activity.PROPOSAL]) class Meta: model = Activity fields = [ 'title', 'description', 'activity_type', 'start_timetable', 'end_timetable', ] widgets = { 'title': forms.TextInput(attrs={'class': 'inline-input'}), 'start_timetable': CustomTimeInputWidget(format='%H:%M'), 'end_timetable': CustomTimeInputWidget(format='%H:%M'), } class ActivityTimetableForm(forms.ModelForm): class Meta: model = Activity fields = [ 'start_timetable', 'end_timetable', ] widgets = { 'start_timetable': CustomTimeInputWidget(format='%H:%M'), 'end_timetable': CustomTimeInputWidget(format='%H:%M'), }
python
# -*- coding: utf-8 -*- """ usage: python3 plot_features.py --segment size 10 """ import sys import os sys.path.insert(0, os.path.join(os.path.dirname( os.path.realpath(__file__)), "../")) from Functions import plot_functions as pf from Functions import utils as ut if __name__ == '__main__': segment_size = ut.parse_CLI() pf.plot_features(segment_size)
python
"""Spotbugs java tool class to detect bugs inside the project""" import re import shlex import xmltodict from eze.core.enums import VulnerabilityType, ToolType, SourceType, Vulnerability from eze.core.tool import ToolMeta, ScanResult from eze.utils.cli import extract_version_from_maven, run_async_cli_command from eze.utils.io import create_tempfile_path, write_json from eze.utils.language.java import ignore_groovy_errors class JavaSpotbugsTool(ToolMeta): """Spotbugs SAST tool class""" TOOL_NAME: str = "java-spotbugs" TOOL_TYPE: ToolType = ToolType.SAST SOURCE_SUPPORT: list = [SourceType.JAVA] SHORT_DESCRIPTION: str = "opensource java SAST tool class" INSTALL_HELP: str = """In most cases all that is required is java and mvn installed https://maven.apache.org/download.cgi test if installed with mvn --version """ MORE_INFO: str = """ https://spotbugs.github.io/ https://github.com/spotbugs/spotbugs https://spotbugs.readthedocs.io/en/latest/maven.html Tips and Tricks =========================== You can add files to include or exclude to customise your output https://spotbugs.readthedocs.io/en/latest/filter.html """ # https://github.com/spotbugs/spotbugs/blob/master/LICENSE LICENSE: str = """LGPL""" EZE_CONFIG: dict = { "INCLUDE_FULL_REASON": { "type": bool, "default": True, "help_text": """Optional include the full reason in report Warning: on production might want to set this to False to prevent found Secrets appearing in reports""", }, "REPORT_FILE": { "type": str, "default": create_tempfile_path("tmp-java-spotbugs.json"), "default_help_value": "<tempdir>/.eze-temp/tmp-java-spotbugs.json", "help_text": "output report location (will default to tmp file otherwise)", }, "MVN_REPORT_FILE": { "type": str, "default": "target/spotbugsXml.xml", "help_text": "maven output spotbugsXml.xml location, will be loaded, parsed and copied to <REPORT_FILE>", }, } TOOL_LANGUAGE = "java" TOOL_CLI_CONFIG = { "CMD_CONFIG": { # tool command prefix # https://spotbugs.github.io/spotbugs-maven-plugin/check-mojo.html "BASE_COMMAND": shlex.split( "mvn -B -Dmaven.javadoc.skip=true -Dmaven.test.skip=true install com.github.spotbugs:spotbugs-maven-plugin:check" ) } } @staticmethod def check_installed() -> str: """Method for detecting if tool installed and ready to run scan, returns version installed""" version = extract_version_from_maven("com.github.spotbugs:spotbugs-maven-plugin") return version async def run_scan(self) -> ScanResult: """ Method for running a synchronous scan using tool :raises EzeError """ completed_process = await run_async_cli_command(self.TOOL_CLI_CONFIG["CMD_CONFIG"], self.config, self.TOOL_NAME) with open(self.config["MVN_REPORT_FILE"]) as xml_file: spotbugs_report = xmltodict.parse(xml_file.read(), force_list={"BugInstance", "BugPattern"}) write_json(self.config["REPORT_FILE"], spotbugs_report) report = self.parse_report(spotbugs_report) if completed_process.stderr: warnings = ignore_groovy_errors(completed_process.stderr) for warning in warnings: report.warnings.append(warning) return report def parse_report(self, parsed_json: dict) -> ScanResult: """convert report json into ScanResult""" report_results = parsed_json["BugCollection"] vulnerabilities_list = [] if "BugInstance" in report_results: bug_patterns = {} for bug_pattern in report_results["BugPattern"]: if bug_pattern["@type"] in bug_patterns: continue bug_patterns[bug_pattern["@type"]] = bug_pattern["Details"] for bug_instance in report_results["BugInstance"]: bug_sourceline = bug_instance["Class"]["SourceLine"] path = 
bug_sourceline["@sourcepath"] reason = bug_instance["ShortMessage"] line = bug_sourceline["@start"] + "-" + bug_sourceline["@end"] raw_code = bug_instance["LongMessage"] name = reason summary = f"'{reason}', in {path}" details = re.sub("<[^>]*>", "", bug_patterns[bug_instance["@type"]]) recommendation = f"Investigate '{path}' Lines {line} for '{reason}' \n {details}" bug_category = bug_instance["@category"] priority = {"1": "high", "2": "medium", "3": "low"}[bug_instance["@priority"]] # only include full reason if include_full_reason true if self.config["INCLUDE_FULL_REASON"]: recommendation += " Full Match: " + raw_code vulnerabilities_list.append( Vulnerability( { "vulnerability_type": VulnerabilityType.code.name, "name": name, "version": None, "overview": summary, "recommendation": recommendation, "language": "java", "severity": priority, "identifiers": { "spotbugs-code": f"{bug_instance['@type']}:{bug_instance['ShortMessage']} ({bug_category})" }, "metadata": None, "file_location": {"path": path, "lines": bug_sourceline["@start"]}, } ) ) report = ScanResult( { "tool": self.TOOL_NAME, "vulnerabilities": vulnerabilities_list, } ) return report
python
import os os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]="0" import argparse import keras import numpy as np import pandas as pd from ashrae.utils import ( MODEL_PATH, timer, make_dir, rmsle, load_data, get_validation_months, ) parser = argparse.ArgumentParser(description="") parser.add_argument("--overwrite", action="store_true", help="If True then overwrite existing files") parser.add_argument("--normalize_target", action="store_true", help="If True then normalize the meter_reading by dividing by log1p(square_feet).") FEATURES = [ # building meta features "square_feet", "year_built", "floor_count", # cat cols "building_id", "site_id", "primary_use", "hour", "weekday", "weekday_hour", "building_weekday_hour", "building_weekday", "building_hour", # raw weather features "air_temperature", "cloud_coverage", "dew_temperature", "precip_depth_1_hr", "sea_level_pressure", "wind_direction", "wind_speed", # derivative weather features "air_temperature_mean_lag7", "air_temperature_max_lag7", "air_temperature_min_lag7", "air_temperature_std_lag7", "air_temperature_mean_lag73", "air_temperature_max_lag73", "air_temperature_min_lag73", "air_temperature_std_lag73", # time features "hour_x", "hour_y", "weekday_x", "weekday_y", "is_holiday", # target encoding features "gte_meter_building_id_hour", "gte_meter_building_id_weekday", ] CAT_COLS = [ "building_id", "site_id", "primary_use", "hour", "weekday", "weekday_hour", "building_weekday_hour", "building_weekday", "building_hour", ] NUM_COLS = [x for x in FEATURES if x not in CAT_COLS] def get_inputs(df): inputs = {col: np.array(df[col]) for col in CAT_COLS} inputs["numerical_inputs"] = df[NUM_COLS].values return inputs, df.target.values def train_mlp( train, valid, cat_counts, save_name, lr=1e-3, lr_decay=1e-4, batch_size=512, epochs=25, emb_l2_reg=1e-3, emb_dim=1, n_dense_max=256, n_dense_min=32, n_layers=3, dropout=0.5): #------------------------- with timer("Create model"): # inputs num_inputs = keras.layers.Input(shape=(len(NUM_COLS),), name="numerical_inputs") cat_inputs = [keras.layers.Input(shape=(1,), name=x) for x in CAT_COLS] # embedding emb_inputs = [] for x,i in zip(cat_counts, cat_inputs): emb = keras.layers.Embedding( input_dim=cat_counts[x], output_dim=emb_dim, embeddings_regularizer=keras.regularizers.l2(emb_l2_reg)) emb = keras.layers.Flatten()(emb(i)) emb_inputs.append(emb) # mlp inputs = keras.layers.Concatenate(name="general_features")([num_inputs, *emb_inputs]) for i in range(n_layers): n_dense = int(max((0.5**i)*n_dense_max, n_dense_min)) inputs = keras.layers.Dense(n_dense, activation="relu")(inputs) inputs = keras.layers.Dropout(dropout)(inputs) inputs = keras.layers.BatchNormalization()(inputs) # output outputs = keras.layers.Dense(1, activation=None, name="outputs")(inputs) model = keras.models.Model( inputs = [num_inputs, *cat_inputs], outputs = outputs ) # compile model.compile( loss=keras.losses.mean_squared_error, optimizer=keras.optimizers.Adam(lr=lr, decay=lr_decay) ) model.summary() #------------------------- msg = f'Training {save_name} - train# {train.shape} val# {valid.shape}' with timer(msg): model.fit( *get_inputs(train), batch_size=batch_size, epochs=epochs, validation_data=get_inputs(valid), callbacks=[ keras.callbacks.EarlyStopping( patience=2, verbose=1 ), keras.callbacks.ModelCheckpoint( save_name, # f"{MODEL_PATH}/model_oof.hdf5" save_best_only=True, verbose=1, monitor='val_loss', mode='min' ) ] ) return if __name__ == "__main__": """ python scripts/03_train_mlp_meter.py 
--normalize_target python scripts/03_train_mlp_meter.py """ args = parser.parse_args() with timer("Loading data"): train = load_data("train_nn_meter") train = train.loc[train.is_bad_meter_reading==0].reset_index(drop=True) with timer("Preprocesing"): meter_cat_counts = train.groupby(["meter"])[CAT_COLS].agg(lambda x: len(np.unique(x))) # get base file name model_name = f"mlp-split_meter" make_dir(f"{MODEL_PATH}/{model_name}") with timer("Training"): for seed in [0]: #for n_months in [1,2,3,4,5,6]: for n_months in [3]: #@Matt, n_months=3 brings optimal tradeoff between single model performance and diversity for the ensemble # validation_months_list = get_validation_months(n_months) #@Matt, fixed the bug -> hard-coded n_months validation_months_list = get_validation_months(n_months) for fold_, validation_months in enumerate(validation_months_list): for m in range(4): # create sub model path if args.normalize_target: sub_model_path = f"{MODEL_PATH}/{model_name}/target_normalization/meter_{m}" make_dir(sub_model_path) else: sub_model_path = f"{MODEL_PATH}/{model_name}/no_normalization/meter_{m}" make_dir(sub_model_path) # create model version model_version = "_".join([ str(seed), str(n_months), str(fold_), ]) # check if we can skip this model full_sub_model_name = f"{sub_model_path}/{model_version}.h5" if os.path.exists(full_sub_model_name): if not args.overwrite: break # get this months indices trn_idx = np.where(np.isin(train.month, validation_months, invert=True))[0] val_idx = np.where(np.isin(train.month, validation_months, invert=False))[0] #rint(f"split meter: train size {len(trn_idx)} val size {len(val_idx)}") # remove indices not in this meter trn_idx = np.intersect1d(trn_idx, np.where(train.meter == m)[0]) val_idx = np.intersect1d(val_idx, np.where(train.meter == m)[0]) #rint(f"split meter: train size {len(trn_idx)} val size {len(val_idx)}") # fit model train_mlp( train = train.loc[trn_idx, FEATURES+["target"]], valid = train.loc[val_idx, FEATURES+["target"]], cat_counts = dict(meter_cat_counts.loc[m]), save_name = full_sub_model_name )
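# The embeddings built in train_mlp() size their input_dim from meter_cat_counts, i.e. the
# number of distinct values each categorical column takes within one meter type. A toy frame
# (columns chosen only for illustration) shows the shape of that table and of the dict passed
# as cat_counts:
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    "meter": [0, 0, 0, 1, 1],
    "building_id": [10, 11, 10, 10, 12],
    "site_id": [1, 1, 2, 1, 1],
})
toy_cat_cols = ["building_id", "site_id"]
toy_counts = toy.groupby(["meter"])[toy_cat_cols].agg(lambda x: len(np.unique(x)))
print(toy_counts)
#        building_id  site_id
# meter
# 0                2        2
# 1                2        1
print(dict(toy_counts.loc[0]))  # {'building_id': 2, 'site_id': 2} -> per-column embedding input_dim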
python
"""介绍numpy的基本知识""" import numpy as np """[[1, 2, 3],[2, 3, 4]]只是列表形式""" # 将列表转换为数组 array = np.array([[1, 2, 3], [2, 3, 4]]) print(array) print('number of dim', array.ndim) # 数组维数 print('shape', array.shape) # 数组的形式 print('size', array.size) # 数组的大小 """ number of dim 2 shape (2, 3) size 6 """
python
# Copyright 2013 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.openstack.common import units from nova import test from nova.virt.hyperv import constants from nova.virt.hyperv import vhdutilsv2 class VHDUtilsV2TestCase(test.NoDBTestCase): """Unit tests for the Hyper-V VHDUtilsV2 class.""" _FAKE_VHD_PATH = "C:\\fake_path.vhdx" _FAKE_PARENT_VHD_PATH = "C:\\fake_parent_path.vhdx" _FAKE_FORMAT = 3 _FAKE_MAK_INTERNAL_SIZE = units.Gi _FAKE_TYPE = 3 _FAKE_JOB_PATH = 'fake_job_path' _FAKE_RET_VAL = 0 _FAKE_VHD_FORMAT = 'vhdx' _FAKE_BLOCK_SIZE = 33554432 _FAKE_LOG_SIZE = 1048576 _FAKE_LOGICAL_SECTOR_SIZE = 4096 _FAKE_METADATA_SIZE = 1048576 _FAKE_VHD_INFO = {'ParentPath': _FAKE_PARENT_VHD_PATH, 'Format': _FAKE_FORMAT, 'BlockSize': _FAKE_BLOCK_SIZE, 'LogicalSectorSize': _FAKE_LOGICAL_SECTOR_SIZE, 'Type': _FAKE_TYPE} def setUp(self): self._vhdutils = vhdutilsv2.VHDUtilsV2() self._vhdutils._conn = mock.MagicMock() self._vhdutils._vmutils = mock.MagicMock() self._vhdutils.get_vhd_format = mock.MagicMock( return_value=self._FAKE_VHD_FORMAT) self._fake_file_handle = mock.MagicMock() self._fake_vhd_info_xml = ( '<INSTANCE CLASSNAME="Msvm_VirtualHardDiskSettingData">' '<PROPERTY NAME="BlockSize" TYPE="uint32">' '<VALUE>33554432</VALUE>' '</PROPERTY>' '<PROPERTY NAME="Caption" TYPE="string">' '<VALUE>Virtual Hard Disk Setting Data</VALUE>' '</PROPERTY>' '<PROPERTY NAME="Description" TYPE="string">' '<VALUE>Setting Data for a Virtual Hard Disk.</VALUE>' '</PROPERTY>' '<PROPERTY NAME="ElementName" TYPE="string">' '<VALUE>fake_path.vhdx</VALUE>' '</PROPERTY>' '<PROPERTY NAME="Format" TYPE="uint16">' '<VALUE>%(format)s</VALUE>' '</PROPERTY>' '<PROPERTY NAME="InstanceID" TYPE="string">' '<VALUE>52794B89-AC06-4349-AC57-486CAAD52F69</VALUE>' '</PROPERTY>' '<PROPERTY NAME="LogicalSectorSize" TYPE="uint32">' '<VALUE>512</VALUE>' '</PROPERTY>' '<PROPERTY NAME="MaxInternalSize" TYPE="uint64">' '<VALUE>%(max_internal_size)s</VALUE>' '</PROPERTY>' '<PROPERTY NAME="ParentPath" TYPE="string">' '<VALUE>%(parent_path)s</VALUE>' '</PROPERTY>' '<PROPERTY NAME="Path" TYPE="string">' '<VALUE>%(path)s</VALUE>' '</PROPERTY>' '<PROPERTY NAME="PhysicalSectorSize" TYPE="uint32">' '<VALUE>4096</VALUE>' '</PROPERTY>' '<PROPERTY NAME="Type" TYPE="uint16">' '<VALUE>%(type)s</VALUE>' '</PROPERTY>' '</INSTANCE>' % {'path': self._FAKE_VHD_PATH, 'parent_path': self._FAKE_PARENT_VHD_PATH, 'format': self._FAKE_FORMAT, 'max_internal_size': self._FAKE_MAK_INTERNAL_SIZE, 'type': self._FAKE_TYPE}) super(VHDUtilsV2TestCase, self).setUp() def test_get_vhd_info(self): mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0] mock_img_svc.GetVirtualHardDiskSettingData.return_value = ( self._FAKE_JOB_PATH, self._FAKE_RET_VAL, self._fake_vhd_info_xml) vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH) self.assertEqual(self._FAKE_VHD_PATH, vhd_info['Path']) self.assertEqual(self._FAKE_PARENT_VHD_PATH, vhd_info['ParentPath']) self.assertEqual(self._FAKE_FORMAT, vhd_info['Format']) 
self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE, vhd_info['MaxInternalSize']) self.assertEqual(self._FAKE_TYPE, vhd_info['Type']) def test_create_dynamic_vhd(self): self._vhdutils.get_vhd_info = mock.MagicMock( return_value={'Format': self._FAKE_FORMAT}) mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0] mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH, self._FAKE_RET_VAL) self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH, self._FAKE_MAK_INTERNAL_SIZE, constants.DISK_FORMAT_VHDX) self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called) def test_create_differencing_vhd(self): self._vhdutils.get_vhd_info = mock.MagicMock( return_value={'ParentPath': self._FAKE_PARENT_VHD_PATH, 'Format': self._FAKE_FORMAT}) mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0] mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH, self._FAKE_RET_VAL) self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH, self._FAKE_PARENT_VHD_PATH) self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called) def test_reconnect_parent_vhd(self): mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0] self._vhdutils._get_vhd_info_xml = mock.MagicMock( return_value=self._fake_vhd_info_xml) mock_img_svc.SetVirtualHardDiskSettingData.return_value = ( self._FAKE_JOB_PATH, self._FAKE_RET_VAL) self._vhdutils.reconnect_parent_vhd(self._FAKE_VHD_PATH, self._FAKE_PARENT_VHD_PATH) mock_img_svc.SetVirtualHardDiskSettingData.assert_called_once_with( VirtualDiskSettingData=self._fake_vhd_info_xml) def test_resize_vhd(self): mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0] mock_img_svc.ResizeVirtualHardDisk.return_value = (self._FAKE_JOB_PATH, self._FAKE_RET_VAL) self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock( return_value=self._FAKE_MAK_INTERNAL_SIZE) self._vhdutils.resize_vhd(self._FAKE_VHD_PATH, self._FAKE_MAK_INTERNAL_SIZE) mock_img_svc.ResizeVirtualHardDisk.assert_called_once_with( Path=self._FAKE_VHD_PATH, MaxInternalSize=self._FAKE_MAK_INTERNAL_SIZE) self.mock_get = self._vhdutils.get_internal_vhd_size_by_file_size self.mock_get.assert_called_once_with(self._FAKE_VHD_PATH, self._FAKE_MAK_INTERNAL_SIZE) def _test_get_vhdx_internal_size(self, vhd_type): self._vhdutils.get_vhd_info = mock.MagicMock() self._vhdutils.get_vhd_parent_path = mock.Mock( return_value=self._FAKE_PARENT_VHD_PATH) if vhd_type == 4: self._vhdutils.get_vhd_info.side_effect = [ {'Type': vhd_type}, self._FAKE_VHD_INFO] else: self._vhdutils.get_vhd_info.return_value = self._FAKE_VHD_INFO self._vhdutils._get_vhdx_log_size = mock.MagicMock( return_value=self._FAKE_LOG_SIZE) self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock( return_value=(self._FAKE_METADATA_SIZE, 1024)) self._vhdutils._get_vhdx_block_size = mock.MagicMock( return_value=self._FAKE_BLOCK_SIZE) file_mock = mock.MagicMock() with mock.patch('__builtin__.open', file_mock): internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( self._FAKE_VHD_PATH, self._FAKE_MAK_INTERNAL_SIZE)) self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE - self._FAKE_BLOCK_SIZE, internal_size) def test_get_vhdx_internal_size_dynamic(self): self._test_get_vhdx_internal_size(3) def test_get_vhdx_internal_size_differencing(self): self._test_get_vhdx_internal_size(4) def test_get_vhdx_current_header(self): VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024] fake_sequence_numbers = ['\x01\x00\x00\x00\x00\x00\x00\x00', '\x02\x00\x00\x00\x00\x00\x00\x00'] self._fake_file_handle.read = 
mock.MagicMock( side_effect=fake_sequence_numbers) offset = self._vhdutils._get_vhdx_current_header_offset( self._fake_file_handle) self.assertEqual(offset, VHDX_HEADER_OFFSETS[1]) def test_get_vhdx_metadata_size(self): fake_metadata_offset = '\x01\x00\x00\x00\x00\x00\x00\x00' fake_metadata_size = '\x01\x00\x00\x00' self._fake_file_handle.read = mock.MagicMock( side_effect=[fake_metadata_offset, fake_metadata_size]) metadata_size, metadata_offset = ( self._vhdutils._get_vhdx_metadata_size_and_offset( self._fake_file_handle)) self.assertEqual(metadata_size, 1) self.assertEqual(metadata_offset, 1) def test_get_block_size(self): self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock( return_value=(self._FAKE_METADATA_SIZE, 1024)) fake_block_size = '\x01\x00\x00\x00' self._fake_file_handle.read = mock.MagicMock( return_value=fake_block_size) block_size = self._vhdutils._get_vhdx_block_size( self._fake_file_handle) self.assertEqual(block_size, 1) def test_get_log_size(self): fake_current_header_offset = 64 * 1024 self._vhdutils._get_vhdx_current_header_offset = mock.MagicMock( return_value=fake_current_header_offset) fake_log_size = '\x01\x00\x00\x00' self._fake_file_handle.read = mock.MagicMock( return_value=fake_log_size) log_size = self._vhdutils._get_vhdx_log_size(self._fake_file_handle) self.assertEqual(log_size, 1)
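# Several of the tests above replace file_handle.read with a MagicMock whose side_effect is a
# list, so each successive call returns the next item; that is how the two VHDX header sequence
# numbers reach _get_vhdx_current_header_offset. A minimal standalone demo of the pattern:
import mock

fake_handle = mock.MagicMock()
fake_handle.read = mock.MagicMock(side_effect=['first chunk', 'second chunk'])
print(fake_handle.read())  # 'first chunk'
print(fake_handle.read())  # 'second chunk'
# A third call would raise StopIteration, because the side_effect sequence is exhausted.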
python
# Generated by Django 2.2.1 on 2019-06-26 11:23
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models

import libs.django.db.models.base_model


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='GpxPoint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='등록일')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='수정일')),
                ('record_time', models.DateTimeField(verbose_name='기록 시간')),
                ('latitude', models.FloatField(verbose_name='latitude')),
                ('longitude', models.FloatField(verbose_name='longitude')),
                ('elevation', models.FloatField(verbose_name='elevation')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
            options={
                'verbose_name': 'GPX Point',
                'verbose_name_plural': 'GPX Point 리스트',
                'db_table': 'gpx_point',
                'unique_together': {('user', 'record_time')},
            },
            bases=(libs.django.db.models.base_model.EqualizeMixin, models.Model),
        ),
    ]
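# The CreateModel operation above implies a model declaration roughly like the sketch below.
# Field names and options come from the migration itself; the app it lives in, the explicit
# app_label, and the EqualizeMixin behaviour are assumptions. It belongs in that app's
# models.py, not in this migration module.
from django.conf import settings
from django.db import models

class GpxPointSketch(models.Model):  # the real model also mixes in EqualizeMixin
    create_time = models.DateTimeField(auto_now_add=True, verbose_name='등록일')
    update_time = models.DateTimeField(auto_now=True, verbose_name='수정일')
    record_time = models.DateTimeField(verbose_name='기록 시간')
    latitude = models.FloatField(verbose_name='latitude')
    longitude = models.FloatField(verbose_name='longitude')
    elevation = models.FloatField(verbose_name='elevation')
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name='user')

    class Meta:
        app_label = 'gpx'  # assumed app label
        verbose_name = 'GPX Point'
        verbose_name_plural = 'GPX Point 리스트'
        db_table = 'gpx_point'
        unique_together = {('user', 'record_time')}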
python
# coding: utf-8
import os
import sys
from importlib import import_module

# import local modules
from .utils import load_config, build_vocab, Tokenizer

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_MODULE = "classifier"


class Classifier():

    def __init__(self, args):
        self.args = args
        self.nn_models = ['TextCNN', 'TextRNN', 'TextRCNN', 'TextRNN_Att']
        if args['model_name'] in self.nn_models:
            module = import_module(BASE_MODULE + '.' + "nn_classifier")
        else:
            module = import_module(BASE_MODULE + '.' + args['model_name'])
        self.model = module.Model(self.args)

    def train(self, X_train, y_train, X_dev=None, y_dev=None, evaluate_test=False):
        if self.args['model_name'] == 'FastText':
            # FastText does not need a dev dataset
            if evaluate_test:
                test_report, test_acc = self.model.train(X_train, y_train, evaluate_test=True)
                return test_report, test_acc
            else:
                self.model.train(X_train, y_train, evaluate_test=False)
                return None, None
        else:
            if evaluate_test:
                test_report, test_acc = self.model.train(X_train, y_train, X_dev, y_dev, evaluate_test=True)
                return test_report, test_acc
            else:
                self.model.train(X_train, y_train, X_dev, y_dev, evaluate_test=False)
                return None, None

    def predict(self, X, model_path='', model=None, tokenizer=None):
        if self.args['model_name'] in ['Bert', 'XLNet']:
            predclass = self.model.predict(X, model_path=model_path, model=model,
                                           tokenizer=tokenizer, show_process=True)
        else:
            predclass = self.model.predict(X, model_path=model_path, model=model)
        return predclass

    def load_model(self, model_path):
        model = self.model.load_model(model_path)
        if self.args['model_name'] in ['Bert', 'XLNet']:
            tokenizer = self.model.load_tokenizer(model_path)
            return model, tokenizer
        else:
            return model, None


if __name__ == '__main__':
    # load data
    import pandas as pd
    data = pd.read_csv('data/2_categories_data.csv')
    X_train = data['text'].tolist()
    y_train = data['label'].tolist()

    # load config
    config_path = "./config.ini"
    model_name = "Bert"
    args = load_config(config_path, model_name)
    args['model_name'] = model_name
    args['save_path'] = "output/%s" % model_name

    # build vocab if the vocab file does not exist
    # load tokenizer
    #tokenizer = Tokenizer(args['word_level'], args['preprocess'], args['lang'])
    #vocab = build_vocab(X_train, tokenizer, args['vocab_path'], max_vocab_size=args['max_vocab_size'], min_freq=1)
    print(args)

    # train
    clf = Classifier(args)
    clf.train(X_train, y_train, evaluate_test=True)
    predclass = clf.predict(X_train)
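# Classifier.__init__ above picks the concrete implementation at runtime by assembling a dotted
# module path and handing it to importlib.import_module. The same mechanism, demonstrated with a
# stdlib module so it runs without the "classifier" package installed:
from importlib import import_module

module_name = "json"  # stands in for BASE_MODULE + '.' + args['model_name']
loaded = import_module(module_name)
print(loaded.dumps({"model_name": "Bert"}))  # the dynamically imported module is a normal module object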
python
import numpy as np import pycircstat import BirdSongToolbox.free_epoch_tools as fet from BirdSongToolbox.import_data import ImportData from BirdSongToolbox.context_hand_labeling import label_focus_context, first_context_func, last_context_func from src.analysis.ml_pipeline_utilities import all_label_instructions import src.analysis.hilbert_based_pipeline as hbp import src.analysis.ml_pipeline_utilities as mlpu from src.analysis.context_utility import birds_context_obj, all_last_syllable def selected_motifs_to_remove(bird_id='z007', session='day-2016-09-11'): """For visualization motifs were removed to illustrate more stereotyped behavior, this is to compensate for not dynamically time warping. The reports created to first show this phenomena doesn't remove these motifs""" if bird_id == 'z020': exemplar_chan = 11 if session == 'day-2016-06-03': # Day 1 # Cherry Pick Motifs for the Visualization: first_rm = [0, 1, 3, 4, 5, 8, 11, 16, 17, 19, 20, 23, 26, 32, 35, 36, 39] # Last 3 are from code glitch last_rm = [0, 1, 2, 3, 15, 16, 17, 19, 21, 25, 26, 27, 28, 31, 34, 36, 37, 39, 42, 44] elif session == 'day-2016-06-05': # Day 2 # Cherry Pick Motifs for the Visualization: # 4 first_rm = [2, 3, 7, 9, 10, 15, 17, 18, 27, 29] # Last 3 are from code glitch last_rm = [0, 2, 4, 10, 11, 12, 19, 25, 27, 29, 31] elif bird_id == 'z007': exemplar_chan = 17 if session == 'day-2016-09-10': # Day 1 # Cherry Pick Motifs for the Visualization: first_rm = [11, 12, 13] # Last 3 are from code glitch last_rm = [1, 5] elif session == 'day-2016-09-11': # Day 2 # Cherry Pick Motifs for the Visualization: first_rm = [6, 13, 14, 15, 16, 20, 31, 7, 8, 36] # Last 3 are from code glitch last_rm = [6, 11, 13, 17, 19, 20, 21, 33] elif bird_id == 'z017': exemplar_chan = 14 if session == 'day-2016-06-19': # Day 1 # Cherry Pick Motifs for the Visualization: first_rm = [0, 1, 6, 7, 21, 30, 33] # Last 3 are from code glitch last_rm = [6, 16, 17, 22, 27, 28, 34] elif session == 'day-2016-06-21': # Day 2 # Cherry Pick Motifs for the Visualization: first_rm = [1, 4, 13, 19, 20, 24, 29, 31, 32] # Last 3 are from code glitch last_rm = [1, 2, 8, 11, 12, 20, 26, 30, ] # 10? 
else: raise NameError # Somehow Used a Subject and Day that wasn't shown in the paper return first_rm, last_rm, exemplar_chan # This should be moved to a visualization specific module from src.analysis.chunk_spectral_perturbation_report import plot_behavior_test from src.analysis.context_utility import birds_context_obj def get_itpc_statistical_significance(bird_id='z007', session='day-2016-09-11'): zdata = ImportData(bird_id=bird_id, session=session) # Get Handlabels chunk_labels_list, chunk_onsets_list = fet.get_chunk_handlabels(handlabels_list=zdata.song_handlabels) # Switch to the Log Spaced Bins freq_bins = 100 fc_lo = np.logspace(np.log10(2), np.log10(220), freq_bins) fc_hi = np.logspace(np.log10(3), np.log10(250), freq_bins) proc_data = hbp.itc_phase_chunk(neural_chunks=zdata.song_neural, fs=1000, l_freqs=fc_lo, h_freqs=fc_hi, verbose=True) # Helper Function to create the properly initialized context class testclass = birds_context_obj(bird_id=bird_id) # Get the Context Array for the Day's Data test_context = testclass.get_all_context_index_arrays(chunk_labels_list) # Select Labels Using Flexible Context Selection first_syll = label_focus_context(focus=1, labels=chunk_labels_list, starts=chunk_onsets_list[0], contexts=test_context, context_func=first_context_func) last_syll = label_focus_context(focus=all_last_syllable[bird_id], labels=chunk_labels_list, starts=chunk_onsets_list[1], contexts=test_context, context_func=last_context_func) # Set the Context Windows first_window = (-500, 800) last_window = (-800, 300) first_rm, last_rm, exemplar_chan = selected_motifs_to_remove(bird_id=bird_id, session=session) # Clip around Events of Interest all_firsts = fet.get_event_related_nd_chunk(chunk_data=proc_data, chunk_indices=first_syll, fs=1000, window=first_window) all_lasts = fet.get_event_related_nd_chunk(chunk_data=proc_data, chunk_indices=last_syll, fs=1000, window=last_window) # Correct The Shape of the Data all_firsts = fet.event_shape_correction(all_firsts, original_dim=3) all_lasts = fet.event_shape_correction(all_lasts, original_dim=3) # Remove the selected motifs all_firsts = np.delete(all_firsts, first_rm, axis=0) all_lasts = np.delete(all_lasts, last_rm, axis=0) # First Motif ITPC first_itc = pycircstat.resultant_vector_length(np.asarray(all_firsts), axis=0) first_itc_p, first_itc_z = pycircstat.rayleigh(np.asarray(all_firsts), axis=0) # Last Motif ITPC last_itc = pycircstat.resultant_vector_length(np.asarray(all_lasts), axis=0) last_itc_p, last_itc_z = pycircstat.rayleigh(np.asarray(all_lasts), axis=0) # Steps to Getting the Values that I want: # Print the Maximum P-value for First print("Print the Maximum P-value for First:") print(np.max(first_itc_p[:, exemplar_chan, :][first_itc_z[:, exemplar_chan, :] > 5])) print("") # Print the P-values for Z>5 for First print("Print the P-values for Z>5 for First:") print(np.max(first_itc_p[:, exemplar_chan, :][first_itc_z[:, exemplar_chan, :] > 5])) print("") # Print the Maximum P-value for last print("Print the Maximum P-value for last:") print(np.max(last_itc_p[:, exemplar_chan, :][last_itc_z[:, exemplar_chan, :] > 5])) print("") # Print the P-values for Z>5 for last print("Print the P-values for Z>5 for last:") print(np.max(last_itc_p[:, exemplar_chan, :][last_itc_z[:, exemplar_chan, :] > 5])) print("") def get_itpc_single_statistical_significance(bird_id='z007', session='day-2016-09-11'): zdata = ImportData(bird_id=bird_id, session=session) # Get Handlabels chunk_labels_list, chunk_onsets_list = 
fet.get_chunk_handlabels(handlabels_list=zdata.song_handlabels) # Switch to the Log Spaced Bins freq_bins = 100 fc_lo = np.logspace(np.log10(2), np.log10(220), freq_bins) fc_hi = np.logspace(np.log10(3), np.log10(250), freq_bins) proc_data = hbp.itc_phase_chunk(neural_chunks=zdata.song_neural, fs=1000, l_freqs=fc_lo, h_freqs=fc_hi, verbose=True) # Helper Function to create the properly initialized context class testclass = birds_context_obj(bird_id=bird_id) # Get the Context Array for the Day's Data test_context = testclass.get_all_context_index_arrays(chunk_labels_list) label_instructions = all_label_instructions[bird_id] # Removing the Silence due to its special needs times_of_interest = fet.label_extractor(all_labels=chunk_labels_list, starts=chunk_onsets_list[0], label_instructions=label_instructions) # Grab the Neural Activity Centered on Each event set_window = (-500, 500) chunk_events = fet.event_clipper_nd(data=proc_data, label_events=times_of_interest, fs=1000, window=set_window) chunk_events = mlpu.balance_classes(chunk_events) def run_itc_analysis(chunk_events_data): # Run the ITC over each Label Type # test_itc = pycircstat.resultant_vector_length(np.asarray(label_focus), axis=0) # test_itc_p, test_itc_z = pycircstat.rayleigh(np.asarray(label_focus), axis=0) itc_results_vector = [] itc_results_p = [] itc_results_z = [] for label_type in chunk_events_data: itc_vector = pycircstat.resultant_vector_length(np.asarray(label_type), axis=0) itc_p, itc_z = pycircstat.rayleigh(np.asarray(label_type), axis=0) itc_results_vector.append(itc_vector) itc_results_p.append(itc_p) itc_results_z.append(itc_z) return np.asarray(itc_results_vector), np.asarray(itc_results_p), np.asarray(itc_results_z) _, _, exemplar_chan = selected_motifs_to_remove(bird_id=bird_id, session=session) itc_results_vector, itc_results_p, itc_results_z = run_itc_analysis(chunk_events_data=chunk_events) # Steps to Getting the Values that I want: # Print the Maximum P-value Accross all Syllables print("Print the Maximum P-value for First:") print(np.max(itc_results_p[:, :, exemplar_chan, :][itc_results_z[:, :, exemplar_chan, :] > 5])) print("") # Print the P-values for Z>5 Accross all Syllables print("Print the P-values for Z>5 for First:") print(np.max(itc_results_p[:, :, exemplar_chan, :][itc_results_z[:, :, exemplar_chan, :] > 5])) print("")
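# Both functions above measure inter-trial phase consistency with
# pycircstat.resultant_vector_length and test it with pycircstat.rayleigh. The quantity itself
# is the length of the mean unit phasor across trials; a numpy-only sketch shows why
# phase-locked trials score near 1 and random phases score near 0:
import numpy as np

rng = np.random.default_rng(0)

def mean_resultant_length(phases, axis=0):
    """Length of the average unit phasor of phase angles (radians) along `axis`."""
    return np.abs(np.mean(np.exp(1j * phases), axis=axis))

locked_phases = rng.normal(loc=0.3, scale=0.1, size=200)   # tightly clustered around 0.3 rad
random_phases = rng.uniform(-np.pi, np.pi, size=200)       # uniformly spread phases
print(mean_resultant_length(locked_phases))   # close to 1.0
print(mean_resultant_length(random_phases))   # close to 0.0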
python