#!/usr/bin/env python3
from calendar import monthrange

# Read a year in the Thai Buddhist calendar and print the number of days
# in February of the corresponding Gregorian year (year - 543).
year = int(input())
(startDay, endDay) = monthrange(year - 543, 2)
print(endDay)
python
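For context, calendar.monthrange(year, month) returns a (weekday_of_first_day, days_in_month) tuple, so the script above prints 28 or 29 depending on whether the converted Gregorian year (Buddhist year minus 543) is a leap year. A minimal check, using 2567 BE purely as an illustrative input:

from calendar import monthrange

# 2567 BE - 543 = 2024 CE, which is a leap year, so February has 29 days
weekday, days_in_feb = monthrange(2567 - 543, 2)
print(days_in_feb)  # 29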
#!/usr/bin/python3
import os
import time
from datetime import datetime as dt
import logging
# from .database.database import *
from threading import Thread


class BloquearSites:
    def __init__(self):
        logging.basicConfig(
            filename="C:\\ConectaIT\\modules" + '\\logs\\logBloquearSites.log')
        # self.db = DataBase()
        self.hosts_path = r"C:\Windows\System32\drivers\etc\hosts"
        self.redirect = "127.0.0.1"
        self.stop = False
        self.webSiteList = [
            "facebook.com", "www.facebook.com",
            "gmail.com", "www.gmail.com",
            "topconecta.com.br", "www.topconecta.com.br",
        ]
        while True:
            now = dt.now()
            # Between 08:00 and 16:00, block the listed sites by pointing
            # them at 127.0.0.1 in the Windows hosts file.
            if dt(now.year, now.month, now.day, 8) < now < dt(now.year, now.month, now.day, 16):
                logging.info("Working Hours")
                with open(self.hosts_path, 'r+') as file:
                    self.content = file.read()
                    for website in self.webSiteList:
                        if website not in self.content:
                            file.write(self.redirect + " " + website + "\n")
            else:
                # Outside working hours, strip the blocked entries again.
                with open(self.hosts_path, "r+") as file:
                    content = file.readlines()
                    file.seek(0)
                    for line in content:
                        if not any(website in line for website in self.webSiteList):
                            file.write(line)
                    file.truncate()
                logging.info("Fun hours...")
            time.sleep(5)


BloquearSites()
python
# numeric integration using the 2-point trapezoidal rule
from math import *

EPSILON = .0001  # base length of trapezoids


def evaluate_function(func, a):
    func_at_a = eval(func.replace('x', str(a)))
    return func_at_a


# doesn't yet take into account ability of domain a,b to have b<a
def integrate(func, domain):
    start_val = float(domain.strip().split(',')[0])
    end_val = float(domain.strip().split(',')[1])
    sum = 0.0
    interval_len = float(end_val - start_val)  # in case of flooring
    evaluate_at = start_val
    if start_val == end_val:
        return 0
    else:
        while evaluate_at < end_val:
            sum += EPSILON*(evaluate_function(func, evaluate_at)
                            + evaluate_function(func, evaluate_at + EPSILON))*.5
            evaluate_at += EPSILON
            length_completed = evaluate_at - start_val
            percentage_completed = (length_completed/interval_len)*100
            per_comp = '%3d%%' % int(percentage_completed)
            print '\b\b\b\b\b' + per_comp,
    return '%.3f' % sum


while True:  # main program loop
    function = raw_input('enter function: ')
    evaluate = raw_input('enter domain for integral: ')
    if ',' in evaluate:
        integral = integrate(function, evaluate)
        print '\n'+str(integral)
    elif evaluate:
        domain = evaluate+','+evaluate
        integral = integrate(function, domain)
        print str(integral)
    else:
        break
python
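As a quick sanity check of the two-point trapezoidal rule implemented above, integrating x**2 over [0, 1] should come out near 1/3. The stand-alone sketch below mirrors the accumulation in integrate() without the eval()-based parsing or progress printing (the function and step size are chosen only for illustration):

def trapezoid(f, a, b, h=1e-4):
    # accumulate h * (f(x) + f(x + h)) / 2 across the interval, as integrate() does
    total, x = 0.0, a
    while x < b:
        total += h * (f(x) + f(x + h)) * 0.5
        x += h
    return total

print(trapezoid(lambda x: x**2, 0.0, 1.0))  # ~0.3333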
__author__ = 'Jay Hennessy <[email protected]>'
__license__ = 'MIT'

import os

__version__ = '0.0.1'  # fantraxpy version
VERSION = '17.0.0'  # fantrax version
URL = 'https://www.fantrax.com/fxpa/req'
FANTRAX_TOKEN = os.environ.get('FANTRAX_TOKEN', None)
python
""" This module contains tests for the utility functions in the test_mapping module. """ import pytest from sqlalchemy import ( Column, Index, Integer, UniqueConstraint, ) from sqlalchemy.orm import registry from galaxy.model import _HasTable from . import ( collection_consists_of_objects, has_index, has_unique_constraint, ) from ...testing_utils import ( get_stored_instance_by_id, initialize_model, persist, ) def test_has_index(session): assert has_index(Bar.__table__, ("field1",)) assert not has_index(Foo.__table__, ("field1",)) def test_has_unique_constraint(session): assert has_unique_constraint(Bar.__table__, ("field2",)) assert not has_unique_constraint(Foo.__table__, ("field1",)) def test_collection_consists_of_objects(session): # create objects foo1 = Foo() foo2 = Foo() foo3 = Foo() # store objects persist(session, foo1) persist(session, foo2) persist(session, foo3) # retrieve objects from storage stored_foo1 = get_stored_instance_by_id(session, Foo, foo1.id) stored_foo2 = get_stored_instance_by_id(session, Foo, foo2.id) stored_foo3 = get_stored_instance_by_id(session, Foo, foo3.id) # verify retrieved objects are not the same python objects as those we stored assert stored_foo1 is not foo1 assert stored_foo2 is not foo2 assert stored_foo3 is not foo3 # trivial case assert collection_consists_of_objects([stored_foo1, stored_foo2], foo1, foo2) # empty collection and no objects assert collection_consists_of_objects([]) # ordering in collection does not matter assert collection_consists_of_objects([stored_foo2, stored_foo1], foo1, foo2) # contains wrong object assert not collection_consists_of_objects([stored_foo1, stored_foo3], foo1, foo2) # contains wrong number of objects assert not collection_consists_of_objects([stored_foo1, stored_foo1, stored_foo2], foo1, foo2) # if an object's primary key is not set, it cannot be equal to another object foo1.id, stored_foo1.id = None, None assert not collection_consists_of_objects([stored_foo1], foo1) # Test utilities mapper_registry = registry() @mapper_registry.mapped class Foo(_HasTable): __tablename__ = "foo" id = Column(Integer, primary_key=True) field1 = Column(Integer) @mapper_registry.mapped class Bar(_HasTable): __tablename__ = "bar" id = Column(Integer, primary_key=True) field1 = Column(Integer) field2 = Column(Integer) __table_args__ = ( Index("ix", "field1"), UniqueConstraint("field2"), ) @pytest.fixture(scope="module") def init_model(engine): """Create model objects in the engine's database.""" # Must use the same engine as the session fixture used by this module. initialize_model(mapper_registry, engine)
python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-22 14:40
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('content', models.CharField(default='New Comment', max_length=1000)),
                ('time', models.DateTimeField(default=django.utils.timezone.now)),
                ('owner', models.CharField(default='User', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=50)),
                ('partition', models.CharField(choices=[('Chat', 'Chat'), ('Announcement', 'Announcement'), ('Transaction', 'Transaction'), ('Activity', 'Activity')], default='Chat', max_length=50)),
                ('owner', models.CharField(default='User', max_length=100)),
                ('content', models.CharField(blank=True, max_length=1000)),
                ('time', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        migrations.AddField(
            model_name='comment',
            name='post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.Post'),
        ),
    ]
python
# Mod01.py
from MyCalc01 import Calc01
from MyCalc01.Calc01 import *

x = 100
y = 200
print(Calc01.__name__)
Calc01.Sum(x, y)
Mul(x, y)
python
class ValidationException(Exception):
    pass


class DataApiException(Exception):
    """For errors raised when reading from data api"""
python
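A minimal sketch of how calling code might use these exception types; the fetch_records helper and its client argument are hypothetical and not part of the module:

def fetch_records(client, query):
    # hypothetical caller: validate input, then map low-level I/O failures
    # onto the module's DataApiException
    if not query:
        raise ValidationException("query must not be empty")
    try:
        return client.get(query)
    except IOError as exc:
        raise DataApiException("failed to read from data api: %s" % exc)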
# Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is # licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ensure import functools import ci.log import ci.util import concourse.client.api import ctx import model.concourse def lookup_cc_team_cfg( concourse_cfg_name, cfg_set, team_name, ) -> model.concourse.ConcourseTeamConfig: for cc_team_cfg in cfg_set._cfg_elements('concourse_team_cfg'): if cc_team_cfg.team_name() != team_name: continue if concourse_cfg_name != cc_team_cfg.concourse_endpoint_name(): continue return cc_team_cfg raise KeyError(f'No concourse team config for team name {team_name} found') @ensure.ensure_annotations def client_from_parameters( base_url: str, password: str, team_name: str, username: str, verify_ssl: bool = True, concourse_api_version=None, ) -> concourse.client.api.ConcourseApiBase: """ returns a concourse-client w/ the credentials valid for the current execution environment. The returned client is authorised to perform operations in the same concourse-team as the credentials provided calling this function. """ concourse_api = concourse.client.api.ConcourseApiFactory.create_api( base_url=base_url, team_name=team_name, verify_ssl=verify_ssl, concourse_api_version=concourse_api_version, ) concourse_api.login( username=username, passwd=password, ) return concourse_api @functools.lru_cache() @ensure.ensure_annotations def client_from_cfg_name( concourse_cfg_name: str, team_name: str, cfg_factory=None, ): if not cfg_factory: cfg_factory = ci.util.ctx().cfg_factory() concourse_team_config = lookup_cc_team_cfg( concourse_cfg_name=concourse_cfg_name, cfg_set=cfg_factory, team_name=team_name, ) concourse_endpoint = cfg_factory.concourse_endpoint( concourse_team_config.concourse_endpoint_name() ) return client_from_parameters( base_url=concourse_endpoint.base_url(), password=concourse_team_config.password(), team_name=team_name, username=concourse_team_config.username(), ) def client_from_env( team_name: str=None, ): cfg_set = ctx.cfg_set() if not team_name: team_name = ci.util.check_env('CONCOURSE_CURRENT_TEAM') concourse_team_config = lookup_cc_team_cfg( concourse_cfg_name=cfg_set.concourse().name(), cfg_set=cfg_set, team_name=team_name, ) concourse_endpoint = cfg_set.concourse_endpoint( concourse_team_config.concourse_endpoint_name() ) return client_from_parameters( base_url=concourse_endpoint.base_url(), password=concourse_team_config.password(), team_name=team_name, username=concourse_team_config.username(), )
python
# -*- coding: utf-8 -*- """ Class for a multi-panel structure """ __version__ = '1.0' __author__ = 'Noemie Fedon' import sys import numpy as np sys.path.append(r'C:\BELLA') from src.BELLA.parameters import Parameters from src.BELLA.constraints import Constraints from src.BELLA.panels import Panel from src.BELLA.reduced_multipanels import ReducedMultiPanel class MultiPanel(): """ Class for multi-panel structures """ def __init__(self, panels, boundary_weights=None): """Create object for storing multi-panel structures information""" # list of panels (classes) self.panels = panels if not isinstance(panels, list): raise MultiPanelDefinitionError( 'Attention, panels must be a list!') # total area of structure self.area = sum([el.area for el in panels]) # total area for all patches self.area_patches = sum([el.area * el.n_plies for el in panels]) # minimum ply count self.n_plies_min = min((el.n_plies for el in panels)) # maximum ply count self.n_plies_max = max((el.n_plies for el in panels)) self.is_thick_panels = [panel.n_plies == self.n_plies_max \ for panel in self.panels] # number of panels self.n_panels = len(panels) # number of plies in the laminates self.n_plies_in_panels = np.array([self.panels[ind_panel].n_plies \ for ind_panel in range(self.n_panels)]) self.has_a_middle_ply() self.identify_one_thickest_panel() self.calc_panel_boundary_dict(panels, boundary_weights) def should_you_use_BELLA(self): """ Tells the user when using LAYLA is better than employing BELLA Displays a message when BELLA is employed to design a composite laminate structure with one panel to indicate that LAYLA is better suited for the task than BELLA. Returns ------- None. """ if self.n_panels == 1: print(""" You are using BELLA to design a composite laminate structure with one panel. LAYLA is better suited for this task than BELLA, please consider using LAYLA instead of BELLA.""") def filter_target_lampams(self, constraints, obj_func_param): """ filters applied to the lamination parameters to account for orthotropy requirements """ for panel in self.panels: panel.filter_target_lampams(constraints, obj_func_param) def filter_lampam_weightings(self, constraints, obj_func_param): """ filter of the lamination-parameter weightings in the panel objective function to account for the design guidelines lampam_weightings_3: for blending steps 3 (contain penalty for out-of-plane orthotropy and may contain penalty for balance) lampam_weightings: for all other blending steps (contain penalty for out-of-plane orthotropy and does not contain penalty for balance) """ for panel in self.panels: panel.filter_lampam_weightings(constraints, obj_func_param) def from_mp_to_blending_strip(self, constraints, n_plies_ref_panel=1): """ performs the blending step 2: maps the multi-panel structure to a blending strip, i.e. 
a series a panels in a row """ self.reduced = ReducedMultiPanel(self, constraints, n_plies_ref_panel) def calc_panel_boundary_dict(self, panels, boundary_weights): """ checks that all panels have a different ID collates all the panel boundaries in self.boundaries checks that all panels are connected """ ## checks that all panels have a different ID self.dict_ID_to_indices = dict() for ind_panel, panel in enumerate(panels): panel.ID_code = ind_panel self.dict_ID_to_indices[panel.ID] = ind_panel if len(self.dict_ID_to_indices) != self.n_panels: raise MultiPanelDefinitionError(""" Several panels with the same index!""") # print('dict_ID_to_indices', self.dict_ID_to_indices) ## create the dictionary of panel boundaries self.boundaries = [] for ind_panel, panel in enumerate(panels): neighbours = [self.dict_ID_to_indices[neighbour] \ for neighbour in panel.neighbour_panels] for elem in neighbours: self.boundaries.append(np.sort([ind_panel, elem])) self.boundaries.append(np.flip(np.sort([ind_panel, elem]))) if len(self.boundaries) == 0: self.boundaries = np.array((), int).reshape((0,2)) else: self.boundaries = np.unique(self.boundaries, axis=0) # print('boundaries', self.boundaries) ## checks that all panels are connected visited_nodes = [] set_avail_nodes = set([0]) while len(set_avail_nodes) != 0 and len(visited_nodes) < self.n_panels: current_node = set_avail_nodes.pop() visited_nodes.append(current_node) for elem in self.boundaries: if elem[0] == current_node and elem[1] not in visited_nodes\ and elem[1] not in set_avail_nodes: set_avail_nodes.add(elem[1]) # print('visited_nodes', visited_nodes) if not len(visited_nodes) == self.n_panels: raise MultiPanelDefinitionError(""" The panels of the multipanel-component are not all connected!""") if len(self.boundaries) == 0: self.boundaries = np.array((), int).reshape((0,2)) else: self.boundaries = np.unique( np.array([np.sort(elem) for elem in self.boundaries]), axis=0) # print('boundaries', self.boundaries) ## dictionary with panel Ids self.boundaries_in_IDs = np.empty((self.boundaries.shape[0], 2), int) for ind_row, (first, second) in enumerate(self.boundaries): self.boundaries_in_IDs[ind_row, 0] = self.panels[first].ID self.boundaries_in_IDs[ind_row, 1] = self.panels[second].ID ## reorganise the boundary weightings self.boundary_weights_in_IDs = dict() self.boundary_weights = dict() if boundary_weights is not None: for weight in boundary_weights.values(): if weight < 0: raise Exception( 'The boundary weightings should be positive.') if len(boundary_weights) < self.boundaries.shape[0]: print(len(boundary_weights), self.boundaries) raise Exception( 'Insufficient number of boundary weightings.') for ind_panel1, ind_panel2 in self.boundaries_in_IDs: ind_panel1_mod = self.dict_ID_to_indices[ind_panel1] ind_panel2_mod = self.dict_ID_to_indices[ind_panel2] ind_panel1, ind_panel2 = sorted((ind_panel1, ind_panel2)) ind_panel1_mod, ind_panel2_mod = sorted((ind_panel1_mod, ind_panel2_mod)) weight = boundary_weights.get((ind_panel1, ind_panel2), None) if weight: self.boundary_weights_in_IDs[ (ind_panel1, ind_panel2)] = weight self.boundary_weights[ (ind_panel1_mod, ind_panel2_mod)] = weight else: weight = boundary_weights.get( (ind_panel2, ind_panel1), None) if not weight: raise Exception('Missing boundary weightings.') self.boundary_weights_in_IDs[ (ind_panel2, ind_panel1)] = weight self.boundary_weights[ (ind_panel2_mod, ind_panel1_mod)] = weight else: # all boundary weightings set to one for ind_panel1, ind_panel2 in self.boundaries_in_IDs: 
ind_panel1_mod = self.dict_ID_to_indices[ind_panel1] ind_panel2_mod = self.dict_ID_to_indices[ind_panel2] ind_panel1, ind_panel2 = sorted((ind_panel1, ind_panel2)) ind_panel1_mod, ind_panel2_mod = sorted((ind_panel1_mod, ind_panel2_mod)) self.boundary_weights_in_IDs[(ind_panel1, ind_panel2)] = 1 self.boundary_weights[(ind_panel1_mod, ind_panel2_mod)] = 1 return 0 def has_a_middle_ply(self): """ returns: - middle_ply_indices: the locations of middle plies per panel (0 if no middle ply) - has_middle_ply: True if one panel at least has a middle ply - thick_panel_has_middle_ply: True if thickest panel has a middle ply """ # locations of middle plies per panel (0 if no middle ply) self.middle_ply_indices = np.array( [self.panels[ind_panel].middle_ply_index \ for ind_panel in range(self.n_panels)]) self.has_middle_ply = bool(sum(self.middle_ply_indices)) if self.has_middle_ply and self.n_plies_max % 2: self.thick_panel_has_middle_ply = True else: self.thick_panel_has_middle_ply = False def calc_ply_drops(self, inner_step): """ returns a vector of the number of ply drops at each panel boundary of the blending strip for the inner_step-eme group of plies """ n_ply_drops = np.zeros((self.reduced.n_panels,), dtype='int16') for index, panel in enumerate(self.reduced.panels): n_ply_drops[index] = self.reduced.n_plies_per_group[inner_step] \ - panel.n_plies_per_group[inner_step] return n_ply_drops def calc_weight(self, density_area): """ returns the weight of the multipanel structure """ return density_area*sum([panel.area*panel.n_plies \ for panel in self.panels]) def calc_weight_per_panel(self, density_area): """ returns the weight of the multipanel structure per panel """ self.weight_ref_per_panel = density_area * \ np.array([panel.area*panel.n_plies for panel in self.panels]) def calc_weight_from_sst(self, sst, density_area): """ returns the weight of the multipanel structure from a stacking sequence table """ return density_area*sum([panel.area * sum(sst[ind_panel] != -1) \ for ind_panel, panel in enumerate(self.panels)]) def identify_neighbour_panels(self): """ returns the indices of the neighbouring panels for each panel """ liste = [] for ind_panel in range(self.n_panels): liste.append([]) for boundary in self.boundaries: liste[boundary[0]].append(boundary[1]) liste[boundary[1]].append(boundary[0]) return liste def identify_one_thickest_panel(self): """ returns the index of one of the thickest panels """ for ind_panel, panel in enumerate(self.panels): if panel.n_plies == self.n_plies_max: self.ind_thick = ind_panel return 0 raise Exception(""" The maximum number of plies should be the ply count of a panel""") def identify_thickest_panels(self, sym=False): """ returns the index of all of the thickest panels """ liste = [] if sym and self.n_plies_max % 2 == 1: # midlle ply in thickest panels for ind_panel, panel in enumerate(self.panels): if panel.n_plies == self.n_plies_max \ or panel.n_plies == self.n_plies_max - 1: liste.append(ind_panel) else: for ind_panel, panel in enumerate(self.panels): if panel.n_plies == self.n_plies_max: liste.append(ind_panel) if liste: return liste raise Exception(""" The maximum number of plies should be the ply count of a panel""") def __repr__(self): " Display object " to_add = '' # number of groups if hasattr(self, 'n_groups'): to_add = to_add + 'Number of groups : ' + str(self.n_groups) \ + '\n' # number of plies per group for thickest laminates if hasattr(self, 'n_plies_per_group'): to_add = to_add + 'Max number of plies per group : ' \ + 
str(self.n_plies_per_group) + '\n' # position of the group first plies for thickest laminates if hasattr(self, 'n_first_plies'): to_add = to_add + 'Position first plies : ' \ + str(self.n_first_plies) + '\n' return f""" Number of panels : {self.n_panels} Maximum number of plies in a panel: {self.n_plies_max} Index of one of the thickest panels: {self.ind_thick} Area : {self.area} Area for all patches: {self.area_patches} Panel boundary matrix : {self.boundaries_in_IDs} """ + to_add class MultiPanelDefinitionError(Exception): " Errors during the definition of a multi-panel structure" if __name__ == "__main__": print('*** Test for the class MultiPanel ***\n') constraints = Constraints( sym=True, dam_tol=False, covering=False, pdl_spacing=True, min_drop=2) parameters = Parameters(constraints=constraints, n_plies_ref_panel=48) n_plies_target1 = 48 n_plies_target2 = 46 n_plies_target3 = 40 n_plies_target4 = 40 panel1 = Panel(ID=1, n_plies=n_plies_target1, constraints=constraints, neighbour_panels=[2]) panel2 = Panel(ID=2, n_plies=n_plies_target2, constraints=constraints, neighbour_panels=[1, 3]) panel3 = Panel(ID=3, n_plies=n_plies_target3, constraints=constraints, neighbour_panels=[2, 4]) panel4 = Panel(ID=4, n_plies=n_plies_target4, constraints=constraints, neighbour_panels=[3]) multipanel = MultiPanel([panel1, panel2, panel3, panel4]) print(multipanel) from src.BELLA.divide_panels import divide_panels divide_panels(multipanel, parameters, constraints) print('multipanel.reduced.n_plies_in_panels', multipanel.reduced.n_plies_in_panels) print('multipanel.calc_ply_drops(0)', multipanel.calc_ply_drops(0)) print('multipanel.reduced.n_plies_per_group', multipanel.reduced.n_plies_per_group) print('multipanel.reduced.middle_ply_indices', multipanel.reduced.middle_ply_indices)
python
import warnings import dateutil.parser from requests import Session from time import sleep from .config import ( # noqa __version__, API_ROOT, DEFAULT_USER_AGENT, API_KEY_ENV_VAR, ENVIRON_API_KEY, ) session = Session() session.headers.update({"Accept": "application/json"}) session.headers.update({"User-Agent": DEFAULT_USER_AGENT}) if ENVIRON_API_KEY: session.headers.update({"X-Api-Key": ENVIRON_API_KEY}) else: warnings.warn(f"Warning: No API Key found, set {API_KEY_ENV_VAR}") class APIError(RuntimeError): """ Raised when the Open States API returns an error """ pass class NotFound(APIError): """Raised when the API cannot find the requested object""" pass def _make_params(**kwargs): return {k: v for k, v in kwargs.items() if v is not None} def _get(uri, params=None): """ An internal method for making API calls and error handling easy and consistent Args: uri: API URI params: GET parameters Returns: JSON as a Python dictionary """ def _convert_timestamps(result): """Converts a string timestamps from an api result API to a datetime""" if isinstance(result, dict): for key in result.keys(): if key in ( "created_at", "updated_at", "latest_people_update", "latest_bill_update", ): try: result[key] = dateutil.parser.parse(result[key]) except ValueError: pass elif isinstance(result[key], dict): result[key] = _convert_timestamps(result[key]) elif isinstance(result[key], list): result[key] = [_convert_timestamps(r) for r in result[key]] elif isinstance(result, list): result = [_convert_timestamps(r) for r in result] return result def _convert(result): """Convert results to standard Python data structures""" result = _convert_timestamps(result) return result url = f"{API_ROOT}/{uri}" response = session.get(url, params=params) if response.status_code != 200: if response.status_code == 404: raise NotFound(f"Not found: {response.url}") else: raise APIError(response.text) return _convert(response.json()) def set_user_agent(user_agent): """Appends a custom string to the default User-Agent string (e.g. ``pyopenstates/__version__ user_agent``)""" session.headers.update({"User-Agent": f"{DEFAULT_USER_AGENT} {user_agent}"}) def set_api_key(apikey): """Sets API key. Can also be set as OPENSTATES_API_KEY environment variable.""" session.headers["X-Api-Key"] = apikey def get_metadata(state=None, include=None, fields=None): """ Returns a list of all states with data available, and basic metadata about their status. Can also get detailed metadata for a particular state. Args: state: The abbreviation of state to get detailed metadata on, or leave as None to get high-level metadata on all states. include: Additional includes. 
fields: An optional list of fields to return; returns all fields by default Returns: Dict: The requested :ref:`Metadata` as a dictionary """ uri = "jurisdictions" params = dict() if include: params["include"] = _include_list(include) if state: uri += "/" + _jurisdiction_id(state) state_response = _get(uri, params=params) if fields is not None: return {k: state_response[k] for k in fields} else: return state_response else: params["page"] = "1" params["per_page"] = "52" return _get(uri, params=params)["results"] def get_organizations(state): uri = "jurisdictions" uri += "/" + _jurisdiction_id(state) state_response = _get(uri, params={"include": "organizations"}) return state_response["organizations"] def _alt_parameter(param, other_param, param_name, other_param_name): """ensure that only one name was specified""" if param and other_param: raise ValueError( f"cannot specify both {param_name} and variant {other_param_name}" ) elif other_param: warnings.warn(f"{other_param_name} is deprecated, use {param_name}") return other_param return param def search_bills( jurisdiction=None, identifier=None, session=None, chamber=None, classification=None, subject=None, updated_since=None, created_since=None, action_since=None, sponsor=None, sponsor_classification=None, q=None, # control params sort=None, include=None, page=1, per_page=10, all_pages=True, # alternate names for other parameters state=None, ): """ Find bills matching a given set of filters For a list of each field, example values, etc. see https://v3.openstates.org/docs#/bills/bills_search_bills_get """ uri = "bills/" args = {} jurisdiction = _alt_parameter(state, jurisdiction, "state", "jurisdiction") if jurisdiction: args["jurisdiction"] = jurisdiction if session: args["session"] = session if chamber: args["chamber"] = chamber if classification: args["classification"] = classification if subject: args["subject"] = subject if updated_since: args["updated_since"] = updated_since if created_since: args["created_since"] = created_since if action_since: args["action_since"] = action_since if sponsor: args["sponsor"] = sponsor if sponsor_classification: args["sponsor_classification"] = sponsor_classification if q: args["q"] = q if sort: args["sort"] = sort if include: args["include"] = include results = [] if all_pages: args["per_page"] = 20 args["page"] = 1 else: args["per_page"] = per_page args["page"] = page resp = _get(uri, params=args) results += resp["results"] if all_pages: while resp["pagination"]["page"] < resp["pagination"]["max_page"]: args["page"] += 1 sleep(1) resp = _get(uri, params=args) results += resp["results"] return results def get_bill(uid=None, state=None, session=None, bill_id=None, include=None): """ Returns details of a specific bill Can be identified by the Open States unique bill id (uid), or by specifying the state, session, and legislative bill ID Args: uid: The Open States unique bill ID state: The postal code of the state session: The legislative session (see state metadata) bill_id: Yhe legislative bill ID (e.g. 
``HR 42``) **kwargs: Optional keyword argument options, such as ``fields``, which specifies the fields to return Returns: The :ref:`Bill` details as a dictionary """ args = {"include": include} if include else {} if uid: if state or session or bill_id: raise ValueError( "Must specify an Open States bill (uid), or the " "state, session, and bill ID" ) uid = _fix_id_string("ocd-bill/", uid) return _get(f"bills/{uid}", params=args) else: if not state or not session or not bill_id: raise ValueError( "Must specify an Open States bill (uid), " "or the state, session, and bill ID" ) return _get(f"bills/{state.lower()}/{session}/{bill_id}", params=args) def search_legislators( jurisdiction=None, name=None, id_=None, org_classification=None, district=None, include=None, ): """ Search for legislators. Returns: A list of matching :ref:`Legislator` dictionaries """ params = _make_params( jurisdiction=jurisdiction, name=name, id=id_, org_classification=org_classification, district=district, include=include, ) return _get("people", params)["results"] def get_legislator(leg_id): """ Gets a legislator's details Args: leg_id: The Legislator's Open States ID fields: An optional custom list of fields to return Returns: The requested :ref:`Legislator` details as a dictionary """ leg_id = _fix_id_string("ocd-person/", leg_id) return _get("people/", params={"id": [leg_id]})["results"][0] def locate_legislators(lat, lng, fields=None): """ Returns a list of legislators for the given latitude/longitude coordinates Args: lat: Latitude long: Longitude fields: An optional custom list of fields to return Returns: A list of matching :ref:`Legislator` dictionaries """ return _get( "people.geo/", params=dict(lat=float(lat), lng=float(lng), fields=fields) )["results"] def search_districts(state, chamber): """ Search for districts Args: state: The state to search in chamber: the upper or lower legislative chamber fields: Optionally specify a custom list of fields to return Returns: A list of matching :ref:`District` dictionaries """ if chamber: chamber = chamber.lower() if chamber not in ["upper", "lower"]: raise ValueError('Chamber must be "upper" or "lower"') organizations = get_organizations(state=state) for org in organizations: if org["classification"] == chamber: return org["districts"] def _fix_id_string(prefix, id): if id.startswith(prefix): return id else: return prefix + id def _jurisdiction_id(state): if state.startswith("ocd-jurisdiction/"): return state else: return f"ocd-jurisdiction/country:us/state:{state.lower()}/government" def _include_list(include): if include is None: return None elif isinstance(include, str): return [include] elif isinstance(include, (list, tuple)): return include else: raise ValueError("include must be a str or list")
python
#!/usr/bin/env python # -*- coding: utf-8 -*- # Distributed under the terms of the MIT License. """ Script to append window and cage COM's to a CIF. Author: Andrew Tarzia Date Created: 19 Feb 2019 """ import sys from ase.io import read import pywindow as pw import logging import os import atools def main(): if (not len(sys.argv) == 3): print(""" Usage: append_all_COM.py pdb ignore pdb: file (.pdb) : to analyze and add pseudo atoms to ('*.pdb' for all in working dir) ignore (str) : string to use to ignore certain files (set NONE if not used) """) sys.exit() if '*' in sys.argv[1]: from glob import glob if sys.argv[2] != 'NONE': pdbs = sorted( [i for i in glob(sys.argv[1]) if sys.argv[2] not in i] ) else: pdbs = sorted([i for i in glob(sys.argv[1])]) logging.info(f'{len(pdbs)} pdbs to analyze') else: pdbs = [sys.argv[1]] count = 1 for file in pdbs: # do not redo if os.path.isfile(file.replace('.pdb', '_appended.cif')): count += 1 continue logging.info(f'doing {file}: {count} of {len(pdbs)}') ASE_structure = read(file) if ASE_structure is None: count += 1 continue pdb = file if '_nosolv' in pdb: # if solvent is removed and pdb is used, then this is # already the rebuilt structure struct = pw.MolecularSystem.load_file(pdb) struct.make_modular() else: # rebuild system struct = atools.modularize(file=pdb) # print(struct) if struct is None: # handle pyWindow failure sys.exit(f'pyWindow failure on {pdb}') # run analysis COM_dict = atools.analyze_rebuilt( struct, atom_limit=20, file_prefix=file.replace('.pdb', ''), verbose=False, include_coms=True ) # append atoms to ASE structure as pseudo atoms and write out # new CIF atools.append_and_write_COMs( COM_dict, ASE_structure, file, suffix='.pdb' ) count += 1 if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='') main()
python
# Copyright (C) 2013 by Brian Neal. # This file is part of m209, the M-209 simulation. # m209 is released under the MIT License (see LICENSE.txt). """test_converter.py - Unit tests for the M209 class for the M-209 simulation.""" import unittest from .. import M209Error from ..converter import M209 # Data taken from Mark J. Blair's AA key list AA_LUGS = '0-4 0-5*4 0-6*6 1-0*5 1-2 1-5*4 3-0*3 3-4 3-6 5-6' AA_PIN_LIST = [ 'FGIKOPRSUVWYZ', 'DFGKLMOTUY', 'ADEFGIORTUVX', 'ACFGHILMRSU', 'BCDEFJKLPS', 'EFGHIJLMNP' ] AA_CHECK = 'QLRRN TPTFU TRPTN MWQTV JLIJE J' class M209TestCase(unittest.TestCase): def test_invalid_set_pins(self): """Ensure invalid inputs raise errors.""" m = M209() pins = 'BFJKLOSTUWXZ' self.assertRaises(M209Error, m.set_pins, -1, pins) self.assertRaises(M209Error, m.set_pins, -2, pins) self.assertRaises(M209Error, m.set_pins, 6, pins) self.assertRaises(M209Error, m.set_pins, 7, pins) self.assertRaises(M209Error, m.set_pins, 100, pins) def test_invald_set_all_pins(self): m = M209() self.assertRaises(M209Error, m.set_all_pins, 'A') bad_pins1 = AA_PIN_LIST * 2 self.assertRaises(M209Error, m.set_all_pins, bad_pins1) bad_pins2 = ['ABCD', 'EFGH', 'XYZ'] self.assertRaises(M209Error, m.set_all_pins, bad_pins2) def letter_check(self, lugs, pin_list, check): """Generic letter check routine""" pt = 'A' * 26 ct = check m = M209(lugs, pin_list) result = m.encrypt(pt) self.assertEqual(result, ct) self.assertEqual(m.letter_counter, 26) m.letter_counter = 0 m.set_key_wheels('A' * 6) result = m.decrypt(ct) self.assertEqual(result, pt) self.assertEqual(m.letter_counter, 26) def test_aa_letter_check(self): """See if we can pass a letter check using Mark J. Blair's AA key list.""" self.letter_check(AA_LUGS, AA_PIN_LIST, AA_CHECK) def test_yl_letter_check(self): """See if we can pass a letter check using Mark J. Blair's YL key list.""" lugs = '1-0 2-0*4 0-3 0-4*3 0-5*3 0-6*11 2-5 2-6 3-4 4-5' pin_list = [ 'BFJKLOSTUWXZ', 'ABDJKLMORTUV', 'EHJKNPQRSX', 'ABCHIJLMPQR', 'BCDGJLNOPQS', 'AEFHIJP', ] check = 'OZGPK AFVAJ JYRZW LRJEG MOVLU M' self.letter_check(lugs, pin_list, check) def test_fm_letter_check(self): """See if we can pass a letter check using Mark J. 
Blair's FM key list.""" lugs = '1-0 2-0*8 0-3*7 0-4*5 0-5*2 1-5 1-6 3-4 4-5' pin_list = [ 'BCEJOPSTUVXY', 'ACDHJLMNOQRUYZ', 'AEHJLOQRUV', 'DFGILMNPQS', 'CEHIJLNPS', 'ACDFHIMN' ] check = 'TNMYS CRMKK UHLKW LDQHM RQOLW R' self.letter_check(lugs, pin_list, check) def test_no_group(self): m = M209(AA_LUGS, AA_PIN_LIST) result = m.encrypt('A' * 26, group=False) expected = AA_CHECK.replace(' ', '') self.assertEqual(result, expected) def test_encrpyt_no_spaces(self): m = M209() self.assertRaises(M209Error, m.encrypt, 'ATTACK AT DAWN', spaces=False) def test_encrypt_spaces(self): m = M209(AA_LUGS, AA_PIN_LIST) wheels = 'YGXREL' m.set_key_wheels(wheels) result1 = m.encrypt('ATTACK AT DAWN') m.set_key_wheels(wheels) result2 = m.encrypt('ATTACKZATZDAWN', spaces=False) m.set_key_wheels(wheels) result3 = m.encrypt('ATTACKZATZDAWN', spaces=True) self.assertTrue(result1 == result2 == result3) def test_decrpyt_no_spaces(self): m = M209() self.assertRaises(M209Error, m.decrypt, 'ATTACK AT DAWN', spaces=False) def test_decrypt_no_z_sub(self): m = M209(AA_LUGS, AA_PIN_LIST) pt = 'ATTACK AT DAWN' wheels = 'YGXREL' m.set_key_wheels(wheels) ct = m.encrypt(pt) m.set_key_wheels(wheels) result = m.decrypt(ct, z_sub=False) self.assertEqual(pt.replace(' ', 'Z'), result) def test_set_pins_vs_all_pins(self): m1 = M209(AA_LUGS, AA_PIN_LIST) pt = 'ATTACK AT DAWN' wheels = 'YGXREL' m1.set_key_wheels(wheels) ct1 = m1.encrypt(pt) m2 = M209() m2.set_drum_lugs(AA_LUGS) for n, pins in enumerate(AA_PIN_LIST): m2.set_pins(n, pins) m2.set_key_wheels(wheels) ct2 = m2.encrypt(pt) self.assertEqual(ct1, ct2) def test_get_settings(self): m = M209(AA_LUGS, AA_PIN_LIST) settings = m.get_settings() self.assertEqual(settings.lugs, AA_LUGS) self.assertEqual(settings.pin_list, AA_PIN_LIST)
python
from os import listdir
import json

from pymongo import MongoClient

prediction_output = '../data/prediction_output/'  # Edit


def connect_db(mode, db_name):
    client = MongoClient('localhost', 27017)
    db = client[db_name]
    collection = db['train'] if mode == 'train' else db['val']
    return collection


if __name__ == '__main__':
    mode = 'train'  # Edit
    db_name = 'geovisuals_bdd'
    collection = connect_db(mode, db_name)
    for file_name in listdir(prediction_output):
        trip_id = file_name.split('.')[0]
        output_file = prediction_output + file_name
        with open(output_file) as json_file:
            try:
                output_data = json.load(json_file)
                tcnn1 = output_data['tcnn1'] if 'tcnn1' in output_data else []
                cnn_lstm = output_data['cnn_lstm'] if 'cnn_lstm' in output_data else []
                fcn_lstm = output_data['fcn_lstm'] if 'fcn_lstm' in output_data else []
                try:
                    res = collection.update({
                        'trip_id': trip_id
                    }, {
                        '$set': {
                            'predict.tcnn1': tcnn1,
                            'predict.cnn_lstm': cnn_lstm,
                            'predict.fcn_lstm': fcn_lstm
                        }
                    }, upsert=True, multi=False)
                    print('Update response: ', res)
                    print(trip_id)
                except Exception as e:
                    print('error: ' + str(e))
            except Exception as e:
                print('error: ' + str(e))
python
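Collection.update is the legacy PyMongo method and has been removed in PyMongo 4; a sketch of the same upsert with the current API (same filter and document shape as in the script above) would look like:

res = collection.update_one(
    {'trip_id': trip_id},
    {'$set': {
        'predict.tcnn1': tcnn1,
        'predict.cnn_lstm': cnn_lstm,
        'predict.fcn_lstm': fcn_lstm,
    }},
    upsert=True,
)
print('matched:', res.matched_count, 'upserted_id:', res.upserted_id)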
from wsgiref.util import FileWrapper from django.conf import settings from django.http import Http404, HttpResponse, StreamingHttpResponse from django.shortcuts import get_object_or_404, redirect from django.template.response import TemplateResponse from django.urls import reverse from wagtail.core import hooks from wagtail.core.forms import PasswordViewRestrictionForm from wagtail.core.models import CollectionViewRestriction from wagtail.documents.models import document_served, get_document_model from wagtail.utils import sendfile_streaming_backend from wagtail.utils.sendfile import sendfile def serve(request, document_id, document_filename): Document = get_document_model() doc = get_object_or_404(Document, id=document_id) # We want to ensure that the document filename provided in the URL matches the one associated with the considered # document_id. If not we can't be sure that the document the user wants to access is the one corresponding to the # <document_id, document_filename> pair. if doc.filename != document_filename: raise Http404('This document does not match the given filename.') for fn in hooks.get_hooks('before_serve_document'): result = fn(doc, request) if isinstance(result, HttpResponse): return result # Send document_served signal document_served.send(sender=Document, instance=doc, request=request) try: local_path = doc.file.path except NotImplementedError: local_path = None if local_path: # Use wagtail.utils.sendfile to serve the file; # this provides support for mimetypes, if-modified-since and django-sendfile backends if hasattr(settings, 'SENDFILE_BACKEND'): return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename) else: # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND return sendfile( request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile ) else: # We are using a storage backend which does not expose filesystem paths # (e.g. storages.backends.s3boto.S3BotoStorage). # Fall back on pre-sendfile behaviour of reading the file content and serving it # as a StreamingHttpResponse wrapper = FileWrapper(doc.file) response = StreamingHttpResponse(wrapper, content_type='application/octet-stream') response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename # FIXME: storage backends are not guaranteed to implement 'size' response['Content-Length'] = doc.file.size return response def authenticate_with_password(request, restriction_id): """ Handle a submission of PasswordViewRestrictionForm to grant view access over a subtree that is protected by a PageViewRestriction """ restriction = get_object_or_404(CollectionViewRestriction, id=restriction_id) if request.method == 'POST': form = PasswordViewRestrictionForm(request.POST, instance=restriction) if form.is_valid(): restriction.mark_as_passed(request) return redirect(form.cleaned_data['return_url']) else: form = PasswordViewRestrictionForm(instance=restriction) action_url = reverse('wagtaildocs_authenticate_with_password', args=[restriction.id]) password_required_template = getattr(settings, 'DOCUMENT_PASSWORD_REQUIRED_TEMPLATE', 'wagtaildocs/password_required.html') context = { 'form': form, 'action_url': action_url } return TemplateResponse(request, password_required_template, context)
python
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. from datadog_api_client.v1.model_utils import ( ModelNormal, cached_property, ) def lazy_import(): from datadog_api_client.v1.model.agent_check import AgentCheck from datadog_api_client.v1.model.host_meta_install_method import HostMetaInstallMethod globals()["AgentCheck"] = AgentCheck globals()["HostMetaInstallMethod"] = HostMetaInstallMethod class HostMeta(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ validations = {} @cached_property def openapi_types(): lazy_import() return { "agent_checks": ([AgentCheck],), "agent_version": (str,), "cpu_cores": (int,), "fbsd_v": ([str],), "gohai": (str,), "install_method": (HostMetaInstallMethod,), "mac_v": ([str],), "machine": (str,), "nix_v": ([str],), "platform": (str,), "processor": (str,), "python_v": (str,), "socket_fqdn": (str,), "socket_hostname": (str,), "win_v": ([str],), } attribute_map = { "agent_checks": "agent_checks", "agent_version": "agent_version", "cpu_cores": "cpuCores", "fbsd_v": "fbsdV", "gohai": "gohai", "install_method": "install_method", "mac_v": "macV", "machine": "machine", "nix_v": "nixV", "platform": "platform", "processor": "processor", "python_v": "pythonV", "socket_fqdn": "socket-fqdn", "socket_hostname": "socket-hostname", "win_v": "winV", } read_only_vars = {} def __init__(self, *args, **kwargs): """HostMeta - a model defined in OpenAPI Keyword Args: agent_checks ([AgentCheck]): [optional] A list of Agent checks running on the host. agent_version (str): [optional] The Datadog Agent version. cpu_cores (int): [optional] The number of cores. fbsd_v ([str]): [optional] An array of Mac versions. gohai (str): [optional] JSON string containing system information. install_method (HostMetaInstallMethod): [optional] mac_v ([str]): [optional] An array of Mac versions. machine (str): [optional] The machine architecture. nix_v ([str]): [optional] Array of Unix versions. platform (str): [optional] The OS platform. processor (str): [optional] The processor. python_v (str): [optional] The Python version. socket_fqdn (str): [optional] The socket fqdn. socket_hostname (str): [optional] The socket hostname. win_v ([str]): [optional] An array of Windows versions. """ super().__init__(kwargs) self._check_pos_args(args) @classmethod def _from_openapi_data(cls, *args, **kwargs): """Helper creating a new instance from a response.""" self = super(HostMeta, cls)._from_openapi_data(kwargs) self._check_pos_args(args) return self
python
import demistomock as demisto from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import] from CommonServerUserPython import * # noqa: E402 lgtm [py/polluting-import] # IMPORTS from typing import Tuple, Optional import traceback import dateparser import httplib2 import urllib.parse from oauth2client import service_account from googleapiclient import discovery import json import requests # Disable insecure warnings requests.packages.urllib3.disable_warnings() # CONSTANTS SERVICE_NAME = "pubsub" SERVICE_VERSION = "v1" SCOPES = ["https://www.googleapis.com/auth/cloud-platform"] ISO_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" LAST_RUN_TIME_KEY = "fetch_time" LAST_RUN_FETCHED_KEY = "fetched_ids" """ HELPER CLASSES """ class GoogleNameParser: """ Used to easily transform Google Cloud Pub/Sub names """ FULL_PROJECT_PREFIX = "projects/{}" FULL_TOPIC_PREFIX = "/topics/{}" FULL_SUBSCRIPTION_PREFIX = "/subscriptions/{}" FULL_SNAPSHOT_PREFIX = "/snapshots/{}" @staticmethod def get_project_name(project_id): return GoogleNameParser.FULL_PROJECT_PREFIX.format(project_id) @staticmethod def get_topic_name(project_id, topic_id): return GoogleNameParser.get_project_name( project_id ) + GoogleNameParser.FULL_TOPIC_PREFIX.format(topic_id) @staticmethod def get_subscription_project_name(project_id, subscription_id): return GoogleNameParser.get_project_name( project_id ) + GoogleNameParser.FULL_SUBSCRIPTION_PREFIX.format(subscription_id) @staticmethod def get_subscription_topic_name(project_id, topic_id, subscription_id): return GoogleNameParser.get_topic_name( project_id, topic_id ) + GoogleNameParser.FULL_SUBSCRIPTION_PREFIX.format(subscription_id) @staticmethod def get_snapshot_project_name(project_id, snapshot_id): return GoogleNameParser.get_project_name( project_id ) + GoogleNameParser.FULL_SNAPSHOT_PREFIX.format(snapshot_id) # disable-secrets-detection-start class BaseGoogleClient: """ A Client class to wrap the google cloud api library as a service. """ def __init__( self, service_name: str, service_version: str, client_secret: dict, scopes: list, proxy: bool, insecure: bool, **kwargs, ): """ :param service_name: The name of the service. You can find this and the service here https://github.com/googleapis/google-api-python-client/blob/master/docs/dyn/index.md :param service_version: The version of the API. :param client_secret: A string of the generated credentials.json :param scopes: The scope needed for the project. (i.e. ['https://www.googleapis.com/auth/cloud-platform']) :param proxy: Proxy flag :param insecure: Insecure flag :param kwargs: Potential arguments dict """ credentials = service_account.ServiceAccountCredentials.from_json_keyfile_dict( client_secret, scopes=scopes ) if proxy or insecure: http_client = credentials.authorize( self.get_http_client_with_proxy(proxy, insecure) ) self.service = discovery.build( service_name, service_version, http=http_client ) else: self.service = discovery.build( service_name, service_version, credentials=credentials ) @staticmethod def get_http_client_with_proxy(proxy, insecure): """ Create an http client with proxy with whom to use when using a proxy. :param proxy: Whether to use a proxy. :param insecure: Whether to disable ssl and use an insecure connection. 
:return: """ if proxy: proxies = handle_proxy() https_proxy = proxies.get("https") http_proxy = proxies.get("http") proxy_conf = https_proxy if https_proxy else http_proxy # if no proxy_conf - ignore proxy if proxy_conf: if not proxy_conf.startswith("https") and not proxy_conf.startswith( "http" ): proxy_conf = "https://" + proxy_conf parsed_proxy = urllib.parse.urlparse(proxy_conf) proxy_info = httplib2.ProxyInfo( proxy_type=httplib2.socks.PROXY_TYPE_HTTP, proxy_host=parsed_proxy.hostname, proxy_port=parsed_proxy.port, proxy_user=parsed_proxy.username, proxy_pass=parsed_proxy.password, ) return httplib2.Http( proxy_info=proxy_info, disable_ssl_certificate_validation=insecure ) return httplib2.Http(disable_ssl_certificate_validation=insecure) # disable-secrets-detection-end class PubSubClient(BaseGoogleClient): def __init__( self, default_project, default_subscription, default_max_msgs, client_secret, **kwargs, ): super().__init__(client_secret=client_secret, **kwargs) self.default_project = default_project if not default_project: self.default_project = self._extract_project_from_client_secret( client_secret ) self.default_subscription = default_subscription self.default_max_msgs = default_max_msgs def _extract_project_from_client_secret(self, client_secret): """Extracts project name from a client secret json""" project_id = client_secret.get("project_id") if isinstance(project_id, list): project_id = project_id[0] return project_id def _create_subscription_body( self, ack_deadline_seconds, expiration_ttl, labels, message_retention_duration, push_attributes, push_endpoint, retain_acked_messages, topic_name, ): """Create a subscription body""" if push_endpoint or push_attributes: push_config = assign_params( pushEndpoint=push_endpoint, attributes=push_attributes, ) else: push_config = None body = assign_params( topic=topic_name, pushConfig=push_config, ackDeadlineSeconds=ack_deadline_seconds, retainAckedMessages=retain_acked_messages, messageRetentionDuration=message_retention_duration, labels=labels, expirationPolicy=assign_params(ttl=expiration_ttl), ) return body def _create_topic_body(self, allowed_persistence_regions, kms_key_name, labels): """Create a topic body""" message_storage_policy = assign_params( allowedPersistenceRegions=allowed_persistence_regions ) body = assign_params( labels=labels, messageStoragePolicy=message_storage_policy, kmsKeyName=kms_key_name, ) return body def list_topic(self, project_id, page_size, page_token=None): """Get topic list from GoogleClient""" return ( self.service.projects() .topics() .list(project=project_id, pageSize=page_size, pageToken=page_token) .execute() ) def list_topic_subs(self, topic_id, page_size, page_token=None): """Get topic subscriptions from GoogleClient""" return ( self.service.projects() .topics() .subscriptions() .list(topic=topic_id, pageSize=page_size, pageToken=page_token) .execute() ) def list_project_subs(self, project_id, page_size, page_token=None): """Get project subscriptions list from GoogleClient""" return ( self.service.projects() .subscriptions() .list(project=project_id, pageSize=page_size, pageToken=page_token) .execute() ) def get_sub(self, sub_name): """Get subscription by name from GoogleClient""" return ( self.service.projects().subscriptions().get(subscription=sub_name).execute() ) def publish_message(self, project_id, topic_id, req_body): """Publish a topic message via GoogleClient""" return ( self.service.projects() .topics() .publish( topic=GoogleNameParser.get_topic_name(project_id, topic_id), 
body=req_body, ) .execute() ) def pull_messages(self, sub_name, max_messages, ret_immediately=True): """ Pull messages for the subscription :param sub_name: Subscription name :param max_messages: The maximum number of messages to return for this request. Must be a positive integer :param ret_immediately: when set to true will return immediately, otherwise will be async :return: Messages """ req_body = {"returnImmediately": ret_immediately, "maxMessages": max_messages} return ( self.service.projects() .subscriptions() .pull(subscription=sub_name, body=req_body) .execute() ) def ack_messages(self, sub_name, acks): """ Ack a list of messages :param sub_name: subscription name :param acks: ack ids to ack :return: """ body = {"ackIds": acks} return ( self.service.projects() .subscriptions() .acknowledge(subscription=sub_name, body=body) .execute() ) def create_subscription( self, sub_name, topic_name, push_endpoint, push_attributes, ack_deadline_seconds, retain_acked_messages, message_retention_duration, labels, expiration_ttl, ): """ Creates a subscription :param sub_name: full sub name :param topic_name: full topic name :param push_endpoint: A URL locating the endpoint to which messages should be pushed. :param push_attributes: Input format: "key=val" pairs sepearated by ",". :param ack_deadline_seconds: The amount of time Pub/Sub waits for the subscriber to ack. :param retain_acked_messages: if 'true' then retain acknowledged messages :param message_retention_duration: How long to retain unacknowledged messages :param labels: Input format: "key=val" pairs sepearated by ",". :param expiration_ttl: The "time-to-live" duration for the subscription. :return: Subscription """ body = self._create_subscription_body( ack_deadline_seconds, expiration_ttl, labels, message_retention_duration, push_attributes, push_endpoint, retain_acked_messages, topic_name, ) return ( self.service.projects() .subscriptions() .create(name=sub_name, body=body) .execute() ) def update_subscription( self, sub_name, topic_name, update_mask, push_endpoint, push_attributes, ack_deadline_seconds, retain_acked_messages, message_retention_duration, labels, expiration_ttl, ): """ Updates a subscription :param sub_name: full sub name :param topic_name: full topic name :param update_mask: Indicates which fields in the provided subscription to update. :param push_endpoint: A URL locating the endpoint to which messages should be pushed. :param push_attributes: Input format: "key=val" pairs sepearated by ",". :param ack_deadline_seconds: The amount of time Pub/Sub waits for the subscriber to ack. :param retain_acked_messages: if 'true' then retain acknowledged messages :param message_retention_duration: How long to retain unacknowledged messages :param labels: Input format: "key=val" pairs sepearated by ",". :param expiration_ttl: The "time-to-live" duration for the subscription. :return: Subscription """ sub_body = self._create_subscription_body( ack_deadline_seconds, expiration_ttl, labels, message_retention_duration, push_attributes, push_endpoint, retain_acked_messages, topic_name, ) body = assign_params(subscription=sub_body, updateMask=update_mask) return ( self.service.projects() .subscriptions() .patch(name=sub_name, body=body) .execute() ) def create_topic( self, topic_name, labels, allowed_persistence_regions, kms_key_name ): """ Create a topic in the project :param topic_name: name of the topic to be created :param labels: "key=val" pairs sepearated by ",".' 
:param allowed_persistence_regions: an str representing a list of IDs of GCP regions :param kms_key_name: The full name of the Cloud KMS CryptoKey to be used to restrict access on this topic. :return: Topic """ body = self._create_topic_body( allowed_persistence_regions, kms_key_name, labels ) return ( self.service.projects() .topics() .create(name=topic_name, body=body) .execute() ) def delete_topic(self, topic_name): """ Deletes a topic in the project :param topic_name: name of the topic to be created :return: Delete response """ return self.service.projects().topics().delete(topic=topic_name).execute() def update_topic( self, topic_name, labels, allowed_persistence_regions, kms_key_name, update_mask ): """ Updates a topic in the project :param topic_name: name of the topic to be updated :param labels: "key=val" pairs sepearated by ",".' :param allowed_persistence_regions: an str representing a list of IDs of GCP regions :param kms_key_name: The full name of the Cloud KMS CryptoKey to be used to restrict access on this topic. :param update_mask: Indicates which fields in the provided topic to update. :return: Topic """ topic = self._create_topic_body( allowed_persistence_regions, kms_key_name, labels ) body = assign_params(topic=topic, updateMask=update_mask) return ( self.service.projects().topics().patch(name=topic_name, body=body).execute() ) def subscription_seek_message(self, subscription_name, time_string, snapshot=None): """ Seeks messages in subscription :param subscription_name: Subscription to seek messages for :param time_string: A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds, :param snapshot: The snapshot to seek to. :return: Empty string if successful """ body = assign_params(time=time_string, snapshot=snapshot) return ( self.service.projects() .subscriptions() .seek(subscription=subscription_name, body=body) .execute() ) def get_topic_snapshots_list(self, topic_name, page_size, page_token=None): """ Get snapshots list :param topic_name: The name of the topic from which this snapshot is retaining messages. :param page_size: Max number of results :param page_token: Next page token as returned from the API. :return: """ return ( self.service.projects() .topics() .snapshots() .list(topic=topic_name, pageSize=page_size, pageToken=page_token) .execute() ) def get_project_snapshots_list(self, project_name, page_size, page_token): """ Get snapshots list :param project_name: The name of the project from which this snapshot is retaining messages. :param page_size: Max number of results :param page_token: Next page token as returned from the API. :return: Snapshot list """ return ( self.service.projects() .snapshots() .list(project=project_name, pageSize=page_size, pageToken=page_token) .execute() ) def create_snapshot(self, subscription_name, snapshot_name, labels): """ Create a snapshot :param subscription_name: The subscription whose backlog the snapshot retain :param snapshot_name: The name of the snapshot :param labels: labels dict :return: Snapshot """ body = assign_params(subscription=subscription_name, labels=labels) return ( self.service.projects() .snapshots() .create(name=snapshot_name, body=body) .execute() ) def update_snapshot( self, snapshot_name, topic_name, update_mask, expire_time, labels ): """ :param snapshot_name: The name of the snapshot :param topic_name: The ID of the topic from which this snapshot is retaining messages. :param update_mask: Indicates which fields in the provided snapshot to update. 
:param expire_time: A timestamp in RFC3339 UTC "Zulu" format :param labels: labels dict :return: Snapshot """ snapshot = assign_params( name=snapshot_name, topic=topic_name, expireTime=expire_time, labels=labels ) body = assign_params(snapshot=snapshot, updateMask=update_mask) return ( self.service.projects() .snapshots() .patch(name=snapshot_name, body=body) .execute() ) def delete_snapshot(self, snapshot_name): """ Delete a snapshot :param snapshot_name: full snapshot name :return: Empty response """ return ( self.service.projects().snapshots().delete(snapshot=snapshot_name).execute() ) """ HELPER FUNCTIONS""" def init_google_client( service_account_json, default_subscription, default_project, default_max_msgs, insecure, **kwargs, ) -> PubSubClient: """ Initializes google client :param service_account_json: A string of the generated credentials.json :param default_subscription: Default subscription to use :param default_project: Default project to use :param default_max_msgs: Max messages to pull per fetch :param insecure: Flag - do not validate https certs :param kwargs: :return: """ try: service_account_json = json.loads(service_account_json) client = PubSubClient( default_project=default_project, default_subscription=default_subscription, default_max_msgs=default_max_msgs, service_name=SERVICE_NAME, service_version=SERVICE_VERSION, client_secret=service_account_json, scopes=SCOPES, insecure=insecure, **kwargs, ) return client except ValueError as e: return_error( "Failed to parse Service Account Private Key in json format, please make sure you entered it correctly" ) raise e def message_to_incident(message): """ Create incident from a message """ published_time_dt = dateparser.parse(message.get("publishTime")) incident = { "name": f'Google PubSub Message {message.get("messageId")}', "rawJSON": json.dumps(message), "occurred": convert_datetime_to_iso_str(published_time_dt), } return incident def get_messages_ids_and_max_publish_time(msgs): """ Get message IDs and max publish time from given pulled messages """ msg_ids = set() max_publish_time = None for msg in msgs: msg_ids.add(msg.get("messageId")) publish_time = msg.get("publishTime") if publish_time: publish_time = dateparser.parse(msg.get("publishTime")) if not max_publish_time: max_publish_time = publish_time else: max_publish_time = max(max_publish_time, publish_time) if max_publish_time: max_publish_time = convert_datetime_to_iso_str(max_publish_time) return msg_ids, max_publish_time def convert_datetime_to_iso_str(publish_time): """ Converts datetime to str in "%Y-%m-%dT%H:%M:%S.%fZ" format :param publish_time: Datetime :return: date str in "%Y-%m-%dT%H:%M:%S.%fZ" format """ try: return publish_time.strftime(ISO_DATE_FORMAT) except ValueError: return publish_time.strftime("%Y-%m-%dT%H:%M:%SZ") def attribute_pairs_to_dict(attrs_str: Optional[str], delim_char: str = ","): """ Transforms a string of multiple inputs to a dictionary list :param attrs_str: attributes separated by key=val pairs sepearated by ',' :param delim_char: delimiter character between atrribute pairs :return: """ if not attrs_str: return attrs_str attrs = {} regex = re.compile(r"(.*)=(.*)") for f in attrs_str.split(delim_char): match = regex.match(f) if match is None: raise ValueError(f"Could not parse field: {f}") attrs.update({match.group(1): match.group(2)}) return attrs """ COMMAND FUNCTIONS """ def test_module(client: PubSubClient, is_fetch: bool): """ Returning 'ok' indicates that the integration works like it is supposed to: 1. 
Connection to the service is successful. 2. Fetch incidents is configured properly :param client: GoogleClient :return: 'ok' if test passed, anything else will fail the test. """ client.list_topic( GoogleNameParser.get_project_name(client.default_project), page_size=1 ) if is_fetch: client.pull_messages( GoogleNameParser.get_subscription_project_name( client.default_project, client.default_subscription ), max_messages=1, ) return "ok" def topics_list_command( client: PubSubClient, project_id: str, page_size: str = None, page_token: str = None, ) -> Tuple[str, dict, dict]: """ Get topics list by project_id Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: project name :param page_size: page size :param page_token: page token, as returned from the api :return: list of topics """ full_project_name = GoogleNameParser.get_project_name(project_id) res = client.list_topic(full_project_name, page_size, page_token) topics = list(res.get("topics", [])) next_page_token = res.get("nextPageToken") readable_output = tableToMarkdown( f"Topics for project {project_id}", topics, ["name"] ) outputs = {"GoogleCloudPubSubTopics(val && val.name === obj.name)": topics} if next_page_token: outputs["GoogleCloudPubSub.Topics.nextPageToken"] = next_page_token readable_output += f"**Next Page Token: {next_page_token}**" return readable_output, outputs, res def publish_message_command( client: PubSubClient, topic_id: str, project_id: str, data: str = None, attributes: str = None, ) -> Tuple[str, dict, dict]: """ Publishes message in the topic Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param project_id: project name :param topic_id: topic name without project name prefix :param attributes: message attributes separated by key=val pairs sepearated by ',' :param data: message data str :param client: GoogleClient :return: list of topics """ body = get_publish_body(attributes, data) published_messages = client.publish_message(project_id, topic_id, body) output = [] for msg_id in published_messages.get("messageIds"): output.append( { "topic": topic_id, "messageId": msg_id, "data": data, "attributes": body.get("attributes"), } ) ec = {"GoogleCloudPubSubPublishedMessages(val.messageId === obj.messageId)": output} return ( tableToMarkdown( "Google Cloud PubSub has published the message successfully", output, removeNull=True, headerTransform=pascalToSpace, ), ec, published_messages, ) def get_publish_body(message_attributes, message_data): """ Creates publish messages body from given arguments :param message_attributes: message attributes :param message_data: message data :return: publish message body """ message = {} if message_data: # convert to base64 string message["data"] = str(base64.b64encode(message_data.encode("utf8")))[2:-1] if message_attributes: message["attributes"] = attribute_pairs_to_dict(message_attributes) body = {"messages": [message]} return body def pull_messages_command( client: PubSubClient, subscription_id: str, project_id: str, max_messages: str = None, ack: str = None, ) -> Tuple[str, dict, list]: """ Pulls messages from the subscription Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: project name :param subscription_id: Subscription name to pull messages from 
:param max_messages: The maximum number of messages to return for this request. Must be a positive integer :param ack: Acknowledge the messages pulled if set to true. :return: list of messages """ full_subscription_name = GoogleNameParser.get_subscription_project_name( project_id, subscription_id ) raw_msgs = client.pull_messages(full_subscription_name, max_messages) if "receivedMessages" in raw_msgs: acknowledges, msgs = extract_acks_and_msgs(raw_msgs) ec = { "GoogleCloudPubSubPulledMessages(val && val.messageId === obj.messageId)": msgs } if ack == "true": client.ack_messages(full_subscription_name, acknowledges) hr = tableToMarkdown("Google Cloud PubSub Messages", msgs, removeNull=True) return hr, ec, raw_msgs else: return "No new messages found", {}, raw_msgs def ack_messages_command( client: PubSubClient, ack_ids: str, subscription_id: str, project_id: str, ) -> Tuple[str, dict, list]: """ ACKs previously pulled messages using ack Ids Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param ack_ids: csv str with ack ids :param project_id: project name :param subscription_id: Subscription name to pull messages from :return: Success message """ sub_name = GoogleNameParser.get_subscription_project_name( project_id, subscription_id ) ack_ids = argToList(ack_ids) raw_res = client.ack_messages(sub_name, ack_ids) title = f"Subscription {subscription_id} had the following ids acknowledged" readable_output = tableToMarkdown(title, ack_ids, headers=["ACK ID"]) return readable_output, {}, raw_res def extract_acks_and_msgs(raw_msgs, add_ack_to_msg=True): """ Extracts acknowledges and message data from raw_msgs :param raw_msgs: Raw messages object :param add_ack_to_msg: Boolean flag - if true, will add ack to message under "ackId" :return: """ msg_list = [] acknowledges = [] if isinstance(raw_msgs, dict): rcvd_msgs = raw_msgs.get("receivedMessages", []) for raw_msg in rcvd_msgs: msg = raw_msg.get("message", {}) decoded_data = str(msg.get("data", "")) try: decoded_data = str(base64.b64decode(decoded_data))[2:-1] except Exception: # display message with b64 value pass msg["data"] = decoded_data ack_id = raw_msg.get("ackId") if ack_id: acknowledges.append(ack_id) if add_ack_to_msg: msg["ackId"] = ack_id msg_list.append(msg) return acknowledges, msg_list def subscriptions_list_command( client: PubSubClient, project_id: str, page_size: str = None, page_token: str = None, topic_id: str = None, ) -> Tuple[str, dict, dict]: """ Get subscription list by project_id or by topic_id Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: project name :param page_size: page size :param page_token: page token, as returned from the api :param topic_id: topic name :return: list of subscriptions """ title = "Subscriptions" if topic_id: full_topic_name = GoogleNameParser.get_topic_name(project_id, topic_id) raw_response = client.list_topic_subs(full_topic_name, page_size, page_token) subs = [{"name": sub} for sub in raw_response.get("subscriptions", [])] next_page_token = raw_response.get("nextPageToken") title += f" for topic {topic_id} in project {project_id}" readable_output = tableToMarkdown( title, subs, headers=["name"], headerTransform=pascalToSpace ) else: full_project_name = GoogleNameParser.get_project_name(project_id) raw_response = client.list_project_subs( full_project_name, 
page_size, page_token ) subs = raw_response.get("subscriptions", "") next_page_token = raw_response.get("nextPageToken") title += f" in project {project_id}" for sub in subs: sub["deliveryType"] = "Push" if sub.get("pushConfig") else "Pull" readable_output = tableToMarkdown( title, subs, headers=["name", "topic", "ackDeadlineSeconds", "labels"], headerTransform=pascalToSpace, ) outputs = {"GoogleCloudPubSubSubscriptions(val && val.name === obj.name)": subs} if next_page_token: outputs["GoogleCloudPubSubSubscriptions.nextPageToken"] = next_page_token readable_output += f"**Next Page Token: {next_page_token}**" return readable_output, outputs, raw_response def get_subscription_command( client: PubSubClient, subscription_id: str, project_id: str ) -> Tuple[str, dict, dict]: """ Get subscription list by project_id or by topic_id Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param subscription_id: :param client: GoogleClient :param project_id: project name :return: subscription """ full_sub_name = GoogleNameParser.get_subscription_project_name( project_id, subscription_id ) sub = client.get_sub(full_sub_name) sub["deliveryType"] = "Push" if sub.get("pushConfig") else "Pull" title = f"Subscription {subscription_id}" readable_output = tableToMarkdown(title, sub, headerTransform=pascalToSpace) outputs = {"GoogleCloudPubSubSubscriptions(val && val.name === obj.name)": sub} return readable_output, outputs, sub def create_subscription_command( client: PubSubClient, subscription_id: str, topic_id: str, project_id: str, push_endpoint: str = "", push_attributes: str = "", ack_deadline_seconds: str = "", retain_acked_messages: str = "", message_retention_duration: str = "", labels: str = "", expiration_ttl: str = "", ) -> Tuple[str, dict, dict]: """ Creates a subscription Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: Name of the project from which the subscription is receiving messages. :param subscription_id: Name of the created subscription. :param topic_id: Name of the topic from which the subscription is receiving messages. :param push_endpoint: A URL locating the endpoint to which messages should be pushed. :param push_attributes: Input format: "key=val" pairs sepearated by ",". :param ack_deadline_seconds: The amount of time Pub/Sub waits for the subscriber to ack. :param retain_acked_messages: if 'true' then retain acknowledged messages :param message_retention_duration: How long to retain unacknowledged messages :param labels: Input format: "key=val" pairs sepearated by ",". :param expiration_ttl: The "time-to-live" duration for the subscription. 
:return: Created subscription """ full_sub_name = GoogleNameParser.get_subscription_project_name( project_id, subscription_id ) full_topic_name = GoogleNameParser.get_topic_name(project_id, topic_id) labels = attribute_pairs_to_dict(labels) push_attributes = attribute_pairs_to_dict(push_attributes) raw_sub = client.create_subscription( full_sub_name, full_topic_name, push_endpoint, push_attributes, ack_deadline_seconds, retain_acked_messages, message_retention_duration, labels, expiration_ttl, ) sub = dict(raw_sub) title = f"Subscription {subscription_id} was created successfully" readable_output = tableToMarkdown(title, sub) sub["projectName"] = project_id sub["subscriptionName"] = subscription_id sub["deliveryType"] = "Push" if sub.get("pushConfig") else "Pull" outputs = {"GoogleCloudPubSubSubscriptions": sub} return readable_output, outputs, raw_sub def update_subscription_command( client: PubSubClient, subscription_id: str, topic_id: str, update_mask: str, project_id: str, push_endpoint: str = "", push_attributes: str = "", ack_deadline_seconds: str = "", retain_acked_messages: str = "", message_retention_duration: str = "", labels: str = "", expiration_ttl: str = "", ) -> Tuple[str, dict, dict]: """ Creates a subscription Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: Name of the project from which the subscription is receiving messages. :param subscription_id: Name of the created subscription. :param topic_id: Name of the topic from which the subscription is receiving messages. :param update_mask: Indicates which fields in the provided subscription to update. :param push_endpoint: A URL locating the endpoint to which messages should be pushed. :param push_attributes: Input format: "key=val" pairs sepearated by ",". :param ack_deadline_seconds: The amount of time Pub/Sub waits for the subscriber to ack. :param retain_acked_messages: if 'true' then retain acknowledged messages :param message_retention_duration: How long to retain unacknowledged messages :param labels: Input format: "key=val" pairs sepearated by ",". :param expiration_ttl: The "time-to-live" duration for the subscription. :return: Created subscription """ full_sub_name = GoogleNameParser.get_subscription_project_name( project_id, subscription_id ) full_topic_name = GoogleNameParser.get_topic_name(project_id, topic_id) labels = attribute_pairs_to_dict(labels) push_attributes = attribute_pairs_to_dict(push_attributes) raw_sub = client.update_subscription( full_sub_name, full_topic_name, update_mask, push_endpoint, push_attributes, ack_deadline_seconds, retain_acked_messages, message_retention_duration, labels, expiration_ttl, ) sub = dict(raw_sub) title = f"Subscription {subscription_id} was updated successfully" readable_output = tableToMarkdown(title, sub) sub["projectName"] = project_id sub["subscriptionName"] = subscription_id sub["deliveryType"] = "Push" if sub.get("pushConfig") else "Pull" outputs = {"GoogleCloudPubSubSubscriptions(val && val.name === obj.name)": sub} return readable_output, outputs, raw_sub def create_topic_command( client: PubSubClient, topic_id: str, project_id: str, allowed_persistence_regions: str = "", kms_key_name: str = None, labels: str = None, ) -> Tuple[str, dict, dict]: """ Creates a topic :param client: PubSub client instance :param project_id: project ID :param topic_id: topic ID :param labels: "key=val" pairs sepearated by ",".' 
:param allowed_persistence_regions: an str representing a list of IDs of GCP regions :param kms_key_name: The full name of the Cloud KMS CryptoKey to be used to restrict access on this topic. :return: Created topic """ topic_name = GoogleNameParser.get_topic_name(project_id, topic_id) allowed_persistence_regions = argToList(allowed_persistence_regions) labels = attribute_pairs_to_dict(labels) raw_topic = client.create_topic( topic_name, labels, allowed_persistence_regions, kms_key_name ) title = f"Topic **{topic_id}** was created successfully" readable_output = tableToMarkdown(title, raw_topic, headerTransform=pascalToSpace) outputs = {"GoogleCloudPubSubTopics": raw_topic} return readable_output, outputs, raw_topic def delete_topic_command( client: PubSubClient, project_id: str, topic_id: str ) -> Tuple[str, dict, dict]: """ Delete a topic :param client: PubSub client instance :param project_id: project ID :param topic_id: topic ID :return: Command success/error message """ topic_name = GoogleNameParser.get_topic_name(project_id, topic_id) raw_topic = client.delete_topic(topic_name) readable_output = f"Topic **{topic_id}** was deleted successfully" return readable_output, {}, raw_topic def update_topic_command( client: PubSubClient, project_id: str, topic_id: str, update_mask: str, allowed_persistence_regions: str = "", kms_key_name: str = None, labels: str = None, ) -> Tuple[str, dict, dict]: """ Creates a topic :param client: PubSub client instance :param project_id: project ID :param topic_id: topic ID :param labels: "key=val" pairs sepearated by ",".' :param allowed_persistence_regions: an str representing a list of IDs of GCP regions :param kms_key_name: The full name of the Cloud KMS CryptoKey to be used to restrict access on this topic. :param update_mask: Indicates which fields in the provided topic to update. :return: Created topic """ topic_name = GoogleNameParser.get_topic_name(project_id, topic_id) allowed_persistence_regions = argToList(allowed_persistence_regions) labels = attribute_pairs_to_dict(labels) raw_topic = client.update_topic( topic_name, labels, allowed_persistence_regions, kms_key_name, update_mask ) title = f"Topic {topic_id} was updated successfully" readable_output = tableToMarkdown(title, raw_topic, headerTransform=pascalToSpace) outputs = {"GoogleCloudPubSubTopics(val && val.name === obj.name)": raw_topic} return readable_output, outputs, raw_topic def seek_message_command( client: PubSubClient, project_id: str, subscription_id: str, time_string: str = None, snapshot: str = None, ) -> Tuple[str, dict, dict]: """ Get topics list by project_id Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: ID of the subscription, without project/topic prefix. :param subscription_id: ID of the project from which the subscription is receiving messages. :param time_string: A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds, :param snapshot: The snapshot to seek to. 
:return: list of topics """ if not time_string and not snapshot: return_error("Please provide either a time_string or a snapshot") sub_name = GoogleNameParser.get_subscription_project_name( project_id, subscription_id ) raw_res = client.subscription_seek_message(sub_name, time_string, snapshot) readable_output = ( "Message seek was successful for **" + (f"time: {time_string}" if time_string else f"snapshot:{snapshot}") + "**" ) return readable_output, {}, raw_res def snapshot_list_command( client: PubSubClient, project_id: str, topic_id: str = None, page_size: str = None, page_token: str = None, ) -> Tuple[str, dict, dict]: """ Get snapshots list by project_id or topic_id Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: project id :param topic_id: :param page_size: page size :param page_token: page token, as returned from the api :return: list of snapshots """ if topic_id: topic_name = GoogleNameParser.get_topic_name(project_id, topic_id) res = client.get_topic_snapshots_list(topic_name, page_size, page_token) title = f"Snapshots for topic {topic_id}" else: project_name = GoogleNameParser.get_project_name(project_id) res = client.get_project_snapshots_list(project_name, page_size, page_token) title = f"Snapshots for project {project_id}" snapshots = list(res.get("snapshots", [])) next_page_token = res.get("nextPageToken") readable_output = tableToMarkdown(title, snapshots, ["name"]) outputs = {"GoogleCloudPubSubSnapshots(val && val.name === obj.name)": snapshots} if next_page_token: outputs["GoogleCloudPubSub.Snapshots.nextPageToken"] = next_page_token readable_output += f"**Next Page Token: {next_page_token}**" return readable_output, outputs, res def snapshot_create_command( client: PubSubClient, project_id: str, subscription_id: str, snapshot_id: str, labels: str = None, ) -> Tuple[str, dict, dict]: """ Create a snapshot Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: project id :param subscription_id: The subscription whose backlog the snapshot retains. :param snapshot_id: The id of the snapshot. :param labels: Input format: "key=val" pairs sepearated by ",". :return: list of topics """ subscription_name = GoogleNameParser.get_subscription_project_name( project_id, subscription_id ) snapshot_name = GoogleNameParser.get_snapshot_project_name(project_id, snapshot_id) labels = attribute_pairs_to_dict(labels) raw_snapshot = client.create_snapshot(subscription_name, snapshot_name, labels) title = f"Snapshot **{snapshot_id}** was created successfully" readable_output = tableToMarkdown( title, raw_snapshot, headerTransform=pascalToSpace ) outputs = {"GoogleCloudPubSubSnapshots": raw_snapshot} return readable_output, outputs, raw_snapshot def snapshot_update_command( client: PubSubClient, project_id: str, topic_id: str, snapshot_id: str, update_mask: str, expire_time: str = None, labels: str = None, ) -> Tuple[str, dict, dict]: """ Updates a snapshot Requires one of the following OAuth scopes: https://www.googleapis.com/auth/pubsub https://www.googleapis.com/auth/cloud-platform :param client: GoogleClient :param project_id: ID of the project from which the subscription is receiving messages. :param topic_id: The ID of the topic from which this snapshot is retaining messages. :param snapshot_id: The id of the snapshot. 
:param update_mask: Indicates which fields in the provided snapshot to update. :param expire_time: The snapshot is guaranteed to exist up until this time :param labels: An object containing a list of "key": value pairs :return: """ snapshot_name = GoogleNameParser.get_snapshot_project_name(project_id, snapshot_id) topic_name = GoogleNameParser.get_topic_name(project_id, topic_id) labels = attribute_pairs_to_dict(labels) raw_snapshot = client.update_snapshot( snapshot_name, topic_name, update_mask, expire_time, labels ) title = f"Snapshot **{snapshot_id}** was updated successfully" readable_output = tableToMarkdown( title, raw_snapshot, headerTransform=pascalToSpace ) outputs = { "GoogleCloudPubSubSnapshots(val && val.name === obj.name)": raw_snapshot } return readable_output, outputs, raw_snapshot def snapshot_delete_command( client: PubSubClient, project_id: str, snapshot_id: str ) -> Tuple[str, dict, dict]: """ Delete a topic :param client: PubSub client instance :param project_id: The ID of the project from which the subscription is receiving messages. :param snapshot_id: The id of the snapshot. :return: Command success/error message """ snapshot_name = GoogleNameParser.get_snapshot_project_name(project_id, snapshot_id) raw_res = client.delete_snapshot(snapshot_name) readable_output = f"Snapshot **{snapshot_id}** was deleted successfully" return readable_output, {}, raw_res def fetch_incidents( client: PubSubClient, last_run: dict, first_fetch_time: str, ack_incidents: bool ): """ This function will execute each interval (default is 1 minute). :param client: GoogleClient initialized with default_project, default_subscription and default_max_msgs :param last_run: last run dict containing last run data :param first_fetch_time: how long ago should the subscription seek in first fetch :param ack_incidents: Boolean flag - when set to True will ack back the fetched messages :return: incidents: Incidents that will be created in Demisto """ sub_name = GoogleNameParser.get_subscription_project_name( client.default_project, client.default_subscription ) # Setup subscription for fetch last_run_fetched_ids, last_run_time = setup_subscription_last_run( client, first_fetch_time, last_run, sub_name, ack_incidents ) # Pull unique messages if available msgs, msg_ids, acknowledges, max_publish_time = try_pull_unique_messages( client, sub_name, last_run_fetched_ids, last_run_time, retry_times=1 ) # Handle fetch results return handle_fetch_results( client, sub_name, last_run, acknowledges, last_run_time, max_publish_time, msg_ids, msgs, ack_incidents, ) def setup_subscription_last_run( client, first_fetch_time, last_run, sub_name, ack_incidents ): """ Setups the subscription last run data, and seeks the subscription to a previous time if relevant :param client: PubSub client :param first_fetch_time: First fetch time provided by the user :param last_run: Last run dict :param sub_name: Name of the subscription :param ack_incidents: ACK flag - if true, will not use seek except for first time fetch :return: """ last_run_fetched_ids = set() # Handle first time fetch if not last_run or LAST_RUN_TIME_KEY not in last_run: last_run_time, _ = parse_date_range(first_fetch_time, ISO_DATE_FORMAT) # Seek previous message state client.subscription_seek_message(sub_name, last_run_time) else: last_run_time = last_run.get(LAST_RUN_TIME_KEY) last_run_fetched_val = last_run.get(LAST_RUN_FETCHED_KEY) if last_run_fetched_val: last_run_fetched_ids = set(last_run_fetched_val) if not ack_incidents: # Seek previous message state 
client.subscription_seek_message(sub_name, last_run_time) return last_run_fetched_ids, last_run_time def try_pull_unique_messages( client, sub_name, previous_msg_ids, last_run_time, retry_times=0 ): """ Tries to pull unique messages for the subscription :param client: PubSub client :param sub_name: Subscription name :param previous_msg_ids: Previous message ids set :param last_run_time: previous run time :param retry_times: How many times to retry pulling :return: 1. Unique list of messages 2. Unique set of message ids 3. Messages acks 4. max_publish_time """ res_msgs = None res_msg_ids = None res_acks = None res_max_publish_time = None raw_msgs = client.pull_messages(sub_name, client.default_max_msgs) if "receivedMessages" in raw_msgs: res_acks, msgs = extract_acks_and_msgs(raw_msgs) # continue only if messages were extracted successfully if msgs: msg_ids, max_publish_time = get_messages_ids_and_max_publish_time(msgs) new_msg_ids = msg_ids.difference(previous_msg_ids) # all messages are unique - return as is if len(new_msg_ids) == len(msg_ids): return msgs, msg_ids, res_acks, max_publish_time # no new messages - retry -1 elif len(new_msg_ids) == 0 and retry_times > 0: demisto.debug( f"GCP_PUBSUB_MSG Duplicates with max_publish_time: {max_publish_time}" ) return try_pull_unique_messages( client, sub_name, previous_msg_ids, retry_times - 1 ) # clean non-unique ids from raw_msgs else: filtered_raw_msgs = filter_non_unique_messages( raw_msgs, previous_msg_ids, last_run_time ) res_acks, res_msgs = extract_acks_and_msgs(filtered_raw_msgs) ( res_msg_ids, res_max_publish_time, ) = get_messages_ids_and_max_publish_time(res_msgs) return res_msgs, res_msg_ids, res_acks, res_max_publish_time def is_unique_msg(msg, previous_msg_ids, previous_run_time): """ Determines if message is unique given previous message ids, and that it's greater than previous run time :param msg: raw Message object :param previous_msg_ids: set of previously fetched message ids :param previous_run_time: previous run time string :return: True if message is unique """ message_dict = msg.get("message", {}) if message_dict: msg_id = message_dict.get("messageId") msg_pub_time = message_dict.get("publishTime", "") return msg_id not in previous_msg_ids and msg_pub_time > previous_run_time return False def filter_non_unique_messages(raw_msgs, previous_msg_ids, previous_run_time): """ Filters messages that appear in previous_msg_ids or are older than the previous_run_time :param raw_msgs: Raw message object :param previous_msg_ids: :param previous_run_time: :return: """ raw_msgs = raw_msgs.get("receivedMessages", []) # filter messages using `previous_msg_ids` and `previous_run_time` filtered_raw_msgs = list( filter( lambda msg: is_unique_msg(msg, previous_msg_ids, previous_run_time), raw_msgs, ) ) return {"receivedMessages": filtered_raw_msgs} def handle_fetch_results( client, sub_name, last_run, acknowledges, last_run_time, max_publish_time, pulled_msg_ids, pulled_msgs, ack_incidents, ): """ Handle the fetch results :param client: PubSub Client :param sub_name: Subscription name :param last_run: last run dict :param acknowledges: acknowledges to make given ack_incidents is True :param last_run_time: last run time :param max_publish_time: max publish time of pulled messages :param pulled_msg_ids: pulled message ids :param pulled_msgs: pulled messages :param ack_incidents: ack incidents flag :return: incidents and last run """ incidents = [] if pulled_msg_ids and max_publish_time: if last_run_time <= max_publish_time: # Create incidents 
for msg in pulled_msgs: incident = message_to_incident(msg) incidents.append(incident) # ACK messages if relevant if ack_incidents: client.ack_messages(sub_name, acknowledges) # Recreate last run to return with new values last_run = { LAST_RUN_TIME_KEY: max_publish_time, LAST_RUN_FETCHED_KEY: list(pulled_msg_ids), } # We didn't manage to pull any unique messages, so we're trying to increment micro seconds - not relevant for ack elif not ack_incidents: last_run_time_dt = dateparser.parse( max_publish_time if max_publish_time else last_run_time ) assert last_run_time_dt is not None last_run_time = convert_datetime_to_iso_str( last_run_time_dt + timedelta(microseconds=1) ) # Update last run time last_run[LAST_RUN_TIME_KEY] = last_run_time return incidents, last_run def main(): params = demisto.params() client = init_google_client(**params) command = demisto.command() LOG(f"Command being called is {command}") try: commands = { "gcp-pubsub-topic-publish-message": publish_message_command, "gcp-pubsub-topic-messages-pull": pull_messages_command, "gcp-pubsub-topic-ack-messages": ack_messages_command, "gcp-pubsub-topic-subscriptions-list": subscriptions_list_command, "gcp-pubsub-topic-subscription-get-by-name": get_subscription_command, "gcp-pubsub-topic-subscription-create": create_subscription_command, "gcp-pubsub-topic-subscription-update": update_subscription_command, "gcp-pubsub-topics-list": topics_list_command, "gcp-pubsub-topic-create": create_topic_command, "gcp-pubsub-topic-delete": delete_topic_command, "gcp-pubsub-topic-update": update_topic_command, "gcp-pubsub-topic-messages-seek": seek_message_command, "gcp-pubsub-topic-snapshots-list": snapshot_list_command, "gcp-pubsub-topic-snapshot-create": snapshot_create_command, "gcp-pubsub-topic-snapshot-update": snapshot_update_command, "gcp-pubsub-topic-snapshot-delete": snapshot_delete_command, } if command == "test-module": demisto.results(test_module(client, params.get("isFetch"))) elif command == "fetch-incidents": ack_incidents = params.get("ack_incidents") first_fetch_time = params.get("first_fetch_time").rstrip() last_run = demisto.getLastRun() incidents, last_run = fetch_incidents( client, last_run, first_fetch_time, ack_incidents ) demisto.incidents(incidents) demisto.setLastRun(last_run) else: args = demisto.args() # project_id is expected to be in all commands. If not provided defaults on client.default_project if "project_id" not in args: args["project_id"] = client.default_project return_outputs(*commands[command](client, **args)) # type: ignore[operator] # Log exceptions except Exception as e: return_error( f"Failed to execute {demisto.command()} command. Error: {str(e)} , traceback: {traceback.format_exc()}" ) if __name__ in ("__main__", "__builtin__", "builtins"): main()
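# Illustrative sketch (an assumption, not part of the integration): how the pure
# helpers above compose when building a publish request. The attribute string and
# message text are made-up example values; the function is defined only for
# demonstration and is never called by the integration itself.
def _example_publish_body():
    # "key=val" pairs separated by "," become an attributes dict
    attrs = attribute_pairs_to_dict("env=prod,team=sec-ops")
    # -> {"env": "prod", "team": "sec-ops"}
    # message data is base64-encoded before being sent to the Pub/Sub API
    body = get_publish_body("env=prod,team=sec-ops", "hello world")
    # -> {"messages": [{"data": "aGVsbG8gd29ybGQ=",
    #                   "attributes": {"env": "prod", "team": "sec-ops"}}]}
    return attrs, body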
python
from django.shortcuts import render, redirect from django import forms from django.contrib import messages from django.contrib.auth.forms import PasswordChangeForm from django.contrib.auth import authenticate, login, update_session_auth_hash from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.http import HttpResponseRedirect, HttpResponse from django.contrib.sites.shortcuts import get_current_site from django.utils.encoding import force_bytes from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode from django.template.loader import render_to_string from django.core.mail import EmailMessage from django.db.models import Count from django.contrib.auth.views import LoginView from django.contrib.messages.views import SuccessMessageMixin from .forms import RegisterForm, ProfileEdit, NewRegister from .token import account_activation_token from applications.events_news.models import Event, Attendees from applications.alumniprofile.models import Profile, Constants from applications.news.models import News from applications.gallery.models import Album from applications.geolocation.views import addPoints import datetime from django.utils import timezone from itertools import chain # Create your views here. class LoginFormView(SuccessMessageMixin, LoginView): template_name = 'AlumniConnect/login.html' redirect_authenticated_user = True # success_url = '/' success_message = "Logged in successfully!" def index(request): sname = None if request.user.is_authenticated: sname = request.user.get_short_name() now = timezone.now() events = Event.objects.filter(start_date__gte=now).order_by('start_date').annotate( count=Count('attendees__user_id')) events_completed = Event.objects.filter(end_date__lt=now).order_by('-start_date').annotate( count=Count('attendees__user_id')) # Add Check here news = News.objects.filter().order_by('-date') # messages.success(request, 'Your password was successfully updated!') events_to_display = list(chain(events, events_completed))[:3] albums_list = Album.objects.order_by('-created').annotate(images_count=Count('albumimage'))[:3] return render(request, "AlumniConnect/index.html", {'name': sname, 'events': events_to_display, 'news': news, 'albums': albums_list}) def alumniBody(request): return render(request, "AlumniConnect/alumnibody.html") def alumniCard(request): return render(request, "AlumniConnect/alumnicard.html") def gallery(request): return render(request, "AlumniConnect/gallery.html") def job_posting(request): return render(request, "AlumniConnect/job_posting.html") # def jobboard(request): # return render(request, "env/Lib/site-packages/gallery.html") def register(request): check = False l = None if request.method == 'POST': form = RegisterForm(request.POST) print(request.POST) if form.is_valid(): batch = form.cleaned_data.get('batch') branch = form.cleaned_data.get('branch') programme = form.cleaned_data.get('programme') l = Profile.objects.filter(batch=batch, programme=programme, branch=branch) print('Testing output\n') print(l) check = True else: form = RegisterForm() return render(request, 'AlumniConnect/registration.html', {'form': form, 'check': check, 'l': l}) def reg_no_gen(degree_, spec_, year): degree = {"B.Tech": "1", "B.Des": '2', "M.Tech": '3', "M.Des": '4', "PhD": '5'} spec = {"NA": '00', "CSE": "01", "ECE": "02", "ME": "03", "MT": "04", "NS": "05", "DS": "06"} last_reg_no = Profile.objects.filter(year_of_admission=year).order_by('user__date_joined').last() # print(last_reg_no) 
new_reg_no = (int(str(last_reg_no.reg_no)[-4:]) + 1) if last_reg_no else 1 return degree[degree_] + spec[spec_] + str(year)[2:] + str(convert_int(new_reg_no, 4)) def convert_int(number, decimals): return str(number).zfill(decimals) def new_register(request): if request.method == 'POST': form = NewRegister(request.POST, request.FILES) # print (request.POST) if form.is_valid(): try: first_name, last_name = request.POST['name'].split(' ', 1) except: first_name = request.POST['name'] last_name = "" # print (form.cleaned_data.get('date_of_joining')) profile = form.save(commit=False) profile.reg_no = reg_no_gen(profile.programme, profile.branch, profile.year_of_admission) profile.country = request.POST['country'] profile.state = request.POST['state'] profile.city = request.POST['city'] password = User.objects.make_random_password(length=10) # password = '12345678' user = User.objects.create_user( username=str(form.cleaned_data.get('roll_no')), first_name=first_name, last_name=last_name, email=str(form.cleaned_data.get('email')), password=password, is_active=True ) profile.user = user profile.save() mappt = addPoints({'city': str(request.POST['city']), 'state': str(request.POST['state']), 'country': str(request.POST['country'])}) print('Adding Map Point Status: ' + str(mappt)) return render(request, 'AlumniConnect/confirm_email.html') else: form = NewRegister() return render(request, 'AlumniConnect/profileedit.html', {'form': form, 'edit': False}) @login_required def profileedit(request, id): if request.user.username == id: profile = Profile.objects.get(roll_no=id) if request.method == 'POST': form = ProfileEdit(request.POST, request.FILES, instance=profile) if form.is_valid(): profile = form.save() profile.save() return HttpResponseRedirect('/profile/' + id) else: print("here") form = ProfileEdit(instance=profile) return render(request, 'AlumniConnect/profileedit.html', {'form': form, 'C': profile.country, 's': profile.state, 'c': profile.city, 'edit': True}) else: return HttpResponseRedirect('/') def activate(request, uidb64, token): print('inside activate') try: uid = urlsafe_base64_decode(uidb64) print(uid) u = User.objects.get(username=uid) print(u) except(TypeError, ValueError, OverflowError): u = None if u is not None and account_activation_token.check_token(u, token): u.is_active = True u.save() login(request, u) # return HttpResponse('Thank you for your email confirmation. Now you can login your account.') return HttpResponseRedirect('/password/') else: return HttpResponse('Activation link is invalid!') return redirect('/') @login_required def change_password(request): if request.method == 'POST': form = PasswordChangeForm(request.user, request.POST) if form.is_valid(): user = form.save() update_session_auth_hash(request, user) # Important! messages.success(request, 'Your password was successfully updated!') return redirect('home') else: messages.error(request, 'Please correct the error below.') else: form = PasswordChangeForm(request.user) return render(request, 'AlumniConnect/change_password.html', {'form': form})
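# Illustrative sketch (assumed helper, not part of the app): reg_no_gen above needs the
# Profile ORM to look up the last serial number for a year, so this standalone function
# only mirrors the string format it produces, with the serial passed in explicitly.
def _format_reg_no_example(degree_, spec_, year, serial):
    degree = {"B.Tech": "1", "B.Des": "2", "M.Tech": "3", "M.Des": "4", "PhD": "5"}
    spec = {"NA": "00", "CSE": "01", "ECE": "02", "ME": "03", "MT": "04", "NS": "05", "DS": "06"}
    # e.g. _format_reg_no_example("B.Tech", "CSE", 2018, 1) -> "101180001"
    return degree[degree_] + spec[spec_] + str(year)[2:] + convert_int(serial, 4)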
python
import math, itertools from functools import lru_cache def distance_squared(p1, p2): x1, y1 = p1 x2, y2 = p2 dx, dy = x1 - x2, y2 - y1 return dx * dx + dy * dy def points_up_tile_size_px(size): return math.floor(size * math.sqrt(3)), size * 2 def flats_up_tile_size_px(size): return size * 2, math.floor(size * math.sqrt(3)) @lru_cache(maxsize=128) def points_up_tile_center_point(grid_position, width, height, offset): x, y = grid_position dx, dy = offset height = math.floor(height * 3/4) # stagger odd rows if y % 2: dx += width // 2 # diamond-shaped grid x += y // 2 return (x * width + dx, y * height + dy) @lru_cache(maxsize=128) def flats_up_tile_center_point(grid_position, width, height, offset): x, y = grid_position dx, dy = offset width = math.floor(width * 3/4) # stagger odd columns if x % 2: dy += height // 2 # diamond-shaped grid y += x // 2 return (x * width + dx, y * height + dy) def points_up_tile_corner_point(radius, index, position_px): theta = math.tau * index / 6 + math.tau / 12 x, y = position_px return (radius * math.cos(theta) + x, radius * math.sin(theta) + y) def flats_up_tile_corner_point(radius, index, position_px): theta = math.tau * index / 6 x, y = position_px return (radius * math.cos(theta) + x, radius * math.sin(theta) + y) @lru_cache(maxsize=128) def points_up_tile_corner_points(grid_position, width, height, offset): radius = height // 2 position_px = points_up_tile_center_point(grid_position, width, height, offset) return [points_up_tile_corner_point(radius, i, position_px) for i in range(6)] @lru_cache(maxsize=128) def flats_up_tile_corner_points(grid_position, width, height, offset): radius = width // 2 position_px = flats_up_tile_center_point(grid_position, width, height, offset) return [flats_up_tile_corner_point(radius, i, position_px) for i in range(6)] class HexTile: def __init__(self, grid_x, grid_y, size_px, points_up): self.grid_position = (grid_x, grid_y) self.neighbours = [] if points_up: self.width, self.height = points_up_tile_size_px(size_px) else: self.width, self.height = flats_up_tile_size_px(size_px) self.points_up = points_up def __str__(self): return f'{self.grid_position}' def __repr__(self): return f'HexTile{self.grid_position}' def center_point(self, offset=0): if self.points_up: return points_up_tile_center_point( self.grid_position, self.width, self.height, offset) else: return flats_up_tile_center_point( self.grid_position, self.width, self.height, offset) def corner_points(self, offset=0): if self.points_up: return points_up_tile_corner_points( self.grid_position, self.width, self.height, offset) else: return flats_up_tile_corner_points( self.grid_position, self.width, self.height, offset) def distance_squared(self, position, offset): return distance_squared(self.center_point(offset), position) class HexGrid: def __init__(self, width, height, tile_size, points_up): self.width = width self.height = height self.tiles = { (x,y): HexTile(x, y, tile_size, points_up) for (x,y) in itertools.product(range(width), range(height)) } for tile in self.tiles.values(): self.populate_neighbours(tile) def populate_neighbours(self, tile): x, y = tile.grid_position if x > 0: tile.neighbours.append(self.tiles[(x-1, y)]) if x < self.width-1: tile.neighbours.append(self.tiles[(x+1, y)]) if y > 0: tile.neighbours.append(self.tiles[(x, y-1)]) if x < self.width-1: tile.neighbours.append(self.tiles[(x+1, y-1)]) if y < self.height-1: tile.neighbours.append(self.tiles[(x, y+1)]) if x > 0: tile.neighbours.append(self.tiles[(x-1, y+1)]) def find_path(self, 
from_tile, to_tiles, filter, visited=None): if visited == None: visited = [] if not filter(from_tile) or from_tile in visited: return None if from_tile in to_tiles: return [from_tile] visited.append(from_tile) for neighbour in from_tile.neighbours: result = self.find_path(neighbour, to_tiles, filter, visited) if result != None: result.append(from_tile) return result return None def top_row(self): return [self.tiles[(x, 0)] for x in range(self.width)] def bottom_row(self): return [self.tiles[(x, self.height-1)] for x in range(self.width)] def left_column(self): return [self.tiles[(0, y)] for y in range(self.height)] def right_column(self): return [self.tiles[(self.width-1, y)] for y in range(self.height)]
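# Minimal usage sketch (an assumption, not part of the original module): build a small
# points-up grid and search for a path from the top-left tile to the bottom row.
# find_path returns the path with the goal tile first and the start tile last.
if __name__ == '__main__':
    grid = HexGrid(4, 4, 10, points_up=True)
    start = grid.tiles[(0, 0)]
    path = grid.find_path(start, grid.bottom_row(), filter=lambda tile: True)
    print(path)
    # Note: the pixel helpers expect an (x, y) offset tuple; the default offset=0
    # cannot be tuple-unpacked, so pass one explicitly.
    print(start.corner_points(offset=(0, 0)))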
python
class Board: ROW_COL_BASE = 1 def __init__(self, board, N): """ Builds a Board from a list of blocks and the size of the board. The list must hold N^2 blocks. :param board: the list with the blocks. :param N: the size of the board (length = width = N) :return: a new Board with the blocks setup as passed. """ self.board = board self.N = N @classmethod def fromMatrix(cls, blocks): """ Builds a Board from a matrix of blocks, i.e. from a list of list of blocks. :param blocks: the matrix with the blocks :return: a new Board with the blocks setup as passed. """ l = [] for row in blocks: for block in row: l.append(block) N = len(row) return cls(l, N) def dimension(self): """ board dimension N """ return self.N # public int hamming() # number of blocks out of place def manhattan(self): ''' :return: the sum of Manhattan distances between blocks and goal ''' manhval = 0 for idx, tile in enumerate(self.board): if tile == 0: continue rm = (tile -1) / self.N - idx / self.N cm = (tile -1) % self.N - idx % self.N manhval += abs(rm) + abs(cm) return manhval def isgoal(self): ''' is this board the goal board? :return: a boolean, true if this board is the goal board ''' NN = self.N * self.N for idx, val in enumerate(self.board): if (idx != (NN-1) and val != idx +1): return False return True def _index(self, row, col): return self.N * (row - self.ROW_COL_BASE) + (col - self.ROW_COL_BASE) def twin(self): """ a board that is obtained by exchanging any pair of blocks :return: Board """ twin = self.board[:] idx11 = self._index(1, 1) idx12 = self._index(1, 2) idx21 = self._index(2, 1) idx22 = self._index(2, 2) if (twin[idx11] == 0): twin[idx12] = self.board[idx22] # 0 A twin[idx22] = self.board[idx12] # x B else: if (twin[idx12] == 0): twin[idx11] = self.board[idx21] # A 0 twin[idx21] = self.board[idx11] # B x else: twin[idx11] = self.board[idx12] # A B twin[idx12] = self.board[idx11] ## ? ? return Board(twin, self.N); def neighbors(self): """ all neighboring boards :return: a list of neighbours Boards """ blankidx = self.board.index(0) brow = blankidx / self.N bcol = blankidx % self.N idx0 = self._index(brow + 1, bcol + 1) nbrs = [] if brow > 0: # move space up nbrs.append(self._neighborBoard(brow, bcol + 1, idx0)) if brow < self.N - 1: # move space down nbrs.append(self._neighborBoard(brow + 2, bcol + 1, idx0)) if bcol > 0: # move space left nbrs.append(self._neighborBoard(brow + 1, bcol, idx0)) if bcol < self.N - 1: # move space right nbrs.append(self._neighborBoard(brow + 1, bcol + 2, idx0)) return nbrs def _neighborBoard(self, brow, bcol, idx0): idxdst = self._index(brow, bcol) # +1 is for 1 based row and cols brd = self.board[:] brd[idx0] = self.board[idxdst]; brd[idxdst] = self.board[idx0]; b = Board(brd, self.N) return b def __eq__(self, other): if other is None: return False if other is self: return True if type(other) is type(self): return self.__dict__ == other.__dict__ return False def __ne__(self, other): if other is None: return True return not self.__eq__(other) def toString(self): """ :return: the string representation of this board. """ s = '{N}\n'.format(N = self.N) for idx, val in enumerate(self.board): s += '{0:2d}'.format(val) if ((idx + 1) % self.N) == 0: s += '\n' return s # public static void main(String[] args) // unit tests (not graded)
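# Minimal usage sketch (an assumption, not part of the original file): the goal board
# for N = 3 and a board one move away from it. Note that manhattan() and neighbors()
# use the plain '/' operator, i.e. Python 2 integer-division semantics; under Python 3
# those divisions would need '//' to keep row/column indices integral.
if __name__ == '__main__':
    goal = Board.fromMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 0]])
    print(goal.isgoal())             # True
    almost = Board.fromMatrix([[1, 2, 3], [4, 5, 6], [7, 0, 8]])
    print(almost.isgoal())           # False
    print(almost.twin().toString())  # the same board with blocks 1 and 2 exchanged
    print(goal == almost)            # False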
python
#!/usr/bin/env python3 """ Clean up (making tar) a single simulation directory after successful cybershake submissions """ import os import glob import shutil import tarfile import argparse from qcore import utils SUBMISSION_DIR_NAME = "submission_temp" SUBMISSION_TAR = "submission.tar" SUBMISSION_FILES = [ "flist_*", "*_header.cfg", "machine_*.json", "submit.sh", "*.template", "*py", "*.pyc", ] SUBMISSION_SL_LOGS = ["*.sl", "*.err", "*.out"] LF_DIR_NAME = "LF_temp" LF_SUB_DIR_NAME = "OutBin" LF_TAR = "LF.tar" LF_FILES = ["Rlog", "Restart", "SlipOut"] def tar_files(directory_to_tar, archive_name): """params: directory_to_tar:source dir of all files to tar archive_name: output dir for the tar.gz """ if os.path.isfile(archive_name): open_type = "a" print("Adding files to existing tar") else: open_type = "w" print("Start making new tar") try: with tarfile.open(archive_name, open_type) as tar: tar.add(directory_to_tar, arcname=os.path.basename(directory_to_tar)) except Exception as e: print("Failed to make tar with exception {}".format(e)) else: print("Finished adding files to tar") def move_files(sim_dir, dest_dir, file_patterns): """ move all files that match any of the specified file patterns from sim dir to dest dir :param sim_dir: path to source realization folder, eg. /home/melody.zhu/Albury/Runs/Albury/Albury_HYP15-21_S1384 :param dest_dir: path to destination dir :param file_patterns: a list of files/file_pattern to copy :return: """ for f in file_patterns: for p in glob.glob1(sim_dir, f): try: shutil.move(os.path.join(sim_dir, p), os.path.join(dest_dir, p)) except Exception as e: print( "error while copy ing file from {} to {}\n{}".format( sim_dir, dest_dir, e ) ) def create_temp_dirs(sim_dir, outer_dir_name, inner_dir_name=""): """ creates two nested temp dirs containing files to be tared :param sim_dir: path to realization folder :param outer_dir_name: name of temporary dir for storing submission/lf related files to be tared :param inner_dir_name: name of sub_dir inside the temporary dir for storing submission/lf related files to be tared :return: paths to outer_dir and inner dir """ outer_dir = os.path.join(sim_dir, outer_dir_name) utils.setup_dir(outer_dir) inner_dir = "" if inner_dir_name is not "": inner_dir = os.path.join(sim_dir, outer_dir_name, inner_dir_name) utils.setup_dir(inner_dir) return outer_dir, inner_dir def clean_up_submission_lf_files( sim_dir, submission_files_to_tar=[], lf_files_to_tar=[] ): """ main function for moving, taring submission/lf files and deleting any temporary dirs created :param submission_files_to_tar: a list of additional submission related files to tar :param lf_files_to_tar: a list of additional lf related files to tar :return: creates submisson and lf tar.gz """ submission_files_to_tar += SUBMISSION_FILES + SUBMISSION_SL_LOGS lf_files_to_tar += LF_FILES # create temporary submission dir submission_dir, _ = create_temp_dirs(sim_dir, SUBMISSION_DIR_NAME) # create temporary lf dir lf_dir, lf_sub_dir = create_temp_dirs(sim_dir, LF_DIR_NAME, LF_SUB_DIR_NAME) # move files to submission dir move_files(sim_dir, submission_dir, submission_files_to_tar) tar_files(submission_dir, os.path.join(sim_dir, SUBMISSION_TAR)) # move files to lf dir move_files(os.path.join(sim_dir, "LF"), lf_dir, lf_files_to_tar) # copy e3d segments to lf sub dir e3d_segs_dir = os.path.join(sim_dir, "LF", "OutBin") for f in os.listdir(e3d_segs_dir): if "-" in f: # e3d segments have '-' in the name shutil.move(os.path.join(e3d_segs_dir, f), os.path.join(lf_sub_dir, f)) 
tar_files(lf_dir, os.path.join(sim_dir, LF_TAR)) # remove temporary submission and lf dir shutil.rmtree(lf_dir) shutil.rmtree(submission_dir) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "sim_dir", help="path to realization dir eg./home/melody.zhu/Albury/Runs/Albury/Albury_HYP15-21_S1384", ) parser.add_argument( "-submission", "--submission_files_to_tar", nargs="+", default=[], help="Please specify additional submission related file(s)/file_pattern(with '*') to tar separated by a space(if more than one). Default is {}".format( " ".join(SUBMISSION_FILES + SUBMISSION_SL_LOGS) ), ) parser.add_argument( "-lf", "--lf_files_to_tar", nargs="+", default=[], help="Please specify additional LF related file(s)/file_pattern(with '*')to tar separated by a space(if more than one). Default is {}".format( " ".join(LF_FILES) ), ) args = parser.parse_args() clean_up_submission_lf_files( args.sim_dir, submission_files_to_tar=args.submission_files_to_tar, lf_files_to_tar=args.lf_files_to_tar, )
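# Example invocation (illustrative only; the script name and path are assumptions):
#   python clean_up_sim_dir.py /home/user/Albury/Runs/Albury/Albury_HYP15-21_S1384 \
#       -submission "*.log" -lf "Restart"
# This tars the default submission/LF file patterns plus the extra ones given,
# producing submission.tar and LF.tar inside the realization directory.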
python
# -*- coding: utf-8 -*-
"""
Spyder Editor

Estimate liver fibrosis or cirrhosis from the APRI and FIB-4 scores.

This is a temporary script file.
"""
import math

# APRI stands for: AST to Platelet Ratio Index
# AST unit: iu/l
# PRI (platelet count) unit: 10**9/L
# An APRI > 2 suggests possible cirrhosis
def APRI(AST, upper_AST, PRI):
    apri = ((AST * 1.0 / upper_AST) * 100) / PRI
    return apri

# FIB-4 stands for Fibrosis-4
# age unit: years
# AST and ALT unit: U/L (U/L and iu/L are generally interchangeable; the former is the
# Chinese convention, the latter the international one)
def FIB4(age, AST, ALT, PRI):
    fib4 = (age * AST) / (PRI * math.sqrt(ALT))
    return fib4

# Estimate the liver condition
def Liver_condition(apri, fib4):
    if apri > 2:
        print("Cirrhosis may be present")
        print("For chronic hepatitis B carriers, antiviral treatment should be considered")
    if fib4 < 1.45:
        print("No significant fibrosis, or fibrosis below stage 2 (mild fibrosis)")
    if fib4 > 3.25:
        print("Fibrosis of stage 3-4 or above")

# Disclaimer
def Print_warning():
    print("The algorithm is still being refined; results are for reference only. "
          "Please follow up with an infectious-disease or hepatology specialist")

def Print_unit():
    print("The biochemical values come from liver-function and complete-blood-count tests")
    print("AST unit: iu/l")
    print("ALT unit: U/L")
    print("PRI unit: 10**9/L")
    print("Age unit: years")
    print("U/L and iu/L are generally interchangeable; the former is the Chinese convention, "
          "the latter the international one")

# Disclaimer
Print_warning()
# Print the units of the biochemical values
print("-" * 30)
Print_unit()
print("-" * 30)
print("")
print("")
# Read the input parameters
print("Please enter the following values (e.g. 10, 23.5, ...):")
AST = float(input("Aspartate aminotransferase (AST): "))
upper_AST = float(input("Upper limit of normal for AST: "))
ALT = float(input("Alanine aminotransferase (ALT): "))
PRI = float(input("Platelet count (PRI): "))
age = float(input("Age: "))
apri = APRI(AST, upper_AST, PRI)
fib4 = FIB4(age, AST, ALT, PRI)
print("-" * 30)
print("")
print("")
print("Estimated result:")
# Estimate the liver condition
Liver_condition(apri, fib4)
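# Worked example with illustrative values (not clinical advice): for AST = 80 iu/l,
# an AST upper limit of 40 iu/l, PRI (platelets) = 150 x 10**9/L, ALT = 60 U/L, age = 50:
#   APRI  = ((80 / 40) * 100) / 150        ~= 1.33  (below the 2.0 cirrhosis cut-off)
#   FIB-4 = (50 * 80) / (150 * sqrt(60))   ~= 3.44  (above the 3.25 cut-off)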
python
""" :class:`Registrable` is a "mixin" for endowing any base class with a named registry for its subclasses and a decorator for registering them. """ import importlib import logging from collections import defaultdict from typing import ( Callable, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple, Type, TypeVar, cast, ) from .exceptions import ConfigurationError, IntegrationMissingError, RegistryKeyError from .from_params import FromParams from .util import ( could_be_class_name, find_integrations, find_submodules, import_module_and_submodules, ) logger = logging.getLogger(__name__) _T = TypeVar("_T") _RegistrableT = TypeVar("_RegistrableT", bound="Registrable") _SubclassRegistry = Dict[str, Tuple[type, Optional[str]]] class Registrable(FromParams): """ Any class that inherits from ``Registrable`` gains access to a named registry for its subclasses. To register them, just decorate them with the classmethod ``@BaseClass.register(name)``. After which you can call ``BaseClass.list_available()`` to get the keys for the registered subclasses, and ``BaseClass.by_name(name)`` to get the corresponding subclass. Note that the registry stores the subclasses themselves; not class instances. In most cases you would then call :meth:`~tango.common.from_params.FromParams.from_params()` on the returned subclass. You can specify a default by setting ``BaseClass.default_implementation``. If it is set, it will be the first element of :meth:`list_available()`. Note that if you use this class to implement a new ``Registrable`` abstract class, you must ensure that all subclasses of the abstract class are loaded when the module is loaded, because the subclasses register themselves in their respective files. You can achieve this by having the abstract class and all subclasses in the ``__init__.py`` of the module in which they reside (as this causes any import of either the abstract class or a subclass to load all other subclasses and the abstract class). """ _registry: ClassVar[DefaultDict[type, _SubclassRegistry]] = defaultdict(dict) default_implementation: Optional[str] = None @classmethod def register( cls, name: str, constructor: Optional[str] = None, exist_ok: bool = False ) -> Callable[[Type[_T]], Type[_T]]: """ Register a class under a particular name. :param name: The name to register the class under. :param constructor: The name of the method to use on the class to construct the object. If this is given, we will use this method (which must be a ``@classmethod``) instead of the default constructor. :param exist_ok: If True, overwrites any existing models registered under ``name``. Else, throws an error if a model is already registered under ``name``. Examples -------- To use this class, you would typically have a base class that inherits from ``Registrable``:: class Vocabulary(Registrable): ... Then, if you want to register a subclass, you decorate it like this:: @Vocabulary.register("my-vocabulary") class MyVocabulary(Vocabulary): def __init__(self, param1: int, param2: str): ... Registering a class like this will let you instantiate a class from a config file, where you give ``"type": "my-vocabulary"``, and keys corresponding to the parameters of the ``__init__`` method (note that for this to work, those parameters must have type annotations). 
If you want to have the instantiation from a config file call a method other than the constructor, either because you have several different construction paths that could be taken for the same object (as we do in ``Vocabulary``) or because you have logic you want to happen before you get to the constructor (as we do in ``Embedding``), you can register a specific ``@classmethod`` as the constructor to use, like this:: @Vocabulary.register("my-vocabulary-from-instances", constructor="from_instances") @Vocabulary.register("my-vocabulary-from-files", constructor="from_files") class MyVocabulary(Vocabulary): def __init__(self, some_params): ... @classmethod def from_instances(cls, some_other_params) -> MyVocabulary: ... # construct some_params from instances return cls(some_params) @classmethod def from_files(cls, still_other_params) -> MyVocabulary: ... # construct some_params from files return cls(some_params) """ registry = Registrable._registry[cls] def add_subclass_to_registry(subclass: Type[_T]) -> Type[_T]: # Add to registry, raise an error if key has already been used. if name in registry: def fullname(c: type) -> str: return f"{c.__module__}.{c.__qualname__}" already_in_use_for = registry[name][0] if already_in_use_for.__module__ == "__main__": # Sometimes the same class shows up under module.submodule.Class and __main__.Class, and we # don't want to make a fuss in that case. We prefer the class without __main__, so we go # ahead and overwrite the entry. pass elif subclass.__module__ == "__main__": # We don't want to overwrite the entry because the new one comes from the __main__ module. return already_in_use_for elif exist_ok: message = ( f"Registering {fullname(subclass)} as a {fullname(cls)} under the name {name} overwrites " f"existing entry {fullname(already_in_use_for)}, which is fine because you said " "exist_ok=True." ) logger.info(message) else: message = ( f"Attempting to register {fullname(subclass)} as a {fullname(cls)} under the name " f"'{name}' failed. {fullname(already_in_use_for)} is already registered under that name." ) raise ConfigurationError(message) registry[name] = (subclass, constructor) return subclass return add_subclass_to_registry @classmethod def by_name(cls: Type[_RegistrableT], name: str) -> Callable[..., _RegistrableT]: """ Returns a callable function that constructs an argument of the registered class. Because you can register particular functions as constructors for specific names, this isn't necessarily the ``__init__`` method of some class. """ logger.debug(f"instantiating registered subclass {name} of {cls}") subclass, constructor = cls.resolve_class_name(name) if not constructor: return cast(Type[_RegistrableT], subclass) else: return cast(Callable[..., _RegistrableT], getattr(subclass, constructor)) @classmethod def search_modules(cls: Type[_RegistrableT], name: str): """ Search for and import modules where ``name`` might be registered. 
""" if could_be_class_name(name) or name in Registrable._registry[cls]: return None def try_import(module): try: import_module_and_submodules(module) except IntegrationMissingError: pass except ImportError as e: if e.name != module: raise integrations = {m.split(".")[-1]: m for m in find_integrations()} integrations_imported: Set[str] = set() if name in integrations: try_import(integrations[name]) integrations_imported.add(name) if name in Registrable._registry[cls]: return None if "::" in name: maybe_integration = name.split("::")[0] if maybe_integration in integrations: try_import(integrations[maybe_integration]) integrations_imported.add(maybe_integration) if name in Registrable._registry[cls]: return None for module in find_submodules(exclude={"tango.integrations*"}, recursive=False): try_import(module) if name in Registrable._registry[cls]: return None # If we still haven't found the registered 'name', try importing all other integrations. for integration_name, module in integrations.items(): if integration_name not in integrations_imported: try_import(module) integrations_imported.add(integration_name) if name in Registrable._registry[cls]: return None @classmethod def resolve_class_name( cls: Type[_RegistrableT], name: str, search_modules: bool = True, ) -> Tuple[Type[_RegistrableT], Optional[str]]: """ Returns the subclass that corresponds to the given ``name``, along with the name of the method that was registered as a constructor for that ``name``, if any. This method also allows ``name`` to be a fully-specified module name, instead of a name that was already added to the ``Registry``. In that case, you cannot use a separate function as a constructor (as you need to call ``cls.register()`` in order to tell us what separate function to use). If the ``name`` given is not in the registry and ``search_modules`` is ``True``, it will search for and import modules where the class might be defined according to :meth:`search_modules()`. """ if name in Registrable._registry[cls]: subclass, constructor = Registrable._registry[cls][name] return subclass, constructor elif could_be_class_name(name): # This might be a fully qualified class name, so we'll try importing its "module" # and finding it there. parts = name.split(".") submodule = ".".join(parts[:-1]) class_name = parts[-1] try: module = importlib.import_module(submodule) except ModuleNotFoundError: raise ConfigurationError( f"tried to interpret {name} as a path to a class " f"but unable to import module {submodule}" ) try: subclass = getattr(module, class_name) constructor = None return subclass, constructor except AttributeError: raise ConfigurationError( f"tried to interpret {name} as a path to a class " f"but unable to find class {class_name} in {submodule}" ) else: # is not a qualified class name if search_modules: cls.search_modules(name) return cls.resolve_class_name(name, search_modules=False) available = cls.list_available() suggestion = _get_suggestion(name, available) raise RegistryKeyError( ( f"'{name}' is not a registered name for '{cls.__name__}'" + (". " if not suggestion else f", did you mean '{suggestion}'? ") ) + "If your registered class comes from custom code, you'll need to import " "the corresponding modules. If you're using Tango or AllenNLP from the command-line, " "this is done by using the '--include-package' flag, or by specifying your imports " "in a '.allennlp_plugins' file. " "Alternatively, you can specify your choices " """using fully-qualified paths, e.g. 
{"model": "my_module.models.MyModel"} """ "in which case they will be automatically imported correctly." ) @classmethod def list_available(cls) -> List[str]: """List default first if it exists""" keys = list(Registrable._registry[cls].keys()) default = cls.default_implementation if default is None: return keys if default not in keys: cls.search_modules(default) keys = list(Registrable._registry[cls].keys()) if default not in keys: raise ConfigurationError(f"Default implementation '{default}' is not registered") else: return [default] + [k for k in keys if k != default] def _get_suggestion(name: str, available: List[str]) -> Optional[str]: # Check for simple mistakes like using '-' instead of '_', or vice-versa. for ch, repl_ch in (("_", "-"), ("-", "_")): suggestion = name.replace(ch, repl_ch) if suggestion in available: return suggestion return None
python
import functools
import os
import pickle


# decorator for pickle-caching the result of a function
def pickle_cache(cache_filename, compare_filename_time=None, overwrite=False):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            exists = os.path.exists(cache_filename)
            needs_redo = overwrite
            if exists and compare_filename_time is not None:
                # recompute if the reference file is newer than the cached result
                needs_redo |= os.path.getmtime(cache_filename) < os.path.getmtime(compare_filename_time)
            if not exists or needs_redo:
                result = func(*args, **kwargs)
                pkl_save(result, cache_filename)
            else:
                result = pkl_load(cache_filename)
            return result
        return wrapper
    return decorator


def pkl_save(obj, filename):
    pathname = os.path.split(filename)[0]
    # only create the parent directory when the path actually has one;
    # os.makedirs('') would raise for bare filenames
    if pathname and not os.path.exists(pathname):
        os.makedirs(pathname)
    with open(filename, 'wb') as f:
        pickle.dump(obj, f, protocol=2)


def pkl_load(filename):
    with open(filename, 'rb') as f:
        result = pickle.load(f)
    return result


def isnewer(src, dst):
    if os.path.exists(dst):
        return os.path.getmtime(src) > os.path.getmtime(dst)
    else:
        return True


def ensure_path(path):
    if not os.path.exists(path):
        os.makedirs(path)
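# --- Illustrative usage sketch (added example; 'cache/demo_sum.pkl' is a
# hypothetical cache path, not part of the module above) ---
if __name__ == '__main__':
    @pickle_cache('cache/demo_sum.pkl')
    def expensive_sum(n):
        return sum(i * i for i in range(n))

    print(expensive_sum(100000))  # computed once and written to the cache
    print(expensive_sum(100000))  # served from cache/demo_sum.pkl
    # Note: the cache key is the filename only, so later calls with different
    # arguments would reuse the same cached value.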
python
# Generated by Django 3.0.4 on 2021-04-13 19:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('website', '0004_uploader_image_l'), ] operations = [ migrations.AlterField( model_name='uploader', name='id', field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'), ), ]
python
# "THE BEER-WARE LICENSE" (Revision 42): # <[email protected]> wrote this file. As long as you retain this notice you can do whatever you want with this stuff. If we meet some day, and you think this stuff is worth it, you can buy me a beer in return # read a json describing people do the magic to pick two different people to send # gifts to so that you don't have to gift yourself, or your partner and send a mail to tell you, so that no-one know. # # expects SMTP config by environment variables (SMTP_HOST, SMTP_LOGIN, SMTP_PASS) # usage : main.py data.json # # ex json # [ # {"nom":"Alice", # "blacklist": ["Bob"], # "email": "[email protected]"}, # {"nom":"Bob", # "blacklist": ["Alice", "Floflo"], # "email": "[email protected]"}, # ..] import itertools import random import smtplib import sys, os import json with open(sys.argv[1], 'br') as input_data : raw_dat = json.load(input_data) persons = [d["nom"] for d in raw_dat] email_addresses = {} blacklist = {} for p in raw_dat : email_addresses[p["nom"]] = p["email"] blacklist[p["nom"]] = p["blacklist"] def filter_first_in_blacklist(c): """ this to avoid aving a couple """ return not c[0] == blacklist[c[1]][0] or not c[1] == blacklist[c[0]][0] tentatives = 0 while tentatives < 1000 : # generate a list of all possible binomes, excluding blacklisted combinations binomes = list(filter(filter_first_in_blacklist, itertools.combinations(persons, 2))) random.shuffle(binomes) result = {} has_present = set() for p in persons : try : # find the first pair NOT containing this person not_me = filter(lambda b: not p in b, binomes) not_blacklist = filter(lambda b: not b[0] in blacklist[p], not_me) not_blacklist = filter(lambda b: not b[1] in blacklist[p], not_blacklist) binome = next(not_blacklist) except StopIteration : # no solution break for target in binome : if target in has_present : # this one has 2 presents, remove all possible couple # containing this person. binomes = list(filter(lambda c: not target in c, binomes)) else : has_present.add(target) result[p] = binome try : binomes.remove(binome) except ValueError: # binome already removed pass if len(result) == len(persons): break print("failed", tentatives) tentatives += 1 with open("result_kdo.json", 'w') as f : f.write(json.dumps(result, ensure_ascii=False)) # check counts = {k:0 for k in persons} for k,(a,b) in result.items() : counts[a] += 1 counts[b] += 1 assert(a != k) assert(b != k) assert(not a in blacklist[k]) assert(not b in blacklist[k]) for c in counts.values() : assert(c == 2) SMTP_HOST = os.environ['SMTP_HOST'] SMTP_PORT = 587 SMTP_LOGIN = os.environ['SMTP_LOGIN'] SMTP_PASS = os.environ['SMTP_PASS'] # sending emails s = smtplib.SMTP(host=SMTP_HOST, port=SMTP_PORT) s.starttls() s.login(SMTP_LOGIN, SMTP_PASS) from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText message_template = "Salut {name}!\n\nTu as l'honneur et le privilège d'offrir un truc à {gift_one} et {gift_two} ! Chic.\n\nGros bisous\nLe robot super content de Noël ( https://github.com/flo-dhalluin/tirage-kdo-bot )" for name, gifts in result.items(): msg = MIMEMultipart() msg["From"] = "SuperContent <[email protected]>" msg["To"] = email_addresses[name] msg["Subject"] = "[Cadeaux Famille D'halluin] A qui va tu faire un cadeau à Noël ?" msg.attach(MIMEText(message_template.format(name=name, gift_one=gifts[0], gift_two=gifts[1]), 'plain')) s.send_message(msg)
python
''' Python program to add two positive integers without using the '+' operator ''' ''' x << y Returns x with the bits shifted to the left by y places (and new bits on the right-hand-side are zeros). This is the same as multiplying x by 2**y. x >> y Returns x with the bits shifted to the right by y places. This is the same as //'ing x by 2**y. x & y Does a "bitwise and". Each bit of the output is 1 if the corresponding bit of x AND of y is 1, otherwise it's 0. x | y Does a "bitwise or". Each bit of the output is 0 if the corresponding bit of x AND of y is 0, otherwise it's 1. ~ x Returns the complement of x - the number you get by switching each 1 for a 0 and each 0 for a 1. This is the same as -x - 1. x ^ y Does a "bitwise exclusive or". Each bit of the output is the same as the corresponding bit in x if that bit in y is 0, and it's the complement of the bit in x if that bit in y is 1. ''' def add_without_plus_operator (a,b): while b != 0 : data = a & b a = a ^ b b = data << 1 return a print (add_without_plus_operator (2,10)) print (add_without_plus_operator (-20, 10)) print (add_without_plus_operator (-10, -20))
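# --- Worked trace (added example) of the carry-propagation loop above for
# add_without_plus_operator(2, 10):
#   a=0b0010, b=0b1010 -> carry = a & b = 0b0010, a = a ^ b = 0b1000, b = carry << 1 = 0b0100
#   a=0b1000, b=0b0100 -> carry = 0b0000,         a = 0b1100, b = 0b0000  -> loop ends, a = 12
assert add_without_plus_operator(2, 10) == 12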
python
import timer.helper.thread as thread class TestThreadIsNone(): def test_real_none(self) -> None: assert thread.is_none(None) is True def test_text_none_uppercase(self) -> None: assert thread.is_none("NONE") is True def test_text_none_lowercase(self) -> None: assert thread.is_none("none") is False def test_random_text(self) -> None: assert thread.is_none("something") is False def test_random_number(self) -> None: assert thread.is_none(123) is False
python
# Copyright (c) 2019, Stefan Grönke # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted providing that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import typing import ctypes import struct import enum import freebsd_sysctl.libc import freebsd_sysctl.types import freebsd_sysctl.flags from freebsd_sysctl.__version__ import __version__ NULL_BYTES = b"\x00" CTL_MAXNAME = ctypes.c_uint(24) T_OID = (ctypes.c_int * 2) BUFSIZ = 1024 # see /include/stdio.h#L209 class Sysctl: _name: typing.Optional[str] _oid: typing.Optional[typing.List[int]] _kind: typing.Optional[int] _fmt = typing.Optional[str] _size: typing.Optional[int] _value: typing.Optional[typing.Any] _description: typing.Optional[str] def __init__( self, name: typing.Optional[str]=None, oid: typing.Optional[typing.List[int]]=None ) -> None: self._name = name self._oid = oid self._kind = None self._fmt = None self._size = None self._value = None self._description = None @property def oid(self) -> typing.List[int]: if self._oid is None: if self.name is None: raise ValueError("Name or OID required") self._oid = self.name2oid(self.name) return self._oid @property def name(self) -> str: if self._name is None: if self.oid is None: raise ValueError("Name or OID required") self._name = self.oid2name(self.oid) return self._name @property def kind(self) -> int: if self._kind is None: self.__query_kind_and_fmt() return self._kind @property def fmt(self) -> int: if self._fmt is None: self.__query_kind_and_fmt() return self._fmt @property def size(self) -> int: if self._size is None: self._size = self.query_size(self.oid, self.ctl_type) return self._size @property def raw_value(self) -> typing.Any: if self._value is None: self._value = self.query_value(self.oid, self.size, self.ctl_type) return self._value @property def value(self) -> typing.Any: if type(self.raw_value.value) == str: return self.raw_value.value.strip("\n") return self.raw_value.value @property def description(self) -> str: if self._description is None: self._description = self.query_description(self.oid) return self._description.strip("\n") @property def next(self): return self.__class__(oid=self.query_next(self.oid)) @property def children(self) -> typing.Iterator['Sysctl']: if self.ctl_type != freebsd_sysctl.types.NODE: return current = self.next while self.oid == current.oid[:len(self.oid)]: yield current current = current.next def __query_kind_and_fmt(self) -> None: 
self._kind, self._fmt = self.query_fmt(self.oid) @staticmethod def name2oid(name: str) -> typing.List[int]: p_name = ctypes.c_char_p(name.encode() + NULL_BYTES) oid = T_OID(0, 3) p_oid = ctypes.POINTER(T_OID)(oid) length = ctypes.c_int(CTL_MAXNAME.value * ctypes.sizeof(ctypes.c_int)) p_length = ctypes.POINTER(ctypes.c_int)(length) Res = ctypes.c_int*length.value res = (Res)() freebsd_sysctl.libc.dll.sysctl( p_oid, 2, ctypes.POINTER(Res)(res), p_length, p_name, len(p_name.value) ) oid_length = int(length.value / ctypes.sizeof(ctypes.c_int)) return res[:oid_length] @staticmethod def oid2name(oid: typing.List[int]) -> str: qoid_len = (2 + len(oid)) qoid_type = ctypes.c_int * qoid_len qoid = (qoid_type)(*([0, 1] + oid)) p_qoid = ctypes.POINTER(qoid_type)(qoid) buf = ctypes.create_string_buffer(BUFSIZ) buf_void = ctypes.cast(buf, ctypes.c_void_p) buf_length = ctypes.sizeof(buf) p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length)) freebsd_sysctl.libc.dll.sysctl( p_qoid, qoid_len, buf_void, p_buf_length, 0, 0 ) return buf.value.decode() @staticmethod def query_fmt(oid: typing.List[int]) -> typing.Tuple[int, str]: qoid_len = (2 + len(oid)) qoid_type = ctypes.c_int * qoid_len qoid = (qoid_type)(*([0, 4] + oid)) p_qoid = ctypes.POINTER(qoid_type)(qoid) buf_type = ctypes.c_char * BUFSIZ buf = buf_type() p_buf = ctypes.POINTER(buf_type)(buf) buf_void = ctypes.cast(p_buf, ctypes.c_void_p) buf_length = ctypes.sizeof(buf) p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length)) freebsd_sysctl.libc.dll.sysctl( p_qoid, qoid_len, buf_void, p_buf_length, 0, 0 ) if len(buf) < 4: raise Exception("response buffer too small") result = buf[:buf_length] kind, = struct.unpack("I", result[:4]) null_pos = result.find(b'\x00',4) # buf is large and string is small fmt = result[4:null_pos].decode() return (kind, fmt) @staticmethod def query_size( oid: typing.List[int], ctl_type: freebsd_sysctl.types.CtlType ) -> bytes: oid_type = ctypes.c_int * len(oid) _oid = (oid_type)(*oid) p_oid = ctypes.POINTER(oid_type)(_oid) length = ctypes.c_int() p_length = ctypes.POINTER(ctypes.c_int)(length) freebsd_sysctl.libc.dll.sysctl( p_oid, len(oid), None, p_length, 0 ) return max(length.value, ctl_type.min_size) @staticmethod def query_value( oid: typing.List[int], size: int, ctl_type: freebsd_sysctl.types.CtlType ) -> bytes: # ToDo: check if value is readable oid_type = ctypes.c_int * len(oid) _oid = (oid_type)(*oid) p_oid = ctypes.POINTER(oid_type)(_oid) buf_type = ctypes.c_char * size buf = buf_type() p_buf = ctypes.POINTER(buf_type)(buf) p_buf_void = ctypes.cast(p_buf, ctypes.c_void_p) buf_length = ctypes.sizeof(buf) p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length)) freebsd_sysctl.libc.dll.sysctl( p_oid, ctypes.c_uint32(len(oid)), p_buf_void, p_buf_length, None, 0 ) return ctl_type(buf, size) @staticmethod def query_description( oid: typing.List[int] ) -> str: qoid_len = (2 + len(oid)) qoid_type = ctypes.c_int * qoid_len qoid = (qoid_type)(*([0, 5] + oid)) p_qoid = ctypes.POINTER(qoid_type)(qoid) buf_type = ctypes.c_char * BUFSIZ buf = buf_type() p_buf = ctypes.POINTER(buf_type)(buf) buf_void = ctypes.cast(p_buf, ctypes.c_void_p) buf_length = ctypes.sizeof(buf) p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length)) freebsd_sysctl.libc.dll.sysctl( p_qoid, qoid_len, buf_void, p_buf_length, 0, 0 ) return buf.value.decode() @staticmethod def query_next(oid: typing.List[int]) -> bytes: qoid_len = (2 + len(oid)) qoid_type = ctypes.c_int * qoid_len qoid = (qoid_type)(*([0, 
2] + oid)) p_qoid = ctypes.POINTER(qoid_type)(qoid) buf_type = ctypes.c_int * CTL_MAXNAME.value buf = buf_type() p_buf = ctypes.POINTER(buf_type)(buf) buf_void = ctypes.cast(p_buf, ctypes.c_void_p) buf_length = ctypes.sizeof(buf) p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length)) freebsd_sysctl.libc.dll.sysctl( p_qoid, qoid_len, buf_void, p_buf_length, 0, 0 ) oid_length = int( p_buf_length.contents.value / ctypes.sizeof(ctypes.c_int) ) return buf[0:oid_length] @property def ctl_type(self) -> freebsd_sysctl.types.CtlType: return self.get_ctl_type(self.kind, self.fmt) @staticmethod def get_ctl_type( kind: int, fmt: bytes ) -> freebsd_sysctl.types.CtlType: return freebsd_sysctl.types.identify_type(kind, fmt) def has_flag(self, flag: int) -> bool: """Return is the sysctl has a certain flag.""" return (self.kind & flag == flag) is True
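# --- Illustrative usage sketch (added example; runs on FreeBSD only, and the
# values shown in the comments are hypothetical) ---
if __name__ == "__main__":
    hostname = Sysctl(name="kern.hostname")
    print(hostname.oid)          # e.g. [1, 10]
    print(hostname.value)        # e.g. 'my-freebsd-box'
    print(hostname.description)  # e.g. 'Hostname'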
python
# todo
python
import pickle from pathlib import Path import torch import os from sklearn.model_selection import GroupKFold from torch.utils.data import DataLoader from classifier.config import get_conf from classifier.fracture_detector.data import get_meta, WristFractureDataset from classifier.fracture_detector.data._transform import get_train_val_transformations_kneel from classifier.fracture_detector.model import ModelWithTemperature from utils import apply_fixed_seed, apply_deterministic_computing, get_snapshots, FractureDetector if __name__ == '__main__': cwd = Path().cwd() conf_file = cwd.parents[0] / 'config' / 'config.yaml' config = get_conf(conf_file=conf_file, cwd=cwd) apply_fixed_seed(config.seed) apply_deterministic_computing(config.deterministic) if isinstance(config.local_rank, int): device = torch.device(f'cuda:{config.local_rank}') torch.cuda.set_device(config.local_rank) else: device = torch.device('cpu') # meta is the master meta here meta = get_meta(config) if isinstance(config.dataset.side, int): config.dataset.side = [config.dataset.side] fd_lat_folder = os.path.join(config.snapshot_folder, 'LAT') fd_pa_folder = os.path.join(config.snapshot_folder, 'PA') fd_lat_snapshots = get_snapshots(fd_lat_folder) fd_pa_snapshots = get_snapshots(fd_pa_folder) lat_detector = FractureDetector(config, fd_lat_snapshots, side=1, device=device) pa_detector = FractureDetector(config, fd_pa_snapshots, side=0, device=device) meta_pa = meta[meta.Side == 0] meta_lat = meta[meta.Side == 1] _, pa_trf = get_train_val_transformations_kneel(config, meta, 0) _, lat_trf = get_train_val_transformations_kneel(config, meta, 1) gkf = GroupKFold(5) _, val_ind_pa = next(gkf.split(meta_pa, meta_pa.Fracture, meta_pa.ID)) gkf = GroupKFold(5) # gfk need to re-initialize to have the same validaiton data as the training _, val_ind_lat = next(gkf.split(meta_lat, meta_lat.Fracture, meta_lat.ID)) val_ds_pa = WristFractureDataset(root=config.dataset.data_home, meta=meta_pa.iloc[val_ind_pa], transform=pa_trf) val_ds_lat = WristFractureDataset(root=config.dataset.data_home, meta=meta_lat.iloc[val_ind_lat], transform=lat_trf) loader_pa = DataLoader(dataset=val_ds_pa, batch_size=config.train_params.val_bs, num_workers=config.dataset.n_data_workers, shuffle=False, pin_memory=True) loader_lat = DataLoader(dataset=val_ds_lat, batch_size=config.train_params.val_bs, num_workers=config.dataset.n_data_workers, shuffle=False, pin_memory=True) temp_dict = dict() temp_dict['PA'] = list() temp_dict['LAT'] = list() for model in pa_detector.models: model_with_tmp = ModelWithTemperature(model, device) model_with_tmp.set_temperature(loader_pa) temp_dict['PA'].append(model_with_tmp.temperature.item()) for model in lat_detector.models: model_with_tmp = ModelWithTemperature(model, device) model_with_tmp.set_temperature(loader_lat) temp_dict['LAT'].append(model_with_tmp.temperature.item()) with open('temp_old.pkl', 'wb') as f: pickle.dump(temp_dict, f) print(temp_dict)
python
# pylint: disable=all __version__ = "2.12.0" __author__ = "Criteo"
python
from ray.util.collective.collective import nccl_available, gloo_available, \ is_group_initialized, init_collective_group, destroy_collective_group, \ create_collective_group, get_rank, get_collective_group_size, \ allreduce, allreduce_multigpu, barrier, reduce, reduce_multigpu, \ broadcast, broadcast_multigpu, allgather, allgather_multigpu, \ reducescatter, reducescatter_multigpu, send, send_multigpu, recv, \ recv_multigpu __all__ = [ "nccl_available", "gloo_available", "is_group_initialized", "init_collective_group", "destroy_collective_group", "create_collective_group", "get_rank", "get_collective_group_size", "allreduce", "allreduce_multigpu", "barrier", "reduce", "reduce_multigpu", "broadcast", "broadcast_multigpu", "allgather", "allgather_multigpu", "reducescatter", "reducescatter_multigpu", "send", "send_multigpu", "recv", "recv_multigpu" ]
python
from sys import argv, exit
import sys
sys.path.append('src')
import os
import pandas as pd
import numpy as np
import random
from matplotlib import pyplot as plt
from ag import Ag
from graph import Graph
from pprint import pprint
from utils import readFiles

if __name__=='__main__':
    vertexes, edges, cities_df, cities_new_cases = readFiles()

    # Train the algorithm.
    graph = Graph(vertexes, edges, cities_df, cities_new_cases)

    n_steps=105
    city = 3168804 # Tiradentes

    accumulated_curve = []
    for i in range(len(cities_new_cases[city])):
        if i == 0:
            accumulated_curve.append(cities_new_cases[city][0])
        else:
            accumulated_curve.append( cities_new_cases[city][i] + accumulated_curve[i-1] )

    ag = Ag(graph, accumulated_curve[0:n_steps], city)

    # run the genetic algorithm
    c, weights = ag.run(npop=30, nger=150, cp=1.0, mp=0.01, xmaxc=2.0, xmax_edge=100)
    print(c, weights)

    # run the projection again with the weights that fit the curve best
    graph.setWeights(city, c, weights)

    predictions = np.zeros(shape=(30,len(cities_new_cases[city])-1))
    for i in range(30):
        graph.resetVertexValues()
        predictions[i] = graph.predict_cases(len(cities_new_cases[city])-1, city, debug=True)

    mean_prediction = predictions.mean(axis=0)

    plt.plot(mean_prediction, label="Prediction")
    plt.plot(accumulated_curve, label="Real Curve")
    plt.grid()
    plt.xlabel("Qtde. Dias")
    plt.ylabel("Qtde. Casos")
    plt.xticks(list(range(0,len(accumulated_curve),10)))

    y_min = 0
    y_max = max(accumulated_curve[-1], mean_prediction[len(cities_new_cases[city])-2])

    plt.vlines(n_steps, ymin=y_min, ymax=y_max, colors='red', linestyles='dashed', label='train/test')
    plt.ylim([0,y_max])
    plt.legend()
    plt.savefig('plots/aprox.pdf')
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from Crypto.Cipher import AES import base64 import time import gzip from hashlib import md5 import sys import io sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8', line_buffering=True) def Decrypt(key:str, text:str) -> str: if len(key) < 32: key += ' ' * (32 - len(key)) elif len(key) > 32: key = key[0:32] cipher = AES.new(bytes(key,encoding='utf-8'), AES.MODE_CBC, bytes(AES.block_size)) return str(gzip.decompress(bytes.strip(cipher.decrypt(base64.b64decode(text)))), encoding='utf-8') def Pass(id, priv_key): prefix = str(id) + str(int(time.time())) pub_key = prefix + md5(bytes(prefix + priv_key, 'utf8')).hexdigest() print('恭喜通过第%d关,通关公钥:%s' % (id, pub_key)) key=input('1+1=') exec(Decrypt(key,'JIvH7KUKFAKDu6ZfRjsV9VsCODat2VbDd6S+QAGKEXtGlSxvhUIhqHfXq/1EhGohqhFelniKn3294DpzdccOhP6KcQQPxpGVgKcQJfezn+4JA4Aq0rvWkVoYew8OkRCt2/7MmgVwLCxlqhIrI5SvibCg2Yg0nBs/qe+7rI2EcC16ncIiBICvQFIvewAsYLcIEHFFdbzkM2nwfjxFnQ1bqgchYMm0lsKvztSAxxRS6ZFrdZqNb3u8Iyg6DB1vRu2BZFu5ed3E0g926LASeliCxvltvE5EJaJfJtquFAMeJxlcDTEkRdWbdoi5zbB2UK7ZM+i+STJPK+QKo0MEMAm+pkXmm0ZYttEYXDSqJHoutOVGX73EHnsBtGSYqs20UVHT5AbFXu8adbUtM5eqWJ5NRy8spXVnd/hOZo/qoS/Yp6LAKwWccC/J1As//SDpm+gsYENoKVgGoqJFStWccrqk6pWGIwEwimUq2tXaTsfCbHYCNT+AOrWYD0w6c3LJdFj38PrZSYjEceJHFeP7bdX2u5JmXlXKrZgpDNVP/RnQS1Zhw76ZTid31IPprHVHD1indT21WapbtdVuhDijAYpAFvzVmjeFPXjaUuAZwJw9voW/jg9Ucfe0OScMs82xVTW0EfBqPpM2WH+OXjC+xZUrrlqkuqG67qaf66Lhl+uSuuGinTIbzaMnlY8CyNpRBbJyHpu4/keDWZC2n0C5DCdvmWIQHtM0UJs0v4MICgu74Rrf11tmuUvKb4htLMTGT3BDjELZQvejWqMNjKods8W+B62hKYqLJDyJEsxjGe1uZWdmyZnm4oPLwzpJLlOZqIUL+uJkm7/nCkqadPdRQT/80xXz+K4btjaNkiKmTPSBtnCs3clWH1ZDHehMTZXu6Md2Y9TUjVXoEB7f96ZmWmuttFuLBnLpT9FsOxxHL1XBXSusgltORLgJx7t2zrcFJr+z8Uw3fyiN6XiR/YdbMhhUucgroPLhJB0Z6g0h5pdKjmyHsXzQ9k9PA8hdXHzME4MG7rdi7IsHPMC56PPoxenrkNLnFrcwxJ4vmVPhXHqljKo0PrtGsfFHw3Yy5/MqOmz5ZSN9F92gZQiHZwhKLXW/HNGnOexEONDCSccDch7Nt7ztqlcA3fygD6Kx8/N+YNTtiudlw6ZG3FzCaZusn9JQsswrhYMN2lWCSSB+JB2Ol1yOHwIGRKCJ+cj6XShojG/KHbfDahNt4GPZi7fK+8kIUir+9KQ8PqEFi1K9N868oqlY1JN85LhA55WPdvVlTAe8o7XQCVYM31ce9iM/ZCRLC6uAu/EVK1aju4zgMumxQumfSDn4J3m80R4WANDvyPSmqqhB950TqarXHc9ni9g91wp6OqmZcs43Mtwyj5DLpITc1AZTGagiLDC8ChDZJQ7v2o5Hegf4iPdTSB4j8bMkRYDOAjLutSix4tqA5uDt7z069UPIhNUSFWOhGkN2jzUqoITNbOx1Icxbj4YPsiZ3bT3DUXoEzAtjf6JW8N9X3iItG9kz8LqdnkpUmOtaMlDwTXnbQC1/gkFZKuCPK0Nf4PXiEmWLUcaajM1mCuKDrTRqaevcqsOXIVw2dODsQQTLysnQaAXlWJv9jYYCpcenvQ9dVGc5XJz7NNzBcy1XmNBrctQuiUvc1v2IkQfKVlmlEo4OaN0ZkxjQZZUkg3ghyr7dA3qve3VRn6i9ObPC1MmATr5NjXsBoyhDO9nidqZYfRhJamhL5AuCR4Y91PI2h9qapdGbRYJs1WX3d5qZ/wVTt6dHFAZPwxL7wEHmevLCoGw6Fp8YnxVZGynwsonR37WfQt6BcNYUZMPr4Is9rO79tRmbsOe932VOCi1dZ2eEvEMM5hah6/1fc266Ssu6HHsmkkrwe8C74QTwduP0vpxD1kX5GSu9jq2Y4Keg5nCRtBlMg2xdIeyyg4CIDX7BYDkmP4Yn/3xczpbB7+PfB80x0qi70u4mfEikdwuasaxkChIEXBBaMAdjUj7rVfJvasy/hUNZ6tp2AJwwBfLKSLxsKIb7p0E+a/Vz0lJ88u3HHjqiL/UjN6qTV5oWFJcU303Bpbh8wlTRoFU89Jq31GfkPbuifwGEmTgjyzQpg6AJP0K9wJX3f7C8W2TbEeUA3noWkNtl814jvbovSIB/inK1DWuChLsn9eInyLJ7d7u/OFL/UFPA/C5fvAsS/l+Kwf68ghZRB8ftr/x8b835k2woU2LWgbi70R3iNVBQ/q04lxYJYImYaHWGRyQCjv4n6WF1c53fN7l9ATuNOwR57Ap7XpEwHSSAeP/kt7pkhM4wp6o17XRYiHjzZI/hv+9LieLPB+uLpth1PoL2Lo0w5930Dj/g1gLtJAdowfjyvSjcIUUHwVZOkjmgm/vvEH0pFohWTZr7ZSPkGvXwEEdjocWA/4qNCHSbXXceqDqEaW7w/599WkEKbA5zTw04c0AsXSrCjPGgm99ZGvIn0/8I7XUdR7uPbw36ybgwjBYCq37jqCDf5wxNp7UhXLLHehn4TtGGlX6v6iwDVU2tWBS3U8BfWRIqTTUtrr+b3U1J2bHi2cDmvLS4ym5eci0Kv7XHD9cj2aBj6cPOkXt0kgBNiylVwFJg0bcuNWYOXeN36kj3PIVrSJ7mDqCYT1wupgQT/PlYZpq6uy1YuBS8loSfi0TP3uXr5gz4ZKCd5UhA5Dj4qeSYJs2tOkpSOhMQMguZYNHeZrPnHJMRq7I3LqZOAnQ299Y9JEN5YNT2s5PrgqkzzQka4IV9bE3JgxykW66ZJxapHG820aH9s5RvO
McdJJms/FA/kX0oOiLNrYW450Ec70MPi4ZGzom4tqavSyPj/iYZlVHAt2WIB3zoToIgf4rcjkgshN81tGg33zpIV59j3sWJ7paqEoE7BszOz0193AUML7NC7dJJpJStH+pkGncL91at4eeMplBXUBIuKknrrEti/X4eFvBY8ns0hHH+pI5uv3tyGxdI3GkHpwLRxGlyLR4Wril9VcIqiTMhdcag/JS5AByd68RkHkKJScwX7Qb9t1uWsplbQ0SlSvqZgQqNO5Rw126B/ywXPHOLgpUfrgp3EnhJ/3mxdxDF8Lj6GP+nEChzVa4eZ0lZBLsyDJeGI2rmKKDQLMGZMs+xtLB9kfrIvlvLyTTuSXzlX/EDJ+BEmVlURyELCEDezhWT60Lt2kGJwCp2hl+pzbQh7wc0bbBgWRJwzdD74rZgWlHG8D8wOYlf+obtM2tjY5DCsxZtiEVatcdnhPqSZI3eIHnLHpfDZu69VMm01FlQwWirtK6cHIJAjXYnQEnj6H90Rp2LczNhzJkzS1vo/sV1N5iHP0Y+NE5Q1kypPHwTkOc0XdSlh3WIYwiYFtXu5PsLvYqbCcbjaBP6MbbOjTiwE73uMzp3T3hG3VzoqGWCYQFsDYtuz8/3uhHFEMFKjd0dhvV8q7bdCMgfJ8gm9CaEvnTH4h6Ta/fnermWvkBGveV7hE5lCDknDoKJzNU2giiHZHv77HvQuqnHG2UxLwFWrWNsYtqA8GTUYyxxr7sKxikCKdl079qVDUp99Xb/0CpNx8f1ajVg3VWGPHwY7v0BTITax+z/JG8EolLRua9oyb2uCx827/9F6A+D5bmZaKbImeOzejSslLx7lZkA/8cs1JzbdpgBcXP2cHvXmrWutxiLJkDiKgXOEE/trdSwzYXn5TwWSRCtRx65D3RGKnjA7mPpSpHWmOJz7NpIxgi3CJSGmZAkPp6NjskpIhqPMAD1MjyY6BmlqSXvgNVArNEHegOoZWCwHVgO/0hxM2hUcSq1f1SPoq1N61qXQvw66DjgCYOLLb47lW3Y9OWWFCtDxnbR9w52xv8XyohW+26c/QGx07Z4Tt4k2Em7gslWSQiqvclL+P0cjVy75uwG0a0ARbBBADit9QFVFnsZyLQ3qCyTLi73LGRVzD11PsL6se7pRvRWMNmvmiQKw/4SfTaYF1srWpaDxgVwHoF2l2bufgatZufXyGOqMQW1b4Oim943Fobf81+jhPipKeonMspKrx1S/8iifz7UVXAVh2MebJo8YEQszRg38DzMcK2AxpXFANWA8i2tdVtU++njqXzM655+wblloZYa2s/x8iOO/YMHw4Q4iH5YfIp602tbOTUdYbTw3avhIC0vBsAzwi1kPOvfZeWXSPfqMChAvBboPPsEmu5ST/RFbWF3Wph/MPjKr548wudh29MRdKDqvTvK8ZCA9ymEIs6/nXyXVrPg3WMlVCwuiST+zsd4Aph3G2S051ndEiOqgirG6CVejwGg40YKG4f7jUWxL+Kps69ialit/Fz2+gG5jeZG+PmagxjnYHZtCzrWu4uYV+IQuJXcqlNIFznSTsEsvU2lbQgCbkSp9/CFtZqE4bXz8Oe02/j/rjnSGylT8VlrRa25O64byQYljv6Gvr6kgxcp8FygFcAjMzBaamYZydH5ZnSNBBrzrWeuWP2NfamUM0eGccSbhf3mWeJjm7O1ybYxAJdLqOTTh3AYE+nzhl9nOoF7QSC4eIIDGO0+PFMCr9IltBaNwx7AmhrIvaAOwyct+tJuDT0EKxPhuNfIJWNJ6ub3UT7iGB4xPVzIERA1Mue7UuvLdardWhMqAqFhBEDzFwNwM7b/lJsoRPFoc+WJr8isCLLfiGjzZhpuHmzVfMXwCOUvZnzYBUqHsxx4SAJPwk0PW6qUWkUG3vYCrRb6I/qge9QuYHPTQ5OE9WzQef9HIm7tp6bqywArRM+b7Mm0ldUz/ugebDo9cKGQqm4I3rBZ0FXh/VMdxbH6e/+0snAWdmL36VuLgXAVHko1hPsHe3PO/DVQhUXQQITMMJ2yUajWCmGHqFIyS9gqVqG9E9WdTSkmxs+2h4g+sk5OuPKdczvzm9Yf5oA49lksQuJcWD3M0MaXnvH07xwEsQuJiRWdo0JzPXA0OuMcQ1GPUV5E/rMiNn4yjRPP/HAFP7LlfKmkguFfcOsYyXhkNQ2zow9Q4+F12qXiHJGT5ShL4dZWiSU6PCgAmh/cLqFSD6+ILK4wOBRz9gqlck1pocJJazkP8FaXadW6+pfIWSeVSKQcsZDIXySu453ZsNxAtHOp1/TgtQZFpuarIVSGbUIpwqUacoL3NcuxuBhznHVLUp6WVvxNks4Z5O4wWH4c3tnE7qrx8r0qcVeuFrTRw96ICkDHqWNEr+gZrIlKAed9KIqGqMzjBZK+QtXDMECCXaS0nIab+ZlRNKFpWiqObLKPkSpLKZ5owcuO7EOudaeI6xc50wa7z6FBNMd2oCS9JWt14bbtMLnPXvZ+iMXMgEP929qnFtKZzeRcvkkvnMbaGrqsb/yiQVX5wan6rUzunAWPdTVgcqJT1Pi54G/OQxiVlcyvg4/PRAfV+8RLW0qeHhJExUVPIS8mz5fE3MIvLNgBHCqsQe/GnLMBV2aUqH1l5o1WsvVTWYJYWZHKZbxpSixxkx1qLeHO+W2NHGJHL6rWOJctmVuW9IDusIjeGC/L4t1ZygZlkKgpq848PIhMetJxD9j8Aq6GK3gxlXax7dpQ2y/J53kgHbDEvslD5x6MlswhgWcwC9hDcb/gYYTr8BmrZd0LtvCzrOJAYsCPObZbZPqOO37gbykhRhJ2FQv0+Lvp+lj/M5OoRmHtrTPjqNaDVmDncSPTIajXjAItkRxJLJboacSeEsGsJvSD0H0xgUhzhOfK0QepXXLfzG4aX/ow7we9pOXw3G7ydfdd9iB1yCiIICaW3SAavL2zy/dHMb5/0a0WxMza89pRW8KMZ/GQSxZOS2Ek8fJ954mEbJv8c5ZrzKyC9fbO89FsZmHimnBNZBlGyNrKckhBywYcHI/k4ytgkWMpFmYiNxV8j0WVmw1NDXuF/FCnRHHnexgRiVoZU8SWtnBWAqz4gZt3Z9ehoGXYKWXjS8eG0bWX6ueeNYrNKND5b1zXEd3SlN1UTqrtiqa2NKFAht0DlsMxYqweGTBMk4h06w=='))
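# --- Illustrative sketch (added example): the inverse of Decrypt above.
# Not used by the puzzle itself; shown only to document the ciphertext format
# (gzip -> space padding to the AES block size -> AES-CBC with a zero IV and a
# 32-byte padded key -> base64). It assumes the gzip stream does not end in a
# whitespace byte, since Decrypt strips whitespace before decompressing. ---
def Encrypt(key: str, text: str) -> str:
    if len(key) < 32:
        key += ' ' * (32 - len(key))
    elif len(key) > 32:
        key = key[0:32]
    cipher = AES.new(bytes(key, encoding='utf-8'), AES.MODE_CBC, bytes(AES.block_size))
    data = gzip.compress(bytes(text, encoding='utf-8'))
    data += b' ' * (-len(data) % AES.block_size)  # padding that Decrypt strips off
    return str(base64.b64encode(cipher.encrypt(data)), encoding='utf-8')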
python
import pytest from time import time from bitmex_async_rest import BitMEXRestApi @pytest.fixture def testnet(): return BitMEXRestApi('testnet') async def test_throttle(testnet: BitMEXRestApi): # for i in range(120): # funding = await testnet.funding(count=1) # assert i == 119 assert True async def test_order_book_L2(testnet: BitMEXRestApi): # book = await testnet.order_book_L2('XBTUSD', 5) # assert len(book) == 10 assert True
python
#! /usr/bin/env python # -*- coding: utf-8 -*- """ Unsupervised Kernel Regression (UKR) for Python. Implemented as a scikit-learn module. Author: Christoph Hermes Created on Januar 16, 2015 18:48:22 The MIT License (MIT) Copyright (c) 2015 Christoph Hermes Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import numpy as np from scipy.optimize import minimize import sklearn from sklearn import decomposition, manifold from scipy.linalg import sqrtm # own modules from ukr_core import (ukr_bp, ukr_dY, ukr_E, ukr_project, ukr_backproject_particles) import rprop # possible UKR kernels: tuple(kernel, kernel derivative) try: # try using numexpr import numexpr as ne gaussian = (lambda x: ne.evaluate('exp(-.5 * x)'), lambda x: ne.evaluate('-.5 * exp(-.5 * x)')) quartic = (lambda x: np.where(x<1, (1. - x)**2, np.zeros_like(x)), lambda x: np.where(x<1, -2. * (1. - x), np.zeros_like(x))) student_n = (lambda x, n: ne.evaluate('(1. + x/n)**(-(n+1.)/2.)'), lambda x, n: ne.evaluate('-(n+1.)/2. * n**((n+1.)/2.) * (x+n)**(-(n+1.)/2.-1.)') ) except ImportError: gaussian = (lambda x: np.exp(-.5 * x), lambda x: -.5 * np.exp(-.5 * x)) quartic = (lambda x: np.where(x<1, (1. - x)**2, np.zeros_like(x)), lambda x: np.where(x<1, -2. * (1. - x), np.zeros_like(x))) student_n = (lambda x, n: (1. + x/n)**(-(n+1.)/2.), lambda x, n: -(n+1.)/2. * n**((n+1.)/2.) * (x+n)**(-(n+1.)/2.-1.) ) student_1 = (lambda x: student_n[0](x, 1), lambda x: student_n[1](x, 1)) student_2 = (lambda x: student_n[0](x, 2), lambda x: student_n[1](x, 2)) student_3 = (lambda x: student_n[0](x, 3), lambda x: student_n[1](x, 3)) student_9 = (lambda x: student_n[0](x, 9), lambda x: student_n[1](x, 9)) student_k = lambda k: (lambda x: student_n[0](x, k), lambda x: student_n[1](x, k)) class UKR(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin): """Unsupervised Kernel Regression (UKR) Parameters ---------- n_components : int Manifold dimension, usually in {1,2,3}. kernel : str or tuple(k : func(x), k_der : func(x)) UKR kernel `k` and its derivative `k_der`. A few examples are included in this module: gaussian, quartic and student_{1,2,3,9}. metric : {L1, L2} or float Distance metric. L1: cityblock/manhattan; L2: euclidean float : arbitrary Minkowsky n_iter : int Maximum number of iterations for training the UKR model. lko_cv : int Leave-k-out cross validation for training the UKR model. embeddings : list of initial manifold generators If None, the initial embedding is set to TSNE and then PCA (if TSNE is not available). 
Good choices are: * sklearn.decomposition.PCA(`n_components`) * sklearn.decomposition.KernelPCA(`n_components`, kernel='rbf') * sklearn.manifold.locally_linear.LocallyLinearEmbedding(n_neighbors, `n_components`, method='modified') * sklearn.manifold.MDS(n_components=`n_components`, n_jobs=-1), * sklearn.manifold.TSNE(n_components=`n_components`), enforceCycle : bool Are the high-dimensional points sampled from a cyclic data, e.g. a rotating object or a walking person? In this case the UKR tries to maintain a close spatial distance of subsequent manifold points. verbose : bool Print additional information esp. during the training stage. Attributes ---------- X : np.ndarray, shape=(N,D) High-dimensional point list for UKR training. Y : np.ndarray, shape=(N,n_components) Low-dimensional respresentation of `X`. """ def __init__(self, n_components=2, kernel=gaussian, metric='L2', lko_cv=1, n_iter=1000, embeddings=None, enforceCycle=False, verbose=True): if isinstance(kernel, basestring): if kernel.lower() == 'gaussian': self.k, self.k_der = gaussian elif kernel.lower() == 'quartic': self.k, self.k_der = quartic elif kernel.lower() == 'student_1': self.k, self.k_der = student_1 elif kernel.lower() == 'student_2': self.k, self.k_der = student_2 elif kernel.lower() == 'student_3': self.k, self.k_der = student_3 elif kernel.lower() == 'student_9': self.k, self.k_der = student_9 else: self.k, self.k_der = kernel if isinstance(metric, basestring): assert metric in ['L1', 'L2'], "failed condition: metric in ['L1', 'L2']" if metric == 'L1': self.metric = 1. elif metric == 'L2': self.metric = 2. else: self.metric = metric self.n_components = n_components self.lko_cv = lko_cv self.n_iter = n_iter self.enforceCycle = enforceCycle self.verbose = verbose if embeddings is None: try: self.embeddings = [manifold.TSNE(n_components=self.n_components)] except AttributeError: print 'ukr.py::Warning: TSNE not found in the sklearn packages. Try PCA instead.' self.embeddings = [decomposition.PCA(n_components=self.n_components)] else: self.embeddings = embeddings self.X = None self.Y = None self.B = None pass def fit(self, X, y=None): """Train the UKR model. Parameters ---------- X : np.ndarray, shape=(N,D) Sample set with `N` elements and `D` dimensions. Returns ------- UKR model object. 
""" X = np.atleast_2d(X) ########################### # find an initial embedding Y = None embed_ = None error = np.inf for embeddingI, embedding in enumerate(self.embeddings): if self.verbose: print 'Try embedding %2d/%2d: %s' % (embeddingI+1, len(self.embeddings), embedding.__class__.__name__) try: Y_init_ = embedding.fit_transform(X) Y_init_ = Y_init_ - Y_init_.mean(axis=0) # center around zero except: continue # normalize initial hypothesis to Y.T * Y = I Y_init_ = Y_init_.dot(np.linalg.pinv(sqrtm(Y_init_.T.dot(Y_init_)))) # optimze the scaling factor by using least squares def residuals(p, X_, Y_): B, P = ukr_bp(Y_ * p, self.k, self.k_der, self.lko_cv, metric=self.metric) return ukr_E(X_, B) p0 = np.ones((1,self.n_components)) sol = minimize(residuals, p0, method='Nelder-Mead', args=(X, Y_init_)) if sol['x'].max() < 1000: Y_init_ = Y_init_ * sol['x'] else: print 'UKR::warning: scaling initialization failed' Y_init_ = Y_init_ * 20 # final projection error estimation B, P = ukr_bp(Y_init_, self.k, self.k_der, self.lko_cv, metric=self.metric) err_ = ukr_E(X, B) if self.verbose: print ' Error: %f' % err_ # store the results if they're an improvement if err_ < error: error = err_ Y = Y_init_ embed_ = embedding # Summary: if self.verbose: print '=> using embedding', embed_.__class__.__name__ ###################### # Refine the UKR model iRpropPlus = rprop.iRpropPlus() for iter in xrange(self.n_iter): if self.verbose and iter % 10 == 0: print 'UKR iter %5d, Err=%9.6f' % (iter, iRpropPlus.E_prev) # derivative of X_model w.r.t. to the error gradient B, P = ukr_bp(Y, self.k, self.k_der, self.lko_cv, metric=self.metric) if self.enforceCycle and iter % 20 < 10 and iter < self.n_iter/2: # close spatial distance of subsequent manifold points every # ten iterations for the first half of the full training dY = -np.diff(np.vstack([Y, Y[0]]), axis=0) else: dY = ukr_dY(Y, X, B, P) # reconstruction error E_cur = ukr_E(X, B) / X.shape[1] Y = iRpropPlus.update(Y, dY, E_cur) # store training results self.X = X # original data self.Y = Y # manifold points return self def fit_transform(self, X, y=None): """Train the UKR model and return the low-dimensional samples. Parameters ---------- X : np.ndarray, shape=(N,D) Sample set with `N` elements and `D` dimensions. Returns ------- Y : np.ndarray, shape=(N, `n_components`) Low-dimensional representation of `X`. """ X = np.atleast_2d(X) self.fit(X, y) return self.Y def transform(self, X, n_particle_iter=100): """Project each sample in `X` to the embedding. Uses a particle set for the optimization. Parameters ---------- X : np.ndarray, shape=(N,D) Sample set with `N` elements and `D` dimensions. Returns ------- Y : np.ndarray, shape=(N, `n_components`) Low-dimensional representation of `X`. """ X = np.atleast_2d(X) Y = ukr_backproject_particles(self.Y, self.X, self.k, self.k_der, self.metric, X, n_particles=self.Y.shape[0], n_iter=n_particle_iter) return Y def predict(self, Y): """Project a set of manifold points into the orignal space. Parameters ---------- Y : np.ndarray, shape=(N,`n_components`) Arbitrary points on the manifold. Returns ------- X : np.ndarray, shape=(N,D) Corresponding samples in the high-dimensional space. 
""" assert self.Y is not None, "untrained UKR model" Y = np.atleast_2d(Y) assert Y.shape[1] == self.n_components, \ "failed condition: Y.shape[1] == self.n_components" B, _ = ukr_bp(self.Y, self.k, self.k_der, diagK=-1, Y=Y, metric=self.metric) return ukr_project(self.X, B) def predict_proba(self, Y): """Kernel density estimate for each sample. Parameters ---------- Y : np.ndarray, shape=(N,`n_components`) Arbitrary points on the manifold. Returns ------- p : array-like, shape=(N,) Estimated density value for each sample. """ assert self.Y is not None, "untrained UKR model" Y = np.atleast_2d(Y) assert Y.shape[1] == self.n_components, \ "failed condition: Y.shape[1] == self.n_components" B, _ = ukr_bp(self.Y, self.k, self.k_der, diagK=-1, Y=Y, bNorm=False, metric=self.metric) return B.mean(axis=0) pass
python
from typing import * @overload def check_array_indexer(array: geopandas.array.GeometryArray, indexer: numpy.ndarray): """ usage.geopandas: 4 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: slice[None, int, None] ): """ usage.geopandas: 2 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: slice[int, int, int] ): """ usage.geopandas: 2 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: slice[None, None, None] ): """ usage.geopandas: 1 """ ... @overload def check_array_indexer(array: geopandas.array.GeometryArray, indexer: List[int]): """ usage.geopandas: 5 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: slice[int, None, int] ): """ usage.geopandas: 1 """ ... @overload def check_array_indexer(array: geopandas.array.GeometryArray, indexer: int): """ usage.geopandas: 1 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: slice[int, None, int] ): """ usage.geopandas: 1 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: slice[numpy.int64, numpy.int64, numpy.int64], ): """ usage.geopandas: 1 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: slice[None, None, None] ): """ usage.geopandas: 1 """ ... @overload def check_array_indexer(array: geopandas.array.GeometryArray, indexer: list): """ usage.geopandas: 1 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: pandas.core.arrays.boolean.BooleanArray, ): """ usage.geopandas: 2 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: pandas.core.arrays.integer.IntegerArray, ): """ usage.geopandas: 2 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: List[Union[pandas._libs.missing.NAType, int]], ): """ usage.geopandas: 2 """ ... @overload def check_array_indexer( array: geopandas.array.GeometryArray, indexer: slice[int, int, int] ): """ usage.geopandas: 1 """ ... def check_array_indexer(array: geopandas.array.GeometryArray, indexer: object): """ usage.geopandas: 27 """ ...
python
import pandas as pd
from scipy import stats


def my_oneway_anova(csv_path):
    """Run a one-way ANOVA on 'response_time' between the two groups in the
    'condition' column ('condition_a' vs. 'condition_b') of the given CSV."""
    my_data = pd.read_csv(csv_path)
    condition_a = my_data[my_data['condition'] == 'condition_a']['response_time']
    condition_b = my_data[my_data['condition'] == 'condition_b']['response_time']
    return stats.f_oneway(condition_a, condition_b)
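# --- Illustrative usage sketch (added example; 'reaction_times.csv' is a
# hypothetical file with 'condition' and 'response_time' columns) ---
if __name__ == '__main__':
    f_stat, p_value = my_oneway_anova('reaction_times.csv')
    print('F = {:.3f}, p = {:.4f}'.format(f_stat, p_value))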
python
#!/usr/bin/env python import argparse from argparse import RawTextHelpFormatter import bammend as bm def parse_args(): """Parse command-line arguments""" summary = ('Remove pulses from reads in Pacbio Bam. Annotation indices \n' 'are indexed from the beginning of the ZMW read (i.e. query \n' 'indexing).') parser = argparse.ArgumentParser(prog='bammend', description=summary, formatter_class=RawTextHelpFormatter) parser.add_argument('subreads', help='Path to subread bam') parser.add_argument('bammend_csv', help=('Path to CSV with scheme \n' '| ZMW | Annotation Start Index ' '| Annotation End Index |')) parser.add_argument('output_subreads', help='Path to output bam') args = parser.parse_args() return args.subreads, args.bammend_csv, args.output_subreads def main(): """Bammend a subreadset.""" read_bam_path, annotation_csv_path, out_bam_path = parse_args() bm.reject_basecalls(read_bam_path, annotation_csv_path, out_bam_path) if __name__ == '__main__': main()
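# --- Illustrative usage sketch (added example; file names are hypothetical) ---
# Command line:
#   bammend subreads.bam rejects.csv trimmed.subreads.bam
# Programmatic equivalent of main() above:
#   bm.reject_basecalls('subreads.bam', 'rejects.csv', 'trimmed.subreads.bam')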
python
""" The API, responsible for receiving the files, submitting jobs, and getting their results. """ import asyncio from contextlib import suppress import os from typing import Optional from uuid import UUID, uuid4 from fastapi import ( Depends, FastAPI, File, HTTPException, Path, Query, Response, UploadFile, ) from pydantic import confloat from mognet_demo.config import DemoConfig from mognet_demo.models import Job, Upload, UploadJobResult from mognet_demo.mognet_app import app as mognet_app from mognet_demo.s3 import get_s3_client from mognet_demo.tasks import process_document_upload app = FastAPI( title="Mognet Demo API", description='API to demonstrate how to use Mognet in a "real world" application.', ) # We need to connect the Mognet application # to the backends (Redis and RabbitMQ) before we can use it. # # Here we leverage FastAPI (or rather, Starlette)'s event system # to do this. @app.on_event("startup") async def connect_mognet_app_on_startup(): await mognet_app.connect() # And for completeness, we close it here too. @app.on_event("shutdown") async def close_mognet_app_on_shutdown(): await mognet_app.close() @app.post("/jobs", response_model=Job) async def upload_document( file: UploadFile = File(...), config: DemoConfig = Depends(DemoConfig.instance), ): """ Upload a file to have it be processed in the background. This will return an object with a `job_id` which can be used to then get the result. """ # Upload the file to S3 upload = Upload( upload_id=uuid4(), file_name=os.path.basename(os.path.normpath(file.filename)), ) async with get_s3_client(config) as s3: await s3.put_object( Bucket=config.s3.bucket_name, Key=f"uploads/{upload.upload_id}", Body=await file.read(), ) # Here, we create a Request to run the task. # This will create an object holding: # # - It's ID # - The task to run # - The arguments # # It is possible to configure this object # with more parameters (check the mognet.Request class), # either through it's constructor or through it's fields. req = mognet_app.create_request(process_document_upload, upload=upload) # Here we _submit_ the Request to be run on the background. # This returns a Result object. # Each submitted Request has a corresponding Result on the Result Backend. # If one were to await `res`, then the caller would wait until the task finished # running, and the result would hold either the result, or it would raise an exception # (in case of failure). res = await mognet_app.submit(req) # Here, we return an object that can be used for client-side tracking. # We don't store this in a database, as that's beyond the scope of this demo. # # However, if you need to store this information (for auditing purposes), # you can create a database table / collection that holds information about each Request # that was submitted. return Job(job_id=res.id) JobWaitTime = confloat(gt=0, le=30) @app.get("/jobs/{job_id}/result", response_model=UploadJobResult) async def get_job_result( job_id: UUID = Path(...), wait: Optional[JobWaitTime] = Query( None, description="**ADVANCED**: Optionally delay the return of this endpoint, in case the job isn't yet finished.", ), ) -> UploadJobResult: """ Get the result of a job. This endpoint can be used to poll for the result of a job. A good way to see the `wait` parameter in action is to stop the Mognet Worker before launching a task (because they are very fast). """ # To get the Result for a job, one should do so via the app's # `result_backend.get()` method. This will fetch the result from it. 
# # Note that there's no guarantee of the persistence of the Result Backend, assuming # you're using Redis (due to key eviction policies and TTLs). # By default, Mognet will keep results for 7 days. # See the mognet.AppConfig class for more details. # # Therefore, this method may return None, and we should handle it accordingly. res = await mognet_app.result_backend.get(job_id) if res is None: raise HTTPException(404, f"Job {job_id} not found") # **ADVANCED**: We can do a small optimization: instead of the client polling with high frequency, # we can instead delay the return of this endpoint for a few seconds, in case the job isn't done. # We can use the `wait()` function for this. This results in less HTTP traffic, at the expense of # more Redis traffic. You can use the poll argument to slow down the polling period (default is 0.1s, which # is optimized for performance-sensitive scenarios). # # Note that we must handle `asyncio.TimeoutError` ourselves, which happens if the job didn't finish # during the wait period. # # Bear in mind that this also has higher resource and timeout requirements for the server, because you are keeping # connections open for long periods of time. You should take care not to allow flooding of your server. if not res.done and wait is not None: with suppress(asyncio.TimeoutError): await res.wait(timeout=wait, poll=1) # We use a wrapper class that represents both the job and it's return value. # That way, it's easier to represent with OpenAPI schemas, and also easier # to handle for the client. if not res.done: return UploadJobResult(job_id=res.id, job_status=res.state, result=None) # If this is False, it means that the job's result holds an Exception. # We decide not to retrieve it here, and instead just tell the client that the job failed. if not res.successful: return UploadJobResult(job_id=res.id, job_status=res.state, result=None) # If we get here, the job finished successfully, so get the value # and return it to the client. value = await res.get() return UploadJobResult(job_id=res.id, job_status=res.state, result=value) @app.post("/jobs/{job_id}/revoke", status_code=204, response_class=Response) async def revoke_job(job_id: UUID = Path(...)): """Revoke (abort) a job, preventing it from running.""" # Revoking a job is done via the `revoke()` method on the Mognet app. # It will do the following: # # - Mark the result as REVOKED on the Result Backend (Redis) # - Tell the Mognet Workers to cancel the running task, if any # - Do the same for the subtasks # # If a revoked task is received by a Worker, it is discarded. Likewise, # if a subtask of a revoked task is received, it is also revoked. # # If you call the `get_job_result` endpoint after calling this, # you will see that it is stored as REVOKED, unless it already finished. # You can pass `force=True` to this method if you really want to enforce it. await mognet_app.revoke(job_id)
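# --- Illustrative sketch (added example, not an endpoint): awaiting a Result
# directly in-process instead of polling over HTTP. The upload object below is
# hypothetical. ---
async def _submit_and_wait_example() -> None:
    upload = Upload(upload_id=uuid4(), file_name="example.pdf")
    req = mognet_app.create_request(process_document_upload, upload=upload)
    res = await mognet_app.submit(req)
    # Awaiting the Result blocks until the task finishes and raises on failure.
    value = await res
    print(value)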
python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Safe a commandline password manager using AES encryption, written in python. :: author Erick Daniszewski :: date 05 December 2015 """ import json import getpass import argparse import struct from os import mkdir, remove from os.path import join as join_path from os.path import isdir, isfile, expanduser, getsize from random import randint from Crypto.Cipher import AES from hashlib import md5, sha256 # Safe Constants SAFE_ROOT = join_path(expanduser('~'), '.safe') SAFE_CONFIG = join_path(SAFE_ROOT, 'config.json') SAFE_META = join_path(SAFE_ROOT, 'meta.json') SAFES_PATH = join_path(SAFE_ROOT, 'safes') ALL_SAFES = '__all' # Define the argument parser parser = argparse.ArgumentParser(description='Safe :: A minimalistic commandline password manager') parser.add_argument('-i', '--init', nargs='*', help='initialize a new safe.') parser.add_argument('-o', '--open', nargs='*', help='open the specified safe. this is needed for both read and write operations.') parser.add_argument('-c', '--close', nargs='?', const=ALL_SAFES, help='close the specified safe. if no safe is specified, all open safes ' 'are closed. safes will not close automatically - it is up to the user ' 'to close their safes.') parser.add_argument('-a', '--add', nargs='*', help='add a new entry to the safe. add can take 0..3 arguments, where the user will be prompted ' 'to fill in any missing arguments. the arguments are positional. the order is as follows: ' '(1) entry name, (2) username/id, (3) password') parser.add_argument('-d', '--delete', help='remove an entry from the safe.') parser.add_argument('--default', help='set the default safe. the default safe is used by commands, such as open, to determine which safe ' 'to use if none is specified.') parser.add_argument('-D', '--delete_safe', help='remove an existing safe. this removes the safe and all of its entries permanently.') parser.add_argument('-m', '--modify', nargs='?', const=False, help='modify an existing entry for the open safe. this can be used ' 'to change username/password information.') parser.add_argument('-M', '--modify_safe', nargs='*', help='modify an existing safe. this should be used if one wants to change the ' 'master password to a safe, without losing the safe contents.') parser.add_argument('-u', '--username', action='store_true', help='a flag which, when present, will include username info in an entry\'s output.') parser.add_argument('-w', '--whole', action='store_true', help='a flag which, when present, will show the full entry (all data).') parser.add_argument('-e', '--entries', action='store_true', help='show all entries (by name) which exist in the open safe.') parser.add_argument('-s', '--safes', action='store_true', help='show all safes (by name) which exist.') parser.add_argument('-f', '--force', action='store_true', help='force an action. typically, this is used with deletes in order to suppress ' 'the verification prompt.') parser.add_argument('-v', '--verbose', action='store_true', help='a flag which toggles the verbosity of safe. if set to true, additional messages ' 'will be output, such as verification of action success.') parser.add_argument('entry', nargs='?', help='the name of the entry in the safe for which the stored information will be retrieved.') # Define 'clean' state of configuration and metadata files default_cfg = dict(default_safe=None, verbose=False) default_meta = dict(safes=[]) # By default, set the verbosity to False. 
This will get updated based on the value # stored in the Safe configuration file at runtime. is_verbose = False # ======================================================================== # Convenience Methods # ======================================================================== def get_meta(): """ Get the Safe metadata from the metadata file. :return: A dictionary containing the metadata stored in the metadata file. :rtype: dict """ with open(SAFE_META, 'r') as f: meta = json.load(f) return meta def set_meta(data): """ Write a metadata to the metadata file. This will overwrite any existing metadata which may exist in the file. :param data: The metadata to write to the metadata file. :type data: dict :return: None """ with open(SAFE_META, 'w') as f: json.dump(data, f) def get_config_value(key): """ Get a value from the config file. :param key: The key to search for in the config file. :return: The value found in the config file, if exists. Otherwise None. """ with open(SAFE_CONFIG, 'r') as f: cfg_data = json.load(f) if key in cfg_data: return cfg_data[key] return None def overwrite_config(**kwargs): """ Update the config file based on the specified kwargs. If a key specified in the kwargs does not exist in the config file, that key:value pair will be skipped (will not be added to the config file, but also will not fail). :param kwargs: The entries in the config JSON to update. :return: None """ with open(SAFE_CONFIG, 'r+') as f: cfg_data = json.load(f) for k, v in kwargs.items(): if k in cfg_data: cfg_data[k] = v f.seek(0) json.dump(cfg_data, f) f.truncate() def toggle_config_value(to_toggle): """ Toggle values in the config file based on the specified kwargs. The only values which may be toggled are boolean values. Attempts to toggle any other type will be ignored. :param to_toggle: The entry in the config JSON to toggle. :return: The new value of the updated config field. :rtype: bool """ result = None with open(SAFE_CONFIG, 'r+') as f: cfg_data = json.load(f) if to_toggle in cfg_data and isinstance(cfg_data[to_toggle], bool): cfg_data[to_toggle] = not cfg_data[to_toggle] result = cfg_data[to_toggle] f.seek(0) json.dump(cfg_data, f) f.truncate() return result def get_open_safe(): """ Get the name of the Safe that is currently open, if it exists. There should never be more than one Safe open at a time. If multiple Safes are found to be open, this will indiscriminately close all of them to prevent erroneous writes. :return: The name of the open safe, if any. Otherwise None """ meta = get_meta() unlocked = [] for safe in meta['safes']: if safe['is_open']: unlocked.append(safe['name']) if not unlocked: return None if len(unlocked) > 1: info('more than one safe is open. closing all safes.') close_safe() return None else: return unlocked[0] def get_safe_file_paths(safe_name): """ Get the path for the open and closed Safe files. :param safe_name: Name of the Safe. :return: A tuple which contains the path of the open safe file and the path of the closed safe file. """ return join_path(SAFES_PATH, safe_name + '.open'), join_path(SAFES_PATH, safe_name) def fail(message): """ A convenience method to exit with a failure message. :param message: The message to output. :type message: str :return: None """ exit('\n[FAILED] - {}'.format(message)) def info(message): """ A convenience method to print out info messages to console. :param message: The message to output. 
:type message: str :return: None """ print '>> {}'.format(message) def exit_info(message): """ A convenience method to print out an info message to console and exit. :param message: The message to output. :type message: str :return: None """ exit('>> {}'.format(message)) def prompt(message): """ A convenience method to prompt the used for information. :param message: The message to output for the prompt. :type message: str :return: The value given by the user. """ return raw_input(':: {}: '.format(message)) # ======================================================================== # Encryption/Hashing Methods # ======================================================================== def get_md5_hash(to_hash): """ Generate an MD5 hash of the given value. :param to_hash: Value to crate an MD5 hash of. :type to_hash: str :return: The hexadecimal representation of the MD5 hash. """ return md5(to_hash).hexdigest() def encrypt_file(password, in_file, out_file): """ Encrypt the contents of the given file. Encrypts the contents of the in_file into the out_file. :param password: The password for the file being encrypted. :type password: str :param in_file: The name of the file to encrypt. :type in_file: str :param out_file: The name of the file to create, which contains the encrypted data. :type out_file: str :return: None """ bs = AES.block_size chunk_size = bs * 1024 key = sha256(password).digest() iv = ''.join(chr(randint(0, 0xff)) for _ in range(16)) cipher = AES.new(key, AES.MODE_CBC, iv) file_size = getsize(in_file) with open(in_file, 'rb') as in_f, open(out_file, 'wb') as out_f: out_f.write(struct.pack('<Q', file_size)) out_f.write(iv) while True: chunk = in_f.read(chunk_size) if len(chunk) == 0: break elif len(chunk) % 16 != 0: chunk += ' ' * (16 - len(chunk) % 16) out_f.write(cipher.encrypt(chunk)) def decrypt_file(password, in_file, out_file): """ Decrypt the contents of the given file. Decrypts the contents of the in_file into the out_file. :param password: The password for the file being decrypted. :param in_file: The name of the file to decrypt. :param out_file: The name of the file to create, which contains the decrypted data. :return: None """ bs = AES.block_size chunk_size = bs * 1024 key = sha256(password).digest() with open(in_file, 'rb') as in_f, open(out_file, 'wb') as out_f: orig_size = struct.unpack('<Q', in_f.read(struct.calcsize('Q')))[0] iv = in_f.read(16) cipher = AES.new(key, AES.MODE_CBC, iv) while True: chunk = in_f.read(chunk_size) if len(chunk) == 0: break out_f.write(cipher.decrypt(chunk)) out_f.truncate(orig_size) # ======================================================================== # Safe Methods # ======================================================================== def initialize(): """ Initialize Safe, ensuring the Safe directory is created. This should: * create the Safe root dir, if it does not exist * create the safes dir to hold all user Safes, if it doesnt exist * create the base config and metadata files, if they do not exist :return: None """ if not isdir(SAFE_ROOT): mkdir(SAFE_ROOT) if not isdir(SAFES_PATH): mkdir(SAFES_PATH) if not isfile(SAFE_CONFIG): with open(SAFE_CONFIG, 'w') as conf: json.dump(default_cfg, conf) if not isfile(SAFE_META): with open(SAFE_META, 'w') as meta: json.dump(default_meta, meta) def initialize_safe(name=None, password=None): """ Create a new Safe. Creates a new Safe given a name and password. If no name and password are provided via commandline args, the user will be prompted for both. Safes cannot be overwritten. 
If a user desires to overwrite an existing Safe with a clean Safe of the same name, the Safe should first be deleted (using the -R or --remove-safe option) and a new safe initialized. :param name: The name of the Safe to initialize. :param password: The password for the new Safe. :return: None """ if name is None: name = prompt('set safe name') if not name: name = 'safe' if name == ALL_SAFES: fail('cannot create safe with name "{}". this is a reserved name.'.format(name)) if password is None: password = getpass.getpass(':: set password for safe "{}": '.format(name)) re_password = getpass.getpass(':: re-enter password: ') if password != re_password: fail('entered passwords do not match.') # load in the data from the meta file meta = get_meta() # check if a safe with that name already exists for safe in meta['safes']: if safe['name'] == name: fail('a safe with name "{}" already exists.'.format(name)) # hash the password password_hash = get_md5_hash(password) # close the open safe, if there is one active_safe = get_open_safe() if active_safe: set_meta(meta) close_safe(active_safe) meta = get_meta() # create an entry in the meta file new_safe = { 'name': name, 'hash': password_hash, 'is_open': True } meta['safes'].append(new_safe) set_meta(meta) # create a safe file for this safe. note that the default behavior for the init # is to leave that safe open. otherwise, we would encrypt and close. with open(join_path(SAFES_PATH, name + '.open'), 'w+') as f: json.dump({'password': password, 'entries': []}, f) if is_verbose: info('created safe "{}"'.format(name)) def open_safe(name=None, password=None): """ Open an existing Safe. If no Safe is specified, the default Safe will be used. If no default Safe is specified, the user will be notified of failure and safe will exit. Opening a Safe will close any other Safe that is open, to prevent erroneous writes. :param name: Name of the safe to open. If no name is provided, the user will be prompted for a name. :param password: Password for the safe to open. If no password is provided, the user will be prompted for a password. :return: None """ if name is None: name = get_config_value('default_safe') if not name: fail('no safe name provided, and no default safe found.') info('no safe provided; choosing default ({})'.format(name)) if password is None: password = getpass.getpass(':: password: ') # close the open safe, if any active_safe = get_open_safe() if active_safe: close_safe(active_safe) meta = get_meta() for safe in meta['safes']: if safe['name'] == name: if safe['hash'] == get_md5_hash(password): if not safe['is_open']: safe_paths = get_safe_file_paths(name) decrypt_file(password, safe_paths[1], safe_paths[0]) remove(safe_paths[1]) safe['is_open'] = True else: info('the safe is already open.') else: fail('password incorrect for safe') elif safe['is_open']: close_safe(safe['name']) set_meta(meta) if is_verbose: info('opened safe "{}"'.format(name)) def close_safe(name=None): """ Close an existing open Safe. Closes the specified Safe. All Safes will be closed if no specific Safe is specified. If a specified Safe does not exist, a message may be logged, otherwise nothing will happen, as a non-existent Safe can technically be considered a closed Safe. :param name: The name of the Safe to close. 
:return: None """ meta = get_meta() def encrypt_safe(safe_name): safe_paths = get_safe_file_paths(safe_name) if isfile(safe_paths[0]): with open(safe_paths[0], 'r') as s: data = json.load(s) password = data['password'] encrypt_file(password, safe_paths[0], safe_paths[1]) remove(safe_paths[0]) if name is None: for safe in meta['safes']: if safe['is_open']: encrypt_safe(safe['name']) safe['is_open'] = False else: for safe in meta['safes']: if safe['name'] == name: encrypt_safe(name) safe['is_open'] = False break set_meta(meta) if is_verbose: info('closed safe "{}"'.format(name)) def set_default(safe_name): """ Set the default Safe. This updates the 'default_safe' field in the Safe config file. :param safe_name: The name of the safe to be the default. :return: None """ meta = get_meta() safe_exists = False for safe in meta['safes']: if safe['name'] == safe_name: safe_exists = True break if safe_exists: overwrite_config(default_safe=safe_name) if is_verbose: info('"{}" is now the default safe'.format(safe_name)) else: fail('could not set default safe. "{}" does not exist.'.format(safe_name)) def add_entry(name=None, username=None, password=None): """ Add an entry to the currently open Safe. An entry consists of: 1. name (entry identifier, i.e. 'github') 2. username (user identifier; be it a username or email) 3. password (the password associated with the given username) Currently, only one username/password can be associated with a given entry identifier. If no Safe is open, adding an entry fails. If an entry name is already used in the open Safe, adding an entry fails. :param name: Name of the Safe entry. :param username: Username associated with the entry. :param password: Password associated with the entry. :return: None """ if name is None: name = prompt('entry name') if username is None: username = prompt('username') if password is None: password = prompt('password') entry_data = { 'name': name, 'username': username, 'pass': password } safe = get_open_safe() if safe: safe_file = join_path(SAFES_PATH, safe + '.open') with open(safe_file, 'r') as f: safe_data = json.load(f) for entry in safe_data['entries']: if entry['name'] == name: fail('entry "{}" already exists'.format(name)) safe_data['entries'].append(entry_data) with open(safe_file, 'w') as f: json.dump(safe_data, f) else: exit_info('no open safes found. open a safe with the --open option.') if is_verbose: info('added entry "{}" to safe "{}"'.format(name, safe)) def get_entry(name, show_all=False, show_username=False): """ Retrieves an entry by name from the currently open Safe. By default, this will print out only the password associated with the specified entry. Additional commandline flags can be added in order to show additional information: * -u, --username -> show the username along with the password * -w, --whole -> show the whole json entry :param name: Name of the entry to retrieve information for. :param show_all: Flag designating that all JSON info be shown. :param show_username: Flag designating that username info be shown. 
:return: None """ safe = get_open_safe() if safe: safe_file = join_path(SAFES_PATH, safe + '.open') with open(safe_file, 'r') as f: safe_data = json.load(f) found = False for entry in safe_data['entries']: if entry['name'] == name: found = True if show_all: print json.dumps(entry, indent=2, separators=(',', ':\t')) elif show_username: print entry['username'] print entry['pass'] else: print entry['pass'] if not found: fail('no entry found with name "{}".'.format(name)) else: exit_info('no open safes found. open a safe with the --open option.') def modify_entry(name=None): """ Modify an entry in the currently opened Safe. :param name: The name of the entry to modify. :return: None """ safe = get_open_safe() if safe: if name is None: name = prompt('entry to modify') info('leave a prompt blank to leave the record unchanged.') new_username = prompt('new username') new_password = prompt('new password') if not new_username and not new_password: exit_info('no fields specified for modification.') safe_file = join_path(SAFES_PATH, safe + '.open') with open(safe_file, 'r') as f: safe_data = json.load(f) for entry in safe_data['entries']: if entry['name'] == name: if new_password: entry['pass'] = new_password if new_username: entry['username'] = new_username break with open(safe_file, 'w') as f: json.dump(safe_data, f) else: exit_info('no open safes found. open a safe with the --open option.') if is_verbose: info('successfully modified "{}" in safe "{}"'.format(name, safe)) def modify_safe(name=None, password=None): """ Modify a Safe. :param name: The name of the Safe to modify. :return: None """ if name is None: name = prompt('safe to modify') if password is None: password = getpass.getpass(':: password: ') meta = get_meta() found = False for safe in meta['safes']: if safe['name'] == name: found = True if safe['hash'] == get_md5_hash(password): info('leave a prompt blank to leave the record unchanged.') new_password = getpass.getpass(':: new password: ') re_rew_password = getpass.getpass(':: re-enter new password: ') if new_password != re_rew_password: fail('entered passwords do not match.') safe_paths = get_safe_file_paths(name) if not safe['is_open']: decrypt_file(password, safe_paths[1], safe_paths[0]) with open(safe_paths[0], 'r') as f: file_data = json.load(f) file_data['password'] = new_password with open(safe_paths[0], 'w') as f: json.dump(file_data, f) if not safe['is_open']: encrypt_file(new_password, safe_paths[0], safe_paths[1]) safe['hash'] = get_md5_hash(new_password) else: fail('password incorrect for safe') if not found: exit_info('safe with name "{}" not found.'.format(name)) set_meta(meta) if is_verbose: info('successfully modified safe "{}"'.format(name)) def delete_entry(name): """ Delete an entry from the currently open Safe. :param name: Name of the entry to delete. :type name: str :return: None """ safe = get_open_safe() if safe: safe_file = get_safe_file_paths(safe)[0] with open(safe_file, 'r') as f: safe_data = json.load(f) entry_list = safe_data['entries'] entry_list[:] = [x for x in entry_list if not x['name'] == name] with open(safe_file, 'w') as f: json.dump(safe_data, f) else: exit_info('no open safes found. open a safe with the --open option.') if is_verbose: info('successfully removed entry "{}"'.format(name)) def delete_safe(name, force=False): """ Delete a Safe and all of its contents. :param name: The name of the safe to delete. :param force: Flag which designates whether to prompt for validation or not. 
(default: False) :return: None """ if not force: verify = prompt('delete safe "{}" and all of its contents? (y/N)'.format(name)) or 'n' if verify.lower() == 'n': exit_info('aborting safe delete.') safe_paths = get_safe_file_paths(name) if isfile(safe_paths[0]): remove(safe_paths[0]) if isfile(safe_paths[1]): remove(safe_paths[1]) meta = get_meta() safes = meta['safes'] safes[:] = [x for x in safes if not x['name'] == name] set_meta(meta) if is_verbose: info('successfully deleted safe "{}"'.format(name)) def list_entries(): """ List all entries in the Safe that is currently open. :return: None """ safe = get_open_safe() if safe: safe_file = get_safe_file_paths(safe)[0] with open(safe_file, 'r') as f: safe_data = json.load(f) if len(safe_data['entries']) == 0: exit_info('no entries in the safe "{}".'.format(safe)) entries = [entry['name'] for entry in safe_data['entries']] print '\n '.join(['Entries:'] + entries) else: exit_info('no open safes found. open a safe with the --open option.') def list_safes(): """ List all initialized Safes. The Safe that is currently open will be denoted with a '*' next to the name. By design, either 0 or 1 Safe should be open at any given time, so there should never be more than one safe marked as open. :return: None """ meta = get_meta() safes = [] for safe in meta['safes']: name = '' if safe['is_open']: name += '* ' name += safe['name'] safes.append(name) if len(safes) == 0: exit_info('no safes exist.') print '\n '.join(['Safes:'] + safes) # ======================================================================== # Safe Main # ======================================================================== if __name__ == '__main__': # initialize Safe and get any arguments passed to it. initialize() args = parser.parse_args() # --------------------------------- # Get/Set the verbosity of Safe # --------------------------------- if args.verbose: is_verbose = toggle_config_value('verbose') info('set verbosity to {}'.format(is_verbose)) else: is_verbose = get_config_value('verbose') # --------------------------------- # Initialize a new Safe # --------------------------------- if args.init is not None: count = len(args.init) if count == 0: initialize_safe() elif count == 1: initialize_safe(name=args.init[0]) elif count == 2: initialize_safe(name=args.init[0], password=args.init[1]) else: parser.error('too many arguments given for --init. (accepts 0, 1, or 2 arguments)') # --------------------------------- # Open a Safe # --------------------------------- if args.open is not None: count = len(args.open) if count == 0: open_safe() elif count == 1: open_safe(name=args.open[0]) elif count == 2: open_safe(name=args.open[0], password=args.open[1]) else: parser.error('too many arguments given for --open. (accepts 0, 1, or 2 arguments)') # --------------------------------- # Add data to a Safe # --------------------------------- if args.add is not None: count = len(args.add) if count == 0: add_entry() elif count == 1: add_entry(name=args.add[0]) elif count == 2: add_entry(name=args.add[0], username=args.add[1]) elif count == 3: add_entry(name=args.add[0], username=args.add[1], password=args.add[2]) else: parser.error('too many arguments given for --add. 
(accepts 0, 1, 2, or 3 arguments)') # --------------------------------- # Modify an entry from a Safe # --------------------------------- if args.modify is not None: if args.modify: modify_entry(args.modify) else: modify_entry() # --------------------------------- # Modify a Safe # --------------------------------- if args.modify_safe is not None: count = len(args.modify_safe) if count == 0: modify_safe() elif count == 1: modify_safe(name=args.modify_safe[0]) elif count == 2: modify_safe(name=args.modify_safe[0], password=args.modify_safe[1]) else: parser.error('too many arguments given for --modify_safe. (accepts 0, 1, or 2 arguments)') # --------------------------------- # Delete data from a Safe # --------------------------------- if args.delete: delete_entry(args.delete) # --------------------------------- # Delete a Safe # --------------------------------- if args.delete_safe: delete_safe(args.delete_safe, args.force) # --------------------------------- # Set the default Safe # --------------------------------- if args.default: set_default(args.default) # --------------------------------- # Close an open Safe # --------------------------------- if args.close: if args.close == ALL_SAFES: close_safe() else: close_safe(args.close) # --------------------------------- # Lookup info from a Safe # --------------------------------- if args.entry: get_entry(args.entry, show_all=args.whole, show_username=args.username) # --------------------------------- # Lookup entries in a Safe # --------------------------------- if args.entries: list_entries() # --------------------------------- # Lookup all Safes # --------------------------------- if args.safes: list_safes()
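# --- Illustrative sketch (not part of safe itself) --------------------------
# The same AES-CBC round-trip pattern that encrypt_file/decrypt_file above
# rely on, done in memory so it can be sanity-checked without touching the
# filesystem. Assumes PyCrypto (or the drop-in pycryptodome) provides
# Crypto.Cipher.AES; the sample password and payload are made up.
from hashlib import sha256
from os import urandom
from Crypto.Cipher import AES


def aes_cbc_roundtrip(password, plaintext):
    key = sha256(password).digest()            # 32-byte key, as above
    iv = urandom(16)                           # random 16-byte IV
    pad = (16 - len(plaintext) % 16) % 16      # space padding, as above
    padded = plaintext + b' ' * pad
    ciphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(padded)
    recovered = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
    return recovered[:len(plaintext)]


assert aes_cbc_roundtrip(b'hunter2', b'{"entries": []}') == b'{"entries": []}'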
python
# coding: utf-8 from __future__ import absolute_import from unittest.mock import patch from xcube_hub import api from xcube_hub.controllers.callbacks import put_callback_by_cubegen_id from xcube_hub.models.callback import Callback from test import BaseTestCase class TestCallbacksController(BaseTestCase): """CallbacksController integration test stubs""" def test_put_callback_by_job_id(self): """Test case for put_callback_by_job_id Add a callback for a job """ callback = Callback(state={'error': 'dasds'}, sender='on_end') res = put_callback_by_cubegen_id(body=callback.to_dict(), cubegen_id='test_id', token_info={ 'access_token': 'dfevgdf', 'user_id': 'helge', 'email': '[email protected]' }) expected = ({'progress': [{'sender': 'on_end', 'state': {'error': 'dasds'}}]}, 200) self.assertEqual(expected, res) # Test whether the controller returns an error when the service raises an exception def side_effect(user_id, email, cubegen_id, value): raise api.ApiError(400, 'test') with patch('xcube_hub.core.callbacks.put_callback') as p: p.side_effect = side_effect res = put_callback_by_cubegen_id(body=callback.to_dict(), cubegen_id='test_id', token_info={ 'access_token': 'dfevgdf', 'user_id': 'helge', 'email': '[email protected]' }) self.assertEqual(400, res[1]) self.assertEqual('test', res[0]['message']) self.assertGreater(len(res[0]['traceback']), 0) if __name__ == '__main__': import unittest unittest.main()
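# --- Illustrative sketch (hypothetical names, not part of xcube_hub) --------
# The same patching pattern as the test above: replace the underlying call
# with a side_effect that raises, then assert the caller turns the exception
# into an error-response tuple.
from unittest.mock import patch


def fetch_result():
    # stand-in for the real core call
    return {'ok': True}


def controller():
    try:
        return fetch_result(), 200
    except ValueError as e:
        return {'message': str(e)}, 400


with patch(__name__ + '.fetch_result', side_effect=ValueError('boom')):
    assert controller() == ({'message': 'boom'}, 400)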
python
"""AI Engines Here is a set of AI- and ML-patterns for adavanced research of business data. """
python
from unittest.mock import MagicMock


class AsyncMock(MagicMock):
    async def __call__(self, *args, **kwargs):
        # Delegate to MagicMock.__call__ without re-passing self; otherwise the
        # mock records self as the first positional argument of every call.
        return super().__call__(*args, **kwargs)
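# Usage sketch: awaiting the mock behaves like calling a MagicMock, so
# return_value and the call-assertion helpers work as usual. (Python 3.8+
# ships unittest.mock.AsyncMock, which makes this helper unnecessary there.)
import asyncio


async def _demo():
    fetch = AsyncMock(return_value=42)
    assert await fetch('http://example.com') == 42
    fetch.assert_called_once_with('http://example.com')


asyncio.run(_demo())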
python
from django.db import models
from django.test import TestCase

from django_fsm import FSMField, transition, can_proceed


class TestExceptTargetTransitionShortcut(models.Model):
    state = FSMField(default="new")

    @transition(field=state, source="new", target="published")
    def publish(self):
        pass

    @transition(field=state, source="+", target="removed")
    def remove(self):
        pass

    class Meta:
        app_label = "testapp"


class Test(TestCase):
    def setUp(self):
        self.model = TestExceptTargetTransitionShortcut()

    def test_usecase(self):
        self.assertEqual(self.model.state, "new")
        self.assertTrue(can_proceed(self.model.remove))

        self.model.remove()

        self.assertEqual(self.model.state, "removed")
        self.assertFalse(can_proceed(self.model.remove))
python
# Problem: https://www.hackerrank.com/challenges/py-check-strict-superset/problem

set_A = set(input().split())
n = int(input())
ind = 0

for _ in range(n):
    set_n = set(input().split())
    union_set = set_A.union(set_n)
    if (union_set == set_A) and (len(set_A) > len(set_n)):
        ind += 1

if ind == n:
    print(True)
else:
    print(False)
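# Equivalent check with set operators: '>' means "proper superset", so the
# manual union/length comparison above collapses to a single all(...) call.
# Hard-coded sample data here instead of stdin input.
a = {'1', '2', '3', '4', '5'}
others = [{'1', '2'}, {'3', '4'}]
print(all(a > s for s in others))                        # True
print(all(a.issuperset(s) and a != s for s in others))   # same check, spelled out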
python
import sys import os import os.path import glob def compareOutputs( expected, actual, message ): expected = expected.strip().replace('\r','').split('\n') actual = actual.strip().replace('\r','').split('\n') diff_line = 0 max_line_to_compare = min( len(expected), len(actual) ) for index in xrange(0,max_line_to_compare): if expected[index].strip() != actual[index].strip(): diff_line = index + 1 break if diff_line == 0 and len(expected) != len(actual): diff_line = max_line_to_compare+1 if diff_line == 0: return None def safeGetLine( lines, index ): index += -1 if index >= len(lines): return '' return lines[index].strip() return """ Difference in %s at line %d: Expected: '%s' Actual: '%s' """ % (message, diff_line, safeGetLine(expected,diff_line), safeGetLine(actual,diff_line) ) def safeReadFile( path ): try: return file( path, 'rt' ).read() except IOError, e: return '<File "%s" is missing: %s>' % (path,e) def runAllTests( jsontest_executable_path, input_dir = None ): if not input_dir: input_dir = os.getcwd() tests = glob.glob( os.path.join( input_dir, '*.json' ) ) failed_tests = [] for input_path in tests: print 'TESTING:', input_path, pipe = os.popen( "%s %s" % (jsontest_executable_path, input_path) ) process_output = pipe.read() status = pipe.close() base_path = os.path.splitext(input_path)[0] actual_output = safeReadFile( base_path + '.actual' ) actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' ) file(base_path + '.process-output','wt').write( process_output ) if status: print 'parsing failed' failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) ) else: expected_output_path = os.path.splitext(input_path)[0] + '.expected' expected_output = file( expected_output_path, 'rt' ).read() detail = ( compareOutputs( expected_output, actual_output, 'input' ) or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) ) if detail: print 'FAILED' failed_tests.append( (input_path, detail) ) else: print 'OK' if failed_tests: print print 'Failure details:' for failed_test in failed_tests: print '* Test', failed_test[0] print failed_test[1] print print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests), len(failed_tests) ) return 1 else: print 'All %d tests passed.' % len(tests) return 0 if __name__ == '__main__': if len(sys.argv) < 1 or len(sys.argv) > 2: print "Usage: %s jsontest-executable-path [input-testcase-directory]" % sys.argv[0] sys.exit( 1 ) jsontest_executable_path = os.path.normpath( os.path.abspath( sys.argv[1] ) ) if len(sys.argv) > 2: input_path = os.path.normpath( os.path.abspath( sys.argv[2] ) ) else: input_path = None status = runAllTests( jsontest_executable_path, input_path ) sys.exit( status )
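# Sketch of an alternative to compareOutputs() built on the standard difflib
# module: it reports a full unified diff rather than only the first differing
# line. Standalone illustration, not wired into the runner above.
import difflib


def diff_outputs(expected, actual, message='output'):
    expected_lines = expected.strip().replace('\r', '').split('\n')
    actual_lines = actual.strip().replace('\r', '').split('\n')
    if expected_lines == actual_lines:
        return None
    return '\n'.join(difflib.unified_diff(
        expected_lines, actual_lines,
        'expected ' + message, 'actual ' + message, lineterm=''))


print(diff_outputs('1\n2\n3', '1\n2\n4'))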
python
from unittest import mock import pytest from django.core.exceptions import ImproperlyConfigured from django.urls import reverse from django_countries.fields import Country from django_prices_vatlayer.models import VAT from prices import Money, MoneyRange, TaxedMoney, TaxedMoneyRange from saleor.core.taxes.vatlayer import ( DEFAULT_TAX_RATE_NAME, apply_tax_to_price, get_tax_rate_by_name, get_taxed_shipping_price, get_taxes_for_address, get_taxes_for_country, ) from saleor.core.utils import get_country_name_by_code from saleor.dashboard.taxes.filters import get_country_choices_for_vat from ..utils import get_redirect_location @pytest.fixture def compare_taxes(): def fun(taxes_1, taxes_2): assert len(taxes_1) == len(taxes_2) for rate_name, tax in taxes_1.items(): value_1 = tax["value"] value_2 = taxes_2.get(rate_name)["value"] assert value_1 == value_2 return fun def test_get_tax_rate_by_name(taxes): rate_name = "pharmaceuticals" tax_rate = get_tax_rate_by_name(rate_name, taxes) assert tax_rate == taxes[rate_name]["value"] def test_get_tax_rate_by_name_fallback_to_standard(taxes): rate_name = "unexisting tax rate" tax_rate = get_tax_rate_by_name(rate_name, taxes) assert tax_rate == taxes[DEFAULT_TAX_RATE_NAME]["value"] def test_get_tax_rate_by_name_empty_taxes(product): rate_name = "unexisting tax rate" tax_rate = get_tax_rate_by_name(rate_name) assert tax_rate == 0 def test_view_checkout_with_taxes( settings, client, request_checkout_with_item, vatlayer, address ): settings.DEFAULT_COUNTRY = "PL" checkout = request_checkout_with_item checkout.shipping_address = address checkout.save() product = checkout.lines.first().variant.product product.meta = {"taxes": {"vatlayer": {"code": "standard", "description": ""}}} product.save() response = client.get(reverse("checkout:index")) response_checkout_line = response.context[0]["checkout_lines"][0] line_net = Money(amount="8.13", currency="USD") line_gross = Money(amount="10.00", currency="USD") assert response_checkout_line["get_total"].tax.amount assert response_checkout_line["get_total"] == TaxedMoney(line_net, line_gross) assert response.status_code == 200 def test_view_update_checkout_quantity_with_taxes( client, request_checkout_with_item, vatlayer, monkeypatch ): monkeypatch.setattr( "saleor.checkout.views.to_local_currency", lambda price, currency: price ) variant = request_checkout_with_item.lines.get().variant response = client.post( reverse("checkout:update-line", kwargs={"variant_id": variant.id}), {"quantity": 3}, HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) assert response.status_code == 200 assert request_checkout_with_item.quantity == 3 @pytest.mark.parametrize( "price, charge_taxes, expected_price", [ ( Money(10, "USD"), False, TaxedMoney(net=Money(10, "USD"), gross=Money(10, "USD")), ), ( Money(10, "USD"), True, TaxedMoney(net=Money("8.13", "USD"), gross=Money(10, "USD")), ), ], ) def test_get_taxed_shipping_price( site_settings, vatlayer, price, charge_taxes, expected_price ): site_settings.charge_taxes_on_shipping = charge_taxes site_settings.save() shipping_price = get_taxed_shipping_price(price, taxes=vatlayer) assert shipping_price == expected_price def test_view_taxes_list(admin_client, vatlayer): url = reverse("dashboard:taxes") response = admin_client.get(url) tax_list = response.context["taxes"].object_list assert response.status_code == 200 assert tax_list == list(VAT.objects.order_by("country_code")) def test_view_tax_details(admin_client, vatlayer): tax = VAT.objects.get(country_code="PL") tax_rates = [(rate_name, 
tax["value"]) for rate_name, tax in vatlayer.items()] tax_rates = sorted(tax_rates) url = reverse("dashboard:tax-details", kwargs={"country_code": "PL"}) response = admin_client.get(url) assert response.status_code == 200 assert response.context["tax"] == tax assert response.context["tax_rates"] == tax_rates def test_configure_taxes(admin_client, site_settings): url = reverse("dashboard:configure-taxes") data = { "include_taxes_in_prices": False, "display_gross_prices": False, "charge_taxes_on_shipping": False, } response = admin_client.post(url, data) assert response.status_code == 302 assert get_redirect_location(response) == reverse("dashboard:taxes") site_settings.refresh_from_db() assert not site_settings.include_taxes_in_prices assert not site_settings.display_gross_prices assert not site_settings.charge_taxes_on_shipping @mock.patch("saleor.dashboard.taxes.views.messages", create=True) @mock.patch("saleor.dashboard.taxes.views.call_command", create=True) def test_fetch_tax_rates(mocked_call_command, mocked_messages, admin_client): """Ensure a valid fetch VAT rates request is correctly handled, and is leading to the proper VAT fetching command being invoked.""" url = reverse("dashboard:fetch-tax-rates") response = admin_client.post(url) # Ensure the request was successful assert response.status_code == 302 assert get_redirect_location(response) == reverse("dashboard:taxes") assert mocked_messages.success.call_count == 1 # Ensure the get VAT rates (mocked) command was invoked mocked_call_command.assert_called_once_with("get_vat_rates") @mock.patch("saleor.dashboard.taxes.views.messages", create=True) @mock.patch("saleor.dashboard.taxes.views.logger", create=True) @mock.patch( "saleor.dashboard.taxes.views.call_command", side_effect=ImproperlyConfigured("Test"), create=True, ) def test_fetch_tax_rates_improperly_configured( mocked_call_command, mocked_logger, mocked_messages, admin_client ): """Ensure a failing VAT rate fetching is leading to an error being returned, and that error is handled.""" url = reverse("dashboard:fetch-tax-rates") response = admin_client.post(url) # Ensure the request was successful assert response.status_code == 302 assert get_redirect_location(response) == reverse("dashboard:taxes") # Ensure error was logged to the logger # and the error was returned to the client assert mocked_logger.exception.call_count == 1 assert mocked_messages.warning.call_count == 1 # Ensure the get VAT rates (mocked) command was invoked mocked_call_command.assert_called_once_with("get_vat_rates") def test_fetch_tax_rates_invalid_method(admin_client): """Ensure the GET method is not allowed for tax rates fetching""" url = reverse("dashboard:fetch-tax-rates") assert admin_client.get(url).status_code == 405 def test_tax_list_filters_empty(admin_client, vatlayer): qs = VAT.objects.order_by("country_code") url = reverse("dashboard:taxes") data = {"country_code": [""], "sort_by": [""]} response = admin_client.get(url, data) assert response.status_code == 200 assert list(response.context["filter_set"].qs) == list(qs) def test_tax_list_filters_country_code(admin_client, vatlayer): qs = VAT.objects.filter(country_code="PL") url = reverse("dashboard:taxes") data = {"country_code": ["PL"], "sort_by": [""]} response = admin_client.get(url, data) assert response.status_code == 200 assert list(response.context["filter_set"].qs) == list(qs) def test_tax_list_filters_sort_by(admin_client, vatlayer): qs = VAT.objects.order_by("-country_code") url = reverse("dashboard:taxes") data = {"country_code": 
[""], "sort_by": ["-country_code"]} response = admin_client.get(url, data) assert response.status_code == 200 assert list(response.context["filter_set"].qs) == list(qs) def test_get_country_choices_for_vat(vatlayer): expected_choices = [("DE", "Germany"), ("PL", "Poland")] choices = get_country_choices_for_vat() assert choices == expected_choices def test_get_taxes_for_address(address, vatlayer, compare_taxes): taxes = get_taxes_for_address(address) compare_taxes(taxes, vatlayer) def test_get_taxes_for_address_fallback_default(settings, vatlayer, compare_taxes): settings.DEFAULT_COUNTRY = "PL" taxes = get_taxes_for_address(None) compare_taxes(taxes, vatlayer) def test_get_taxes_for_address_other_country(address, vatlayer, compare_taxes): address.country = "DE" address.save() tax_rates = get_taxes_for_country(Country("DE")) taxes = get_taxes_for_address(address) compare_taxes(taxes, tax_rates) def test_get_taxes_for_country(vatlayer, compare_taxes): taxes = get_taxes_for_country(Country("PL")) compare_taxes(taxes, vatlayer) def test_get_country_name_by_code(): country_name = get_country_name_by_code("PL") assert country_name == "Poland" def test_apply_tax_to_price_do_not_include_tax(site_settings, taxes): site_settings.include_taxes_in_prices = False site_settings.save() money = Money(100, "USD") assert apply_tax_to_price(taxes, "standard", money) == TaxedMoney( net=Money(100, "USD"), gross=Money(123, "USD") ) assert apply_tax_to_price(taxes, "medical", money) == TaxedMoney( net=Money(100, "USD"), gross=Money(108, "USD") ) taxed_money = TaxedMoney(net=Money(100, "USD"), gross=Money(100, "USD")) assert apply_tax_to_price(taxes, "standard", taxed_money) == TaxedMoney( net=Money(100, "USD"), gross=Money(123, "USD") ) assert apply_tax_to_price(taxes, "medical", taxed_money) == TaxedMoney( net=Money(100, "USD"), gross=Money(108, "USD") ) def test_apply_tax_to_price_do_not_include_tax_fallback_to_standard_rate( site_settings, taxes ): site_settings.include_taxes_in_prices = False site_settings.save() money = Money(100, "USD") taxed_money = TaxedMoney(net=Money(100, "USD"), gross=Money(123, "USD")) assert apply_tax_to_price(taxes, "space suits", money) == taxed_money def test_apply_tax_to_price_include_tax(taxes): money = Money(100, "USD") assert apply_tax_to_price(taxes, "standard", money) == TaxedMoney( net=Money("81.30", "USD"), gross=Money(100, "USD") ) assert apply_tax_to_price(taxes, "medical", money) == TaxedMoney( net=Money("92.59", "USD"), gross=Money(100, "USD") ) def test_apply_tax_to_price_include_fallback_to_standard_rate(taxes): money = Money(100, "USD") assert apply_tax_to_price(taxes, "space suits", money) == TaxedMoney( net=Money("81.30", "USD"), gross=Money(100, "USD") ) taxed_money = TaxedMoney(net=Money(100, "USD"), gross=Money(100, "USD")) assert apply_tax_to_price(taxes, "space suits", taxed_money) == TaxedMoney( net=Money("81.30", "USD"), gross=Money(100, "USD") ) def test_apply_tax_to_price_raise_typeerror_for_invalid_type(taxes): with pytest.raises(TypeError): assert apply_tax_to_price(taxes, "standard", 100) def test_apply_tax_to_price_no_taxes_return_taxed_money(): money = Money(100, "USD") taxed_money = TaxedMoney(net=Money(100, "USD"), gross=Money(100, "USD")) assert apply_tax_to_price(None, "standard", money) == taxed_money assert apply_tax_to_price(None, "medical", taxed_money) == taxed_money def test_apply_tax_to_price_no_taxes_return_taxed_money_range(): money_range = MoneyRange(Money(100, "USD"), Money(200, "USD")) taxed_money_range = TaxedMoneyRange( 
TaxedMoney(net=Money(100, "USD"), gross=Money(100, "USD")), TaxedMoney(net=Money(200, "USD"), gross=Money(200, "USD")), ) assert apply_tax_to_price(None, "standard", money_range) == taxed_money_range assert apply_tax_to_price(None, "standard", taxed_money_range) == taxed_money_range def test_apply_tax_to_price_no_taxes_raise_typeerror_for_invalid_type(): with pytest.raises(TypeError): assert apply_tax_to_price(None, "standard", 100)
python
#These variables are needed to make local variables in functions global custom_end='' homework_end='' assignment_end='' test_end='' quiz_end='' final_end='' custom_advance_details='' def quizzes(): while True: quiz_weight=input('How much does your quizzes weigh? or if not applicable type n/a ') #category_weight ask for the weight of the current category in this case, it would be quizzes if quiz_weight=='n/a': Menu() elif quiz_weight.isdecimal()==False: print('Please use a integer') else: while True: quiz_amount=input('How many quizzes have you taken? ') #ask for the number of assignments completed in this category if quiz_amount.isdigit()==False: print('Please use a integer') elif quiz_amount.isalpha(): print('Please use an integer') else: quiz_value=[] for scores in range(int(quiz_amount)): quiz_scores=input('Please insert scores one at a time ') while quiz_scores.isalpha()==True: print('Please use an integer') quiz_scores=input('Please insert scores one at a time ') else: quiz_scores=float(quiz_scores) #quiz_scores has to be converted into a float in order to do the calculations quiz_value.append(quiz_scores) #This allows the user to input grades and have the iterrated and added to a list for future reference quiz_weighp=int(quiz_weight)/100 # Quiz_weighp converts the category weight from an integer to a float quiz_total=sum(quiz_value) # Adds all the inputted quiz grades quiz_final=quiz_total/int(quiz_amount) # Divides the sum of all the quiz grades by the amount resulting in the final grade for the category excluding its weight global quiz_end quiz_end=int(quiz_final)*float(quiz_weighp) # Multiplies the final grade of the cateogry to corresponding weight to output how much this category affects the final grade print('Your quiz average was',quiz_final,"%. Your quizzes weigh", quiz_end,"% of your final grade.") Menu() # This function is repeated for the other hardcoded categories like test and assignments except for the custom def Test(): while True: test_weight=input('How much does your Test weigh? or if not applicable type n/a ') if test_weight=='n/a': Menu() elif test_weight.isdecimal()==False: print('Please use a integer') else: while True: test_amount=input('How many test have you taken? ') if test_amount.isdigit()==False: print('Please use a integer') else: test_value=[] for scores in range(int(test_amount)): test_scores=input('Please insert scores one at a time ') while test_scores.isalpha()==True: print("Please use an integer") test_scores=input('Please insert scores one at a time ') else: test_scores=float(test_scores) test_value.append(test_scores) test_weighp=int(test_weight)/100 test_total=sum(test_value) test_final=test_total/int(test_amount) global test_end test_end=int(test_final)*float(test_weighp) print('Your test average was',test_final,"%. Your test weigh", test_end,"% of your final grade.") Menu() def assignments(): while True: assignment_weight=input('How much does your assignments weigh? or if not applicable type n/a ') if assignment_weight=='n/a': Menu() elif assignment_weight.isdecimal()==False: print('Please use a integer') else: while True: assignment_amount=input('How many assignments have you completed? 
') if assignment_amount.isdigit()==False: print('Please use a integer') else: assignment_value=[] for scores in range(int(assignment_amount)): assignment_scores=input('Please insert scores one at a time ') while assignment_scores.isalpha()==True: print("Please use an integer") assignment_scores=input('Please insert scores one at a time ') else: assignment_scores=float(assignment_scores) assignment_value.append(assignment_scores) assignment_weighp=int(assignment_weight)/100 assignment_total=sum(assignment_value) assignment_final=assignment_total/int(assignment_amount) global assignment_end assignment_end=int(assignment_final)*float(assignment_weighp) print('Your assignments average was',assignment_final,"%. Your assignments weigh", assignment_end,"% of your final grade.") Menu() def homework(): while True: homework_weight=input('How much does your homework weigh? or if not applicable type n/a ') if homework_weight=='n/a': Menu() elif homework_weight.isdecimal()==False: print('Please use a integer') else: while True: homework_amount=input('How many homework assignments have you completed? ') if homework_amount.isdigit()==False: print('Please use a integer') else: homework_value=[] for scores in range(int(homework_amount)): homework_scores=input('Please insert scores one at a time ') while homework_scores.isalpha()==True: print("Please use an integer") homework_scores=input("Please insert scores one at a time") else: homework_scores=float(homework_scores) homework_value.append(homework_scores) homework_weighp=int(homework_weight)/100 homework_total=sum(homework_value) homework_final=homework_total/int(homework_amount) global homework_end homework_end=int(homework_final)*float(homework_weighp) print('Your homework average was',homework_final,"%. Your homework weigh", homework_end,"% of your final grade.") Menu() def custom(): #The custom function allows the user to add categories they do not see on the menu, however, it only displays the average of all the categories added #They will only see the custom average weight of the categories together, if they want more details, they must use custom_advance_details custom_list=[] custom_scores=[] global custom_advance_details custom_advance_details=[] # The custom_advanced_details allows the user to view all the categories they added and their pertaining grade instead of just the average of all of them on the menu. global custom_end custom_end=sum(custom_list) #The counters job is to signal the program to end the loop once they have entered all their categories in counter=0 #the variable_customs functions job is to make the program more versatile, when adding new categories. It is nearly the same function except has more parameters. #the menu's custom grade will the the product of all the categories in this function added together and multiplied by the weight. 
def variable_customs(name,weight,amount): #print('so the category is', name,'with a', weight, '%', 'and a total of', amount ) for numbers in range(int(amount)): scores=input('Please provide your scores for this category one at a time ') while scores.isalpha()==True: print("Please use an integer") scores=input('Please provide your scores for this category one at a time ') else: scores=float(scores) custom_scores.append(scores) weigh_percentage=int(weight)/100 custom_totalgrades=sum(custom_scores) custom_final=custom_totalgrades/int(amount) custom_final_weight=int(custom_final)*float(weigh_percentage) custom_details=custom_name,'average is', custom_final, '% and weighs', custom_final_weight,'% of your final grade.' custom_advance_details.append(custom_details) custom_list.append(custom_final_weight) custom_amount=input('How many custom categories would you like to add? or enter n/a to return to menu ') while custom_amount: if custom_amount=='n/a': Menu() elif custom_amount.isalpha()==True: print('Please use a integer or decimal ') custom_amount=input('How many custom categories would you like to add? or enter n/a to return to menu ') else: print('You will now be asked to insert information for each category one at a time\n ') for numbers in range(int(custom_amount)): custom_name=input('Please provide a name for category ') while custom_name.isdigit()==True: print("Please use letters only") custom_name=input('Please provide a name for category ') else: #custom_scores is mentioned here again to clear the value because without it, it would just keep adding from the previous values custom_scores=[] custom_weight=input('Enter the weight of category ') while custom_weight.isdecimal()==False: print('Please use an integer') custom_weight=input('Enter the weight of category ') else: number_custom_grades=input('Please provide the number assignments completed for this category ') while number_custom_grades.isdecimal()==False: print('Please use an integer') number_custom_grades=input('Please provide the number assignments completed for this category ') else: counter+=1 variable_customs(custom_name,custom_weight,number_custom_grades) custom_end=sum(custom_list) while counter>=int(custom_amount): Menu() if custom_end=="": custom_end=0 elif custom_end>-1: custom_end=custom_end if homework_end=='': homework_end=0 elif homework_end>-1: homework_end=homework_end if test_end=='': test_end=0 elif test_end=='': test_end=test_end if quiz_end=="": quiz_end=0 elif quiz_end>-1: quiz_end=quiz_end if assignment_end=='': assignment_end=0 elif assignment_end>-1: assignment_end=assignment_end if final_end=='': final_end=0 elif final_end>-1: final_end=final_end # The codes above allows the function final to determine which categories to use. If the user did not insert any grade in a category, it will default to a 0 def final(): global final_end final_end=custom_end+homework_end+test_end+assignment_end+quiz_end+homework_end if final_end<70: print("Your final grade is",final_end,"% Unfortunately you failed.") while True: rmenu=input("Would you like to return to menu? y/n") if rmenu=="y": Menu() elif rmenu=="n": exit() else: print("Please use a valid option") elif 79>=final_end>=70: print("Your final grade is",final_end,"% So you passed good job!") while True: rmenu=input("Would you like to return to menu? 
y/n") if rmenu=="y": Menu() elif rmenu=="n": exit() else: print("Please use a valid option") elif 89>=final_end>=80: print("Your final grade is",final_end,"% Good job you are above average!") while True: rmenu=input("Would you like to return to menu? y/n") if rmenu=="y": Menu() elif rmenu=="n": exit() else: print("Please use a valid option") elif final_end>=90: print( 'Your final Grade is',final_end,'% Congratulations, you aced the class!') while True: rmenu=input("Would you like to return to menu? y/n") if rmenu=="y": Menu() elif rmenu=="n": exit() else: print("Please use a valid option") # function final is the final function that adds all the final grades together def Menu(): print("Hello, welcome to the Will I fail Calculator") print('This program will help calculate your final grade by finding the average weight of each pertaining category of your grade\nand then adding them together to output the final grade.') print('1: Grade Quizzes Current weight is',quiz_end,'%') print('2: Grade Test Current weight is',test_end,'%') print('3: Grade Assignments Current weight is',assignment_end,'%') print('4: Grade Homework Current weight is',homework_end,'%') print('5: Grade Custom Current weight is',custom_end,'%') print("6: Display each individual custom course's grade instead of the entire average") print('7: Calculate Final Grade (all other pertaining grades have to be filled before hand) Current final grade is', final_end,'%') print('8 Close program') Grader=input('Please Choose a category to start with by entering any of the following numbers above and press return key to confirm ') if Grader=='1': quizzes() elif Grader=='2': Test() elif Grader=='3': assignments() elif Grader=='4': homework() elif Grader=='5': custom() elif Grader=='6': print('Below are all the categories you added along with their grades') for details in custom_advance_details: details=str(details) details=details.replace("'","") details=details.replace(",","") #These replacements allows the list to be outputted like a normal sentence without all the punctuations. print(details) menu_return=input('would you like to return to the Menu? y/n? ') if menu_return=='y': Menu() else: exit() elif Grader=='7': final() elif Grader=='8': exit() else: print('That is not one of the options') Menu() Menu()
python
from msal.authority import * from msal.exceptions import MsalServiceError from tests import unittest class TestAuthority(unittest.TestCase): COMMON_AUTH_ENDPOINT = \ 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize' COMMON_TOKEN_ENDPOINT = \ 'https://login.microsoftonline.com/common/oauth2/v2.0/token' def test_wellknown_host_and_tenant(self): # Test one specific sample in straightforward way, for readability a = Authority('https://login.microsoftonline.com/common') self.assertEqual(a.authorization_endpoint, self.COMMON_AUTH_ENDPOINT) self.assertEqual(a.token_endpoint, self.COMMON_TOKEN_ENDPOINT) # Test all well known authority hosts, using same real "common" tenant for host in WELL_KNOWN_AUTHORITY_HOSTS: a = Authority('https://{}/common'.format(host)) # Note: this "common" tenant endpoints always point to its real host self.assertEqual( a.authorization_endpoint, self.COMMON_AUTH_ENDPOINT) self.assertEqual(a.token_endpoint, self.COMMON_TOKEN_ENDPOINT) @unittest.skip("As of Jan 2017, the server no longer returns V1 endpoint") def test_lessknown_host_will_return_a_set_of_v1_endpoints(self): # This is an observation for current (2016-10) server-side behavior. # It is probably not a strict API contract. I simply mention it here. less_known = 'login.windows.net' # less.known.host/ v1_token_endpoint = 'https://{}/common/oauth2/token'.format(less_known) a = Authority('https://{}/common'.format(less_known)) self.assertEqual(a.token_endpoint, v1_token_endpoint) self.assertNotIn('v2.0', a.token_endpoint) def test_unknown_host(self): with self.assertRaisesRegexp(MsalServiceError, "invalid_instance"): Authority('https://unknown.host/tenant_doesnt_matter_in_this_case') def test_unknown_host_valid_tenant_and_skip_host_validation(self): # When skipping host (a.k.a. instance) validation, # the Tenant Discovery will always use WORLD_WIDE service as instance, # so, if the tenant happens to exist there, it will find some endpoints. 
a = Authority('https://incorrect.host/common', validate_authority=False) self.assertEqual(a.authorization_endpoint, self.COMMON_AUTH_ENDPOINT) self.assertEqual(a.token_endpoint, self.COMMON_TOKEN_ENDPOINT) def test_unknown_host_unknown_tenant_and_skip_host_validation(self): with self.assertRaisesRegexp(MsalServiceError, "invalid_tenant"): Authority('https://unknown.host/invalid', validate_authority=False) class TestAuthorityInternalHelperCanonicalize(unittest.TestCase): def test_canonicalize_tenant_followed_by_extra_paths(self): self.assertEqual( canonicalize("https://example.com/tenant/subpath?foo=bar#fragment"), ("https://example.com/tenant", "example.com", "tenant")) def test_canonicalize_tenant_followed_by_extra_query(self): self.assertEqual( canonicalize("https://example.com/tenant?foo=bar#fragment"), ("https://example.com/tenant", "example.com", "tenant")) def test_canonicalize_tenant_followed_by_extra_fragment(self): self.assertEqual( canonicalize("https://example.com/tenant#fragment"), ("https://example.com/tenant", "example.com", "tenant")) def test_canonicalize_rejects_non_https(self): with self.assertRaises(ValueError): canonicalize("http://non.https.example.com/tenant") def test_canonicalize_rejects_tenantless(self): with self.assertRaises(ValueError): canonicalize("https://no.tenant.example.com") def test_canonicalize_rejects_tenantless_host_with_trailing_slash(self): with self.assertRaises(ValueError): canonicalize("https://no.tenant.example.com/") class TestAuthorityInternalHelperInstanceDiscovery(unittest.TestCase): def test_instance_discovery_happy_case(self): self.assertEqual( instance_discovery("https://login.windows.net/tenant"), "https://login.windows.net/tenant/.well-known/openid-configuration") def test_instance_discovery_with_unknown_instance(self): with self.assertRaisesRegexp(MsalServiceError, "invalid_instance"): instance_discovery('https://unknown.host/tenant_doesnt_matter_here') def test_instance_discovery_with_mocked_response(self): mock_response = {'tenant_discovery_endpoint': 'http://a.com/t/openid'} endpoint = instance_discovery( "https://login.microsoftonline.in/tenant.com", response=mock_response) self.assertEqual(endpoint, mock_response['tenant_discovery_endpoint'])
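# Sketch of the endpoint layout the assertions above encode: for a v2.0
# authority, both endpoints hang off https://<host>/<tenant>/oauth2/v2.0.
# Helper name is illustrative, not part of msal.
def expected_v2_endpoints(host, tenant):
    base = 'https://{}/{}/oauth2/v2.0'.format(host, tenant)
    return base + '/authorize', base + '/token'


authorize_ep, token_ep = expected_v2_endpoints('login.microsoftonline.com', 'common')
assert authorize_ep == 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize'
assert token_ep == 'https://login.microsoftonline.com/common/oauth2/v2.0/token'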
python
#!/usr/bin/env python3


class BarItem(object):
    valid_options = set(['full_text', 'short_text', 'color', 'min_width',
                         'align', 'name', 'instance', 'urgent', 'separator',
                         'separator_block_width'])
    COLOR_DEFAULT = '#FFFFFF'

    def __init__(self, name):
        assert(len(name) > 0)
        self.name = name
        self.output = {'name': name.lower()}

    def update(self):
        pass

    def get(self):
        return self.output

    def set(self, option, value):
        assert option in self.valid_options
        self.output[option] = value
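# Usage sketch: build one block and emit it in the shape an i3bar-style status
# line expects (a JSON array of block objects). The field values are made up.
import json

if __name__ == '__main__':
    clock = BarItem('Clock')
    clock.set('full_text', '12:00')
    clock.set('color', BarItem.COLOR_DEFAULT)
    print(json.dumps([clock.get()]))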
python
# a1.py advice for manhattan distance taken from # https://stackoverflow.com/questions/39759721/calculating-the-manhattan-distance-in-the-eight-puzzle # https://www.geeksforgeeks.org/sum-manhattan-distances-pairs-points/ from search import * import time import random # ... SOLVED_STATE = (1, 2, 3, 4, 5, 6, 7, 8, 0) NUM_RANDOM_MOVES = 100 #PYCHARM_DEBUG = True # ______________________________________________________________________________ # A* heuristics # Duck Puzzle class DuckPuzzle(Problem): """ almost the same as eightpuzzle but now the board looks like a duck facing to the left 1 2 3 4 5 6 goal state 7 8 * Tiles slide into the blank (the *) as in the regular 8-puzzle, but now the board has a different shape which changes the possible moves. """ def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)): """ Define goal state and initialize a problem """ super().__init__(initial, goal) def find_blank_square(self, state): """Return the index of the blank square in a given state""" return state.index(0) def actions(self, state): """ Return the actions that can be executed in the given state. The result would be a list, since there are only four possible actions in any given state of the environment """ possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT'] index_blank_square = self.find_blank_square(state) unable_to_move_up = (0, 1, 4, 5) unable_to_move_down = (2, 6, 7, 8) unable_to_move_left = (0, 2, 6) unable_to_move_right = (1, 5, 8) # check which tuples the blank square is in and remove corresponding move option if index_blank_square in unable_to_move_left: possible_actions.remove('LEFT') if index_blank_square in unable_to_move_up: possible_actions.remove('UP') if index_blank_square in unable_to_move_right: possible_actions.remove('RIGHT') if index_blank_square in unable_to_move_down: possible_actions.remove('DOWN') return possible_actions def result(self, state, action): """ Given state and action, return a new state that is the result of the action. Action is assumed to be a valid action in the state duckPuzzle Shape should induce some special cases here. 1 2 3 4 5 6 7 8 * the numbers at index's 0, 1, and 2 will forever be trapped in that corner of the house. Therefore index tile 3 is a special case. Tile index 0, 1, & 2 are also their own special cases. """ # blank is the index of the blank square blank = self.find_blank_square(state) new_state = list(state) delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1} # for most numbers in normal places delta_case1 = {'UP': -2, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1} # specifically for tile at index #3 delta_case2 = {'UP': -2, 'DOWN': 2, 'LEFT': -1, 'RIGHT': 1} # Special case for tiles at index 0, 1, & 2 blank_case2 = (0, 1, 2) if blank is 3: neighbor = blank + delta_case1[action] elif blank in blank_case2: neighbor = blank + delta_case2[action] else: neighbor = blank + delta[action] new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank] return tuple(new_state) def goal_test(self, state): """ Given a state, return True if state is a goal state or False, otherwise """ return state == self.goal def check_solvability(self, state): """ Checks if the given state is solvable """ inversion = 0 for i in range(len(state)): for j in range(i + 1, len(state)): if (state[i] > state[j]) and state[i] != 0 and state[j] != 0: inversion += 1 return inversion % 2 == 0 def h(self, node): """ Return the heuristic value for a given state. 
Default heuristic function used is h(n) = number of misplaced tiles """ return sum(s != g for (s, g) in zip(node.state, self.goal)) def manhattan(self, node): # Goal state is puzzle = (1, 2, 3, 4, 5, 6, 7, 8, 0) # for my simplicity index has been shortened to `i` # adapted from the manhattan function in test_search.py currentstate = node.state # Grab the current state of the EightPuzzle object passed via the node i_target = {0: [2, 2], 1: [0, 0], 2: [0, 1], 3: [0, 2], 4: [1, 0], 5: [1, 1], 6: [1, 2], 7: [2, 0], 8: [2, 1]} i_state = {} index = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]] for i in range(len(currentstate)): i_state[currentstate[i]] = index[i] # initialize i_state dictionary manhattan_distance_x = 0 manhattan_distance_y = 0 for i in range(9): manhattan_distance_x += abs(i_target[i][0] - i_state[i][0]) # two indices because index was 2d array manhattan_distance_y += abs(i_target[i][1] - i_state[i][1]) return manhattan_distance_y + manhattan_distance_x def a_max(self, node): """ Return the biggest heuristic value from either manhattan or h Always grantee most efficient result and larger euristic dominates the smaller one""" h = self.h(node) manhattan = self.manhattan(node) return max(h, manhattan) def get_state(self): """Obtain Current state of EightPuzzle Object, could be useful for display""" return self.initial # END duck_puzzle class def display_Dpuzz(state): """ Helper function that displays duck_puzzle formatted properly (like a duck)""" # I am going to call the state the "board". Since the state is a tuple, all the object values are stored in an # array so that they may be printed. The blank square or 0 will be notated by a * # logically the same as display(state) above but for duckpuzzle() board = [0, 0, 0, 0, 0, 0, 0, 0, 0] # Initialize the array with zeroes for i in range(9): board[i] = state[i] if board[i] == 0: board[i] = '*' # logic below for printing a duck array QUACK if i <= 1: print(board[i], ' ', end='') if i == 1: print() elif 5 >= i > 1: print(board[i], ' ', end='') if i == 5: print() print(' ', end='') else: print(board[i], ' ', end='') print() print() def make_rand_duckPuzz(): state = SOLVED_STATE puzz = DuckPuzzle(state) for _ in range(NUM_RANDOM_MOVES): possible_actions = puzz.actions(state=state) # All currently valid moves for 0 action = random.choice(possible_actions) # Pick a valid move at random state = puzz.result(state=state, action=action) # Apply it to random state, set state to new state return DuckPuzzle(state) # Will always be a solvable puzzle as valid moves have been applied to a solved state def make_n_Dpuzz(n): # Create an Array of n Random puzzles puzzles = [] for _ in range(n): puzzles.append(make_rand_duckPuzz()) return puzzles #-----------------End Duck Puzzle functions----------------------- class EightPuzzle(Problem): """ The problem of sliding tiles numbered from 1 to 8 on a 3x3 board, where one of the squares is a blank. A state is represented as a tuple of length 9, where element at index i represents the tile number at index i (0 if it's an empty square) """ def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)): """ Define goal state and initialize a problem """ super().__init__(initial, goal) def find_blank_square(self, state): """Return the index of the blank square in a given state""" return state.index(0) def actions(self, state): """ Return the actions that can be executed in the given state. 
The result would be a list, since there are only four possible actions in any given state of the environment """ possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT'] index_blank_square = self.find_blank_square(state) if index_blank_square % 3 == 0: possible_actions.remove('LEFT') if index_blank_square < 3: possible_actions.remove('UP') if index_blank_square % 3 == 2: possible_actions.remove('RIGHT') if index_blank_square > 5: possible_actions.remove('DOWN') return possible_actions def result(self, state, action): """ Given state and action, return a new state that is the result of the action. Action is assumed to be a valid action in the state """ # blank is the index of the blank square blank = self.find_blank_square(state) new_state = list(state) delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1} neighbor = blank + delta[action] new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank] return tuple(new_state) def goal_test(self, state): """ Given a state, return True if state is a goal state or False, otherwise """ return state == self.goal def check_solvability(self, state): """ Checks if the given state is solvable """ inversion = 0 for i in range(len(state)): for j in range(i + 1, len(state)): if (state[i] > state[j]) and state[i] != 0 and state[j] != 0: inversion += 1 return inversion % 2 == 0 def h(self, node): """ Return the heuristic value for a given state. Default heuristic function used is h(n) = number of misplaced tiles """ return sum(s != g for (s, g) in zip(node.state, self.goal)) def manhattan(self, node): # Goal state is puzzle = (1, 2, 3, 4, 5, 6, 7, 8, 0) # for my simplicity index has been shortened to `i` # adapted from the manhattan function in test_search.py currentstate = node.state # Grab the current state of the EightPuzzle object passed via the node i_target = {0: [2, 2], 1: [0, 0], 2: [0, 1], 3: [0, 2], 4: [1, 0], 5: [1, 1], 6: [1, 2], 7: [2, 0], 8: [2, 1]} i_state = {} index = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]] # x = 0 # y = 0 for i in range(len(currentstate)): i_state[currentstate[i]] = index[i] # initialize i_state dictionary manhattan_distance_x = 0 manhattan_distance_y = 0 for i in range(9): manhattan_distance_x += abs(i_target[i][0] - i_state[i][0]) # two indices because index was 2d array manhattan_distance_y += abs(i_target[i][1] - i_state[i][1]) return manhattan_distance_y + manhattan_distance_x def a_max(self, node): """ Return the biggest heuristic value from either manhattan or h Always grantee most efficient result and larger euristic dominates the smaller one""" h = self.h(node) manhattan = self.manhattan(node) return max(h, manhattan) def get_state(self): """Obtain Current state of EightPuzzle Object, could be useful for display""" return self.initial def display(state): """ Helper function that displays the state of 8 puzzle (tuple) in 3x3 form""" # I am going to call the state the "board". Since the state is a tuple, all the object values are stored in an # array so that they may be printed. 
The blank square or 0 will be notated by a * board = [0, 0, 0, 0, 0, 0, 0, 0, 0] # Initialize the array with zeroes for i in range(9): board[i] = state[i] if board[i] == 0: board[i] = '*' # logic below for printing a 3x3 array if i <= 2: print(board[i], ' ', end='') if i == 2: print() elif 5 >= i > 2: print(board[i], ' ', end='') if i == 5: print() else: print(board[i], ' ', end='') print() print() def make_rand_8puzzle(): state = SOLVED_STATE puzz = EightPuzzle(state) for _ in range(NUM_RANDOM_MOVES): possible_actions = puzz.actions(state=state) # All currently valid moves for 0 action = random.choice(possible_actions) # Pick a valid move at random state = puzz.result(state=state, action=action) # Apply it to random state, set state to new state return EightPuzzle(state) # Will always be a solvable puzzle as valid moves have been applied to a solved state def make_n_puzzles(n): # Create an Array of n Random puzzles puzzles = [] for _ in range(n): puzzles.append(make_rand_8puzzle()) return puzzles # -------------------------------------End EightPuzzle Functions-------------------------------- # ----------------------------- A* Search variations!------------------------------------------- def astar_search(problem, h=None, display=False): """A* search is best-first graph search with f(n) = g(n)+h(n). You need to specify the h function when you call astar_search, or else in your Problem subclass.""" h = memoize(h or problem.h, 'h') return best_first_graph_search(problem, lambda n: n.path_cost + h(n), display) # Modify astar_search to use Manhattan Distance Heuristic def astar_manhattan(problem, h=None): """Modification on A* search to use Manhattan Distance as Heuristic""" h = memoize(h or problem.manhattan, 'h') return best_first_graph_search(problem, lambda n: n.path_cost + h(n)) # modified astar_search to use maximum of misplaced tile heuristic def astar_max(problem, h=None): h = memoize(h or problem.a_max, 'h') return best_first_graph_search(problem, lambda n: n.path_cost + h(n)) # ---------------------------------END A* Search Variations------------------------------------- def best_first_graph_search(problem, f, display=False): """Search the nodes with the lowest f scores first. You specify the function f(node) that you want to minimize; for example, if f is a heuristic estimate to the goal, then we have greedy best first search; if f is node.depth then we have breadth-first search. There is a subtlety: the line "f = memoize(f, 'f')" means that the f values will be cached on the nodes as they are computed. 
So after doing a best first search you can examine the f values of the path returned.""" f = memoize(f, 'f') node = Node(problem.initial) frontier = PriorityQueue('min', f) frontier.append(node) explored = set() FledTheFrontier = 0 # Tracks how many nodes have fled the frontier (were removed from it) yeeehaw test = 0 while frontier: test += 1 node = frontier.pop() FledTheFrontier += 1 # Account for all fleeing nodes if problem.goal_test(node.state): if display: print(len(explored), "paths have been expanded and", len(frontier), "paths remain in the frontier") return [node, FledTheFrontier] # Return FledFromFrontier here so that we may access it outside the func explored.add(node.state) for child in node.expand(problem): if child.state not in explored and child not in frontier: frontier.append(child) elif child in frontier: if f(child) < frontier[child]: del frontier[child] frontier.append(child) return None # ----------Small Functions to generate 10 (or n) puzzles for some statistical analysis---------- def eight_puzzle_analysis(): # print("data for 10 puzzles using A* search using misplaced tile Heuristic (default)") puzzles = make_n_puzzles(10) for puzz in puzzles: display(puzz.get_state()) start_time = time.time_ns() finished_puzzle = astar_search(puzz) elapsed_time = (time.time_ns() - start_time) / 1000000000 # Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER- print(elapsed_time, end='') print(',', finished_puzzle[0].path_cost, end='') print(',', finished_puzzle[1]) print() # print("data for 10 puzzles using modified A* with manhattan distance heuristic") for puzz in puzzles: start_time = time.time_ns() finished_puzzle = astar_manhattan(puzz) elapsed_time = (time.time_ns() - start_time) / 1000000000 # Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER- print(elapsed_time, end='') print(',', finished_puzzle[0].path_cost, end='') print(',', finished_puzzle[1]) print() # print("data for 10 puzzles using modified A* with max misplaced distance heuristic") for puzz in puzzles: start_time = time.time_ns() finished_puzzle = astar_max(puzz) elapsed_time = (time.time_ns() - start_time) / 1000000000 # Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER- print(elapsed_time, end='') print(',', finished_puzzle[0].path_cost, end='') print(',', finished_puzzle[1]) def duck_puzzle_analysis(): puzzles = make_n_Dpuzz(10) for puzz in puzzles: #display_Dpuzz(puzz.get_state()) start_time = time.time_ns() finished_puzzle = astar_search(puzz) elapsed_time = (time.time_ns() - start_time) / 1000000000 # Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER- print(elapsed_time, end='') print(',', finished_puzzle[0].path_cost, end='') print(',', finished_puzzle[1]) print() # print("data for 10 puzzles using modified A* with manhattan distance heuristic") for puzz in puzzles: start_time = time.time_ns() finished_puzzle = astar_manhattan(puzz) elapsed_time = (time.time_ns() - start_time) / 1000000000 # Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER- print(elapsed_time, end='') print(',', finished_puzzle[0].path_cost, end='') print(',', finished_puzzle[1]) print() # print("data for 10 puzzles using modified A* with max misplaced distance heuristic") for puzz in puzzles: start_time = time.time_ns() finished_puzzle = astar_max(puzz) elapsed_time = (time.time_ns() - start_time) / 
1000000000 # Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER- print(elapsed_time, end='') print(',', finished_puzzle[0].path_cost, end='') print(',', finished_puzzle[1]) return 0 def single_duck_puzzle(): #debugging to find where misplaced action puzzle = make_rand_duckPuzz() #display_Dpuzz(puzzle.get_state()) #single_duck_puzzle() # eight_puzzle_analysis() duck_puzzle_analysis()
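# --- Hedged usage sketch (not part of the original file) ---
# A quick single-puzzle comparison of the three heuristics defined above. It relies only on
# functions already present in this file (make_rand_8puzzle, astar_search, astar_manhattan,
# astar_max, display); the helper name compare_heuristics_once is illustrative, not original.
def compare_heuristics_once():
    puzz = make_rand_8puzzle()
    display(puzz.get_state())
    for label, solver in [("misplaced", astar_search), ("manhattan", astar_manhattan), ("max", astar_max)]:
        start_time = time.time_ns()
        node, removed_from_frontier = solver(puzz)  # best_first_graph_search returns [goal node, removed count]
        elapsed_time = (time.time_ns() - start_time) / 1000000000
        print(label, elapsed_time, node.path_cost, removed_from_frontier)

# compare_heuristics_once()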
python
from requests_oauthlib import OAuth2Session
from flask import Flask, request, redirect, session, url_for
from flask.json import jsonify
import os
from requests_oauthlib.compliance_fixes import facebook_compliance_fix

app = Flask(__name__)


# This information is obtained upon registration of a new Facebook OAuth
# application here: https://developers.facebook.com/apps
client_id = "354731726140076"
client_secret = "ce6012f0684ade3c7cb3938ba20f7446"
authorization_base_url = 'https://www.facebook.com/dialog/oauth'
token_url = 'https://graph.facebook.com/oauth/access_token'
redirect_uri = 'https://bstore21.heliohost.us/loginsucess.html'


@app.route("/")
def demo():
    """Step 1: User Authorization.

    Redirect the user/resource owner to the OAuth provider (i.e. Facebook)
    using an URL with a few key OAuth parameters.
    """
    facebook = OAuth2Session(client_id, redirect_uri=redirect_uri)
    facebook = facebook_compliance_fix(facebook)
    authorization_url, state = facebook.authorization_url(authorization_base_url)

    # State is used to prevent CSRF, keep this for later.
    session['oauth_state'] = state
    return redirect(authorization_url)


# Step 2: User authorization, this happens on the provider.

@app.route("/callback", methods=["GET"])
def callback():
    """ Step 3: Retrieving an access token.

    The user has been redirected back from the provider to your registered
    callback URL. With this redirection comes an authorization code included
    in the redirect URL. We will use that to obtain an access token.
    """
    facebook = OAuth2Session(client_id, state=session['oauth_state'], redirect_uri=redirect_uri)
    facebook = facebook_compliance_fix(facebook)
    token = facebook.fetch_token(token_url, client_secret=client_secret,
                                 authorization_response=request.url)

    # At this point you can fetch protected resources but lets save
    # the token and show how this is done from a persisted token
    # in /profile.
    session['oauth_token'] = token

    return redirect(url_for('.profile'))


@app.route("/profile", methods=["GET"])
def profile():
    """Fetching a protected resource using an OAuth 2 token.
    """
    facebook = OAuth2Session(client_id, token=session['oauth_token'])
    return jsonify(facebook.get('https://graph.facebook.com/me').json())


if __name__ == "__main__":
    # This allows us to use a plain HTTP callback
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"

    app.secret_key = os.urandom(24)
    app.run(debug=True)
python
############################################################################ # This Python file is part of PyFEM, the code that accompanies the book: # # # # 'Non-Linear Finite Element Analysis of Solids and Structures' # # R. de Borst, M.A. Crisfield, J.J.C. Remmers and C.V. Verhoosel # # John Wiley and Sons, 2012, ISBN 978-0470666449 # # # # The code is written by J.J.C. Remmers, C.V. Verhoosel and R. de Borst. # # # # The latest stable version can be downloaded from the web-site: # # http://www.wiley.com/go/deborst # # # # A github repository, with the most up to date version of the code, # # can be found here: # # https://github.com/jjcremmers/PyFEM # # # # The code is open source and intended for educational and scientific # # purposes only. If you use PyFEM in your research, the developers would # # be grateful if you could cite the book. # # # # Disclaimer: # # The authors reserve all rights but do not guarantee that the code is # # free from errors. Furthermore, the authors shall not be liable in any # # event caused by the use of the program. # ############################################################################ ############################################################################ # Description: The Python file of the bezier interpolation example as # # presented in section 15.3, page 487-490. # # # # Usage: python beziertest.py # ############################################################################ from numpy import zeros,dot C = zeros(shape=[4,4,4]) C[0,0,0] = 1.0 C[0,1,1] = 1.0 C[0,1,2] = 0.5 C[0,1,3] = 0.25 C[0,2,2] = 0.5 C[0,2,3] = 0.5 C[0,3,3] = 0.25 C[1,0,0] = 0.25 C[1,1,0] = 0.5 C[1,1,1] = 0.5 C[1,2,0] = 0.25 C[1,2,1] = 0.5 C[1,2,2] = 1.0 C[1,2,3] = 0.5 C[1,3,3] = 0.5 C[2,0,0] = 0.5 C[2,1,0] = 0.5 C[2,1,1] = 1.0 C[2,2,2] = 1.0 C[2,3,3] = 1.0 C[3,0,0] = 1.0 C[3,1,1] = 1.0 C[3,2,2] = 1.0 C[3,3,3] = 1.0 coords = zeros( shape=(10,2) ) coords[0,:] = [0.0 , 0.0 ] coords[1,:] = [1.0 , 1.0 ] coords[2,:] = [1.0 , 3.0 ] coords[3,:] = [2.0 , 3.0 ] coords[4,:] = [2.5 , 1.5 ] coords[5,:] = [1.5 , 0.5 ] coords[6,:] = [3.0 , 0.0 ] coords[7,:] = [3.2 , 2.0 ] coords[8,:] = [3.8 , 2.5 ] coords[9,:] = [4.0 , 0.0 ] elems = zeros( shape=(4,4) , dtype=int ) elems[0,:] = [ 0 , 1 , 2 , 3 ] elems[1,:] = [ 1 , 2 , 3 , 4 ] elems[2,:] = [ 3 , 4 , 5 , 6 ] elems[3,:] = [ 6 , 7 , 8 , 9 ] output = [] length = 0. from pyfem.util.BezierShapeFunctions import getElemBezierData for elemNodes,Celem in zip(elems,C): sdata = getElemBezierData ( coords[elemNodes,:] , Celem , \ order = 100 , elemType='Line4') for idata in sdata: x = dot(idata.h,coords[elemNodes,:]) output.append(x) length += idata.weight print("The length of the curve is ",length) from pylab import plot, show, xlabel, ylabel plot( [x[0] for x in output], [x[1] for x in output], '-' ) plot( [x[0] for x in coords], [x[1] for x in coords], 'ro-' ) show()
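# Hedged cross-check (an assumption-laden sketch, not part of the original example): in the
# usual Bezier-extraction convention the element shape functions are N(t) = C_e @ B(t), where
# B(t) are the cubic Bernstein polynomials on t in [0, 1] and C_e is the 4x4 extraction
# operator defined above. Under that assumption, densely sampling each element and summing
# chord lengths gives an independent polyline estimate of the curve length for comparison with
# the quadrature figure printed above. (Being module-level code placed after show(), it only
# runs once the plot window is closed.)
from numpy import array, linspace
from numpy.linalg import norm

def bernstein3(t):
    # cubic Bernstein basis on [0, 1]
    return array([(1.0 - t)**3, 3.0*t*(1.0 - t)**2, 3.0*t**2*(1.0 - t), t**3])

approx_length = 0.0
for elemNodes, Celem in zip(elems, C):
    # sample the element curve x(t) = (C_e @ B(t)) . P_e at 201 points
    pts = [dot(dot(Celem, bernstein3(t)), coords[elemNodes, :]) for t in linspace(0.0, 1.0, 201)]
    for a, b in zip(pts[:-1], pts[1:]):
        approx_length += norm(b - a)

print("Polyline approximation of the length:", approx_length)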
python
#!/usr/bin/env python ''' @todo: turn this into a real test runner for everything in the test subdir. ''' import sys from aisutils.BitVector import BitVector from aisutils import binary import ais_msg_1 import ais_msg_8 import sls.waterlevel if __name__=='__main__': # Try to parse some binary message if False: nmeaStr='!AIVDM,1,1,,A,85OpLV1Kf98p96dWWPLSViUfJlU@SV>cDF2Wq5>`=u8CnEFGCIOq,0*70,r003669983,1165795916' msgPayload = nmeaStr.split(',')[5] print 'nmea string: ',nmeaStr print 'message payload:',msgPayload bv = binary.ais6tobitvec(msgPayload) print len(bv), bv msgDict = ais_msg_8.bin_broadcastDecode(bv) ais_msg_8.bin_broadcastPrintFields(msgDict) bv = bv[39:] print 'dac: ',bv[:10],int(bv[:10]) bv = bv[10:]; print 'fid: ',bv[: 6],int(bv[: 6]) bv = bv[ 6:]; print 'bits:',bv[:16],int(bv[:10]) bv = bv[10:]; print 'len: ',len(bv) # Position message if False: nmeaStr = '!AIVDM,1,1,,B,15Mt9B001;rgAFhGKLaRK1v2040@,0*2A' msgPayload = nmeaStr.split(',')[5] print 'nmea string: ',nmeaStr print 'message payload:',msgPayload bv = binary.ais6tobitvec(msgPayload) msgDict = ais_msg_1.positionDecode(bv) ais_msg_1.positionPrint(msgDict) # SLS try for waterlevel if True: bvStr = '010111101000001000100101000001010100110101001100011000001000000000110001100101110101000000001001010011101101000000000000001000000100000000000000' bv = BitVector(bitstring=bvStr) print type(bv) msgDict= sls.waterlevel.decode(bv) sls.waterlevel.printFields(msgDict)
python
import streamlit as st import pandas as pd from itertools import groupby from datetime import datetime import re from pretty_html_table import build_table st.set_page_config(layout='wide') JIRA_REGEX= "[A-Z]{2,}-\d+" def parse_blame(chunk): branch = chunk[0].split()[0] line_start = chunk[0].split()[1] author = chunk[1][7:] author_mail = chunk[2][13:-1] author_time_int = chunk[3][12:] author_time = datetime.fromtimestamp(int(author_time_int)) filename = chunk[-2][9:] comment_text = chunk[-1] comment = comment_text[comment_text.find("TODO"):] jira_tickets = re.findall(JIRA_REGEX, comment) jira_ticket = jira_tickets[0] if jira_tickets else None return author, author_mail, author_time, filename, comment, branch, line_start, jira_ticket def apply_tags(comment): comment = comment.lower() tags = [] uncertainty_words = {"uncertainty": ["?", "maybe", "perhaps", "should we", "probably", "might", "not sure"]} hacky_words = {"hacky": ["temporary", "hack", "hacky"]} fixme_words = {"fix": ["fixme", "fix", "bug", "incorrect"]} dependency_words = {"dependency": ["once", "when", "blocked"]} for tw in [uncertainty_words, hacky_words, fixme_words, dependency_words]: for k, v in tw.items(): for w in v: if w in comment: tags.append(k) return tags def make_table(df): return build_table(df, 'blue_light', font_size='medium', font_family='Century Gothic, sans-serif', text_align='left', width='auto', index=False, escape=False) def make_path_clickable(branch, path, line_start, prefix='https://github.com/pytorch/pytorch'): return f'<a target="_blank" href="{prefix}/blob/{branch}/{path}#L{line_start}">{path}</a>' col1, col2, col3 = st.columns(3) with col1: st.title("✅ Let's Do it") with col3: st.header("#TODO tracker") st.sidebar.write("Run **todo_script.sh** to get blame results for a repo") blame_txt = st.sidebar.file_uploader("Submit the git blame results.txt") if blame_txt: content = blame_txt.readlines() content = [line.decode("utf-8").strip() for line in content] chunks = (list(g) for k, g in groupby(content, key=lambda x: x != '--blame-end--') if k) data = [] for chunk in chunks: data.append(list(parse_blame(chunk))) df = pd.DataFrame(data, columns=['author', 'author_mail', 'author_time', 'filename', 'comment', "branch", "line_start", "jira_ticket"]) df['tags'] = df.comment.map(apply_tags) df['clickable_filename'] = df.apply(lambda x: make_path_clickable(x['branch'], x['filename'], x['line_start']), axis=1) unique_emails = df.author_mail.unique() search_option = st.sidebar.selectbox("search option", ['by email', 'by path prefix']) if search_option == 'by email': author_mail_input = st.sidebar.selectbox("email (e.g. [email protected])", [""] + unique_emails.tolist()) if author_mail_input != '': to_show_df = df[df.author_mail == author_mail_input].copy() st.write(make_table(to_show_df[['author', 'author_mail', 'author_time', 'clickable_filename', 'comment', 'tags', 'jira_ticket']]), unsafe_allow_html=True) if search_option == 'by path prefix': path_input = st.sidebar.text_input("path prefix (e.g. caffe2/contrib)") if path_input: length = len(path_input) to_show_df = df[df.filename.map(lambda x: x[:length] == path_input)].copy() st.write(make_table(to_show_df[['author', 'author_mail', 'author_time', 'clickable_filename', 'comment', 'tags', 'jira_ticket']]), unsafe_allow_html=True)
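# Hedged sketch (illustrative, not the original todo_script.sh): one way to produce the
# "git blame results.txt" this app expects, using plain git. It assumes the standard
# `git blame --line-porcelain` layout that parse_blame() slices above ("author ",
# "author-mail <...>", "author-time ", "filename ", then the tab-prefixed source line),
# with chunks separated by "--blame-end--". The function name and output path are made up.
import subprocess

def collect_todo_blame(repo_dir, out_path="results.txt"):
    # every "<path>:<lineno>:<text>" occurrence of TODO in the repo
    grep = subprocess.run(["git", "grep", "-n", "TODO"], cwd=repo_dir,
                          capture_output=True, text=True, check=False)
    with open(out_path, "w") as out:
        for hit in grep.stdout.splitlines():
            parts = hit.split(":", 2)
            if len(parts) < 3:
                continue  # skip e.g. "Binary file ... matches" lines
            path, lineno, _ = parts
            blame = subprocess.run(
                ["git", "blame", "--line-porcelain", "-L", f"{lineno},{lineno}", "--", path],
                cwd=repo_dir, capture_output=True, text=True, check=False)
            if blame.returncode == 0 and blame.stdout:
                out.write(blame.stdout.rstrip("\n") + "\n--blame-end--\n")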
python
import ast import csv import korbinian import korbinian.utils as utils import numpy as np import os import pandas as pd import sys import time # import debugging tools from korbinian.utils import pr, pc, pn, aaa def get_TM_indices_from_TMSEG_topo_str(topo_str, TM_symbol="H"): """Get TM indices from TMSEG topology string. Code is not very elegant in comparison to a regex approach, but works fine. Parameters ---------- topo_str : str Topology string output from TMSEG. H = TM helix Note that TM orientation (N-cyto or N-out) is currently not extracted. E.g. "11111111111111HHHHHHHHHHHHHHHHHHH222222222222222222222222222222222222222222222222222222222222222222222222HHHHHHHHHHHHHHHHHHHHHHHHH" "111111111111111111111111HHHHHHHHHHHHHHHHHHHHH222222222222222HHHHHHHHHHHHHHHHHHHH111111111111111111111111111111111111HHHHHHHHHHHHHHHHHHHHHHH" "22222222222222222222222222222222HHHHHHHHHHHHHHHHHHHHHH1111111111111111111111111HHHHHHHHHHHHHHHHHHHHHHH22222222222222222222222222222222222222" "2222HHHHHHHHHHHHHHHHHHHHH11111111111111111111111111111111111111" Returns ------- TM_indices : tuple Nested tuple with start and end of all TM helices in topology string. UniProt indexing is used ("HHH111" is (1:3), not (0:3)) E.g. ((15, 33),1 (106, 130), (155, 175), (191, 210), (247, 269), (302, 323), (349, 371), (414, 434)) """ if TM_symbol in topo_str: # get indices (eg. [28, 29, 30, 31, 32, 33, 34, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 72, 73, 74, 75, 76]) M_indices = get_list_TM_residues_from_topo_string(topo_str, TM_symbol) #SiPe_indices = get_signal_peptide_indices_from_TMSEG_topo(topo_str) # get borders to TM regions(eg. [28, 34, 58, 68, 72, 76]) m_borders = [] m_borders.append(M_indices[0]) m_borders = korbinian.prot_list.parse_OMPdb.check_for_border(M_indices, m_borders) # add the last membrane index (e.g. 33) m_borders.append(M_indices[-1]) # convert to nested tuples TM_indices = convert_alternating_list_to_nested_tuples(m_borders) return TM_indices else: return () def slice_nonTMD_in_prot_list(df): """Using existing indices and sequence, slices out all the TMD sequences. Originally from TMSEG fasta parse code. Parameters ---------- df : pd.DataFrame Returns ------- df : pd.Dataframe returns the same dataframe, with added sliced sequences """ # glance at the watch start = time.clock() # set to be an empty string, which avoids the error related to inserting a python list into a cell # "ValueError: Must have equal len keys and value when setting with an iterable" df['list_of_TMDs_excl_SP'] = "" for n, acc in enumerate(df.index): ''' ~~ SLICE nonTMD sequence ~~ ''' # list of TMDs excluding signal peptides list_of_TMDs_excl_SP = df.loc[acc, 'list_of_TMDs'] # set value to avoid errors adding a list to a cell df.set_value(acc, 'list_of_TMDs_excl_SP', list_of_TMDs_excl_SP) seqstart = 0 # if any protein in list conatains a SP if 'SP01_end' in df.columns: # if THIS PARTICULAR PROTEIN contains a signal peptide sequence if isinstance(df.loc[acc, 'SP01_seq'], str): # change sequence start for nonTM to the end of the signal peptide seqstart = int(df.loc[acc, 'SP01_end']) # add the SP01 to the list of TMDs df.set_value(acc, 'list_of_TMDs', ["SP01"] + list_of_TMDs_excl_SP) # sequence from N-term. to first TMD TM01_start = int(df.loc[acc, 'TM01_start']) # NOTE THAT THIS USED TO BE nonTMD_first = df.loc[acc, 'full_seq'][0: TM01_start -1], but indexing missed the last nonTM residue. 
nonTMD_first = df.loc[acc, 'full_seq'][seqstart: TM01_start - 1] # start the sequence with the first segment sequence_list = [nonTMD_first] # only for multipass proteins, generate sequences between TMDs if len(list_of_TMDs_excl_SP) == 0: # no TMDs are annotated, skip to next protein continue # for multipass proteins elif len(list_of_TMDs_excl_SP) > 1: for TM_Nr in range(len(list_of_TMDs_excl_SP) - 1): # the TMD is the equivalent item in the list TMD = list_of_TMDs_excl_SP[TM_Nr] # the next TMD, which contains the end index, is the next item in the list next_TMD = list_of_TMDs_excl_SP[TM_Nr + 1] # define start of next TMD start_next = int(df.loc[acc, '%s_start' % next_TMD]) # end of current TMD end_current = int(df.loc[acc, '%s_end' % TMD]) # middle sequence between TMDs # note the "start_next - 1", used to convert uniprot indices to python indices between_TM_and_TMplus1 = df.loc[acc, 'full_seq'][end_current: start_next - 1] sequence_list.append(between_TM_and_TMplus1) last_TMD = list_of_TMDs_excl_SP[-1] # sequence from last TMD to C-term. lastTM_end = int(df.loc[acc, '%s_end' % last_TMD]) seqlen = int(df.loc[acc, 'seqlen']) nonTMD_last = df.loc[acc, 'full_seq'][lastTM_end:seqlen] sequence_list.append(nonTMD_last) # join all the sequences together sequence = "".join(sequence_list) df.loc[acc, 'nonTMD_seq'] = sequence df.loc[acc, 'len_nonTMD'] = len(sequence) if n % 50 == 0 and n != 0: sys.stdout.write(".") sys.stdout.flush() if n % 500 == 0: sys.stdout.write("\n") sys.stdout.flush() # glance at the watch again. Ruminate on time passed sys.stdout.write("\ntime taken to slice nonTMD sequences : {:0.03f} s".format(time.clock() - start)) return df def get_list_TM_residues_from_topo_string(Topo_data, TM_symbol): # get list of membrane indices # note that this is UNIPROT indexing, not python indexing m_list = [i+1 for i, topo in enumerate(Topo_data) if topo == TM_symbol] # find(Topo_data) return m_list def convert_alternating_list_to_nested_tuples(x): return tuple(zip(x[::2], x[1::2])) def parse_TMSEG_results_DEPRECATED(pathdict, s, logging): """DEPRECATED METHOD BASED ON LARGE FILE OF ALL TMSEG RESULTS USE METHODS BASED ON INDIVIDUAL TMSEG DATAFILES INSTEAD. 
""" logging.info("~~~~~~~~~~~~ starting parse_TMSEG_results_DEPRECATED ~~~~~~~~~~~~") # create or open dataframe for protein list summary if os.path.isfile(pathdict["prot_list_summary_csv"]): df_PLS = pd.read_csv(pathdict["prot_list_summary_csv"], index_col=0) else: df_PLS = pd.DataFrame(columns=["v", "date"]) # get the timestamp for current time t = time.ctime(time.time()) list_number = s['list_number'] # define the uniprot directory with selected records uniprot_dir = os.path.join(s["data_dir"], 'uniprot') selected_uniprot_records_flatfile = os.path.join(uniprot_dir, 'selected', 'List%02d_selected_uniprot_records_flatfile.txt' % list_number) n_aa_before_tmd = s["n_aa_before_tmd"] n_aa_after_tmd = s["n_aa_after_tmd"] list_parsed_csv = pathdict["list_parsed_csv"] # check if the lists tab says to analyse the signal peptides analyse_sp = True if "SiPe" in s["regions"] else False output = korbinian.prot_list.uniprot_parse.parse_flatfile_to_csv(selected_uniprot_records_flatfile, n_aa_before_tmd, n_aa_after_tmd, analyse_sp, logging, list_parsed_csv, slice=False) logging.info(output) TMSEG_fastalike_path = pathdict['TMSEG_fastalike'] TMSEG_top_txtoutput_path = pathdict['TMSEG_top_txtoutput'] TMSEG_nonTM_outpath = pathdict['TMSEG_nonTM'] df_parsed = pd.read_csv(pathdict["list_parsed_csv"], sep=",", quoting=csv.QUOTE_NONNUMERIC, index_col=0, low_memory=False) columns_to_keep = ['organism_domain', 'uniprot_acc', 'uniprot_all_accessions', 'uniprot_entry_name', 'uniprot_features', 'uniprot_orgclass', 'uniprot_SiPe', 'singlepass', 'typeI', 'typeII', 'uniprot_KW', 'organism', 'prot_descr', 'membrane', 'multipass', 'gene_name', 'comments_subcellular_location_uniprot', 'uniprot_SiPe', 'full_seq'] # # for datasets without SP found, turn off analyse_sp # if analyse_sp == True and 'SP01_start' in df_parsed.columns: # columns_to_keep = ['SP01_start', 'SP01_end', 'SP01_seq'] # else: # analyse_sp == False acc_list_orig = list(df_parsed.index) if os.path.isfile(TMSEG_fastalike_path): df_PLS.loc["TMSEG_fastalike_path", :] = ("exists", t) sys.stdout.write("Extracting topology from TMSEG_fastalike file.") # DEPRECATED drop the full sequence, and get from TMSEG #df_parsed.drop('full_seq', axis=1, inplace=True) # read data from file # list will have acc, seq, topology, acc, seq, topology etc input_data = [] with open(TMSEG_fastalike_path) as data_file: for line in data_file: line = line.strip() if line[0] == '>': line = line[1:] line = line.split(' ') line = line[0].split('|') uniprot_acc = line[0] input_data.append(uniprot_acc) else: input_data.append(line) # initialise pandas dataframe with uniprot accession as index df_TMSEG = pd.DataFrame(index=input_data[0::3]) # add the signal peptide definitions from UniProt, to be used for slicing the nonTMD etc later if analyse_sp: for col in ['SP01_start', 'SP01_end', 'SP01_seq']: df_TMSEG[col] = df_parsed[col] # drop unnecessary columns from df_parsed, to be merged later df_parsed = df_parsed[columns_to_keep] # add selected columns from input_data list #df_TMSEG['uniprot_entry_name'] = input_data[1::5] #df_TMSEG['prot_descr'] = input_data[2::5] df_TMSEG['full_seq'] = input_data[1::3] df_TMSEG['topo'] = input_data[2::3] acc_list_TMSEG = df_TMSEG.index.tolist() TMSEG_avail_list = set(acc_list_TMSEG).intersection(set(acc_list_orig)) TMSEG_unavail_list = list(set(acc_list_orig) - set(acc_list_TMSEG)) df_PLS.loc["n_prot_TMSEG_file"] = (len(acc_list_TMSEG), t) # create a boolean whether the TMSEG topology is available df_parsed.loc[TMSEG_avail_list,"TMSEG_avail"] = True 
df_parsed.loc[TMSEG_unavail_list, "TMSEG_avail"] = False # drop proteins from df_TMSEG that are not in the listxx_parsed.csv df_TMSEG = df_TMSEG.loc[TMSEG_avail_list, :] fa_dir = pathdict['TMSEG_unavail_fa_dir'] utils.make_sure_path_exists(fa_dir) for acc in TMSEG_unavail_list: out_fasta = os.path.join(fa_dir, "{}.fasta".format(acc)) seq = df_parsed.loc[acc, "full_seq"] with open(out_fasta, "w") as f: f.write(">{}\n{}".format(acc, seq)) n_prot_TMSEG_file_not_in_list = len(set(acc_list_TMSEG) - set(acc_list_orig)) logging.info("n_prot_TMSEG_file_not_in_list as not in listxx_parsed.csv = {} ({} remaining)".format(n_prot_TMSEG_file_not_in_list, len(TMSEG_avail_list))) df_PLS.loc["n_prot_TMSEG_file_not_in_list"] = (n_prot_TMSEG_file_not_in_list, t) if df_TMSEG.shape[0] == 0: return sys.stdout.write('no remaining proteins in list!') # get list of uniprot accessions of proteins where no transmembrane region was predicted list_nonTMD = [] for acc in df_TMSEG.index: if 'N' in df_TMSEG.loc[acc, 'topo']: list_nonTMD.append(acc) # write list of nonTM proteins to file # outpath = '/Volumes/Musik/Databases/TMSEG/humanU90_nonTMD.txt' file = open(TMSEG_nonTM_outpath, 'w') for line in list_nonTMD: file.write('{}\n'.format(line)) file.close() # drop proteins that do not contain TM regions df_TMSEG = df_TMSEG.drop(list_nonTMD) # create a boolean whether the TMSEG topology is available TMSEG_avail_and_TM = set(TMSEG_avail_list) - set(list_nonTMD) TMSEG_avail_but_SOL = set(acc_list_orig).intersection(set(list_nonTMD)) df_parsed["membrane"] = np.nan df_parsed.loc[TMSEG_avail_and_TM, "membrane"] = True df_parsed.loc[TMSEG_avail_but_SOL, "membrane"] = False # add seqlen and indices for all TMD and SiPe regions df_TMSEG["seqlen"] = df_TMSEG.full_seq.apply(lambda x: len(x)) #df_TMSEG['M_indices'] = df_TMSEG.topo.apply(get_list_TM_residues_from_topo_string) #df_TMSEG['SiPe_indices'] = df_TMSEG.topo.apply(get_list_TM_residues_from_topo_string, args=("S")) df_TMSEG['TM_indices'] = df_TMSEG.topo.apply(get_TM_indices_from_TMSEG_topo_str) df_TMSEG['SiPe_indices'] = df_TMSEG.topo.apply(get_TM_indices_from_TMSEG_topo_str, args=("S")) # # Creating new list (nested list) # nested_list_of_membrane_borders = [] # # ######################################################################################## # # # # # Extract the membrane indices in UniProt Indexing style # # # # # ######################################################################################## # # Filling nest with lists of start and end-points # for m_index_list in df_TMSEG.M_indices: # m_borders = [] # # add the first membrane index (e.g. 13) # m_borders.append(m_index_list[0]) # m_borders = korbinian.prot_list.parse_OMPdb.check_for_border(m_index_list, m_borders) # # add the last membrane index (e.g. 33) # m_borders.append(m_index_list[-1]) # nested_list_of_membrane_borders.append(m_borders) # # # DEPRECATED # #FOR CONSISTENCY, LEAVE INDEXING STYLE AS UNIPROT # # ######################################################################################## # # # # # # # Convert to python indexing style (NECESSARY?? NOT COMPAT WITH UNIPROT!) 
# # # # # # # ######################################################################################## # # array_membrane_borders = np.array(nested_list_of_membrane_borders) # # nested_list_m_borders_python_indexstyle = [] # # for subarray in array_membrane_borders: # # # convert to array # # subarray = np.array(subarray) # # # add 1 to the second index number, to allow slicing # # subarray[1::2] = subarray[1::2] + 1 # # # add to list with corrected values, python index style # # nested_list_m_borders_python_indexstyle.append(list(subarray)) # # # Creating new column, which contains start and end-points # #df_TMSEG["Membrane_Borders"] = nested_list_m_borders_python_indexstyle # # df_TMSEG["Membrane_Borders"] = nested_list_of_membrane_borders # # # Creating new column, which contains the number of TMDS # #df_TMSEG["number_of_TMDs"] = df_TMSEG.Membrane_Borders.apply(lambda x: len(x) / 2) # # df_TMSEG["TM_indices"] = df_TMSEG["Membrane_Borders"].apply(lambda x: tuple(zip(x[::2], x[1::2]))) # create a list of [TM01, TM02, TM03, etc. long_list_of_TMDs = [] for i in range(1, 50): long_list_of_TMDs.append("TM{:02d}".format(i)) ## for the .set_value function, set dtype as object df_TMSEG["list_of_TMDs"] = "" df_TMSEG["list_of_TMDs"].astype(object) sys.stdout.write('slicing TMD and nonTMD sequences:\n') for n, acc in enumerate(df_TMSEG.index): # get nested tuple of TMDs nested_tup_TMs = df_TMSEG.loc[acc, "TM_indices"] # slice long list of TMD names to get an appropriate list for that protein [TM01, TM02, TM03, etc. len_nested_tup_TMs = len(nested_tup_TMs) list_of_TMDs = long_list_of_TMDs[:len_nested_tup_TMs] # add that list to the dataframe (could also be added as a stringlist, but that's irritating somehow) #df_TMSEG.loc[acc, 'list_of_TMDs'] = list_of_TMDs df_TMSEG.set_value(acc, "list_of_TMDs", list_of_TMDs) # set seq for slicing full_seq = df_TMSEG.loc[acc, "full_seq"] # topo = dft.loc[acc, "Topology"] # iterate through all the TMDs of that protein, slicing out the sequences for i, TMD in enumerate(list_of_TMDs): TMD = list_of_TMDs[i] start, end = nested_tup_TMs[i] # with UniProt indexing, need to slice with -1, not like python index style df_TMSEG.loc[acc, "%s_start" % TMD] = start df_TMSEG.loc[acc, "%s_end" % TMD] = end # for python indexing of the TMD rather than uniprot, the start should be minus 1 python_indexing_tuple = (start - 1, end) df_TMSEG.loc[acc, "%s_seq" % TMD] = utils.slice_with_listlike(full_seq, python_indexing_tuple) df_TMSEG.loc[acc, "%s_seqlen" % TMD] = len(df_TMSEG.loc[acc, "%s_seq" % TMD]) # dft.loc[acc, TMD + "_top"] = utils.slice_with_listlike(topo, tup) #DEPRECATED, ONLY REINSTATE IF YOU REALLY WANT TMSEG SP DEFINITIONS TO STAY # # add signal peptides and their corresponding values to list_of_TMDs # if analyse_sp == True: # if type(df_parsed.loc[acc, 'SP01_seq']) == str: # list_of_TMDs.append('SP01') # df_TMSEG.set_value(acc, "list_of_TMDs", list_of_TMDs) # # code necessary for TMSEG signal peptides - depreciated by MO 20.04.2017 # SiPe_indices = df_TMSEG.loc[acc, 'SiPe_indices'] # if SiPe_indices != []: # df_TMSEG.loc[acc, 'SP01_start'] = SiPe_indices[0] # df_TMSEG.loc[acc, 'SP01_end'] = SiPe_indices[-1] # df_TMSEG.loc[acc, 'SP01_seq'] = full_seq[SiPe_indices[0]:SiPe_indices[-1]+1] # list_of_TMDs.append('SP01') # df_TMSEG.set_value(acc, "list_of_TMDs", list_of_TMDs) if n % 50 == 0 and n != 0: sys.stdout.write(". 
") sys.stdout.flush() if n % 500 == 0: sys.stdout.write("\n") sys.stdout.flush() # slice out the nonTM segments with a function # note that for some reason, this is very slow after merging the dataframes df_TMSEG = slice_nonTMD_in_prot_list(df_TMSEG) #df_TOP = pd.merge(df_parsed, df_TMSEG, how="left", left_on=True, suffixes=('_list_parsed', ""))# left_index=True, right_index=False, df_TOP = df_parsed.merge(df_TMSEG, how="left", suffixes=('_list_parsed', "")) # left_index=True, right_index=False, # actually, I'd prefer to keep these for troubleshooting purposes # cols_to_drop = ['M_indices', 'SiPe_indices', 'Membrane_Borders', 'TM_indices'] # df_TMSEG.drop(cols_to_drop, axis=1, inplace=True) elif os.path.isfile(TMSEG_top_txtoutput_path): df_PLS.loc["TMSEG_top_txtoutput_path", :] = ("exists", t) """ PARSE DATA WITH THE FOLLOWING FORMAT, proteins listed one after each other IMPORTANT : this format is sub-optimal, because the sequences come from uniprot, and the predictions from TMPRED Can only be trusted when they are from the same date: best to use TMPRED output which also contains the orig sequence. --- ID: A4ZUB1 # TRANSMEM 6 18 4 # TRANSMEM 50 67 7 SIG: SIGNAL 1 22 {ECO:0000255}. TMH: TRANSMEM 53 69 Helical. {ECO:0000255}. --- """ # if the regions column in the lists tab is "TM01" instead of the usual "TM", take only the first TM take_only_the_first_TM = s["regions"] == "TM01" # create dataframe for text topology (dftt) dftt = pd.DataFrame() with open(TMSEG_top_txtoutput_path, "r") as f: acc_counter = 0 for line in f: if line[0:4] == "ID: ": acc = line.split(" ")[1].strip("\n") dftt.loc[acc_counter, "acc"] = acc acc_counter += 1 # reset the TM_counter TM_counter = 1 if line[0:10] == "# TRANSMEM": if TM_counter > 1: if take_only_the_first_TM: # skip to next line, as the first TM is already taken continue # split by tab split = line.split("\t") # the start is split[1] (end is not really necessary here) start = split[1] # note that acc_counter += 1 is already + 1 for the next protein, # therefore the dftt.loc is acc_counter-1 dftt.loc[acc_counter - 1, "TM{:02d}_start".format(TM_counter)] = start end = split[2] # note that acc_counter += 1 is already + 1 for the next protein, # therefore the dftt.loc is acc_counter-1 dftt.loc[acc_counter - 1, "TM{:02d}_end".format(TM_counter)] = end TM_counter += 1 # add an extra number_of_TMDs column, so they can be counted consistently dftt["number_of_TMDs"] = 0 for row in dftt.index: # drop TM02_start etc if they don't contain data subset = dftt.loc[row, :].dropna() # count columns n_cols = subset.shape[0] # calculate number of columns (TM01_start, TM01_end) /2, which is the number of TMDs number_of_TMDs = int((n_cols - 2) / 2) dftt.loc[row, "number_of_TMDs"] = number_of_TMDs dftt.loc[row, "list_of_TMDs"] = str(["TM{:02d}".format(n) for n in range(1, number_of_TMDs + 1)]) # set the acc as the index, so it can be merged with df_parsed dftt.set_index("acc", drop=False, inplace=True) # save temp csv with TMSEG output TMSEG_txtoutput_parsed_csv = TMSEG_top_txtoutput_path[:-4] + "TMSEG_txtoutput_parsed.csv" dftt.to_csv(TMSEG_txtoutput_parsed_csv) df = pd.merge(dftt, df_parsed, left_index=True, right_index=True, suffixes=('', '_list_parsed')) # convert from string to python list if isinstance(df['list_of_TMDs'][0], str): df['list_of_TMDs'] = df['list_of_TMDs'].dropna().apply(lambda x: ast.literal_eval(x)) # (re)define sequence length df["seqlen"] = df["full_seq"].str.len() # slice out all the TMD sequences for n, acc in enumerate(df.index): list_of_TMDs = 
df.loc[acc, "list_of_TMDs"] # add that list to the dataframe (could also be added as a stringlist, but that's irritating somehow) # set seq for slicing full_seq = df.loc[acc, "full_seq"] # iterate through all the TMDs of that protein, slicing out the sequences for i in range(len(list_of_TMDs)): TMD = list_of_TMDs[i] tuple_slice_indices = (df.loc[acc, "%s_start" % TMD], df.loc[acc, "%s_end" % TMD]) df.loc[acc, "%s_seq" % TMD] = utils.slice_with_listlike(full_seq, tuple_slice_indices) df.loc[acc, "%s_seqlen" % TMD] = len(df.loc[acc, "%s_seq" % TMD]) # add signal peptides and their corresponding values to list_of_TMDs if analyse_sp == True: if type(df_parsed.loc[acc, 'SP01_seq']) == str: list_of_TMDs.append('SP01') df.set_value(acc, "list_of_TMDs", list_of_TMDs) start = time.clock() # slice out the nonTM segments with a function # note that for some reason, this is very slow after merging the dataframes df_TOP = slice_nonTMD_in_prot_list(df) sys.stdout.write("\ntime taken : {:0.03f} s".format(time.clock() - start)) else: raise FileNotFoundError("None of the TMSEG combined output files were found.") # define number of TMDs (includes Signal peptides!) df_TOP["number_of_TMDs"] = df_TOP["list_of_TMDs"].dropna().apply(lambda x : len(x)) df_TOP['parse_TMSEG'] = True df_TOP.to_csv(pathdict["list_parsed_csv"], sep=",", quoting=csv.QUOTE_NONNUMERIC) logging.info("\n~~~~~~~~~~~~ parse_TMSEG_results_DEPRECATED is finished ~~~~~~~~~~~~") # def get_signal_peptide_indices_from_TMSEG_topo(Topo_data): # # as above for membrane regions # sp_list = [i for i, topo in enumerate(Topo_data) if topo == "S"] # find(Topo_data) # return sp_list
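# Hedged mini-example (illustrative, not part of the original module): the index extraction
# above uses UniProt-style 1-based positions, so a helix covering characters 5-9 of the
# topology string comes back as (5, 9) rather than the python slice (4, 9). The expected
# output below follows the docstring of get_TM_indices_from_TMSEG_topo_str and assumes the
# usual behaviour of korbinian's check_for_border helper.
if __name__ == "__main__":
    demo_topo = "1111" + "H" * 5 + "2222" + "H" * 3 + "111"
    print(get_TM_indices_from_TMSEG_topo_str(demo_topo))  # expected: ((5, 9), (14, 16))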
python
# -*- coding: utf-8 -*- """ Swets NDVI filtering author: Laust Færch @ DHI GRAS Created on 2020/08/29 Based on the article: Swets, D.L, Reed, B.C., Rowland, J.D., Marko, S.E., 1999. A weighted least-squares approach to temporal NDVI smoothing. In: Proceedings of the 1999 ASPRS Annual Conference, Portland, Oregon, pp. 526-536. """ import numpy as np from numba import jit from scipy import interpolate from scipy.ndimage.filters import generic_filter def _interpolate1d(data): # we need at least 2 non-nan elements if np.sum(~np.isnan(data)*1) < 2: return data good = ~np.isnan(data) # scipy interpolation finterp = interpolate.interp1d(np.flatnonzero(good), data[good], kind='linear', fill_value=np.nan, bounds_error=False) yinterp = finterp(np.arange(data.shape[0])) return yinterp # calculate the weight of each sample based on neighbours def _calc_weights(y): # for class local_peak, sloping_points, local_valley class_weights = [1.5, 0.5, 0.005] # weights defined in article left_shift = (y - np.roll(y, -1)) >= 0 right_shift = (y - np.roll(y, 1)) >= 0 peaks = left_shift & right_shift valleys = (~left_shift) & (~right_shift) slopes = (~peaks) & (~valleys) weights = np.zeros_like(y) weights[peaks] = class_weights[0] weights[slopes] = class_weights[1] weights[valleys] = class_weights[2] return weights # calculate the weighted linear regression @jit(nopython=True) def _calc_linreg(x, y, w): eps = 1e-8 sw = np.sum(w) sy = np.sum(w * y) sx = np.sum(w * x) sxy = np.sum(w * x * y) sx2 = np.sum(w * x ** 2) num = (sw * sxy - sx * sy) denom = (sw * sx2 - sx ** 2) if denom == 0: b = 0 else: b = num / denom a = (sy - b * sx) / (sw + eps) return a, b @jit(nopython=True) def _calc_linreg_wrapper_a(xyw): n = int(np.round(xyw.shape[0] / 3)) xyw = xyw.reshape(3, n) a, b = _calc_linreg(xyw[0, :], xyw[1, :], xyw[2, :]) return a @jit(nopython=True) def _calc_linreg_wrapper_b(xyw): n = int(np.round(xyw.shape[0] / 3)) xyw = xyw.reshape(3, n) a, b = _calc_linreg(xyw[0, :], xyw[1, :], xyw[2, :]) return b def _piecewise_linreg(xyw, window_width=3): n = int(np.round((window_width - 1) / 2)) piece_a = generic_filter(xyw.T, _calc_linreg_wrapper_a, size=(3, window_width), mode='nearest') piece_b = generic_filter(xyw.T, _calc_linreg_wrapper_b, size=(3, window_width), mode='nearest') # pad array piece_a = np.pad(piece_a[1, :], n, 'edge') piece_b = np.pad(piece_b[1, :], n, 'edge') smooth_a = np.convolve(piece_a, np.ones(window_width) / window_width, mode='valid') smooth_b = np.convolve(piece_b, np.ones(window_width) / window_width, mode='valid') y_est = smooth_b * xyw[:, 0] + smooth_a return y_est # Apply the swets filter on 1d array def _apply_swets1d(y): window_width = 3 # window width # dont smooth if all nan if np.all(np.isnan(y)): return y y_smoothed = np.zeros_like(y) y_smoothed[:] = np.nan x = np.flatnonzero(~np.isnan(y)) w = _calc_weights(y[x]) xyw = np.stack((x, y[x], w), axis=1) y_smoothed[x] = _piecewise_linreg(xyw, window_width) return y_smoothed # slow: 3024x3024x7 array takes approx 1 hour on my machine def swets_filter(data, do_interpolate=True, axis=2, invert=False): """ :param data: np.array((y,x,t)) NDVI raster timeseries. Each image in timeseries is shape y,x. Images are stacked along t (time dimension) nan values are initially ignored by swets. At the last step, nan values are replaced by linear interpolation :param do_interpolate: bool True if we want to apply interpolation of nan values :param axis: int Axis for the time-dimension in the array. 
Filtering/interpolation will be applied along this axis
    :param invert: bool
        Inversion of the data and output. This is useful for albedo, where bad cloud masking will
        force the values up, instead of down as in NDVI.
    :return: y_smoothed: np.array(y,x,t)
    """

    if invert:
        data = data * -1

    print('running filter...')
    y_smoothed = np.apply_along_axis(_apply_swets1d, axis, data)

    if do_interpolate:
        print('running interpolation...')
        y_smoothed = np.apply_along_axis(_interpolate1d, axis, y_smoothed)

    if invert:
        y_smoothed = y_smoothed * -1

    return y_smoothed.astype(data.dtype)
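# Hedged usage sketch (illustrative, not from the original module): apply the filter to a tiny
# synthetic NDVI stack with one simulated cloudy (NaN) observation. The array sizes and values
# are made up; the time axis is the last axis, matching the default axis=2.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    season = 0.3 + 0.4 * np.sin(np.linspace(0.0, np.pi, 8))            # one seasonal NDVI curve
    stack = np.tile(season, (4, 4, 1)).astype(np.float32)              # (y, x, t) = (4, 4, 8)
    stack += rng.normal(0.0, 0.02, stack.shape).astype(np.float32)     # a little noise
    stack[1, 1, 3] = np.nan                                            # simulate a cloudy pixel
    smoothed = swets_filter(stack, do_interpolate=True, axis=2)
    print(smoothed[1, 1, :])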
python
import json

from django import forms

from commons.file import file_utils
from commons import file_name_tools
from wikis.attachments import attachment_tool

from django.core.exceptions import ValidationError

from datetime import datetime

from uzuwiki.settings_static_file_engine import MAX_FILE_SIZE, MAX_FILE_SIZE_MESSAGE

from logging import getLogger
logger = getLogger(__name__)


def validate_attachment_file_size(file):
    # Check that the page size is not too large
    if file.size > MAX_FILE_SIZE:
        raise ValidationError(MAX_FILE_SIZE_MESSAGE)


class FileUploadFileForm(forms.Form):

    wiki_id = forms.CharField(label="WikiID")
    page_name = forms.CharField(label="Page name")
    file = forms.FileField(label="File", validators=[validate_attachment_file_size])

    def __init__(self, wiki_id, page_dirs, **kwargs):
        super().__init__(**kwargs)
        self.fields['wiki_id'].initial = wiki_id
        self.fields['page_name'].initial = file_name_tools.page_dirs_to_page_name(page_dirs)

    def put(self, request):
        file = request.FILES['file']

        # Get the file path for the page
        file_name = file_name_tools.page_name_to_file_name(self.data["page_name"])

        # Get the list of attached files
        attachment_file_data = attachment_tool.get_or_new(self.data["wiki_id"], file_name)

        # Save the attached file.
        record = file_utils.put_static_file(self.data["wiki_id"], file.name, file)
        attachment_file_data["attachments"].append(record)

        timestamp = datetime.now().isoformat()
        if "created_at" not in attachment_file_data:
            attachment_file_data["created_at"] = timestamp
        attachment_file_data["updated_at"] = timestamp

        # Save the attachment list (metadata).
        file_utils.put_file(self.data["wiki_id"], file_name + ".attachments.json", json.dumps(attachment_file_data))

    class Meta:
        fields = ("wiki_id", "page_name", "file")
python
# Jared Dyreson # CPSC 386-01 # 2021-11-29 # [email protected] # @JaredDyreson # # Lab 00-04 # # Some filler text # """ This module contains a basic "factory" pattern for generating new Display instances """ import abc import dataclasses import functools import json import pathlib import pygame import sys import time import typing from datetime import datetime from Invaders.UI.button import Button from Invaders.Dataclasses.direction import Direction from Invaders.Dataclasses.player import Player from Invaders.Dataclasses.point import Point class Display: """ Not fully virtual class for each display to inherit from """ def __init__( self, width: int = 900, height: int = 900, color=pygame.Color("black") ): # Checks for errors encountered _, num_fail = pygame.init() if num_fail > 0: print(f"[FATAL] There were {num_fail} error(s) produced!") sys.exit(-1) else: print("[+] Game successfully initialised") pygame.font.init() self.width, self.height = width, height self._display_surface = pygame.display.set_mode( (self.width, self.height), pygame.HWSURFACE ) self.last_position = Point(-1, -1) self.background_color = color self.fps_meter = pygame.time.Clock() @abc.abstractmethod def draw(self): """ Abstract draw class that must be implemented """ raise NotImplementedError( f"Display.draw isn abstract method and should not be invoked directly" ) def get_surface(self) -> pygame.Surface: """ Obtain the current display surface to a given window @return - pygame.Surface """ return self._display_surface def clear_text(self) -> None: """ This removes all text from the screen """ self._display_surface.fill(self.background_color) def draw_image(self, img_object: pygame.Surface, position: Point) -> None: """ Draw an image object (in the form of a surface) to the screen at a given position @param img_object : currently loaded pygame surface that represents an image @param position : Cartesian coordinates that represent where on the screen to be drawn to """ self._display_surface.blit(img_object, dataclasses.astuple(position)) def write_text( self, text: str, position: Point, font, color=pygame.Color("white") ) -> None: """ Write text to the screen, thanks to @NICE for helping with this! 
@param text - stuff we want to write to the screen @param position - where on the screen should it be writing to @param font - current font used @param color - selected color """ lines = [line.split(" ") for line in text.splitlines()] space = font.size(" ")[0] x, y = dataclasses.astuple(position) self.last_position = position for line in lines: for word in line: word_surface = font.render(word, 0, color) width, height = word_surface.get_size() if x + width >= self.width + 100: x = position.x y += height self._display_surface.blit(word_surface, (x, y)) x += width + space x = position.x y += height def center(self) -> Point: """ Obtain the center of the current scene @return Point """ return Point(self.width // 4, self.height // 4) class HighScoreDisplay(Display): """ Class that represents the high score display """ def __init__(self, current_score: int, username: str): super().__init__() self.title_position = Point(250, 45) self.logo_position = Point(575, 435) self.break_from_draw = False self.back_button = Button( self._display_surface, Point(300, 575), 300, 50, "Quit", functools.partial(self.terminate_intro), ) self.scoreboard_file = pathlib.Path("scores/scoreboard.json") self.scores = self.obtain_high_score_list(self.scoreboard_file) self.scores.append( Player(username, current_score, datetime.now().strftime("%m/%d/%Y %H:%M")) ) self.scores = sorted(self.scores, reverse=True) def obtain_high_score_list(self, path: pathlib.Path) -> typing.List[Player]: """ Read in high score list found in a json file that is then loaded and sorted by the score obtained by a given player @param path - path to JSON file @return - typing.List[Player] """ with open(path, "r") as fp: contents = json.load(fp) return [Player(**element) for element in contents["players"]] def terminate_intro(self): """ This terminates the current scene """ self.break_from_draw = True self._display_surface.fill(self.background_color) master = {"players": []} for score in self.scores: master["players"].append(dataclasses.asdict(score)) with open(self.scoreboard_file, "w") as fp: json.dump(master, fp) pygame.quit() sys.exit() def draw(self): """ Draw all the high scores in a row like manner """ draw_loop = True while draw_loop and not self.break_from_draw: for event in pygame.event.get(): if event.type == pygame.QUIT: self.terminate_intro() self.write_text( f"HIGH SCORES", self.title_position, pygame.font.SysFont( None, 50) ) self.write_text( self.back_button.contents, self.back_button.center(), pygame.font.SysFont(None, 30), ) self.back_button.draw() for i, score in enumerate(self.scores[0:5]): x, y = dataclasses.astuple(self.center()) self.write_text( score.name, Point((x - 50), y + i * 50), pygame.font.SysFont(None, 33), ) self.write_text( str(score.score), Point((x - 50) + 200, y + i * 50), pygame.font.SysFont(None, 33), ) self.write_text( score.tod, Point((x - 50) + 400, y + i * 50), pygame.font.SysFont(None, 33), ) pygame.display.flip()
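# Hedged helper sketch (illustrative only): obtain_high_score_list() above expects
# scores/scoreboard.json to hold {"players": [...]}, where each entry unpacks into the Player
# dataclass (constructed in this module as Player(name, score, tod)). The exact field names
# live in Invaders.Dataclasses.player, so treat that layout as an assumption; this just seeds
# an empty file so a first run does not fail on a missing scoreboard.
def write_empty_scoreboard(path=pathlib.Path("scores/scoreboard.json")):
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, "w") as fp:
        json.dump({"players": []}, fp)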
python
from intent_parser.server.intent_parser_server import app, IntentParserServer

import logging
import os

logger = logging.getLogger(__name__)

def _setup_logging():
    logging.basicConfig(level=logging.INFO,
                        format="[%(levelname)-8s] %(asctime)-24s %(filename)-23s line:%(lineno)-4s %(message)s")
    logger.addHandler(logging.FileHandler('intent_parser_server.log'))

# Switch flask to production mode using WSGI
def run():
    app.config['DEBUG'] = False
    _setup_logging()
    intent_parser_server = IntentParserServer(os.environ.get("SBH_USERNAME"),
                                              os.environ.get("SBH_PASSWORD"),
                                              os.environ.get("AUTHN"),
                                              '')
    intent_parser_server.initialize()
    return app

my_app = run()
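# Hedged usage note: with `my_app` exposed at module level, any WSGI server that can import it
# will serve the app in production mode; for example (the module name below is an assumption,
# adjust it to the actual file/package name):
#
#     gunicorn --bind 0.0.0.0:8080 intent_parser_wsgi:my_app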
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Aug 10 21:32:34 2019 @author: teddy """ from docx import Document from docx.shared import RGBColor #from docx.dml.color import ColorFormat def getText(filename): doc = Document(filename) fullText = [] for para in doc.paragraphs: fullText.append(para.text) hidden = '\n'.join(fullText) # hidden_word = list(hidden.split('\n')) document = Document() run = document.add_paragraph().add_run(hidden) font = run.font font.color.rgb = RGBColor(0, 0, 0) run = document.add_paragraph().add_run('Warning! Keyword stuffing!') font = run.font font.color.rgb = RGBColor(255, 0, 0) document.save('Show_All.docx') return 'Done!'
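# Hedged usage sketch: pass the path to any .docx file; the function writes "Show_All.docx"
# in the working directory and returns 'Done!'. The sample filename below is made up.
if __name__ == "__main__":
    print(getText("example_with_hidden_text.docx"))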
python
######################################################## # Copyright (c) 2015-2017 by European Commission. # # All Rights Reserved. # ######################################################## extends("BaseKPI.py") """ Consumption (Wh) ------------------ Indexed by * scope * delivery point * energy (including fuels) * test case * technology * asset name Return the annual volumes of energy demand for a given technology or contract. We here consider the flexible demand after optimization (using the consumption of the corresponding assets). """ TECHNO_TO_CONSIDER = DEMAND_TYPES|PRODUCTION_TYPES|{F_GAS_CONSUMPTION} def computeIndicator(context, indexFilter, paramsIndicator, kpiDict): timeStepDuration = getTimeStepDurationInHours(context) selectedScopes = indexFilter.filterIndexList(0, getScopes()) selectedDeliveryPoints = indexFilter.filterIndexList(1, getDeliveryPoints(context)) selectedEnergies = indexFilter.filterIndexList(2, getEnergies(context, includedEnergies=PRODUCED_ENERGIES)) selectedTestCases = indexFilter.filterIndexList(3, context.getResultsIndexSet()) selectedTechnologies = indexFilter.filterIndexList(4, getTechnologies(context, includeFinancialAssetTypes=True, includedTechnologies=TECHNO_TO_CONSIDER)) selectedAssets = indexFilter.filterIndexList(5, getAssets(context, includeFinancialAssets=True, includedTechnologies=TECHNO_TO_CONSIDER)) selectedAssetsByScope = getAssetsByScope(context, selectedScopes, includeFinancialAssets=True, includedAssetsName=selectedAssets, includedTechnologies = selectedTechnologies) consumptionDict = getConsumptionDict(context, selectedScopes, selectedTestCases, selectedEnergies, selectedDeliveryPoints, selectedAssetsByScope, indexByAsset=True) for index in consumptionDict: kpiDict[index] = consumptionDict[index].getSumValue() * timeStepDuration * MW_TO_W_CONVERSION return kpiDict def get_indexing(context) : baseIndexList = [getScopesIndexing(), getDeliveryPointsIndexing(context), getEnergiesIndexing(context, includedEnergies=PRODUCED_ENERGIES), getTestCasesIndexing(context), getTechnologiesIndexing(context, includeFinancialAssetTypes=True, includedTechnologies=TECHNO_TO_CONSIDER), getAssetsIndexing(context, includeFinancialAssets=True, includedTechnologies=TECHNO_TO_CONSIDER)] return baseIndexList IndicatorLabel = "Consumption" IndicatorUnit = "Wh" IndicatorDeltaUnit = "Wh" IndicatorDescription = "Total consumption per technology" IndicatorParameters = [] IndicatorIcon = "" IndicatorCategory = "Results" IndicatorTags = " Power System, Gas System, Power Markets "
python
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ ########################################################################## ZipMe : GAE Content Downloader ########################################################################## Just add this lines in your app.yaml : - url: /zipme script: zipme.py ########################################################################## """ # manatlan #from google.appengine.ext import webapp #from google.appengine.ext.webapp.util import run_wsgi_app import webapp2 as webapp from google.appengine.api import users #import wsgiref.handlers import zipfile import datetime import os,re,sys,stat from cStringIO import StringIO def createZip(path): def walktree (top = ".", depthfirst = True): names = os.listdir(top) if not depthfirst: yield top, names for name in names: try: st = os.lstat(os.path.join(top, name)) except os.error: continue if stat.S_ISDIR(st.st_mode): for (newtop, children) in walktree (os.path.join(top, name), depthfirst): yield newtop, children if depthfirst: yield top, names list=[] for (basepath, children) in walktree(path,False): for child in children: f=os.path.join(basepath,child) if os.path.isfile(f): f = f.encode(sys.getfilesystemencoding()) list.append( f ) f=StringIO() file = zipfile.ZipFile(f, "w") for fname in list: nfname=os.path.join(os.path.basename(path),fname[len(path)+1:]) file.write(fname, nfname , zipfile.ZIP_DEFLATED) file.close() f.seek(0) return f class ZipMaker(webapp.RequestHandler): def get(self): if users.is_current_user_admin(): folder = os.path.dirname(__file__) self.response.headers['Cache-Control'] = 'public, max-age=60' # self.response.headers['Last-Modified'] = lastmod.strftime("%a, %d %b %Y %H:%M:%S GMT") expires = datetime.datetime.now() + datetime.timedelta(minutes=1) self.response.headers['Expires'] = expires.strftime("%a, %d %b %Y %H:%M:%S GMT") ffdate = datetime.datetime.now() fdate = ffdate.strftime("%d-%b-%Y_%H-%M-%S") self.response.headers['Content-Type'] ='application/zip; name="zipme_%s_%s.zip"' % (fdate, os.path.basename(folder)) self.response.headers['Content-Disposition'] = 'attachment; filename="zipme_%s_%s.zip"' % (fdate, os.path.basename(folder)) fid=createZip(folder) while True: buf=fid.read(2048) if buf=="": break self.response.out.write(buf) fid.close() else: self.response.headers['Content-Type'] = 'text/html' self.response.out.write("<a href=\"%s\">You must be admin</a>." % users.create_login_url("/zipme")) #def main(): app = webapp.WSGIApplication( [('/zipme', ZipMaker)], debug=False) # wsgiref.handlers.CGIHandler().run(application) # run_wsgi_app(application) #if __name__ == "__main__": # main()
python
""" Models for mongo database """ # from pymongo.write_concern import WriteConcern from pymodm import MongoModel, fields class Testing(MongoModel): onefield = fields.CharField() # NOTE: do not touch connection here, see experiments/mongo.py # class Meta: # connection_alias = 'test' # # write_concern = WriteConcern(j=True) # FIXME: two fields are missing in ExternalAccounts class wf_do(MongoModel): dc_identifier = fields.CharField() dc_title = fields.CharField() dc_subject = fields.CharField() dc_creator = fields.CharField() dc_contributor = fields.CharField() dc_publisher = fields.CharField() dc_type = fields.CharField() dc_format = fields.CharField() dc_date = fields.DateTimeField() dc_coverage_x = fields.FloatField() dc_coverage_y = fields.FloatField() dc_coverage_z = fields.FloatField() dc_coverage_t_min = fields.DateTimeField() dc_coverage_t_max = fields.DateTimeField() dcterms_available = fields.DateTimeField() dcterms_dateAccepted = fields.DateTimeField() dc_rights = fields.CharField() dcterms_isPartOf = fields.CharField() fileId = fields.CharField() irods_path = fields.CharField() # class Meta: # write_concern = WriteConcern(j=True) # connection_alias = MYDB
python
# Generated by Django 2.2.1 on 2019-05-10 22:15

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('wells', '0082_auto_20190510_0000'),
        ('wells', '0082_merge_20190510_1926'),
    ]

    operations = [
    ]
python
from unittest import TestCase

from semanticpy.vector_space import VectorSpace
from nose.tools import *


class TestSemanticPy(TestCase):

    def setUp(self):
        self.documents = ["The cat in the hat disabled", "A cat is a fine pet ponies.",
                          "Dogs and cats make good pets.", "I haven't got a hat."]

    def it_should_search_test(self):
        vectorSpace = VectorSpace(self.documents)

        eq_(vectorSpace.search(["cat"]), [0.14487566959813258, 0.1223402602604157,
                                          0.07795622058966725, 0.05586504042763477])

    def it_should_find_return_similarity_rating_test(self):
        vectorSpace = VectorSpace(self.documents)

        eq_(vectorSpace.related(0), [1.0, 0.9922455760198575,
                                     0.08122814162371816, 0.0762173599906487])
python
#! /usr/bin/env python

from yices import *

cfg = Config()
cfg.default_config_for_logic('QF_BV')
ctx = Context(cfg)

bv32_t = Types.bv_type(32)
x = Terms.new_uninterpreted_term(bv32_t, 'x')
y = Terms.new_uninterpreted_term(bv32_t, 'y')
zero = Terms.bvconst_integer(32, 0)

fmla0 = Terms.bvsgt_atom(x, zero)
fmla1 = Terms.bvsgt_atom(y, zero)
fmla2 = Terms.bvslt_atom(Terms.bvadd(x, y), x)

ctx.assert_formulas([fmla0, fmla1, fmla2])

status = ctx.check_context()
if status == Status.SAT:
    model = Model.from_context(ctx, 1)
    model_string = model.to_string(80, 100, 0)
    print(model_string)
    xval = model.get_value(x)
    yval = model.get_value(y)
    print('x = {0}\ny = {1}'.format(xval, yval))

cfg.dispose()
ctx.dispose()
Yices.exit()
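# Editor's note (added comment, not in the original script): the three assertions
# require x > 0 and y > 0 under signed comparison while x + y < x, which can only
# hold when the 32-bit addition wraps around; any model printed above is therefore
# a concrete witness of signed integer overflow.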
python
"""A virtual pumpkin which flash neopixels and play sound""" import random import math import time import board import digitalio import audioio import busio import adafruit_vl53l0x import adafruit_thermistor import neopixel ######################### # -- slide switch to enable/disable running loop slide_switch = digitalio.DigitalInOut(board.SLIDE_SWITCH) ######################### # -- Audio setup spkren = digitalio.DigitalInOut(board.SPEAKER_ENABLE) spkren.switch_to_output() spkren.value = 0 audioout = audioio.AudioOut(board.SPEAKER) laugh1 = audioio.WaveFile(open("laugh1.wav", "rb")) laugh2 = audioio.WaveFile(open("laugh2.wav", "rb")) laughs = [laugh1, laugh2] music1 = audioio.WaveFile(open("thriller16k.wav", "rb")) music2 = audioio.WaveFile(open("ghostbusters16k.wav", "rb")) musics = [music1, music2] # -- intialise random generator temp = adafruit_thermistor.Thermistor(board.TEMPERATURE, 10000, 10000, 25, 3950) seed = int(math.modf(temp.temperature)[0]*10000000) random.seed(seed) ######################### # -- Distance sensor i2c = busio.I2C(board.SCL, board.SDA) vl53 = adafruit_vl53l0x.VL53L0X(i2c) ######################### # -- neopixels pixels = neopixel.NeoPixel(board.NEOPIXEL, 10) ORANGE = (255, 75, 0) COLORS = [(0, 0, 0), (255, 0, 0), (255, 255, 0), (0, 255, 0), (0, 255, 255), (0, 0, 255), (255, 0, 255), (255, 255, 255)] MAXBRIGHT = 0.7 pixels.brightness = 0.0 pixels.fill(ORANGE) ######################### # -- animation 1 def anim1(ao): pixels.fill(ORANGE) while not ao or ao.playing: pixels.brightness = MAXBRIGHT time.sleep(0.15) pixels.brightness = 0.0 time.sleep(0.2) def anim2(ao): pixels.fill(COLORS[0]) while not ao or ao.playing: pix1 = random.randrange(10) pix2 = random.randrange(10) while pix2 == pix1: pix2 = random.randrange(10) pix3 = random.randrange(10) while pix3 == pix1 or pix3 == pix2: pix3 = random.randrange(10) pixels[pix1] = COLORS[random.randrange(1, 8)] pixels[pix2] = COLORS[random.randrange(1, 8)] pixels[pix3] = COLORS[random.randrange(1, 8)] pixels.brightness = MAXBRIGHT time.sleep(0.2) pixels.brightness = MAXBRIGHT pixels[pix1] = COLORS[0] pixels[pix2] = COLORS[0] pixels[pix3] = COLORS[0] ######################### # -- Main loop def pumpkin(): # -- Wait for trigger print("WAITING TRIGGER") distance = 1000000 while distance > 1000: time.sleep(1) distance = vl53.range print("Distance: ", distance) random.randrange(5) # -- Play random laugh laugh = random.randrange(len(laughs)) print("laugh: ", laugh) audioout.play(laughs[laugh]) anim1(audioout) # -- Play random music music = random.randrange(len(musics)) print("music: ", music) audioout.play(musics[music]) anim2(audioout) print("completed") time.sleep(10) while slide_switch.value: pumpkin()
python
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import os import json import ctypes as ct from .._constants import VSCODE_CREDENTIALS_SECTION def _c_str(string): return ct.c_char_p(string.encode("utf-8")) class _SECRET_SCHEMA_ATTRIBUTE(ct.Structure): _fields_ = [ ("name", ct.c_char_p), ("type", ct.c_uint), ] class _SECRET_SCHEMA(ct.Structure): _fields_ = [ ("name", ct.c_char_p), ("flags", ct.c_uint), ("attributes", _SECRET_SCHEMA_ATTRIBUTE * 2), ] _PSECRET_SCHEMA = ct.POINTER(_SECRET_SCHEMA) try: _libsecret = ct.cdll.LoadLibrary("libsecret-1.so.0") _libsecret.secret_password_lookup_sync.argtypes = [ ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_void_p, ] _libsecret.secret_password_lookup_sync.restype = ct.c_char_p _libsecret.secret_password_free.argtypes = [ct.c_char_p] except OSError: _libsecret = None def _get_user_settings_path(): app_data_folder = os.environ["HOME"] return os.path.join(app_data_folder, ".config", "Code", "User", "settings.json") def _get_user_settings(): path = _get_user_settings_path() try: with open(path) as file: data = json.load(file) environment_name = data.get("azure.cloud", "Azure") return environment_name except IOError: return "Azure" def _get_refresh_token(service_name, account_name): if not _libsecret: return None err = ct.c_int() attributes = [_SECRET_SCHEMA_ATTRIBUTE(_c_str("service"), 0), _SECRET_SCHEMA_ATTRIBUTE(_c_str("account"), 0)] pattributes = (_SECRET_SCHEMA_ATTRIBUTE * 2)(*attributes) schema = _SECRET_SCHEMA() pschema = _PSECRET_SCHEMA(schema) ct.memset(pschema, 0, ct.sizeof(schema)) schema.name = _c_str("org.freedesktop.Secret.Generic") # pylint: disable=attribute-defined-outside-init schema.flags = 2 # pylint: disable=attribute-defined-outside-init schema.attributes = pattributes # pylint: disable=attribute-defined-outside-init p_str = _libsecret.secret_password_lookup_sync( pschema, None, ct.byref(err), _c_str("service"), _c_str(service_name), _c_str("account"), _c_str(account_name), None, ) if err.value == 0: return p_str.decode("utf-8") return None def get_credentials(): try: environment_name = _get_user_settings() credentials = _get_refresh_token(VSCODE_CREDENTIALS_SECTION, environment_name) return credentials except Exception: # pylint: disable=broad-except return None
python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Northwestern University.
#
# invenio-subjects-mesh is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.

"""MeSH subjects_mesh.yaml writer."""

from pathlib import Path

import yaml


def write_yaml(
        entries,
        filepath=Path(__file__).parent / "vocabularies/subjects_mesh.yaml"):
    """Write the MeSH yaml file.

    Return filepath to written file.
    """
    with open(filepath, "w") as f:
        yaml.dump(list(entries), f)

    return filepath
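# Editor's sketch (added example, not part of the original module): write_yaml()
# accepts any iterable of YAML-serialisable entries and returns the path it wrote to;
# the entry and /tmp output path below are hypothetical.
if __name__ == "__main__":
    out = write_yaml(
        [{"id": "D000001", "subject": "Calcimycin"}],
        filepath=Path("/tmp/subjects_mesh.yaml"),
    )
    print(out)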
python
# Copyright (c) 2013 eBay Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The QoS Specs Implementation""" from oslo_db import exception as db_exc from oslo_log import log as logging from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.i18n import _, _LE, _LW from cinder.volume import volume_types LOG = logging.getLogger(__name__) CONTROL_LOCATION = ['front-end', 'back-end', 'both'] def create(context, name, specs=None): """Creates qos_specs. :param specs dictionary that contains specifications for QoS e.g. {'consumer': 'front-end', 'total_iops_sec': 1000, 'total_bytes_sec': 1024000} """ consumer = specs.get('consumer') if consumer: # If we need to modify specs, copy so we don't cause unintended # consequences for the caller specs = specs.copy() del specs['consumer'] values = dict(name=name, consumer=consumer, specs=specs) LOG.debug("Dict for qos_specs: %s", values) qos_spec = objects.QualityOfServiceSpecs(context, **values) qos_spec.create() return qos_spec def update(context, qos_specs_id, specs): """Update qos specs. :param specs: dictionary that contains key/value pairs for updating existing specs. e.g. {'consumer': 'front-end', 'total_iops_sec': 500, 'total_bytes_sec': 512000,} """ LOG.debug('qos_specs.update(): specs %s' % specs) try: qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, qos_specs_id) if 'consumer' in specs: qos_spec.consumer = specs['consumer'] # If we need to modify specs, copy so we don't cause unintended # consequences for the caller specs = specs.copy() del specs['consumer'] # Update any values in specs dict qos_spec.specs.update(specs) qos_spec.save() except db_exc.DBError: LOG.exception(_LE('DB error:')) raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id, qos_specs=specs) return qos_spec def delete(context, qos_specs_id, force=False): """Marks qos specs as deleted. 'force' parameter is a flag to determine whether should destroy should continue when there were entities associated with the qos specs. force=True indicates caller would like to mark qos specs as deleted even if there was entities associate with target qos specs. Trying to delete a qos specs still associated with entities will cause QoSSpecsInUse exception if force=False (default). """ if qos_specs_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) qos_spec = objects.QualityOfServiceSpecs.get_by_id( context, qos_specs_id) qos_spec.destroy(force) def delete_keys(context, qos_specs_id, keys): """Marks specified key of target qos specs as deleted.""" if qos_specs_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, qos_specs_id) # Previous behavior continued to delete keys until it hit first unset one, # so for now will mimic that. 
In the future it would be useful to have all # or nothing deletion of keys (or at least delete all set keys), # especially since order of keys from CLI to API is not preserved currently try: for key in keys: try: del qos_spec.specs[key] except KeyError: raise exception.QoSSpecsKeyNotFound( specs_key=key, specs_id=qos_specs_id) finally: qos_spec.save() def get_associations(context, qos_specs_id): """Get all associations of given qos specs.""" try: types = objects.VolumeTypeList.get_all_types_for_qos(context, qos_specs_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) msg = _('Failed to get all associations of ' 'qos specs %s') % qos_specs_id LOG.warning(msg) raise exception.CinderException(message=msg) result = [] for vol_type in types: result.append({ 'association_type': 'volume_type', 'name': vol_type.name, 'id': vol_type.id }) return result def associate_qos_with_type(context, specs_id, type_id): """Associate qos_specs with volume type. Associate target qos specs with specific volume type. :param specs_id: qos specs ID to associate with :param type_id: volume type ID to associate with :raises VolumeTypeNotFound: if volume type doesn't exist :raises QoSSpecsNotFound: if qos specs doesn't exist :raises InvalidVolumeType: if volume type is already associated with qos specs other than given one. :raises QoSSpecsAssociateFailed: if there was general DB error """ try: get_qos_specs(context, specs_id) res = volume_types.get_volume_type_qos_specs(type_id) if res.get('qos_specs', None): if res['qos_specs'].get('id') != specs_id: msg = (_("Type %(type_id)s is already associated with another " "qos specs: %(qos_specs_id)s") % {'type_id': type_id, 'qos_specs_id': res['qos_specs']['id']}) raise exception.InvalidVolumeType(reason=msg) else: db.qos_specs_associate(context, specs_id, type_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) LOG.warning(_LW('Failed to associate qos specs ' '%(id)s with type: %(vol_type_id)s'), dict(id=specs_id, vol_type_id=type_id)) raise exception.QoSSpecsAssociateFailed(specs_id=specs_id, type_id=type_id) def disassociate_qos_specs(context, specs_id, type_id): """Disassociate qos_specs from volume type.""" try: get_qos_specs(context, specs_id) db.qos_specs_disassociate(context, specs_id, type_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) LOG.warning(_LW('Failed to disassociate qos specs ' '%(id)s with type: %(vol_type_id)s'), dict(id=specs_id, vol_type_id=type_id)) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=type_id) def disassociate_all(context, specs_id): """Disassociate qos_specs from all entities.""" try: get_qos_specs(context, specs_id) db.qos_specs_disassociate_all(context, specs_id) except db_exc.DBError: LOG.exception(_LE('DB error:')) LOG.warning(_LW('Failed to disassociate qos specs %s.'), specs_id) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=None) def get_all_specs(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all non-deleted qos specs.""" return objects.QualityOfServiceSpecsList.get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def get_qos_specs(ctxt, spec_id): """Retrieves single qos specs by id.""" if spec_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) if ctxt is None: ctxt = context.get_admin_context() return objects.QualityOfServiceSpecs.get_by_id(ctxt, spec_id)
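# Editor's sketch (added example, not part of the original module): typical use of
# this qos_specs API from other Cinder code. The spec values and the volume type id
# are hypothetical, and an admin request context plus a configured database are
# required, so the calls are shown commented out.
#
# ctxt = context.get_admin_context()
# qos = create(ctxt, 'gold', {'consumer': 'front-end', 'total_iops_sec': 1000})
# associate_qos_with_type(ctxt, qos.id, some_volume_type_id)  # hypothetical type id
# print(get_associations(ctxt, qos.id))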
python
# Task 3. Variant 30.
# Write a program that prints the name "Ilya Arnoldovich Fayzilberg" and asks for his pen name.
# Shemenev A.V
# 14.03.16

print("The hero of our program is Ilya Arnoldovich Fayzilberg")
print("Under what name do we know this person? Your answer:")
x = input()
print("That's right, his pen name is " + x)
input()
python
from models.generators.fcn32s import FCN32s
from models.generators.fcn16s import FCN16s
from models.generators.fcn8s import FCN8s
python
from django.conf.urls import url

from messenger import views

urlpatterns = [
    url(r'^messenger/send', views.send, name='send'),
    url(r'^messenger/read', views.read, name='read')
]
python
#!/usr/bin/env python3

import argparse
import progressbar
import requests
import os
import sys

sourceapp = "AS50559-DIVD_NL"


def rest_get(call, resource, retries=3):
    url = "https://stat.ripe.net/data/{}/data.json?resource={}&sourceapp={}".format(call, resource, sourceapp)
    try:
        response = requests.get(url, timeout=1)
    except KeyboardInterrupt:
        sys.exit()
    except Exception:
        if retries > 0:
            return rest_get(call, resource, retries - 1)
        else:
            return "Timeout"
    reply = response.json()
    return reply['data']


def get_info(line):
    # Get abuse info
    # https://stat.ripe.net/data/abuse-contact-finder/data.<format>?<parameters>
    abuse_reply = rest_get("abuse-contact-finder", line)
    contacts = abuse_reply['anti_abuse_contacts']['abuse_c']
    if len(contacts) > 0:
        abuse_email = contacts[0]['email']
    else:
        abuse_email = "Not found"

    # Get ASN
    # https://stat.ripe.net/data/network-info/data.json?resource=194.5.73.5
    asn_reply = rest_get("network-info", line)
    asn = asn_reply['asns'][0]
    prefix = asn_reply['prefix']

    # Get ASN info
    if asn in asns:
        asn_data = asns[asn]
    else:
        asn_data = rest_get("as-overview", asn)
        asns[asn] = asn_data
    holder = asn_data['holder']

    # Get geolocation
    if prefix in locations:
        location_data = locations[prefix]
    else:
        location_data = rest_get("maxmind-geo-lite", prefix)
    city = location_data['located_resources'][0]['locations'][0]['city']
    country = location_data['located_resources'][0]['locations'][0]['country']

    print('"{}","{}","{}","{}","{}","{}","{}"'.format(line, abuse_email, prefix, asn, holder, country, city))
    if args.output:
        outfile.write('"{}","{}","{}","{}","{}","{}","{}"\n'.format(line, abuse_email, prefix, asn, holder, country, city))
        outfile.flush()


parser = argparse.ArgumentParser(description='Get abuse and location information for IPs', allow_abbrev=False)
parser.add_argument('input', type=str, metavar="INPUT.txt", nargs="*", default="/dev/stdin",
                    help="Either a list of files with one IP address per line or an IP address [default: stdin]")
parser.add_argument('--output', "-o", type=str, metavar="OUTPUT.csv", help="output csv file")
args = parser.parse_args()

if isinstance(args.input, str):
    files = [args.input]
else:
    files = args.input

asns = {}
locations = {}

if args.output:
    outfile = open(args.output, "w")
if args.output:
    outfile.write('ip,abuse,prefix,asn,holder,country,city\n')
print('ip,abuse,prefix,asn,holder,country,city')

for f in files:
    if os.path.isfile(f):
        file = open(f, "r")
        for line in file.readlines():
            line = line.strip()
            try:
                get_info(line)
            except Exception:
                print("Error with '{}'".format(line), file=sys.stderr)
    else:
        try:
            get_info(f)
        except Exception:
            print("Error with '{}'".format(f), file=sys.stderr)
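# Editor's note (example invocation, not in the original): the positional argument
# accepts either file names or bare IP addresses, so the script can be run as, e.g.
#     ./lookup.py ips.txt -o report.csv
#     ./lookup.py 193.0.6.139
# The script and file names here are hypothetical; output is CSV on stdout and,
# with -o, also written to the given file.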
python
# Every line of these files consists of an image, i.e. 785 numbers between 0 and 255. size 28 x 28
# first no is label
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import backend as K

batch_size = 128
num_classes = 10
epochs = 12

# data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
img_rows, img_cols = 28, 28

# plot the first image in the dataset
# plt.imshow(x_train[0])

# reshape for channel last
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)

# one-hot encoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# create model
model = Sequential()

# layers
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=input_shape))  # kernel size is filter matrix size
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))  # softmax makes output sum up to 1

# compile model using accuracy to measure model performance
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

# train the model
seqModel = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=3)

# evaluate model (note: this evaluates on the training set, not the test set)
model.evaluate(x_train, y_train, batch_size=128)  # returns loss and accuracy

# predict first 10 images in the test set
print(model.predict(x_test[:10]))

# actual results for first 10 images in test set
print(y_test[:10])

# save model as hdf5 file
from tensorflow.keras.models import load_model
model.save('mnist.h5')
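# Editor's sketch (added example, not in the original script): reload the saved
# 'mnist.h5' model and run inference on one test image to confirm the round trip
# works; the sample already has the (1, 28, 28, 1) shape used above.
reloaded = load_model('mnist.h5')
sample = x_test[:1]
print("predicted digit:", reloaded.predict(sample).argmax(axis=1)[0])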
python
#!/usr/bin/env python """ @package mi.dataset.parser.test @file marine-integrations/mi/dataset/parser/test/ @author Jeff Roy @brief Test code for a wc_sbe_cspp data parser wc_sbe_cspp is based on cspp_base.py test_wc_sbe_cspp.py fully tests all of the capabilities of the base parser. That level of testing is omitted from this test suite """ import os from nose.plugins.attrib import attr from mi.core.log import get_logger from mi.dataset.test.test_parser import ParserUnitTestCase from mi.dataset.dataset_parser import DataSetDriverConfigKeys from mi.dataset.driver.wc_sbe.cspp.resource import RESOURCE_PATH from mi.core.exceptions import RecoverableSampleException from mi.dataset.parser.cspp_base import \ METADATA_PARTICLE_CLASS_KEY, \ DATA_PARTICLE_CLASS_KEY from mi.dataset.parser.wc_sbe_cspp import \ WcSbeCsppParser, \ WcSbeEngRecoveredDataParticle, \ WcSbeEngTelemeteredDataParticle, \ WcSbeMetadataRecoveredDataParticle, \ WcSbeMetadataTelemeteredDataParticle, \ WcSbeDataTypeKey log = get_logger() @attr('UNIT', group='mi') class WcSbeCsppParserUnitTestCase(ParserUnitTestCase): """ wc_sbe_cspp Parser unit test suite """ def setUp(self): ParserUnitTestCase.setUp(self) self.config = { WcSbeDataTypeKey.WC_SBE_CSPP_TELEMETERED: { DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: { METADATA_PARTICLE_CLASS_KEY: WcSbeMetadataTelemeteredDataParticle, DATA_PARTICLE_CLASS_KEY: WcSbeEngTelemeteredDataParticle, } }, WcSbeDataTypeKey.WC_SBE_CSPP_RECOVERED: { DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: { METADATA_PARTICLE_CLASS_KEY: WcSbeMetadataRecoveredDataParticle, DATA_PARTICLE_CLASS_KEY: WcSbeEngRecoveredDataParticle, } }, } def test_simple(self): """ Read test data and pull out data particles Assert that the results are those we expected. """ file_path = os.path.join(RESOURCE_PATH, '11079364_WC_SBE.txt') stream_handle = open(file_path, 'rU') # Note: since the recovered and telemetered parser and particles are common # to each other, testing one is sufficient, will be completely tested # in driver tests parser = WcSbeCsppParser(self.config.get(WcSbeDataTypeKey.WC_SBE_CSPP_RECOVERED), stream_handle, self.exception_callback) particles = parser.get_records(20) log.debug("*** test_simple Num particles %s", len(particles)) self.assert_particles(particles, '11079364_WC_SBE_recov.yml', RESOURCE_PATH) stream_handle.close() def test_simple_telem(self): """ Read test data and pull out data particles Assert that the results are those we expected. """ file_path = os.path.join(RESOURCE_PATH, '11079364_WC_SBE.txt') stream_handle = open(file_path, 'rU') # Note: since the recovered and telemetered parser and particles are common # to each other, testing one is sufficient, will be completely tested # in driver tests parser = WcSbeCsppParser(self.config.get(WcSbeDataTypeKey.WC_SBE_CSPP_TELEMETERED), stream_handle, self.exception_callback) particles = parser.get_records(20) log.debug("*** test_simple Num particles %s", len(particles)) self.assert_particles(particles, '11079364_WC_SBE_telem.yml', RESOURCE_PATH) # check the first particle, which should be the metadata particle (recovered) stream_handle.close() def test_get_many(self): """ Read test data and pull out multiple data particles at one time. Assert that the results are those we expected. 
""" file_path = os.path.join(RESOURCE_PATH, '11079364_WC_SBE.txt') stream_handle = open(file_path, 'rU') # Note: since the recovered and telemetered parser and particles are common # to each other, testing one is sufficient, will be completely tested # in driver tests parser = WcSbeCsppParser(self.config.get(WcSbeDataTypeKey.WC_SBE_CSPP_TELEMETERED), stream_handle, self.exception_callback) # try to get 2000 particles, there are more data records in the file # so should get 2000 including the meta data particles = parser.get_records(2000) log.debug("*** test_get_many Num particles %s", len(particles)) self.assertEqual(len(particles), 2000) stream_handle.close() def test_bad_data(self): """ Ensure that bad data is skipped when it exists. """ # the first useful record in this file is corrupted and will be ignored # we expect to get the metadata particle with the # timestamp from the 2nd data record and all of the valid engineering # data records file_path = os.path.join(RESOURCE_PATH, '11079364_BAD_WC_SBE.txt') stream_handle = open(file_path, 'rU') log.info(self.exception_callback_value) parser = WcSbeCsppParser(self.config.get(WcSbeDataTypeKey.WC_SBE_CSPP_RECOVERED), stream_handle, self.exception_callback) parser.get_records(20) self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException)) stream_handle.close()
python
from datetime import datetime from os.path import dirname, join import pytest # noqa from city_scrapers_core.constants import ADVISORY_COMMITTEE, PASSED from city_scrapers_core.utils import file_response from freezegun import freeze_time from city_scrapers.spiders.cuya_audit import CuyaAuditSpider test_response = file_response( join(dirname(__file__), "files", "cuya_audit.html"), url="http://bc.cuyahogacounty.us/en-US/Audit-Committee.aspx", ) test_detail_response = file_response( join(dirname(__file__), "files", "cuya_audit_detail.html"), url="http://bc.cuyahogacounty.us/en-US/AuditCommitteeMtg-090519.aspx", ) spider = CuyaAuditSpider() freezer = freeze_time("2019-09-19") freezer.start() parsed_items = [item for item in spider.parse(test_response)] parsed_item = [item for item in spider._parse_detail(test_detail_response)][0] freezer.stop() def test_count(): assert len(parsed_items) == 5 def test_title(): assert parsed_item["title"] == "Audit Committee" def test_description(): assert parsed_item["description"] == "" def test_start(): assert parsed_item["start"] == datetime(2019, 9, 5, 9, 0) def test_end(): assert parsed_item["end"] == datetime(2019, 9, 5, 23, 30) def test_time_notes(): assert parsed_item["time_notes"] == "" def test_id(): assert parsed_item["id"] == "cuya_audit/201909050900/x/audit_committee" def test_status(): assert parsed_item["status"] == PASSED def test_location(): assert parsed_item["location"] == spider.location def test_source(): assert ( parsed_item["source"] == "http://bc.cuyahogacounty.us/en-US/AuditCommitteeMtg-090519.aspx" ) def test_links(): assert parsed_item["links"] == [ { "href": "http://bc.cuyahogacounty.us/ViewFile.aspx?file=9RBPLk%2fewj3DObNVTeTkjQ%3d%3d", # noqa "title": "Agenda", } ] def test_classification(): assert parsed_item["classification"] == ADVISORY_COMMITTEE def test_all_day(): assert parsed_item["all_day"] is False
python
from django.db import models
from djchoices import DjangoChoices, ChoiceItem


class UserStatuses(DjangoChoices):
    enter_address = ChoiceItem()
    enter_name = ChoiceItem()
    start = ChoiceItem()
    allowed = ChoiceItem()
    enter_org_name = ChoiceItem()
    enter_role = ChoiceItem()
    allowed_group = ChoiceItem()
    approve = ChoiceItem()
    enter_user_org = ChoiceItem()
    # enter_family = ChoiceItem()


class Organisation(models.Model):
    vk_id = models.IntegerField()
    name = models.CharField(max_length=100, null=True, blank=True)


class UserData(models.Model):
    address = models.CharField(max_length=100, null=True, blank=True)
    name = models.CharField(max_length=100, null=True, blank=True)
    vk_id = models.IntegerField(null=True)
    is_organisation = models.BooleanField(default=False)
    approved = models.BooleanField(default=None, null=True)
    organisation = models.ForeignKey(to=Organisation, on_delete=models.SET_NULL, null=True)

    def __str__(self):
        return f"{self.name} vk.com/id{self.vk_id}\n"


class VkSession(models.Model):
    user = models.ForeignKey(UserData, null=True, blank=True, on_delete=models.SET_NULL)
    status = models.CharField(max_length=100, choices=UserStatuses.choices, default=UserStatuses.start)
python
my_list = [3, 4, 6, 2]

# Note: the extra parentheses around the string do not make a tuple, so list()
# splits the string into its characters: ['H', 'e', 'l', 'l', 'o', ' ', 'W', ...].
my_list1 = list("Hello World")
python
from itertools import product as product
from math import sqrt as sqrt

import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Function

# from utils.box_utils import decode, nms
# from utils.config import Config


class L2Norm(nn.Module):
    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.gama = scale
        self.eps = 1e-10
        # one learnable scale per channel (torch.tensor(self.n_channels) would
        # create a single 0-dim value and break the unsqueeze calls in forward)
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gama)

    def forward(self, x):
        # L2 norm over the channel dimension
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        # x / norm
        x = torch.div(x, norm)
        # rescale each channel by its learned weight
        out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
        return out
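# Editor's sketch (added example, not part of the original module): apply L2Norm to a
# dummy feature map and check that the shape is preserved; the channel count and
# scale (512, 20) follow the usual SSD configuration and are assumptions here.
if __name__ == "__main__":
    layer = L2Norm(512, 20)
    feats = torch.randn(2, 512, 38, 38)
    out = layer(feats)
    print(out.shape)  # torch.Size([2, 512, 38, 38])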
python
from django.shortcuts import render from django.template import RequestContext, loader from django.http import HttpResponse, HttpResponseRedirect from django.conf import settings from django.http import JsonResponse import simplejson as json import requests from django import template import urllib from collections import defaultdict from itertools import islice from django.shortcuts import render from django.http import HttpResponse,HttpResponseRedirect from django.template import loader from os import listdir from os.path import isfile, join, isdir from .models import Jobs from django.db import IntegrityError import time import json from django.http import JsonResponse from django.shortcuts import redirect import requests from urllib.parse import urlparse from datetime import datetime import os import os.path from django.utils.dateparse import parse_datetime from datetime import timedelta from datetime import datetime from django.urls import reverse import pytz import boto3 import base64 from operator import itemgetter import subprocess from boto.s3.connection import S3Connection from boto.s3.key import Key import os import zipfile import uuid from django.http import HttpResponse import io from django import template from boto.s3.connection import S3Connection from boto.s3.key import Key from console.models import * from urllib.parse import urlparse def credentials_check(f): def wrap(request, *args, **kwargs): count = credentials.objects.filter().count() count1 = github.objects.filter().count() count2 = local_directory.objects.filter().count() if (count != 0 and count1 != 0 and count2 != 0): result = credentials.objects.raw('SELECT * FROM console_credentials LIMIT 1;') global AWS_ACCESS_KEY_ID global AWS_SECRET_ACCESS_KEY global bucket_name AWS_ACCESS_KEY_ID = result[0].aws_access_key_id AWS_SECRET_ACCESS_KEY = result[0].aws_secret_access_key bucket_name = result[0].bucket_name else: return HttpResponseRedirect("/settings/") return f(request, *args, **kwargs) wrap.__doc__ = f.__doc__ wrap.__name__ = f.__name__ return wrap @credentials_check def drive(request): try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' global proc if 'start' in request.POST: try: exist_controller = controller.objects.latest('id') controller_mode = exist_controller.training except: controller_mode = '' if controller_mode != '': proc = subprocess.Popen(["python", updated_local_directory_name+"/manage.py", "drive",controller_mode]) else: proc = subprocess.Popen(["python", updated_local_directory_name+"/manage.py", "drive"]) elif 'stop' in request.POST: try: proc.kill() except: print("No proc is running") template = loader.get_template('console/home.html') return HttpResponse(template.render({}, request)) def kill_proc(request): try: autopilot_proc.kill() except: print("no autopilot proc") return HttpResponseRedirect('/jobs/') def save_local_directory(request): message = "" try: credential = credentials.objects.latest('id') aws_key_id = credential.aws_access_key_id except: aws_key_id = '' if request.method == "POST": local_directory_name = request.POST.get('local_directory') if local_directory_name != None: try: exist_local_directory= local_directory.objects.latest('id') local_directory.objects.filter(id=exist_local_directory.id).update(name=local_directory_name) message = "Local Directory has been updated" except: new_local_directory = local_directory(name=local_directory_name) new_local_directory.save() message = 
"Local Directory has been saved" try: updated_name = github.objects.latest('id') updated_repo_name = updated_name.name updated_extension = updated_name.extension except: updated_repo_name = '' updated_extension = '' try: updated_controller = controller.objects.latest('id') updated_training_controller = updated_controller.training except: updated_training_controller = '' try: updated_local_directory = local_directory.objects.latest('id') updated_local_directory_name = updated_local_directory.name except: updated_local_directory_name = '' template = loader.get_template('console/local_directory.html') return HttpResponse(template.render({'status': message,'local_directory': updated_local_directory_name, 'training_controller': updated_training_controller, 'updated_extension': updated_extension, 'updated_repo': updated_repo_name, 'AWS_KEY': aws_key_id}, request)) @credentials_check def display_data_folders(request): try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' list_data = os.popen('ls '+updated_local_directory_name+'/data/').read() directories = list_data.split() dataFolders = [] for dir in directories: direcPath = os.popen('echo '+updated_local_directory_name+'/data/' + dir).read() direcPath = direcPath.split() if os.path.isdir(direcPath[0]): try: if os.path.exists(direcPath[0] + '/donkeycar-console.json') == True: print("it exists") else: with open(direcPath[0] + '/donkeycar-console.json', 'w') as outfile: noImages = os.popen('ls -l '+updated_local_directory_name+'/data/' + dir + ' | grep .jpg | wc -l').read() noImages.strip() print(noImages) noImages = int(noImages) year = os.popen('date +"%Y"').read() time = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $8}'").read() month = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $6}'").read() day = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $7}'").read() date = year + " " + month + " " + day + " " + time d = datetime.strptime(date, '%Y\n %b\n %d\n %H:%M\n') d = d.strftime('%Y-%m-%d %H:%M') json.dump({"name": dir, "no": noImages, "date": d, "remarks": []}, outfile) with open(direcPath[0] + '/donkeycar-console.json', 'r') as result: data = json.load(result) dataFolders.append(data) except json.JSONDecodeError: os.system('sudo rm -r ' + direcPath[0] + '/donkeycar-console.json') dataFolders.sort(key=itemgetter('date'), reverse=True) iterator = islice(dataFolders, 10) for item in iterator: print(item) dir = item["name"] direcPath = os.popen('echo ' + updated_local_directory_name + '/data/' + dir).read() direcPath = direcPath.split() with open(direcPath[0] + '/donkeycar-console.json', 'r') as outfile: data = json.load(outfile) tmp = data["no"] noImages = os.popen('ls -l ' + updated_local_directory_name + '/data/' + dir + ' | grep .jpg | wc -l').read() data["no"] = noImages with open(direcPath[0] + '/donkeycar-console.json', 'w') as jsonFile: json.dump(data, jsonFile) print(dataFolders) context = { 'result': dataFolders, } return render(request, 'console/data_folders.html', context) @credentials_check def getfiles(request): try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' result = request.GET.get('dir', '') print(result) zip_io = io.BytesIO() direcPath = os.popen('echo '+updated_local_directory_name+'/data/').read() direcPath = 
direcPath.split() with zipfile.ZipFile(zip_io, mode='w', compression=zipfile.ZIP_DEFLATED) as backup_zip: for f in os.listdir(direcPath[0] + result): backup_zip.write(direcPath[0] + result + '/' + f) response = HttpResponse(zip_io.getvalue(), content_type='application/x-zip-compressed') response['Content-Disposition'] = 'attachment; filename=%s' % result + ".zip" response['Content-Length'] = zip_io.tell() return response @credentials_check def delete_data(request): name= request.GET.get('name', '') try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' os.system('sudo rm -r '+updated_local_directory_name+'/data/'+name) return HttpResponseRedirect('/data/') @credentials_check def delete_data_folder_comment(request): comment= request.GET.get('comment', '') name= request.GET.get('name', '') try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' if (id and name): direcPath = os.popen('echo '+updated_local_directory_name+'/data/' + name).read() direcPath = direcPath.split() with open(direcPath[0] + '/donkeycar-console.json', 'r') as outfile: data = json.load(outfile) with open(direcPath[0] + '/donkeycar-console.json', 'w') as writefile: (data['remarks']).remove(comment) json.dump(data, writefile) return HttpResponseRedirect('/data/') @credentials_check def add_data_folder_comment(request): data_name = request.POST['name'] print(data_name) data_comment = request.POST['var'] try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' direcPath = os.popen('echo '+updated_local_directory_name+'/data/' + data_name).read() direcPath = direcPath.split() with open(direcPath[0] + '/donkeycar-console.json', 'r') as outfile: data = json.load(outfile) print(data['remarks']) print(len(data['remarks'])) with open(direcPath[0] + '/donkeycar-console.json', 'w') as writefile: (data['remarks']).append(data_comment) json.dump(data, writefile) return HttpResponse('success') def sizify(value): if value < 512000: value = value / 1024.0 ext = 'kb' elif value < 4194304000: value = value / 1048576.0 ext = 'mb' else: value = value / 1073741824.0 ext = 'gb' return '%s %s' % (str(round(value, 2)), ext) @credentials_check def list_jobs(request): jobs = Jobs.objects.order_by('-date')[:30] for job in jobs: import re list = re.findall("'(.*?)'", job.tubs) job.tubs = list if job.size != 'N/A': job.size=sizify(int(job.size)) context = { 'models': jobs, } template = loader.get_template('console/jobs.html') return HttpResponse(template.render(context, request)) def grouping(l): d = defaultdict(list) print(d) for key,value, role in l: print("key_l",key,value,role) new_key = str(key) + "?" 
+ value d[new_key].append(role) for new_key in d: d[new_key] = ' | '.join(d[new_key]) print(d.items()) return list(d.items()) def save_controller_settings(request): message = "" try: credential = credentials.objects.latest('id') aws_key_id = credential.aws_access_key_id except: aws_key_id = '' if request.method == "POST": training_controller = request.POST.get('training_controller') if training_controller != None : try: exist_controller = controller.objects.latest('id') controller.objects.filter(id=exist_controller.id).update(training=training_controller) message = "Controller settings have been updated" except: new_controller = controller( training=training_controller) new_controller.save() message = "Controller settings have been updated" try: updated_name = github.objects.latest('id') updated_repo_name = updated_name.name updated_extension = updated_name.extension except: updated_repo_name = '' updated_extension = '' try: updated_controller = controller.objects.latest('id') updated_training_controller = updated_controller.training except: updated_training_controller = '' try: updated_local_directory = local_directory.objects.latest('id') updated_local_directory_name = updated_local_directory.name except: updated_local_directory_name = '' template = loader.get_template('console/controller.html') return HttpResponse(template.render({'local_directory': updated_local_directory_name,'controller_message': message,'training_controller':updated_training_controller,'updated_extension':updated_extension,'updated_repo':updated_repo_name,'AWS_KEY':aws_key_id}, request)) @credentials_check def list_jobs_success(request): jobs = Jobs.objects.order_by('-date')[:30] for job in jobs: import re list = re.findall("'(.*?)'", job.tubs) job.tubs = list if job.size != 'N/A': job.size=sizify(int(job.size)) context = { 'models': jobs, 'success': "New Job Added !" } template = loader.get_template('console/jobs.html') return HttpResponse(template.render(context, request)) def save_credentials(request): message = "" if request.method == "POST": id = uuid.uuid4() bucket_name = "donkeycar-console-"+ str(id) UPDATED_AWS_ACCESS_KEY_ID = request.POST.get('key1') UPDATED_AWS_SECRET_ACCESS_KEY = request.POST.get('key2') if ((UPDATED_AWS_ACCESS_KEY_ID != None) & (UPDATED_AWS_SECRET_ACCESS_KEY != None)): client = boto3.client('s3', aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID, aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY) sts = boto3.client('sts', aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID, aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY) try: response = sts.get_caller_identity() try: client.create_bucket(Bucket=bucket_name) conn = S3Connection(aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID, aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY) bucket = conn.get_bucket(bucket_name) k = bucket.new_key('models/') k.set_contents_from_string('') k = bucket.new_key('data/') k.set_contents_from_string('') count = credentials.objects.filter().count() if count == 0: credential = credentials( aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID, aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY, bucket_name= bucket_name) credential.save() message = "Credentials have been updated !" else: credential = credentials.objects.latest('id') credentials.objects.filter(id=credential.id).update(aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID, aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY) message = "Credentials have been updated !" 
except Exception as e1: print(e1) message = "Can't Create S3 bucket: Check IAM Permissions and re-enter your credentials" except Exception as e: print(e) message = "Incorrect Credentials" try: credential = credentials.objects.latest('id') aws_key_id = credential.aws_access_key_id except: aws_key_id = '' try: updated_name = github.objects.latest('id') updated_repo_name = updated_name.name updated_extension = updated_name.extension except: updated_repo_name = '' updated_extension = '' try: updated_controller = controller.objects.latest('id') updated_training_controller = updated_controller.training except: updated_training_controller = '' try: updated_local_directory = local_directory.objects.latest('id') updated_local_directory_name = updated_local_directory.name except: updated_local_directory_name = '' template = loader.get_template('console/credentials.html') return HttpResponse(template.render({'message': message,'local_directory': updated_local_directory_name,'training_controller':updated_training_controller,'AWS_KEY': aws_key_id,'updated_repo':updated_repo_name,'updated_extension':updated_extension}, request)) def save_github_repo(request): message = "" try: credential = credentials.objects.latest('id') aws_key_id = credential.aws_access_key_id except: aws_key_id = '' if request.method == "POST": repo = request.POST.get('repo') extension = request.POST.get('extension') print(repo) result = os.system('git ls-remote ' + repo) if result == 0: if repo != None: try: exist_repo = github.objects.latest('id') github.objects.filter(id=exist_repo.id).update(name=repo) github.objects.filter(id=exist_repo.id).update(extension=extension) message = "Github Repository has been updated" except: new_github = github(name=repo,extension=extension) new_github.save() message = "Github Repository has been updated" else: message = "Please enter a git repository" try: updated_name = github.objects.latest('id') updated_repo_name = updated_name.name updated_extension = updated_name.extension except: updated_repo_name = '' updated_extension = '' try: updated_controller = controller.objects.latest('id') updated_training_controller = updated_controller.training except: updated_training_controller = '' try: updated_local_directory = local_directory.objects.latest('id') updated_local_directory_name = updated_local_directory.name except: updated_local_directory_name = '' template = loader.get_template('console/github.html') return HttpResponse(template.render({'status': message,'local_directory': updated_local_directory_name,'training_controller':updated_training_controller,'updated_extension':updated_extension,'updated_repo':updated_repo_name,'AWS_KEY':aws_key_id}, request)) @credentials_check def delete_remark(request): id = request.GET.get('id', '') remarks.objects.filter(id=id).delete() return HttpResponseRedirect('/jobs/') @credentials_check def delete_job(request): id= request.GET.get('id', '') Jobs.objects.filter(id=id).delete() return HttpResponseRedirect('/jobs/') def add_remark(request): job_id = request.POST['id'] print(job_id) comment = request.POST['var'] print(comment) remark = remarks(remark=comment) remark.save() job = Jobs.objects.get(id=job_id) job.Comments.add(remark) return HttpResponse('success') def verify_logs(id,AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,bucket_name): conn = S3Connection(aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) bucket = conn.get_bucket(bucket_name) s3 = boto3.resource('s3',aws_access_key_id=AWS_ACCESS_KEY_ID, 
aws_secret_access_key=AWS_SECRET_ACCESS_KEY) for key in bucket.list(): if key.name == 'job_'+ str(id) +'.log': url_to_download= "https://s3.amazonaws.com/"+bucket_name+"/"+ key.name Jobs.objects.filter(id=id).update(log_url=url_to_download) object_acl = s3.ObjectAcl(bucket_name, key.name) object_acl.put(ACL='public-read') if key.name == 'job_'+ str(id) +'_commands.log': url1_to_download= "https://s3.amazonaws.com/"+bucket_name+"/"+ key.name Jobs.objects.filter(id=id).update(commands_log_url=url1_to_download) object_acl = s3.ObjectAcl(bucket_name, key.name) object_acl.put(ACL='public-read') @credentials_check def cancel_request(request): client = boto3.client('ec2', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1') id = request.GET.get('id', '') job = Jobs.objects.get(id=id) client.terminate_instances( InstanceIds=[ job.instance_id ] ) Jobs.objects.filter(id=id).update(state='Canceled') Jobs.objects.filter(id=id).update(duration='0') return HttpResponseRedirect('/jobs/') def convert_timedelta(duration): days, seconds = duration.days, duration.seconds hours = days * 24 + seconds // 3600 minutes = (seconds % 3600) // 60 seconds = (seconds % 60) return hours, minutes, seconds @credentials_check def copy_local(request): id = request.GET.get('id', '') try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' path = os.popen('echo '+updated_local_directory_name+'/models/').read() path = path.split() try: updated_repo = github.objects.latest('id') extension = updated_repo.extension except: extension = '' if extension != '': model_name = 'job_' + str(id) + extension else: model_name = 'job_' + str(id) response_url = download_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,bucket_name) o = urlparse(response_url) key_path = o.path.split('/', 1)[1] s3 = boto3.resource('s3', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) if( os.path.exists(path[0]+model_name) == True ): print("it exists") else: s3.Object(bucket_name,key_path.split('/', 1)[1] + '/' + model_name).download_file(path[0] + model_name) return HttpResponseRedirect('/jobs/') def download_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,bucket_name): s3 = boto3.client('s3',aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1') url = '{}/{}/{}'.format(s3.meta.endpoint_url,bucket_name,'models') return url @credentials_check def autopilot(request): id = request.GET.get('id', '') try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' path = os.popen('echo '+updated_local_directory_name+'/models/').read() path = path.split() try: updated_repo = github.objects.latest('id') extension = updated_repo.extension except: extension = '' if extension != '': model_name = 'job_' + str(id) + extension else: model_name = 'job_' + str(id) job_name = 'job_' + str(id) response_url = download_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,bucket_name) o = urlparse(response_url) key_path = o.path.split('/', 1)[1] s3 = boto3.resource('s3', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) if (os.path.exists(path[0] + job_name) == True): print("it exists") else: s3.Object(bucket_name, key_path.split('/', 1)[1] + '/' + model_name).download_file( path[0] + job_name) global autopilot_proc autopilot_proc = 
subprocess.Popen(["python", updated_local_directory_name+"/manage.py", "drive", "--model", updated_local_directory_name+"/models/" + job_name]) return HttpResponseRedirect('/jobs/') def get_car_status_autopilot(request): try: poll = autopilot_proc.poll() if poll == None: response = 'Autopilot' else: response = '' except: response = '' return HttpResponse(response) def get_car_status_training(request): try: poll = proc.poll() if poll == None: response = 'Training' return HttpResponse(response) else: response = '' except: response = '' return HttpResponse(response) @credentials_check def home(request): template = loader.get_template('console/home.html') return HttpResponse(template.render({}, request)) def upload_to_s3(AWS_SECRET_ACCESS_KEY,AWS_ACCESS_KEY_ID,bucket_name): s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1') url = '{}/{}/{}'.format(s3.meta.endpoint_url, bucket_name, 'data') return url @credentials_check def create_job(request): try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' choices = ['g2.2xlarge', 'g2.8xlarge', 'p2.xlarge', 'p3.2xlarge', 'p3.8xlarge'] errorMessage = "" conn = S3Connection(aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) message = "" job_number = Jobs.objects.filter().count() if request.method == "POST": checked_data = request.POST.getlist('chk[]') instance_type = request.POST.get('choice') availability_zone = request.POST.get('AZ') max_time = request.POST.get('max_time') request_time = request.POST.get('request_time') if max_time == '': max_time = 15 if request_time == '': request_time = 2 try: availability_zone = availability_zone.split() price = availability_zone[1] except: print("no availability") if len(checked_data) == 0 or int(max_time) >= 60: if len(checked_data) == 0 and int(max_time) >= 60: message = " No selected items and EC2 Termination Time maximum must be 60 minutes " elif len(checked_data) == 0: message = " No selected items" elif int(max_time) >= 60: message = "EC2 Termination Time maximum must be 60 minutes " else: job = Jobs( tubs=checked_data, state="Pending", job_number=job_number + 1, instance=instance_type, price=price, availability_zone=availability_zone[0], instance_max=max_time) job.save() selected_data = "" dataPath = os.popen('echo '+updated_local_directory_name+'/data/').read() dataPath = dataPath.split() for dir in checked_data: selected_data += " " + dir print(selected_data) if len(selected_data) != 0: try: updated_repo = github.objects.latest('id') extension= updated_repo.extension except: extension = '' if extension != '' : model_name = 'job_' + str(job.id)+ extension else: model_name = 'job_' + str(job.id) job_name = 'job_' + str(job.id) os.chdir(dataPath[0]) current_path = os.popen('pwd').read() print(current_path) os.system('tar -zcf job_' + str(job.id) + '.tar.gz ' + selected_data) tarfile_size = os.popen("ls -sh job_" + str(job.id) + ".tar.gz | awk '{print $1}'").read() print(tarfile_size) Jobs.objects.filter(id=job.id).update(tarfile_size=tarfile_size) current_path = os.popen('pwd').read() current_path = current_path.split() response_url = upload_to_s3(AWS_SECRET_ACCESS_KEY,AWS_ACCESS_KEY_ID,bucket_name) o = urlparse(response_url) path = o.path.split('/', 1)[1] s3 = boto3.resource('s3', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) tarfile_name = 'job_' + str(job.id) + 
'.tar.gz' s3.meta.client.upload_file(os.path.join(current_path[0], tarfile_name), bucket_name, path.split('/', 1)[1] + '/' + tarfile_name) if instance_type != '': termination_time = (Jobs.objects.get(id=job.id)).instance_max github_repo = github.objects.latest('id') try: request_id = launch_ec2_instance(model_name,job_name,AWS_SECRET_ACCESS_KEY,AWS_ACCESS_KEY_ID,github_repo.name,request_time,availability_zone[0],instance_type,termination_time,bucket_name) Jobs.objects.filter(id=job.id).update(request_id=request_id) except Exception as e: print(e) job.delete() Jobs.objects.filter(id=job.id).update(date=datetime.now()) else: errorMessage = " Enter an instance type " print(errorMessage) job.delete() os.system('rm -r job_' + str(job.id) + '.tar.gz ') return HttpResponseRedirect('/jobs/success/') list_data = os.popen('ls '+updated_local_directory_name+'/data/').read() directories = list_data.split() dataFolders = [] print(directories) for dir in directories: direcPath = os.popen('echo '+updated_local_directory_name+'/data/' + dir).read() direcPath = direcPath.split() if os.path.isdir(direcPath[0]): if os.path.exists(direcPath[0] + '/donkeycar-console.json') == True: with open(direcPath[0] + '/donkeycar-console.json', 'r') as outfile: data = json.load(outfile) print(data) tmp = data["no"] noImages = os.popen('ls -l ' + updated_local_directory_name + '/data/' + dir + ' | grep .jpg | wc -l').read() data["no"] = noImages with open(direcPath[0] + '/donkeycar-console.json', 'w') as jsonFile: json.dump(data, jsonFile) else: with open(direcPath[0] + '/donkeycar-console.json', 'w') as outfile: noImages = os.popen('ls -l '+updated_local_directory_name+'/data/' + dir + ' | grep .jpg | wc -l').read() noImages.strip() noImages = int(noImages) year = os.popen('date +"%Y"').read() time = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $8}'").read() month = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $6}'").read() day = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $7}'").read() date = year + " " + month + " " + day + " " + time d = datetime.strptime(date, '%Y\n %b\n %d\n %H:%M\n') d = d.strftime('%Y-%m-%d %H:%M') json.dump({"name": dir, "no": noImages, "date": d, "remarks": []}, outfile) with open(direcPath[0] + '/donkeycar-console.json', 'r') as result: data = json.load(result) dataFolders.append(data) dataFolders.sort(key=itemgetter('date'), reverse=True) jobs = Jobs.objects.order_by('-date')[:30] for job in jobs: if job.size != 'N/A': job.size = sizify(int(job.size)) context = { 'models': jobs, 'result': dataFolders, 'message': message, 'errorMessage': errorMessage, 'choices': choices, } return render(request, 'console/create_job.html',context) @credentials_check def update_status_by_id(request): client = boto3.client('ec2', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1') now = datetime.now(pytz.utc) id = request.GET.get('id', '') job = Jobs.objects.get(id=id) verify_logs(job.id, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,bucket_name) if job.request_id != "0": if now > job.date + timedelta(minutes=job.request_time): try: response = client.describe_spot_instance_requests( SpotInstanceRequestIds=[ job.request_id ] ) value = response['SpotInstanceRequests'][0]['Status']['Code'] Jobs.objects.filter(id=job.id).update(request_state=value) instance_id = response['SpotInstanceRequests'][0]['InstanceId'] 
Jobs.objects.filter(id=job.id).update(instance_id=instance_id) except Exception as e: print(e) now = datetime.now(pytz.utc) print("now", now) if job.state == 'Pending': if job.request_state == 'schedule-expired': Jobs.objects.filter(id=job.id).update(state='Failed') Jobs.objects.filter(id=job.id).update(duration='0') else: conn = S3Connection(aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) bucket = conn.get_bucket(bucket_name) for key in bucket.list('models'): name = key.name.split('/') print(key) date = key.last_modified print(date) print("job.date" + str(job.date)) try: updated_repo = github.objects.latest('id') extension = updated_repo.extension except: extension = '' if extension != '': model_name = 'job_' + str(job.id) + extension else: model_name = 'job_' + str(job.id) if name[1] == model_name: Jobs.objects.filter(id=job.id).update(state='succeeded') Jobs.objects.filter(id=job.id).update(size=key.size) duration = parse_datetime(date) - job.date hours, minutes, seconds = convert_timedelta(duration) time = str(minutes) + " m and " + str(seconds) + " s" print(time) Jobs.objects.filter(id=job.id).update(duration=time) elif now > job.date + timedelta(minutes=job.instance_max): Jobs.objects.filter(id=job.id).update(state='Failed') Jobs.objects.filter(id=job.id).update(duration='0') job = Jobs.objects.get(id=id) if job.request_state == 'instance-terminated-by-user' and job.state == 'Pending': Jobs.objects.filter(id=job.id).update(state='Failed') Jobs.objects.filter(id=job.id).update(duration='0') return HttpResponseRedirect('/jobs/') def launch_ec2_instance(model_name,job_name,AWS_SECRET_ACCESS_KEY,AWS_ACCESS_KEY_ID,github_repo,request_time,availability_zone,instance_type,termination_time,bucket_name): print("Launching Ec2 Instance") client = boto3.client('ec2', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1') print("Starting") tarfile = job_name + '.tar.gz' termination_time_s = str(termination_time) termination_time_before_minute = str(termination_time - 2) try: print("trying") print(github_repo) response = client.request_spot_instances( InstanceCount=1, Type='one-time', ValidFrom=datetime.now(pytz.utc) + timedelta(seconds=4), ValidUntil=datetime.now(pytz.utc) + timedelta(minutes=int(request_time)), LaunchSpecification={ 'ImageId': 'ami-f3a9c18c', 'InstanceType': instance_type, 'Placement': { 'AvailabilityZone': availability_zone }, 'BlockDeviceMappings': [ {'DeviceName': '/dev/sda1', 'Ebs': { 'DeleteOnTermination': True, 'VolumeSize': 40 } } ], 'UserData': base64.b64encode(b'''#!/bin/bash timestamp() { date +"%T" } echo " Start The Script " timestamp echo "sudo halt" | at now + ''' + (termination_time_s).encode('utf8') + b''' minutes export LC_ALL="en_US.UTF-8" export LC_CTYPE="en_US.UTF-8" source /home/ubuntu/env/bin/activate echo " Configure AWS credentials " timestamp aws --version aws configure set aws_access_key_id ''' + (AWS_ACCESS_KEY_ID).encode('utf8') + b''' && aws configure set aws_secret_access_key ''' + ( AWS_SECRET_ACCESS_KEY).encode('utf8') + b''' && aws s3 cp s3://''' + (bucket_name).encode( 'utf8') + b'''/data/''' + tarfile.encode('utf8') + b''' /home/ubuntu echo "aws s3 cp /var/log/cloud-init-output.log s3://''' + ( bucket_name).encode( 'utf8') + b'''/''' + job_name.encode( 'utf8') + b'''_commands.log && aws s3 cp /''' + job_name.encode( 'utf8') + b'''.log s3://''' + (bucket_name).encode( 'utf8') + b'''/" | at now + ''' + (termination_time_before_minute).encode('utf8') + b''' 
minutes echo " Cloning the github repository " timestamp git clone -b master --single-branch ''' + (github_repo).encode('utf8') + b''' donkeycar echo " Install the dependencies " timestamp pip install -e donkeycar echo " Create d2 repository " timestamp donkey createcar --path ~/d2 donkey createcar ~/d2 echo " Uncompress the tar file " timestamp sudo tar -zxf /home/ubuntu/''' + tarfile.encode('utf8') + b''' -C /root/d2/data export PATH=/usr/local/cuda/bin:$PATH export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64" export CUDA_HOME=/usr/local/cuda echo " Install tensorflow-gpu " timestamp pip install tensorflow-gpu==1.10 echo " Start Training " timestamp python ~/d2/manage.py train --model /root/d2/models/''' + model_name.encode( 'utf8') + b''' >> ''' + job_name.encode('utf8') + b'''.log echo " Finish Training " timestamp echo " Upload the model to S3 " timestamp aws s3 cp /root/d2/models/''' + model_name.encode('utf8') + b''' s3://''' + ( bucket_name).encode('utf8') + b'''/models/ echo " Finish uploading the model to S3 " timestamp aws s3 cp /''' + job_name.encode('utf8') + b'''.log s3://''' + ( bucket_name).encode('utf8') + b'''/ aws s3 cp /var/log/cloud-init-output.log s3://''' + (bucket_name).encode( 'utf8') + b'''/''' + job_name.encode('utf8') + b'''_commands.log echo "sudo halt" | at now + 1 minutes ''').decode("ascii") } ) request_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId'] except Exception as e: print(e) return request_id @credentials_check def list_jobs_timeout(request): jobs = Jobs.objects.order_by('-date')[:30] for job in jobs: import re list = re.findall("'(.*?)'", job.tubs) job.tubs = list if job.size != 'N/A': job.size=sizify(int(job.size)) context = { 'models': jobs, 'timeout': "No Job was created ! 
Please Try again" } template = loader.get_template('console/jobs.html') return HttpResponse(template.render(context, request)) @credentials_check def delete_empty_folders(request): try: Local_directory = local_directory.objects.latest('id') updated_local_directory_name = Local_directory.name except: updated_local_directory_name = '' list_data = os.popen('ls ' + updated_local_directory_name + '/data/').read() directories = list_data.split() print(directories) for dir in directories: direcPath = os.popen('echo ' + updated_local_directory_name + '/data/' + dir).read() direcPath = direcPath.split() if os.path.isdir(direcPath[0]): noImages = os.popen( 'ls -l ' + updated_local_directory_name + '/data/' + dir + ' | grep .jpg | wc -l').read() noImages.strip() print(noImages) noImages = int(noImages) if noImages == 0 : os.system('sudo rm -r '+direcPath[0]) return HttpResponseRedirect('/data/') def check_availability_zone(instance_type): client = boto3.client('ec2', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY,region_name='us-east-1') response = client.describe_spot_price_history( InstanceTypes=[ instance_type ], ProductDescriptions=[ 'Linux/UNIX', ], MaxResults=6, ) List= response['SpotPriceHistory'] List.sort(key=itemgetter('SpotPrice')) models = { az['AvailabilityZone'] for az in List} listAZ = list(models) newlist=[] for l in listAZ: listA = [x for x in List if x['AvailabilityZone']== l] newlist.append(l + " " + listA[0]['SpotPrice'] + "/H") return newlist def display_local_repo(request): choices_dir = os.popen("find ~/ -type d -exec test -e '{}'/models -a -e '{}'/data \; -print").read() list = choices_dir.split('\n') dire ="" for l in list : dire = dire + l + "##" return HttpResponse(dire) def display_availability(request,name): response = check_availability_zone(name) return HttpResponse(response)
python
# Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. # This file should only be present in a source checkout, and never in a release # package, to allow us to determine whether we're running in a development or # production mode.
python
import math if __name__ != "common": from objects import glob import time import json from common.ripple import userUtils def load_achievement_data(ACHIEVEMENT_BASE, ACHIEVEMENT_KEYS, ACHIEVEMENT_STRUCT): LENGTH = 0 ACHIEVEMENTS = [] for struct in ACHIEVEMENT_STRUCT: LENGTH = max(LENGTH, len(ACHIEVEMENT_KEYS[struct]) * ACHIEVEMENT_STRUCT[struct]) entry = {x:0 for x in ACHIEVEMENT_STRUCT} for i in range(LENGTH): for struct in ACHIEVEMENT_STRUCT: entry[struct] = math.floor(i / ACHIEVEMENT_STRUCT[struct]) % len(ACHIEVEMENT_KEYS[struct]) format_data = {x:ACHIEVEMENT_KEYS[x][entry[x]] for x in ACHIEVEMENT_KEYS} ACHIEVEMENTS.append({x: ACHIEVEMENT_BASE[x].format_map(format_data) for x in ACHIEVEMENT_BASE}) return ACHIEVEMENTS, LENGTH def get_usercache(userID): user_cache = glob.redis.get("lets:user_achievement_cache:{}".format(userID)) if user_cache is None: user_cache = {} else: user_cache = json.loads(user_cache.decode("utf-8")) if "version" not in user_cache: # Load from sql database user_cache["version"] = userUtils.getAchievementsVersion(userID) db_achievements = [x["achievement_id"] for x in glob.db.fetchAll("SELECT achievement_id FROM users_achievements WHERE user_id = %s", [userID])] if "achievements" in user_cache: user_cache["achievements"] += db_achievements else: user_cache["achievements"] = db_achievements # Remove duplicates after merge user_cache["achievements"] = list(set(user_cache["achievements"])) return user_cache def add_pending_achievement(userID, achievementID): user_cache = get_usercache(userID) if len([x for x in user_cache["achievements"] if x in [achievementID, -achievementID]]) > 0: print("Tried to add achievement:{} to user:{}, but failed due to duplicate entry.".format(achievementID, userID)) return user_cache["achievements"].append(-achievementID) # Remove duplicates after merge user_cache["achievements"] = list(set(user_cache["achievements"])) glob.redis.set("lets:user_achievement_cache:{}".format(userID), json.dumps(user_cache), 1800) userUtils.unlockAchievement(userID, achievementID)
python
from hashlib import pbkdf2_hmac, md5 import binascii from Crypto.Cipher import AES import os import sys def generate_key(title_id, pwd): # remove 00 padding from title id title_idGen = title_id[2:] # get secret string, append title id, and convert to binary string secret = binascii.unhexlify('fd040105060b111c2d49' + title_idGen) # get md5 hash of secret hashed_secret = md5(secret).digest() # key is a pbkdf2 hash with sha1 base using hashed_secret as salt and 20 iterations non_encrypted_key = binascii.hexlify(pbkdf2_hmac('sha1', pwd.encode(), hashed_secret, 20, 16)) title_id += '0000000000000000' title_id = binascii.unhexlify(title_id) ckey = binascii.unhexlify(get_ckey()) title_key = binascii.unhexlify(non_encrypted_key) encryptor = AES.new(key=ckey, mode=AES.MODE_CBC, IV=title_id) encrypted_title_key = encryptor.encrypt(title_key) # return as hexstring return binascii.hexlify(encrypted_title_key) def get_ckey() -> str: if not os.path.exists('ckey.txt'): print('Common key was not found. Please create a file called ckey.txt and write the cmmon key in the first line.') sys.exit(0) with open('ckey.txt', 'r') as f: return f.readline().replace('\r', '').replace('\n', '') def verify_ckey(): if md5(get_ckey().upper().encode()).hexdigest() == '35ac5994972279331d97094fa2fb97fc': return True def main(tid, password='mypass'): return generate_key(tid, password);
python
# https://github.com/python-poetry/poetry/issues/11 import glob import os from distutils.command.build_ext import build_ext from distutils.core import Extension from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError def filter_extension_module(name, lib_objs, lib_headers): return Extension( "thumbor.ext.filters.%s" % name, ["thumbor/ext/filters/%s.c" % name] + lib_objs, libraries=["m"], include_dirs=["thumbor/ext/filters/lib"], depends=["setup.py"] + lib_objs + lib_headers, extra_compile_args=["-Wall", "-Wextra", "-Werror", "-Wno-unused-parameter"], ) def gather_filter_extensions(): files = glob.glob("thumbor/ext/filters/_*.c") lib_objs = glob.glob("thumbor/ext/filters/lib/*.c") lib_headers = glob.glob("thumbor/ext/filters/lib/*.h") return [ filter_extension_module(f[0:-2].split("/")[-1], lib_objs, lib_headers) for f in files ] class BuildFailed(Exception): pass class ExtBuilder(build_ext): # This class allows C extension building to fail. def run(self): try: build_ext.run(self) except (DistutilsPlatformError, FileNotFoundError): pass def build_extension(self, ext): try: build_ext.build_extension(self, ext) except ( CCompilerError, DistutilsExecError, DistutilsPlatformError, ValueError, ): pass def build(setup_kwargs): """Needed for the poetry building interface.""" if "CFLAGS" not in os.environ: os.environ["CFLAGS"] = "" setup_kwargs.update( dict( ext_modules=gather_filter_extensions(), cmdclass={"build_ext": ExtBuilder}, packages=["thumbor"], package_dir={"thumbor": "thumbor"}, include_package_data=True, package_data={"": ["*.xml"]}, ) )
python
# -*- coding: utf-8 -*- # Created by: Michael Lan import os import sys import re from PySide2.QtWidgets import QApplication, QMainWindow from PySide2 import QtGui, QtWidgets, QtCore from ui_main import Ui_Dialog from pyside_material import apply_stylesheet from dbconnection import connect, insert_data, close, validate_duplicate class MainWindow(QMainWindow): def __init__(self): super(MainWindow, self).__init__() self.ui = Ui_Dialog() self.ui.setupUi(self) self.name, self.dni, self.phone, self.addres, self.birth = ('', '', '', '', '') self.dni_register = '' self.path = 'dbCrokiAlitas.db' self.ui.txtName.installEventFilter(self) self.ui.txtDNI.installEventFilter(self) self.ui.txtPhone.installEventFilter(self) self.ui.txtAddres.installEventFilter(self) self.ui.txtBirth.installEventFilter(self) self.ui.btnSaveUser.clicked.connect(self.save_user) #---------------------------------------------------------------------- def eventFilter(self, obj, event): if event.type() == QtCore.QEvent.FocusOut: if self.ui.txtName is obj: if self.validate_txt(self.ui.txtName.text()): self.name = self.ui.txtName.text().title().strip() self.ui.txtName.setText(self.name) else: self.name = '' self.ui.txtName.setText('') self.ui.txtName.setPlaceholderText('Por favor ingrese un nombre válido') if self.ui.txtDNI is obj: if self.validate_num(self.ui.txtDNI.text().replace(',','')): self.dni = int(self.ui.txtDNI.text().replace(',','')) self.ui.txtDNI.setText('{:,}'.format(self.dni)) else: self.dni = '' self.ui.txtDNI.setText('') self.ui.txtDNI.setPlaceholderText('Por favor, escriba un número') if self.ui.txtPhone is obj: if self.validate_num(self.ui.txtPhone.text()): self.phone = int(self.ui.txtPhone.text()) self.ui.txtPhone.setText(str(self.phone)) else: self.phone = '' self.ui.txtPhone.setText('') self.ui.txtPhone.setPlaceholderText('Por favor, escriba un número') if self.ui.txtDNIRegister is obj: if self.validate_num(self.ui.txtDNIRegister.text()): self.dni_register = int(self.ui.txtDNIRegister.text()) else: self.ui.txtDNIRegister.setText('') self.ui.txtDNIRegister.setPlaceholderText('Por favor, escriba un número') return super(MainWindow, self).eventFilter(obj, event) #---------------------------------------------------------------------- def save_user(self): user = dict( name = self.name, dni = self.dni, phone = self.phone, addres = self.ui.txtAddres.text(), birth = self.ui.txtBirth.text(), ) self.conn = connect(self.path) if all([user[value] for value in ['name', 'dni', 'phone']]): if not validate_duplicate(self.conn, 'users', 'dni', user['dni']): print (user) try: with self.conn as conn: insert_data(conn, 'users', **user) except: close(self.conn) finally: close(self.conn) self.ui.lblBanner.setText(f'Usuario {self.name} creado correctamente') self.ui.txtName.setText('') self.ui.txtDNI.setText('') self.ui.txtPhone.setText('') self.ui.txtAddres.setText('') self.ui.txtBirth.setText('') self.ui.txtName.setPlaceholderText('') self.ui.txtDNI.setPlaceholderText('') self.ui.txtPhone.setPlaceholderText('') else: self.ui.lblBanner.setText(f"El usuario {user['dni']} ya existe") else: self.ui.lblBanner.setText('Verifique los datos') #---------------------------------------------------------------------- def validate_txt(self, *fields): validator = [re.match(r'^[a-z\sáéíóú.]+$', field, re.I) for field in fields] return all(validator) #---------------------------------------------------------------------- def validate_num(self, *fields): validator = [re.match(r'^[0-9]+$', field) for field in fields] return all(validator) if 
__name__ == "__main__": app = QApplication(sys.argv) window = MainWindow() apply_stylesheet(app, theme='light_red.xml', light_secondary=True) font = QtGui.QFont() font.setFamily("Ubuntu Mono") font.setPointSize(15) font.setWeight(75) font.setBold(True) font2 = QtGui.QFont() font2.setPointSize(10) font2.setBold(True) window.ui.lblTitle.setFont(font) window.ui.lblBanner.setFont(font2) window.show() sys.exit(app.exec_())
python
""" $lic$ Copyright (C) 2016-2017 by The Board of Trustees of Stanford University This program is free software: you can redistribute it and/or modify it under the terms of the Modified BSD-3 License as published by the Open Source Initiative. If you use this program in your research, we request that you reference the TETRIS paper ("TETRIS: Scalable and Efficient Neural Network Acceleration with 3D Memory", in ASPLOS'17. April, 2017), and that you send us a citation of your work. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 License for more details. You should have received a copy of the Modified BSD-3 License along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. """ import itertools from collections import namedtuple from .phy_dim2 import PhyDim2 NODE_REGION_LIST = ['dim', 'origin', 'type', ] class NodeRegion(namedtuple('NodeRegion', NODE_REGION_LIST)): ''' A node region defined by the dimension and origin offset. The `type` attribute specifies the region type, which could be `PROC` for computation processing nodes or 'DATA' for data storage nodes. NOTES: we cannot overload __contains__ and __iter__ as a node container, because the base namedtuple already defines them. ''' # Type enums. PROC = 0 DATA = 1 NUM = 2 def __new__(cls, *args, **kwargs): ntp = super(NodeRegion, cls).__new__(cls, *args, **kwargs) if not isinstance(ntp.dim, PhyDim2): raise TypeError('NodeRegion: dim must be a PhyDim2 object.') if not isinstance(ntp.origin, PhyDim2): raise TypeError('NodeRegion: origin must be a PhyDim2 object.') if ntp.type not in range(cls.NUM): raise ValueError('NodeRegion: type must be a valid type enum.') return ntp def contains_node(self, coordinate): ''' Whether the region contains the given coordinate. ''' min_coord = self.origin max_coord = self.origin + self.dim return all(cmin <= c and c < cmax for c, cmin, cmax in zip(coordinate, min_coord, max_coord)) def node_iter(self): ''' Iterate through all nodes in the region. ''' gens = [] for o, d in zip(self.origin, self.dim): gens.append(xrange(o, o + d)) cnt = 0 for tp in itertools.product(*gens): coord = PhyDim2(*tp) assert self.contains_node(coord) cnt += 1 yield coord def rel2abs(self, rel_coordinate): ''' Convert relative node coordinate to absolute node coordinate. ''' if not isinstance(rel_coordinate, PhyDim2): raise TypeError('NodeRegion: relative coordinate must be ' 'a PhyDim2 object.') abs_coordinate = self.origin + rel_coordinate if not self.contains_node(abs_coordinate): raise ValueError('NodeRegion: relative coordinate {} is not ' 'in node region {}'.format(rel_coordinate, self)) return abs_coordinate
python
# Basic Frame Differencing Example # # Note: You will need an SD card to run this example. # # This example demonstrates using frame differencing with your OpenMV Cam. It's # called basic frame differencing because there's no background image update. # So, as time passes the background image may change resulting in issues. import sensor, image, pyb, os, time TRIGGER_THRESHOLD = 5 sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) sensor.skip_frames(time = 2000) # Let new settings take affect. clock = time.clock() # Tracks FPS. if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory print("About to save background image...") sensor.skip_frames(time = 2000) # Give the user time to get ready. sensor.snapshot().save("temp/bg.bmp") print("Saved background image - Now frame differencing!") while(True): clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot() # Take a picture and return the image. # Replace the image with the "abs(NEW-OLD)" frame difference. img.difference("temp/bg.bmp") hist = img.get_histogram() # This code below works by comparing the 99th percentile value (e.g. the # non-outlier max value against the 90th percentile value (e.g. a non-max # value. The difference between the two values will grow as the difference # image seems more pixels change. diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value() triggered = diff > TRIGGER_THRESHOLD print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while # connected to your computer. The FPS should increase once disconnected.
python
import tetris.input.gamepad as gp import pygame pygame.init() if __name__ == "__main__": sticks = [] qs = gp.PygameEventReader.q for stick in gp.get_available(): print(gp.display_gamepad_info(stick) + '\n') sticks.append(gp.GamepadWrapper(stick.get_id())) print('Listening...') while True: event = qs.get() print(event)
python
import datetime # enables the start time elements in date and time format # Interaction for patient with learning disabilities # "psychol_assessment", # "iapt", # "cmh_for_smi" # Mental health interaction 1: Psychological assessment def psychol_assessment(patient, environment, patient_time): encounter = { "resource_type": "Encounter", "name" : "psychological assessment", "start": patient_time, } entry = { "resource_type" : "Observation", "name": "psychological assessment", "start": encounter["start"] + datetime.timedelta(minutes=60), "cost": 96, # NHS Ref cost for IAPT - PSSRU "glucose": 0, # dummy glucose impact, to be updated "carbon": 50, # update for accurate carbon } new_patient_record_entries = [encounter, entry] next_environment_id_to_prob = {0: 0.5, 13: 0.5} next_environment_id_to_time = { 0: datetime.timedelta(days=10), # TODO: from initial patient_time (not last) 13: datetime.timedelta(days=20), } update_data = {"new_patient_record_entries": new_patient_record_entries} return ( patient, environment, update_data, next_environment_id_to_prob, next_environment_id_to_time, ) # Mental health interaction 2: IAPT def iapt(patient, environment, patient_time): encounter = { "resource_type": "Encounter", "name" : "iapt", "start": patient_time, } entry = { "resource_type" : "Observation", "name": "iapt", "start": encounter["start"] + datetime.timedelta(minutes=15), "cost": 96, # NHS Ref cost for IAPT - PSSRU "glucose": 0, # dummy glucose impact, to be updated "carbon": 50, # update for accurate carbon } new_patient_record_entries = [encounter, entry] next_environment_id_to_prob = {0: 0.5, 13: 0.5} next_environment_id_to_time = { 0: datetime.timedelta(days=10), # TODO: from initial patient_time (not last) 13: datetime.timedelta(days=20), } update_data = {"new_patient_record_entries": new_patient_record_entries} return ( patient, environment, update_data, next_environment_id_to_prob, next_environment_id_to_time, ) # Mental health interaction 3: Community mental health for severe mental illness def cmh_for_smi(patient, environment, patient_time): encounter = { "resource_type": "Encounter", "name" : "cmh for smi", "start": patient_time, } entry = { "resource_type" : "Observation", "name": "cmh for smi", "start": encounter["start"] + datetime.timedelta(minutes=15), "cost": 96, # NHS Ref cost for IAPT - PSSRU "glucose": 0, # dummy glucose impact, to be updated "carbon": 50, # update for accurate carbon } new_patient_record_entries = [encounter, entry] next_environment_id_to_prob = {0: 0.5, 13: 0.5} next_environment_id_to_time = { 0: datetime.timedelta(days=10), # TODO: from initial patient_time (not last) 13: datetime.timedelta(days=20), } update_data = {"new_patient_record_entries": new_patient_record_entries} return ( patient, environment, update_data, next_environment_id_to_prob, next_environment_id_to_time, )
python
def temp(input, output): img = cv2.imread(input) xmap, ymap = utils.buildmap_1(Ws=800, Hs=800, Wd=800, Hd=800, fov=193.0) cv2.imwrite(output, cv2.remap(img, xmap,ymap,cv2.INTER_LINEAR))
python
from datetime import date # random person class Person: # def __new__(cls, name, age): # print("New object called") # # super.__new__(cls[name, age]) def __init__(self, name, age): print('__init__ called') self.name = name self.age = age @classmethod def fromBirthYear(cls, name, birthyear): print("Factory method called") return cls(name, date.today().year - birthyear) def display(self): print(self.name, self.age) @staticmethod def fromFathersAge(name, fatherAge, fatherPersonAgeDiff): return Person(name, date.today().year - fatherAge + fatherPersonAgeDiff) # person = Person('Sudeep', 19) # person.display() person1 = Person.fromBirthYear('John', 1985) person1.display() # print(id(person), id(person1)) # class Man(Person): # sex = 'Male' # man = Man.fromBirthYear('John', 1985) # print(isinstance(man, Man)) # man1 = Man.fromFathersAge('John', 1965, 20) # print(isinstance(man1, Man), type(man1))
python
import logging from discord.ext import tasks, commands from naotomori.cogs.source.anime import _9anime, gogoanime from naotomori.cogs.sourcecog import SourceCog logger = logging.getLogger('NaoTomori') class AnimeCog(SourceCog): """ AnimeCog: extends the SourceCog. """ def __init__(self, bot): """ Constructor: initialize the cog. :param bot: The Discord bot. """ logger.info("Initializing AnimeCog") super().__init__(bot) # Replace this with your own 'Anime API' if you want to use a different anime source # self.source = _9anime._9Anime() logger.info("Setting GoGoAnime as anime source") self.source = gogoanime.GoGoAnime() @commands.command( brief='Set the anime source for retrieving new anime (set source to "none" to remove the anime source)') async def setAnimeSource(self, ctx, source: str): """ Set the anime source, i.e. where it will retrieve the anime from. :param ctx: The context. :param source: Name of the anime source. """ logger.info("Receiving setAnimeSource command") successful = self._setAnimeSource(source) if successful: self.bot.get_cog('DatabaseCog').updateValue("anime_source", source) if source.lower() == "none": self.list.clear() logger.info('Successfully removed the anime source') await ctx.send(f'Successfully removed the anime source.') return elif not successful: logger.error('Unknown or unsupported anime source') await ctx.send('Unknown or unsupported anime source.') return self.list.clear() self.fillCache() logger.info(f'Successfully set the anime source to {source}') await ctx.send(f'Successfully set the anime source to {source}.') def _setAnimeSource(self, source): """ Set the anime source, i.e. where it will retrieve the anime from. :param source: Name of the anime source. :return True if successful, False otherwise. """ if source.lower() == "gogoanime": self.source = gogoanime.GoGoAnime() # elif source.lower() == "9anime": # self.source = _9anime._9Anime() elif source.lower() == "none": self.source = None else: return False return True @commands.command(brief='Ignore an anime (don\'t send pings for a certain anime)') async def ignoreAnime(self, ctx, *args): """ Ignore an anime. :param ctx: The context. :param args: Name of the anime. """ logger.info("Receiving ignoreAnime command") await super(AnimeCog, self).ignore(ctx, True, *args) @commands.command(brief='Unignore an anime') async def unignoreAnime(self, ctx, *args): """ Unignore an anime. :param ctx: The context. :param args: Name of the anime. """ logger.info("Receiving unignoreAnime command") await super(AnimeCog, self).unignore(ctx, True, *args) @tasks.loop(minutes=5) async def checkNewLoop(self): """ Loop that periodically calls checkNew to check for new anime. """ await self.checkNew()
python
def params_to_string(task: dict) -> dict: for k in task['parameters'].keys(): if (isinstance(task['parameters'][k], int) or isinstance(task['parameters'][k], float)): task['parameters'][k] = str(task['parameters'][k]) return task
python
from distutils.core import setup from Cython.Build import cythonize setup(ext_modules = cythonize('./source/cython_functions.pyx',compiler_directives={'language_level' : "3"}))
python
from .chem import BOND_TYPES, BOND_NAMES, set_conformer_positions, draw_mol_image, update_data_rdmol_positions, \ update_data_pos_from_rdmol, set_rdmol_positions, set_rdmol_positions_, get_atom_symbol, mol_to_smiles, \ remove_duplicate_mols, get_atoms_in_ring, get_2D_mol, draw_mol_svg, GetBestRMSD from .distgeom import Embed3D, get_d_from_pos from .transforms import AddHigherOrderEdges, AddEdgeLength, AddPlaceHolder, AddEdgeName, AddAngleDihedral, CountNodesPerGraph from .torch import ExponentialLR_with_minLr, repeat_batch, repeat_data, get_optimizer, get_scheduler, clip_norm from .evaluation import evaluate_conf, evaluate_distance, get_rmsd_confusion_matrix from .sde import GaussianFourierProjection __all__ = ["BOND_TYPES", "BOND_NAMES", "set_conformer_positions", "draw_mol_image", "update_data_rdmol_positions", "update_data_pos_from_rdmol", "set_rdmol_positions", "set_rdmol_positions_", "get_atom_symbol", "mol_to_smiles", "remove_duplicate_mols", "get_atoms_in_ring", "get_2D_mol", "draw_mol_svg", "GetBestRMSD", "Embed3D", "get_d_from_pos", "AddHigherOrderEdges", "AddEdgeLength", "AddPlaceHolder", "AddEdgeName", "AddAngleDihedral", "CountNodesPerGraph", "ExponentialLR_with_minLr", "repeat_batch", "repeat_data", "get_optimizer", "get_scheduler", "clip_norm", "evaluate_conf", "evaluate_distance", "get_rmsd_confusion_matrix", "GaussianFourierProjection",]
python
# -*- coding: utf-8 -*- """ Created on Thu June 17 11:53:42 2021 @author: Pavan Tummala """ import os, numpy as np import cv2 import random import torch import torch.utils.data as data import xml.etree.ElementTree as ET from abc import ABCMeta, abstractmethod import scipy.cluster.vq as vq import pickle import pandas as pd from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_classif from cv2 import imread, resize from numpy import concatenate from sklearn.metrics import accuracy_score from sklearn.semi_supervised import LabelPropagation from sklearn.model_selection import train_test_split import argparse from imblearn.under_sampling import RandomUnderSampler from skimage import feature import warnings from scipy.sparse import issparse from sklearn.naive_bayes import _BaseDiscreteNB from sklearn.preprocessing import LabelBinarizer from sklearn.utils import check_X_y, check_array from sklearn.utils.extmath import safe_sparse_dot from sklearn.utils.validation import check_is_fitted warnings.filterwarnings("ignore") """ Data Loader reading the files, extracting individual objects from each image """ class DataLoader(data.Dataset): def __init__(self,data_path="", trainval='trainval',transform=None): self.data_path = data_path self.transform = transform self.trainval = trainval self.__init_classes() self.names, self.labels, self.lable_set, self.bounding_box = self.__dataset_info() def __getitem__(self, index): self.data = [] self.lables = [] x = imread(self.data_path+'JPEGImages/'+self.names[index]+'.jpg') x_min, y_min, x_max, y_max = self.bounding_box[index] for i in range(len(x_min)): sub_img = x[y_min[i]:y_max[i],x_min[i]:x_max[i]] sub_img = cv2.resize(sub_img, (64, 64), interpolation=cv2.INTER_NEAREST) self.data.append(sub_img) self.lables.append(self.lable_set[index][i]) if self.transform !=None: x = self.transform(x) y = self.labels[index] def __fetchdata__(self): return self.data, self.lables def __len__(self): return len(self.names) def __dataset_info(self): with open(self.data_path+'ImageSets/Main/'+self.trainval+'.txt') as f: annotations = f.readlines() annotations = [n[:-1] for n in annotations] names = [] labels = [] lable_set = [] bounding_box = [] for af in annotations: filename = os.path.join(self.data_path,'Annotations',af) tree = ET.parse(filename+'.xml') objs = tree.findall('object') num_objs = len(objs) bdg_box = [obj.find('bndbox') for obj in objs] x_min = [int(box.find('xmin').text.lower().strip()) for box in bdg_box] y_min = [int(box.find('ymin').text.lower().strip()) for box in bdg_box] x_max = [int(box.find('xmax').text.lower().strip()) for box in bdg_box] y_max = [int(box.find('ymax').text.lower().strip()) for box in bdg_box] coords = (x_min, y_min, x_max, y_max) boxes_cl = np.zeros((num_objs), dtype=np.int32) temp_lbls = [] for ix, obj in enumerate(objs): cls = self.class_to_ind[obj.find('name').text.lower().strip()] boxes_cl[ix] = cls temp_lbls.append(cls) lbl = np.zeros(self.num_classes) lbl[boxes_cl] = 1 labels.append(lbl) names.append(af) lable_set.append(temp_lbls) bounding_box.append(coords) return np.array(names), np.array(labels).astype(np.float32), lable_set, bounding_box def __init_classes(self): self.classes = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') self.num_classes = len(self.classes) self.class_to_ind = dict(zip(self.classes, range(self.num_classes))) """ local binary 
pattern """ class LocalBinaryPatterns: def __init__(self, numPoints, radius): # store the number of points and radius self.numPoints = numPoints self.radius = radius def describe(self, image, eps=1e-7): # compute the Local Binary Pattern representation # of the image, and then use the LBP representation # to build the histogram of patterns image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) lbp = feature.local_binary_pattern(image, self.numPoints, self.radius, method="uniform") (hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, self.numPoints + 3), range=(0, self.numPoints + 2)) # normalize the histogram hist = hist.astype("float") hist /= (hist.sum() + eps) # return the histogram of Local Binary Patterns return hist """ color layout descriptor """ class DescriptorComputer: __metaclass__ = ABCMeta @abstractmethod def compute(self, frame): pass class ColorLayoutComputer(DescriptorComputer): def __init__(self): self.rows = 8 self.cols = 8 self.prefix = "CLD" def compute(self, img): averages = np.zeros((self.rows,self.cols,3)) imgH, imgW, _ = img.shape for row in range(self.rows): for col in range(self.cols): row_start = int(imgH/self.rows * row) row_end = int(imgH/self.rows * (row+1)) col_start = int(imgW/self.cols*col) col_end = int(imgW/self.cols*(col+1)) slice1 = img[row_start:row_end, col_start:col_end] #slice1 = img[imgH/self.rows * row: imgH/self.rows * (row+1), imgW/self.cols*col : imgW/self.cols*(col+1)] #print(slice) average_color_per_row = np.mean(slice1, axis=0) average_color = np.mean(average_color_per_row, axis=0) average_color = np.uint8(average_color) averages[row][col][0] = average_color[0] averages[row][col][1] = average_color[1] averages[row][col][2] = average_color[2] icon = cv2.cvtColor(np.array(averages, dtype=np.uint8), cv2.COLOR_BGR2YCR_CB) y, cr, cb = cv2.split(icon) dct_y = cv2.dct(np.float32(y)) dct_cb = cv2.dct(np.float32(cb)) dct_cr = cv2.dct(np.float32(cr)) dct_y_zigzag = [] dct_cb_zigzag = [] dct_cr_zigzag = [] flip = True flipped_dct_y = np.fliplr(dct_y) flipped_dct_cb = np.fliplr(dct_cb) flipped_dct_cr = np.fliplr(dct_cr) for i in range(self.rows + self.cols -1): k_diag = self.rows - 1 - i diag_y = np.diag(flipped_dct_y, k=k_diag) diag_cb = np.diag(flipped_dct_cb, k=k_diag) diag_cr = np.diag(flipped_dct_cr, k=k_diag) if flip: diag_y = diag_y[::-1] diag_cb = diag_cb[::-1] diag_cr = diag_cr[::-1] dct_y_zigzag.append(diag_y) dct_cb_zigzag.append(diag_cb) dct_cr_zigzag.append(diag_cr) flip = not flip return np.concatenate([np.concatenate(dct_y_zigzag), np.concatenate(dct_cb_zigzag), np.concatenate(dct_cr_zigzag)]) """ Bag of Visual word """ device = torch.device('cpu') def random_init(dataset, num_centers): num_points = dataset.size(0) dimension = dataset.size(1) used = torch.zeros(num_points, dtype=torch.long) indices = torch.zeros(num_centers, dtype=torch.long) for i in range(num_centers): while True: cur_id = random.randint(0, num_points - 1) if used[cur_id] > 0: continue used[cur_id] = 1 indices[i] = cur_id break indices = indices.to(device) centers = torch.gather(dataset, 0, indices.view(-1, 1).expand(-1, dimension)) return centers def compute_codes(dataset, centers): num_points = dataset.size(0) dimension = dataset.size(1) num_centers = centers.size(0) # 5e8 should vary depending on the free memory on the GPU # Ideally, automatically ;) chunk_size = int(5e8 / num_centers) codes = torch.zeros(num_points, dtype=torch.long, device=device) centers_t = torch.transpose(centers, 0, 1) centers_norms = torch.sum(centers ** 2, dim=1).view(1, -1) for i in range(0, 
num_points, chunk_size): begin = i end = min(begin + chunk_size, num_points) dataset_piece = dataset[begin:end, :] dataset_norms = torch.sum(dataset_piece ** 2, dim=1).view(-1, 1) distances = torch.mm(dataset_piece, centers_t) distances *= -2.0 distances += dataset_norms distances += centers_norms _, min_ind = torch.min(distances, dim=1) codes[begin:end] = min_ind return codes def update_centers(dataset, codes, num_centers): num_points = dataset.size(0) dimension = dataset.size(1) centers = torch.zeros(num_centers, dimension, dtype=torch.float, device=device) cnt = torch.zeros(num_centers, dtype=torch.float, device=device) centers.scatter_add_(0, codes.view(-1, 1).expand(-1, dimension), dataset) cnt.scatter_add_(0, codes, torch.ones(num_points, dtype=torch.float, device=device)) # Avoiding division by zero # Not necessary if there are no duplicates among the data points cnt = torch.where(cnt > 0.5, cnt, torch.ones(num_centers, dtype=torch.float, device=device)) centers /= cnt.view(-1, 1) return centers def cluster(dataset, num_centers): centers = random_init(dataset, num_centers) codes = compute_codes(dataset, centers) num_iterations = 0 while True: num_iterations += 1 centers = update_centers(dataset, codes, num_centers) new_codes = compute_codes(dataset, centers) # Waiting until the clustering stops updating altogether # This is too strict in practice if torch.equal(codes, new_codes): print('Converged in %d iterations' % num_iterations) break codes = new_codes return centers, codes def extract_sift_descriptors(img): gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) sift = cv2.xfeatures2d.SIFT_create() keypoints, descriptors = sift.detectAndCompute(gray, None) return descriptors def build_codebook(X, voc_size): """ Inupt a list of feature descriptors voc_size is the "K" in K-means, k is also called vocabulary size Return the codebook/dictionary """ features = np.vstack((descriptor for descriptor in X)).astype(np.float32) dataset = torch.from_numpy(features) print('Starting clustering') centers, codes = cluster(dataset, voc_size) return centers def input_vector_encoder(feature, codebook): """ Input all the local feature of the image Pooling (encoding) by codebook and return """ code, _ = vq.vq(feature, codebook) word_hist, bin_edges = np.histogram(code, bins=range(codebook.shape[0] + 1), normed=True) return word_hist def extract_surf_descriptors(img): gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) surf = cv2.xfeatures2d.SURF_create() keypoints, descriptors = surf.detectAndCompute(gray, None) return descriptors """ Histogram features """ def fd_histogram(image, mask=None): bins=8 image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) hist = cv2.calcHist([image], [0, 1, 2], None, [bins, bins, bins], [0, 256, 0, 256, 0, 256]) cv2.normalize(hist, hist) return hist.flatten() """ feature normalization """ def scale(X, x_min, x_max): nom = (X-X.min(axis=0))*(x_max-x_min) denom = X.max(axis=0) - X.min(axis=0) denom[denom==0] = 1 return x_min + nom/denom class MultinomialNBSS(_BaseDiscreteNB): """ Semi-supervised Naive Bayes classifier for multinomial models. Unlabeled data must be marked with -1. In comparison to the standard scikit-learn MultinomialNB classifier, the main differences are in the _count and fit methods. Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). beta : float, optional (default=1.0) Weight applied to the contribution of the unlabeled data (0 for no contribution). 
fit_prior : boolean, optional (default=True) Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size (n_classes,), optional (default=None) Prior probabilities of the classes. If specified the priors are not adjusted according to the data. tol : float, optional (default=1e-3) Tolerance for convergence of EM algorithm. max_iter : int, optional (default=1500) Maximum number of iterations for EM algorithm. verbose : boolean, optional (default=True) Whether to output updates during the running of the EM algorithm. Attributes ---------- class_log_prior_ : array, shape (n_classes, ) Smoothed empirical log probability for each class. intercept_ : array, shape (n_classes, ) Mirrors ``class_log_prior_`` for interpreting MultinomialNBSS as a linear model. feature_log_prob_ : array, shape (n_classes, n_features) Empirical log probability of features given a class, ``P(x_i|y)``. coef_ : array, shape (n_classes, n_features) Mirrors ``feature_log_prob_`` for interpreting MultinomialNBSS as a linear model. class_count_ : array, shape (n_classes,) Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape (n_classes, n_features) Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. """ def __init__(self, alpha=1.0, beta=1.0, fit_prior=True, class_prior=None, tol=1e-3, max_iter=1500, verbose=True): self.alpha = alpha self.beta = beta self.fit_prior = fit_prior self.class_prior = class_prior self.tol = tol self.max_iter = max_iter self.verbose = verbose def _count(self, X, Y, U_X=np.array([]), U_prob=np.array([])): """Count and smooth feature occurrences.""" if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative") self.feature_count_ = safe_sparse_dot(Y.T, X) self.class_count_ = Y.sum(axis=0) if U_X.shape[0] > 0: self.feature_count_ += self.beta*safe_sparse_dot(U_prob.T, U_X) self.class_count_ += self.beta*U_prob.sum(axis=0) else: self.feature_count_ = safe_sparse_dot(Y.T, X) self.class_count_ = Y.sum(axis=0) def _update_feature_log_prob(self, alpha): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + alpha smoothed_cc = smoothed_fc.sum(axis=1) self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') return (safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_) def partial_fit(self, X, y, classes=None, sample_weight=None): """A semi-supervised version of this method has not been implemented. """ def fit(self, X, y, sample_weight=None): """Fit semi-supervised Naive Bayes classifier according to X, y Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Unlabeled data must be marked with -1. sample_weight : array-like, shape = [n_samples], (default=None) Weights applied to individual samples (1. for unweighted). 
Returns ------- self : object """ X, y = check_X_y(X, y, 'csr') _, n_features = X.shape # Unlabeled data are marked with -1 unlabeled = np.flatnonzero(y == -1) labeled = np.setdiff1d(np.arange(len(y)), unlabeled) labelbin = LabelBinarizer() Y = labelbin.fit_transform(y[labeled]) self.classes_ = labelbin.classes_ if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently; # this means we also don't have to cast X to floating point Y = Y.astype(np.float64, copy=False) if sample_weight is not None: sample_weight = np.atleast_2d(sample_weight) Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas n_effective_classes = Y.shape[1] alpha = self._check_alpha() self._count(X[labeled], Y) self._update_feature_log_prob(alpha) self._update_class_log_prior(class_prior=class_prior) jll = self._joint_log_likelihood(X) sum_jll = jll.sum() # Run EM algorithm if len(unlabeled) > 0: self.num_iter = 0 pred = self.predict(X) while self.num_iter < self.max_iter: self.num_iter += 1 prev_sum_jll = sum_jll # First, the E-step: prob = self.predict_proba(X[unlabeled]) # Then, the M-step: self._count(X[labeled], Y, X[unlabeled], prob) self._update_feature_log_prob(self.beta) self._update_class_log_prior(class_prior=class_prior) jll = self._joint_log_likelihood(X) sum_jll = jll.sum() if self.verbose: print( 'Step {}: jll = {:f}'.format(self.num_iter, sum_jll) ) if self.num_iter > 1 and prev_sum_jll - sum_jll < self.tol: break if self.verbose: end_text = 's.' if self.num_iter > 1 else '.' print( 'Optimization converged after {} ' 'iteration'.format(self.num_iter) + end_text ) return self if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data', help='path for voc2007') args = parser.parse_args() path = args.data data_load = DataLoader(data_path = path) lst_data = [] lst_lbl = [] for i in range(0, 5000): data_load.__getitem__(i) test_data, test_label = data_load.__fetchdata__() lst_data.append(test_data) lst_lbl.append(test_label) labels = np.hstack(lst_lbl) data = np.concatenate(lst_data, axis=0) print(len(data)) print("################### Data load completed #######################") """ color layour features """ computer = ColorLayoutComputer() color_layout_features = [computer.compute(data[i]) for i in range(len(data))] print("################### Color layout feature generated #######################") VOC_SIZE = 128 # ============================================================================= # """ # visual bag of words using sift # """ # bow_sift = [extract_sift_descriptors(data[i].astype('uint8')) for i in range(len(data))] # bow_sift = [each for each in zip(bow_sift, labels) if not each[0] is None] # bow_sift, y_train = zip(*bow_sift) # # codebook = build_codebook(bow_sift, voc_size=VOC_SIZE) # bow_sift = [input_vector_encoder(x, codebook) for x in bow_sift] # ============================================================================= """ visual bag of words using surf """ bow_surf = [extract_surf_descriptors(data[i].astype('uint8')) for i in range(len(data))] bow_surf = [each for each in zip(bow_surf, labels) if not each[0] is None] bow_surf, y_train = zip(*bow_surf) codebook = build_codebook(bow_surf, voc_size=VOC_SIZE) bow_surf = [input_vector_encoder(x, codebook) for x in bow_surf] print("################### Visual bag of words and 
surf generated #######################") """ color histogram """ color_hist_features = [fd_histogram(data[i].astype('uint8')) for i in range(len(data))] print("################### Color Histogram generated #######################") """ local binary pattern """ desc = LocalBinaryPatterns(24, 8) lbp = [desc.describe(data[i]) for i in range(len(data))] print("################### Local Binary Pattern generated #######################") bow_surf = np.array(bow_surf) color_layout_features = np.array(color_layout_features) color_hist_features = np.array(color_hist_features) lbp = np.array(lbp) # with open('color_layout_descriptor_64.pkl','wb') as f: # pickle.dump(color_layout_features, f) # with open('bow_surf_64.pkl','wb') as f: # pickle.dump(bow_surf, f) # with open('hist_64.pkl','wb') as f: # pickle.dump(color_hist_features, f) # with open('labels_64.pkl','wb') as f: # pickle.dump(labels, f) # with open('data_64.pkl','wb') as f: # pickle.dump(data, f) """ pickle read """ # color_layout_features = pd.read_pickle(path + "/color_layout_descriptor_64.pkl") # bow_surf = pd.read_pickle(path + "/bow_surf_64.pkl") # color_hist_features = pd.read_pickle(path + "/hist_64.pkl") # labels = pd.read_pickle(path +"/labels_64.pkl") # data = pd.read_pickle(path +"/data_64.pkl") """ Normalizing color layour feature only since other features have been normalized while feature extraction above """ color_layout_features_scaled = scale(color_layout_features, 0, 1) """ stacking all the features into one array """ features = np.hstack([color_layout_features_scaled, color_hist_features, lbp]) features = features.astype('float64') """ feature selection using Anova, K is the hyper param that needs to be varied and tested """ fs = SelectKBest(score_func=f_classif, k=200) fs.fit(features, labels) selected_features = fs.transform(features) print("################### Feature Selection completed #######################") undersample = RandomUnderSampler(random_state=123) X_over, y_over = undersample.fit_resample(selected_features, labels) X_train, X_test, y_train, y_test = train_test_split(X_over, y_over, test_size=0.1, random_state=42) X_train_lab, X_test_unlab, y_train_lab, y_test_unlab = train_test_split(X_train, y_train, test_size=0.1, random_state=1, stratify=y_train) print("################### Class Balancing completed #######################") print("Labelled features set size: %d, %d"%X_train_lab.shape) print("Labelled lable set size: %d"%y_train_lab.shape) print("Unlabelled features set size: %d, %d"%X_test_unlab.shape) print("Unlabelled lable set size: %d"%y_test_unlab.shape) X_train_mixed = concatenate((X_train_lab, X_test_unlab)) nolabel = [-1 for _ in range(len(y_test_unlab))] y_train_mixed = concatenate((y_train_lab, nolabel)) model = MultinomialNBSS(verbose=False) model.fit(X_train_mixed, y_train_mixed) print("################### SSGMM model built #######################") yhat = model.predict(X_test) print("Test data accuracy: %.2f%%"% (accuracy_score(y_test, yhat)*100))
python