Dataset columns: content (string, lengths 0–894k), type (string, 2 classes)
class BadMoves(object):
    def bad_move(self, move, gs):
        if move is None:
            return True
        coord = gs.me.head + move
        if gs.me.neck == coord:
            return True
        if not gs.is_empty(coord) and coord not in gs.all_tails:
            return True
        if coord in gs.possible_death_coords:
            return True
        return False

    def death_move(self, move, gs):
        if move is None:
            return True
        coord = gs.me.head + move
        if gs.me.neck == coord:
            return True
        if not gs.is_empty(coord) and coord not in gs.all_tails:
            return True
        return False

    def risky_move(self, move, gs):
        if move is None:
            return True
        coord = gs.me.head + move
        if coord in gs.possible_death_coords:
            return True
        return False
python
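A minimal, hypothetical usage sketch for the BadMoves checks above. The game-state object `gs` (with `me.head`, `me.neck`, `is_empty`, `all_tails`, `possible_death_coords`) is taken exactly as the original methods use it; the `choose_move` helper and the candidate-move list are assumptions added here purely for illustration. The idea is to prefer moves that fail none of the checks, then fall back to merely survivable ones.

# Hypothetical move filter built on BadMoves; 'moves' is an assumed list of candidate moves.
def choose_move(moves, gs):
    checker = BadMoves()
    safe = [m for m in moves if not checker.bad_move(m, gs)]
    if safe:
        return safe[0]
    survivable = [m for m in moves if not checker.death_move(m, gs)]
    return survivable[0] if survivable else None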
''' Created on Jan 3, 2016 @author: graysonelias ''' seeding = False import wallaby as w # Time startTime = -1 # Motor ports LMOTOR = 0 RMOTOR = 3 COWMOTOR = 1 # analog ports LTOPHAT = 0 RTOPHAT = 1 # Digital ports LEFT_BUTTON = 0 RIGHT_BUTTON = 1 CLONE_SWITCH = 9 RIGHT_BUTTON = 13 isClone = w.digital(CLONE_SWITCH) # Servos servoArm = 0 servoCowArm = 1 servoClaw = 2 servoCowClaw = 3 #Main Arm Values armUp = 900#1400 armUpBotguy = 300#800 armOnRampBotGuy = 1100#1500 # 1575 armUpRampBotGuy = 860#1500 # 1575 armUpRampBotGuyLowered = 1300#1800 armUpLineFollow = 50#550 armBotguy = 1300#1800 armDown = 1350#1850 armBotguyHover = 800#1300 #Cow Arm values cowArmDown = 1800 cowArmUp = 600 cowArmTurn = 1270 cowArmDrop = 1550 #Botguy Claw Values clawClose = 450 clawOpen = 2000 #Cow Claw Values cowClawOpen = 1800 cowClawPush = 1900 cowClawClose = 1000 cowClawStart = 1400 # Tophat values frontLineFollowerGrey = 1300 ET = 5 TOPHAT_PIPE = 3 STARTLIGHT = 4 if isClone: # Servos servoArm = 0 servoCowArm = 1 servoClaw = 2 servoCowClaw = 3 #Main Arm Values # armUp = 1500 # armUpBotguy = 900 # armBotguy = 1470 armDown = 1400 # armUpRampBotGuy = 1500 # armUpRampBotGuyLowered = 1800 # armUpLineFollow = 550 # armBotguy = 1800 # armDown = 1850 # armBotguyHover = 1300 # Cow Arm values cowArmDown = 1800 cowArmUp = 600 cowArmTurn = 1270 cowArmDrop = 1550 #Botguy Claw Values clawClose = 900 clawOpen = 1900 #Cow Claw Values cowClawOpen = 1800 cowClawPush = 1900 cowClawClose = 900 cowClawStart = 900 # Tophat values FRONT_TOPHAT = 0 frontLineFollowerGrey = 1300
python
# O(n) time complexity
# O(n) space complexity
def reverse1(a):
    i = 0
    j = len(a)
    b = a[:]
    while j > 0:
        # b.append(a[j - 1]) -> not efficient
        b[i] = a[j - 1]
        i += 1
        j -= 1
    return b


# O(n) time complexity
# O(1) space complexity
def reverse2(a):
    temp = None
    i = 0
    j = len(a)
    half_len = int(j / 2)
    for _ in range(half_len):
        temp = a[i]
        a[i] = a[j - 1]
        a[j - 1] = temp
        i += 1
        j -= 1
    return a


print(reverse1([1, 2, 3, 4]))
print(reverse2([1, 2, 3, 4, 5]))
python
import sys import time from sdk import * addr_list = addresses() _pid = 20036 _proposer = addr_list[0] _initial_funding = (int("2") * 10 ** 9) _each_funding = (int("3") * 10 ** 9) _big_funding = (int("8") * 10 ** 9) _funding_goal_general = (int("10") * 10 ** 9) def gen_prop(): global _pid prop = Proposal(str(_pid), "general", "proposal for fund", "proposal headline", _proposer, _initial_funding) _pid += 1 return prop def test_normal_cancel(): # create proposal prop = gen_prop() prop.send_create() time.sleep(1) encoded_pid = prop.pid # check proposal state check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusFunding) # 1st fund fund_proposal(encoded_pid, _each_funding, addr_list[0]) # 2nd fund fund_proposal(encoded_pid, _each_funding, addr_list[1]) check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusFunding) # cancel this proposal cancel_proposal(encoded_pid, _proposer, "changed mind") check_proposal_state(encoded_pid, ProposalOutcomeCancelled, ProposalStatusCompleted) return encoded_pid def test_cancel_noactive_proposal(pid_not_active): # cancel this no-active proposal, should fail res = cancel_proposal(pid_not_active, _proposer, "try a weird cancel") if res: sys.exit(-1) check_proposal_state(pid_not_active, ProposalOutcomeCancelled, ProposalStatusCompleted) def test_cancel_proposal_in_voting_status(): # create proposal prop = gen_prop() prop.send_create() time.sleep(1) encoded_pid = prop.pid # 1st fund fund_proposal(encoded_pid, _big_funding, addr_list[1]) check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusVoting) # cancel this proposal, should fail res = cancel_proposal(encoded_pid, _proposer, "too late to changed mind") if res: sys.exit(-1) check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusVoting) def test_cancel_someone_else_proposal(): # create proposal prop = gen_prop() prop.send_create() time.sleep(1) encoded_pid = prop.pid # cancel this proposal, should fail res = cancel_proposal(encoded_pid, addr_list[1], "do bad things") if res: sys.exit(-1) check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusFunding) if __name__ == "__main__": pid_canceled = test_normal_cancel() test_cancel_noactive_proposal(pid_canceled) test_cancel_proposal_in_voting_status() test_cancel_someone_else_proposal() print bcolors.OKGREEN + "#### Test cancel proposals succeed" + bcolors.ENDC print ""
python
text_3 = '3'
print(text_3.isalnum())
python
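For context on the str.isalnum call above, a few more cases of the standard-library behavior it relies on:

print('3'.isalnum())       # True: digits count as alphanumeric
print('abc123'.isalnum())  # True
print('3.5'.isalnum())     # False: '.' is neither a letter nor a digit
print(''.isalnum())        # False: the empty string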
import subprocess
import time
import unittest

from game.client.controller.network import Network


class TestServer(unittest.TestCase):
    def setUp(self) -> None:
        self.server = subprocess.Popen(["python3", "-m", "game", "--server"])
        time.sleep(2)

    def test_game_creation(self):
        network = Network(addr='127.0.0.1', port=1488)
        self.assertTrue(network.create_game(False, False))

    def test_game_connect(self):
        network = Network(addr='127.0.0.1', port=1488)
        network.create_game(False, False)
        games = network.list_games()
        self.assertTrue(len(games) == 1)
        self.assertTrue(network.connect_to_game(games[0]))

    def tearDown(self) -> None:
        self.server.kill()
python
from unittest.mock import ANY, mock_open, patch

import pytest
import rumps

from src.app_functions.exceptions.credentials_failed import CredentialInputFailed
from src.duo.login.input_credentials import input_credentials


def test_successful_entry_of_credentials(mocker):
    """Credentials entered in both prompts are written out as JSON."""
    mock_function = mocker.patch(
        "src.duo.login.input_credentials.window",
        side_effect=[rumps.rumps.Response(1, "UserName"), rumps.rumps.Response(1, "Password")],
    )
    mock_function2 = mocker.patch("src.duo.login.input_credentials.json.dump")
    with patch("src.duo.login.input_credentials.open", mock_open()):
        input_credentials()
    mock_function.assert_called_with(
        cancel_button=True, message="Please enter your password", dimensions=(200, 50)
    )
    mock_function2.assert_called_once_with({"username": "UserName", "password": "Password"}, ANY)


def test_stop_during_password(mocker):
    """Cancelling during password entry raises CredentialInputFailed."""
    mock_function = mocker.patch(
        "src.duo.login.input_credentials.window",
        side_effect=[rumps.rumps.Response(1, "UserName"), rumps.rumps.Response(0, "Password")],
    )
    with pytest.raises(CredentialInputFailed):
        with patch("src.duo.login.input_credentials.open", mock_open()):
            input_credentials()
    mock_function.assert_called_with(
        cancel_button=True, message="Please enter your password", dimensions=(200, 50)
    )


def test_stop_during_username(mocker):
    """Cancelling during username entry raises CredentialInputFailed."""
    mock_function = mocker.patch(
        "src.duo.login.input_credentials.window", side_effect=[rumps.rumps.Response(0, "UserName")]
    )
    with pytest.raises(CredentialInputFailed):
        with patch("src.duo.login.input_credentials.open", mock_open()):
            input_credentials()
    mock_function.assert_called_once_with(
        cancel_button=True, message="Please enter your username", dimensions=(200, 50)
    )
python
import torch
import numpy as np

from xopt.vocs import VOCS

from ..bayesian.models.models import create_model


class TestModelCreation:
    vocs = VOCS(variables={'x1': [0, 1], 'x2': [0, 1], 'x3': [0, 1]})

    def test_create_model(self):
        train_x = torch.rand(5, 3)
        train_y = torch.rand(5, 2)
        train_c = torch.rand(5, 4)

        model = create_model(train_x, train_y, train_c, vocs=self.vocs)

        # model creation should also succeed when an objective value is missing (NaN)
        train_y_nan = train_y.clone()
        train_y_nan[0][1] = np.nan
        model = create_model(train_x, train_y_nan, train_c, vocs=self.vocs)
python
# 64. Minimum Path Sum
# Given an m x n grid filled with non-negative numbers,
# find a path from top left to bottom right
# which minimizes the sum of all numbers along its path.
#
# Note: You can only move either down or right at any point in time.


class DynamicProgrammingSol():
    # Time: O(m * n)
    # Space: O(1) extra -- the first row of the grid is reused in place as the DP buffer
    def minPathSum1(self, grid):
        path_sum = grid[0]
        for row in range(len(grid)):
            if row == 0:
                for col in range(1, len(grid[0])):
                    path_sum[col] += path_sum[col - 1]
            else:
                for col in range(len(grid[0])):
                    if col == 0:
                        path_sum[col] += grid[row][col]
                    else:
                        path_sum[col] = min(path_sum[col], path_sum[col - 1]) + grid[row][col]
        return path_sum[-1]

    # Time: O(m * n)
    # Space: O(1) extra -- same rolling-row idea, with the first row handled up front
    def minPathSum2(self, grid):
        path_sum = grid[0]
        for col in range(1, len(grid[0])):
            path_sum[col] += path_sum[col - 1]
        for row in range(1, len(grid)):
            path_sum[0] += grid[row][0]
            for col in range(1, len(grid[0])):
                path_sum[col] = min(path_sum[col], path_sum[col - 1]) + grid[row][col]
        return path_sum[-1]
python
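A quick check of the rolling-row DP above, using the standard example grid for this problem (the 1 → 3 → 1 → 1 → 1 path sums to 7). Note that both methods mutate the first row of the grid they receive, so each call gets a fresh grid here.

grid1 = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
grid2 = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
sol = DynamicProgrammingSol()
print(sol.minPathSum1(grid1))  # 7
print(sol.minPathSum2(grid2))  # 7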
from flask import Flask, jsonify
from flask_restplus import Resource, Api
from faker import Faker

app = Flask(__name__)
api = Api(app,
          version='0.1.0',
          title='Faker',
          description="""## Faker API
**Summon your new hero.**
""")
ns = api.namespace('Hero', description='Heroes sleep here.')

fake = Faker("ko-KR")


@ns.route('/new_hero')
class NewHero(Resource):
    def get(self):
        '''Generates a new hero profile.'''
        profile = fake.profile()
        profile.pop('current_location')
        profile['phone_number'] = fake.phone_number()
        return jsonify(profile)


if __name__ == '__main__':
    app.run(debug=True, port=80, host='0.0.0.0')
python
import cv2

img = cv2.imread("example_images/brain_noise.jpeg")

# Structuring element (also called a kernel)
se = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

# Basic morphology
img_erosion = cv2.erode(img, se, iterations=1)
img_dilation = cv2.dilate(img, se, iterations=1)
img_opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, se)
img_closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, se)

cv2.imshow("Original", img)
cv2.waitKey(0)
cv2.imshow("Eroded", img_erosion)
cv2.waitKey(0)
cv2.imshow("Dilated", img_dilation)
cv2.waitKey(0)
cv2.imshow("Opened", img_opening)
cv2.waitKey(0)
cv2.imshow("Closed", img_closing)
cv2.waitKey(0)
python
from django.contrib.auth.models import User from rest_framework import serializers from blog.models import Like, Post class UserInfoSerializer(serializers.ModelSerializer): url = serializers.HyperlinkedIdentityField(view_name="api:user-detail") class Meta: model = User fields = ("url", "id", "username", "first_name", "last_name") class PostInfoSerializer(serializers.HyperlinkedModelSerializer): url = serializers.HyperlinkedIdentityField(view_name="api:post-detail") author = UserInfoSerializer() class Meta: model = Post fields = ("url", "id", "post_title", "post_text", "author", "pub_date") class LikeInfoSerializer(serializers.ModelSerializer): user = UserInfoSerializer() post = PostInfoSerializer() class Meta: model = Like fields = ("post", "user")
python
def merge_the_tools(string, k):
    # Split the string into len(string) / k chunks of length k.
    chunks = []
    for i in range(0, len(string), k):
        chunks.append(string[i:i + k])

    # For each chunk, keep only the first occurrence of each character.
    deduped_chunks = []
    seen = []
    for chunk in chunks:
        for ch in chunk:
            if ch not in seen:
                seen.append(ch)
        deduped_chunks.append(''.join(seen))
        seen = []

    for elem in deduped_chunks:
        print(elem)
python
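merge_the_tools above follows the HackerRank exercise of the same name: split the string into substrings of length k and print each one with duplicate characters removed, keeping first occurrences. A quick run under that assumption:

merge_the_tools('AABCAAADA', 3)
# Prints:
# AB   (from 'AAB')
# CA   (from 'CAA')
# AD   (from 'ADA')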
import numpy as np import random import matplotlib.pyplot as plt from matplotlib.legend_handler import HandlerLine2D from matplotlib import ticker import torch import math k = 20 # num of selected clients in each round K = 100 # num of total activated clients T = 2500 # num of total rounds def classA(size): return np.random.binomial(size=size, n=1, p=0.1) def classB(size): return np.random.binomial(size=size, n=1, p=0.3) def classC(size): return np.random.binomial(size=size, n=1, p=0.6) def classD(size): return np.random.binomial(size=size, n=1, p=0.9) def random_n(): rand_list = [] out = [0, 0, 0, 0] for i in range(20): rand_list.append(random.randint(1, 100)) for rand in rand_list: if rand <= 25: out[0] += 1 elif 25 < rand <= 50: out[1] += 1 elif 50 < rand <= 75: out[2] += 1 else: out[3] += 1 return out def random_d(d, k=20): rand_list = [] out = [0, 0, 0, 0] for i in range(d): rand_list.append(random.randint(1, 100)) for rand in rand_list: if rand <= 25: out[0] += 1 elif 25 < rand <= 50: out[1] += 1 elif 50 < rand <= 75: out[2] += 1 else: out[3] += 1 pick = k for i in range(4): if pick == 0: out[i] = 0 elif pick < out[i]: out[i] = pick pick = 0 else: pick -= out[i] return out def make_CEP_SR_FedCs(T, comm_rounds, k=20): cep_sum = np.zeros(T) for t in range(T): pick = classD(k) for x_i_t in pick: cep_sum[t] += x_i_t CEP_FedCs = sum_up_to_arr(comm_rounds, cep_sum) sr_sum = np.zeros(len(comm_rounds)) for i, T_tag in enumerate(comm_rounds): sr_sum[i] = CEP_FedCs[i]/(T_tag*k) return CEP_FedCs, sr_sum def make_CEP_SP(T, comm_rounds, selected_clients_list, k=20): cep_sum = np.zeros(T) for t in range(T): pick = classA(selected_clients_list[0]) pick = np.append(pick, classB(selected_clients_list[1])) pick = np.append(pick, classC(selected_clients_list[2])) pick = np.append(pick, classD(selected_clients_list[3])) for x_i_t in pick: cep_sum[t] += x_i_t CEF_res = sum_up_to_arr(comm_rounds, cep_sum) SR_sum = np.zeros(len(comm_rounds)) for i, T_tag in enumerate(comm_rounds): SR_sum[i] = CEF_res[i]/(T_tag*k) return CEF_res, SR_sum def make_CEP_SR_E3CS(T, sig_num, sig_type, comm_rounds, K=100): Wt = np.ones(K) cep_sum = np.zeros(T) Xt, At = E3CS_FL_algorithm(k=20, T=T, W_t=Wt, K=K, sig_num=sig_num, sig_type=sig_type) for t in range(T): for i in At[t]: cep_sum[t] += Xt[int(i)] CEP_E3CS = sum_up_to_arr(comm_rounds, cep_sum) SR_E3CS = np.zeros(len(comm_rounds)) for i, T_tag in enumerate(comm_rounds): SR_E3CS[i] = CEP_E3CS[i] / (T_tag * k) return CEP_E3CS, SR_E3CS def _create_clients_group(K=100, groups=4): Xt = [] group_size = int(K/groups) Xt = np.concatenate((classA(group_size), classB(group_size))) Xt = np.concatenate((Xt, classC(group_size))) Xt = np.concatenate((Xt, classD(group_size))) return Xt def _num_sigma(s_type, num=1): def _sigma_t(t): return (num*k/K) def _inc_sigma_t(t): if t<(T/4): return 0 else: return k/K if s_type=="num": return _sigma_t else: return _inc_sigma_t def E3CS_FL_algorithm(k, T, W_t, K=100, sig_num=1, sig_type="num", eta=0.5): ''' :param k: the number of involved clients in each round :param sig_t: fairness quota :param T: final round number :param D_i: local data distribution :param o1: local update operation :param eta: the learning rate of weights update :return: - At: the selected group in round t ''' At = np.zeros((T, k)) # default dtype is numpy.float64. 
Pt, St = ([] for i in range(2)) x_t = _create_clients_group(K) print("E3CS-{}({})".format(sig_type, sig_num)) for t in range(T): sigma_t = (_num_sigma(sig_type, sig_num))(t) Pt, St = ProbAlloc(k, sigma_t, W_t, K) Pt_tensor = torch.tensor(Pt) At[t] = torch.multinomial(Pt_tensor, k, replacement=False) # At[t] = At[t].detach().numpy() selected_clients = [x_t[int(i)] for i in At[t]] print("Num of 0 clients: " + str(20-sum(selected_clients))) x_estimator_t = np.zeros(K) for i in range(0, K): x_estimator_t[i] = x_t[i]/Pt[i] if Pt[i]>0.001 else x_t[i]/0.001 # for cases when Pt[i] is very small number # x_estimator_t[i] = x_t[i]/Pt[i] if (i in At[t]) else 0 W_t[i] = W_t[i] if (i in St) else W_t[i]*math.exp((k-(K*sigma_t))*eta*x_estimator_t[i]/K) return x_t, At def ProbAlloc(k, sigma_t, W_t, K=100): ''' :param k: the number of involved clients in each round :param sigma_t: fairness quota of round t :param W_t: exponential weights for round (vector of size K) :param K: total num of activate clients :return: - Pt: probability allocation vector for round t - St: overflowed set for round t ''' St = [] P_t = np.zeros(len(W_t)) for i in range(0, len(W_t)): P_t[i] = sigma_t + (((k - (K * sigma_t)) * W_t[i]) / sum(W_t)) if P_t[i] > 1: P_t[i] = 1 St.append(i) P_t = [0 if np.isnan(p) else p for p in P_t] return P_t, St def sum_up_to_arr(T_arr, arr): res_arr = np.zeros(len(T_arr)) for i, t in enumerate(T_arr): res_arr[i] = _sum_up_tp(t, arr) return res_arr def _sum_up_tp(T, arr): res = 0 for i in range(T): res += arr[i] return res def _aggr_CEP_SR_E3CS(r, T, s_num, s_type, comm_rounds, k=20): cep = np.zeros(len(comm_rounds)) sr = np.zeros(len(comm_rounds)) for i in range(r): cep_tmp, sr_tmp = make_CEP_SR_E3CS(T, s_num, s_type, comm_rounds) cep += cep_tmp sr += sr_tmp CEP_E3CS = (cep / r) SR_E3CS = (sr / r) return CEP_E3CS, SR_E3CS def main(): T = 2500 r = 10 dots = 200 comm_rounds = [i for i in range(1, T, dots)] # make FedCS print("FedCS") CEP_FedCs, s_r_FedCs = make_CEP_SR_FedCs(T, comm_rounds, k) # make Random print("Random") random_tmp = random_n() CEP_random, s_r_random = make_CEP_SP(T, comm_rounds, random_tmp, k=20) # make pow_d d=30 print("pow_d("+str(d)+")") random_tmp_pow = random_d(d, k) CEP_pow_d, s_r_pow_d = make_CEP_SP(T, comm_rounds, random_tmp_pow, k=20) # make E3CS-0 print("E3CS-0") CEP_E3CS_0, s_r_E3CS_0 = make_CEP_SR_E3CS(T, 0, "num", comm_rounds) CEP_E3CS_0, s_r_E3CS_0 = _aggr_CEP_SR_E3CS(r, T, 0, "num", comm_rounds) # make E3CS-0.5 print("E3CS-0.5") CEP_E3CS_05, s_r_E3CS_05 = make_CEP_SR_E3CS(T, 0.5, "num", comm_rounds) CEP_E3CS_05, s_r_E3CS_05 = _aggr_CEP_SR_E3CS(r, T, 0.5, "num", comm_rounds) # make E3CS-0.8 print("E3CS-0.8") CEP_E3CS_08, s_r_E3CS_08 = make_CEP_SR_E3CS(T, 0.8, "num", comm_rounds) CEP_E3CS_08, s_r_E3CS_08 = _aggr_CEP_SR_E3CS(r, T, 0.8, "num", comm_rounds) # make E3CS-inc print("E3CS-inc") CEP_E3CS_inc, s_r_E3CS_inc = make_CEP_SR_E3CS(T, 1, "inc", comm_rounds) CEP_E3CS_inc, s_r_E3CS_inc = _aggr_CEP_SR_E3CS(r, T, 1, "inc", comm_rounds) fig, (ax1, ax2) = plt.subplots(2) ax1.plot(comm_rounds, s_r_E3CS_0, label='E3CS-0') ax1.plot(comm_rounds, s_r_E3CS_05, label='E3CS-0.5') ax1.plot(comm_rounds, s_r_E3CS_08, label='E3CS-0.8') ax1.plot(comm_rounds, s_r_E3CS_inc, label='E3CS-inc') ax1.plot(comm_rounds, s_r_FedCs, label='FedCS') ax1.plot(comm_rounds, s_r_random, label='Random') ax1.plot(comm_rounds, s_r_pow_d, label='pow-d') ax1.get_yaxis().get_major_formatter().set_useOffset(True) ax1.ticklabel_format(axis='y', style='sci', scilimits=(-1, -1)) 
ax1.yaxis.major.formatter._useMathText = True ax1.set_ylabel('Success Ratio') ax1.grid(alpha=0.5, linestyle='dashed', linewidth=0.5) ax2.plot(comm_rounds, CEP_E3CS_0, label='E3CS-0') ax2.plot(comm_rounds, CEP_E3CS_05, label='E3CS-0.5') ax2.plot(comm_rounds, CEP_E3CS_08, label='E3CS-0.8') ax2.plot(comm_rounds, CEP_E3CS_inc, label='E3CS-inc') ax2.plot(comm_rounds, CEP_FedCs, label='FedCS') ax2.plot(comm_rounds, CEP_random, label='Random') ax2.plot(comm_rounds, CEP_pow_d, label='pow-d') ax2.grid(alpha=0.5, linestyle='dashed', linewidth=0.5) ax2.get_yaxis().get_major_formatter().set_useOffset(True) ax2.set_xlabel('Communication Rounds') ax2.set_ylabel('CEP') ax2.legend(['E3CS-0', 'E3CS-0.5', 'E3CS-0.8', 'E3CS-inc', 'FedCS', 'Random', 'pow-d']) ax2.ticklabel_format(axis='y', style='sci', scilimits=(4, 4)) ax2.yaxis.major.formatter._useMathText = True plt.grid() plt.show() # Press the green button in the gutter to run the script. if __name__ == '__main__': main() # See PyCharm help at https://www.jetbrains.com/help/pycharm/
python
import Formatter import Config import Logger import Arguments from Utils import * args = Arguments.Parse() cfg = Config.Get() @Formatter.Register("csv") def csv_formatter(components): """ Formats components as a CSV """ columns = cfg['columns'] nl = cfg['outputLineSeparator'] result = denormalizeStr(columns[0]) # Add column headers for column in columns[1:]: result = result + "," + denormalizeStr(column) # Add components for component in components: result = result + nl + str(component[columns[0]]) for i in range(1, len(columns)): try: result = result + "," + str(component[columns[i]]) except: result = result + "," + str(cfg['emptyValue']) # Save the csv file save_path = args.output_file try: with open(save_path, "w") as file: file.write(result) Logger.Debug("Output saved to", save_path) return save_path except: Logger.Error("Could not save output to", save_path)
python
import collections
from typing import List


class Solution:
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        # Heap / sort alternatives (the first would also need `import heapq`):
        # freq = collections.Counter(words)
        # return [item[0] for item in heapq.nsmallest(k, freq.items(), key=lambda x: (x[1] * -1, x[0]))]
        # sorted_freq = [item[0] for item in sorted(freq.items(), key=lambda x: (x[1] * -1, x[0]))][:k]
        # return sorted_freq

        # Bucket sort by frequency: bucket i holds the words that occur exactly i times.
        buckets = [[] for i in range(len(words) + 1)]
        freq = collections.Counter(words)
        for item, f in freq.items():
            buckets[f].append(item)
        # Sort each bucket alphabetically so ties are broken lexicographically.
        for bucket in buckets:
            bucket.sort()
        # Walk buckets from highest frequency to lowest and flatten.
        flattened_list = [x for bucket in buckets[::-1] for x in bucket]
        return flattened_list[:k]
python
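A small check of the bucket-sort approach above, using the usual examples for the top-k-frequent-words problem (ties are broken alphabetically):

sol = Solution()
print(sol.topKFrequent(["i", "love", "leetcode", "i", "love", "coding"], 2))
# ['i', 'love']
print(sol.topKFrequent(["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"], 4))
# ['the', 'is', 'sunny', 'day']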
############################################################### # Autogenerated module. Please don't modify. # # Edit according file in protocol_generator/templates instead # ############################################################### from typing import Dict from ...structs.api.list_offsets_request import ListOffsetsRequestData, Partition, Topic from ._main_serializers import ( ArraySerializer, ClassSerializer, DummySerializer, Schema, int8Serializer, int32Serializer, int64Serializer, stringSerializer, ) partitionSchemas: Dict[int, Schema] = { 0: [ ("partition", int32Serializer), ("timestamp", int64Serializer), (None, int32Serializer), ("current_leader_epoch", DummySerializer(int32Serializer.default)), ], 1: [ ("partition", int32Serializer), ("timestamp", int64Serializer), ("current_leader_epoch", DummySerializer(int32Serializer.default)), ], 2: [ ("partition", int32Serializer), ("timestamp", int64Serializer), ("current_leader_epoch", DummySerializer(int32Serializer.default)), ], 3: [ ("partition", int32Serializer), ("timestamp", int64Serializer), ("current_leader_epoch", DummySerializer(int32Serializer.default)), ], 4: [("partition", int32Serializer), ("current_leader_epoch", int32Serializer), ("timestamp", int64Serializer)], 5: [("partition", int32Serializer), ("current_leader_epoch", int32Serializer), ("timestamp", int64Serializer)], } partitionSerializers: Dict[int, ClassSerializer[Partition]] = { version: ClassSerializer(Partition, schema) for version, schema in partitionSchemas.items() } partitionSerializers[-1] = partitionSerializers[5] topicSchemas: Dict[int, Schema] = { 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[0]))], 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[1]))], 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[2]))], 3: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[3]))], 4: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[4]))], 5: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[5]))], } topicSerializers: Dict[int, ClassSerializer[Topic]] = { version: ClassSerializer(Topic, schema) for version, schema in topicSchemas.items() } topicSerializers[-1] = topicSerializers[5] listOffsetsRequestDataSchemas: Dict[int, Schema] = { 0: [ ("replica_id", int32Serializer), ("topics", ArraySerializer(topicSerializers[0])), ("isolation_level", DummySerializer(int8Serializer.default)), ], 1: [ ("replica_id", int32Serializer), ("topics", ArraySerializer(topicSerializers[1])), ("isolation_level", DummySerializer(int8Serializer.default)), ], 2: [ ("replica_id", int32Serializer), ("isolation_level", int8Serializer), ("topics", ArraySerializer(topicSerializers[2])), ], 3: [ ("replica_id", int32Serializer), ("isolation_level", int8Serializer), ("topics", ArraySerializer(topicSerializers[3])), ], 4: [ ("replica_id", int32Serializer), ("isolation_level", int8Serializer), ("topics", ArraySerializer(topicSerializers[4])), ], 5: [ ("replica_id", int32Serializer), ("isolation_level", int8Serializer), ("topics", ArraySerializer(topicSerializers[5])), ], } listOffsetsRequestDataSerializers: Dict[int, ClassSerializer[ListOffsetsRequestData]] = { version: ClassSerializer(ListOffsetsRequestData, schema) for version, schema in listOffsetsRequestDataSchemas.items() } listOffsetsRequestDataSerializers[-1] = listOffsetsRequestDataSerializers[5]
python
from septentrion import core def test_initialize(db): settings_kwargs = { # database connection settings "host": db["host"], "port": db["port"], "username": db["user"], "dbname": db["dbname"], # migrate settings "target_version": "1.1", "migrations_root": "example_migrations", } # create table with no error core.initialize(**settings_kwargs) # action is idempotent, no error either core.initialize(**settings_kwargs) def test_initialize_customize_names(db): settings_kwargs = { # database connection settings "host": db["host"], "port": db["port"], "username": db["user"], "dbname": db["dbname"], # migrate settings "target_version": "1.1", "migrations_root": "example_migrations", # customize table "table": "my_own_table", # customize columns "name_column": "name_custo", "version_column": "version_custo", "applied_at_column": "applied_custo", } # create table with no error core.initialize(**settings_kwargs) # action is idempotent, no error either core.initialize(**settings_kwargs)
python
from .motion_dataloader import *
from .spatial_dataloader import *
python
import pytest from pathlib import Path from app.database import db from app.main import create_app TEST_DB = 'test.db' class TestMainCase: @pytest.fixture def client(self): BASE_DIR = Path(__file__).resolve().parent.parent self.app = create_app() self.app.app_context().push() self.app.config['TESTING'] = True self.app.config['DATABASE'] = BASE_DIR.joinpath(TEST_DB) self.app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{BASE_DIR.joinpath(TEST_DB)}' self.app.config['EMAIL'] = '[email protected]' self.app.config['USERNAME'] = 'admin' self.app.config['PASSWORD'] = 'password' db.create_all() with self.app.test_client(self) as client: yield client db.drop_all() def testIndex(self, client): response = client.get( '/', content_type='html/text' ) assert 200 == response.status_code assert b'There is no ignorance, there is knowledge.' == response.data def testDatabase(self): assert Path(TEST_DB).is_file()
python
import json import falcon import smtplib from smtplib import SMTPException from email.MIMEText import MIMEText from email.MIMEMultipart import MIMEMultipart corp_email_server = 'mail.example.com' corp_email_port = 587 corp_email_name = "My Company" corp_email_sentfrom = '[email protected]' corp_email_password = 'changeme' class EmailMessage(object): def __init__(self): pass def send_email(self, email_to, email_to_name, email_subject, email_message): smtp_connection = self.get_smtp_connection(corp_email_server, corp_email_port, corp_email_sentfrom, corp_email_password) if not smtp_connection: return False meme_msg = self.build_meme_body(corp_email_sentfrom, corp_email_name, email_to, email_to_name, email_subject, email_message) smtp_rtn = self.send_meme(smtp_connection, corp_email_sentfrom, email_to, meme_msg) if not smtp_rtn: return False return True def get_smtp_connection(self, email_server, email_port, email_user, email_password, starttls=True): try: smtp_connection = smtplib.SMTP(email_server, email_port) if starttls: smtp_connection.starttls() smtp_connection.login(email_user, email_password) print "Connected to mail server" return smtp_connection except SMTPException, e: print "Error: unable to send email" return False def build_meme_body(self, email_from, email_from_name, email_to, email_to_name, email_subject, email_message): msg = MIMEMultipart() msg['From'] = "%s <%s>" % (email_from_name, email_from) msg['To'] = "%s <%s>" % (email_to_name, email_to) msg['Subject'] = email_subject html_message = """<html> <head> <style> h1 { color: navy; margin-left: 20px; } </style> </head> <body> <h1>Hi!</h1> %s<br><br> </p> </body> </html>""" % email_message msg.attach(MIMEText(html_message, 'html')) return msg def send_meme(self, smtp_connection, email_sent_from, email_to, meme_msg): try: smtp_connection.sendmail(email_sent_from, email_to, meme_msg.as_string()) print 'Mail sent' return True except SMTPException, e: print 'Mail could not be sent %s' % e return False class NotifyResource: def on_post(self, req, resp): try: msg_body = json.loads(req.stream.read()) except ValueError: resp.body = '{"msg": "Invalid JSON"}' resp.status = falcon.HTTP_400 return email_message = EmailMessage() email_rtn = email_message.send_email(msg_body['email'], msg_body['name'], msg_body['subject'], msg_body['msg']) if not email_rtn: resp.body = '{"msg": "Sending Mail Failed"}' resp.status = falcon.HTTP_500 return app = falcon.API() notify = NotifyResource() app.add_route('/notify', notify)
python
import pyctrl.bbb as pyctrl class Controller(pyctrl.Controller): def __init__(self, *vargs, **kwargs): # Initialize controller super().__init__(*vargs, **kwargs) def __reset(self): # call super super().__reset() # add source: encoder1 self.add_device('encoder1', 'pyctrl.bbb.encoder', 'Encoder', type = 'source', outputs = ['encoder1'], encoder = 1, ratio = - 60 * 35.557) # add source: encoder2 self.add_device('encoder2', 'pyctrl.bbb.encoder', 'Encoder', type = 'source', outputs = ['encoder2'], encoder = 2, ratio = 60 * 35.557) # add source: imu # self.add_device('mpu6050', # 'pyctrl.bbb.mpu6050', 'Inclinometer', # type = 'source', # enable = True, # outputs = ['imu']) # add source: mic1 self.add_device('mic1', 'pyctrl.bbb.analog', 'Analog', type = 'source', pin = 'AIN0', outputs = ['mic1']) # add source: mic2 self.add_device('mic2', 'pyctrl.bbb.analog', 'Analog', type = 'source', pin = 'AIN1', outputs = ['mic2']) # add source: prox1 self.add_device('prox1', 'pyctrl.bbb.analog', 'Analog', type = 'source', pin = 'AIN2', outputs = ['prox1']) # add source: prox2 self.add_device('prox2', 'pyctrl.bbb.analog', 'Analog', type = 'source', pin = 'AIN3', outputs = ['prox2']) # add sink: motor1 self.add_device('motor1', 'pyctrl.bbb.motor', 'Motor', type = 'sink', enable = True, inputs = ['motor1'], pwm_pin = 'P9_14', dir_A = 'P9_15', dir_B = 'P9_23') # add sink: motor2 self.add_device('motor2', 'pyctrl.bbb.motor', 'Motor', type = 'sink', enable = True, inputs = ['motor2'], pwm_pin='P9_16', dir_B='P9_12', dir_A='P9_27') if __name__ == "__main__": import time, math import pyctrl.block as block from pyctrl.block.linear import Feedback, Gain # initialize robut robut = Controller() print("> WELCOME TO ROBUT") print(robut.info('all')) # install printer robut.add_sink('printer', block.Printer(endln = '\r'), ['clock', 'motor1', 'encoder1', 'motor2', 'encoder2', #'imu', 'mic1','mic2', 'prox1','prox2']) # install controller robut.add_signal('reference1') robut.add_filter('controller', Feedback(block = Gain(gain = 1)), ['prox2', 'reference1'], ['motor1']) with robut: for k in range(100): mic1 = robut.get_signal('mic1') print('> mic1 = {}'.format(mic1)) time.sleep(1) print("> BYE")
python
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2014, Nicolas P. Rougier. All rights reserved. # Distributed under the terms of the new BSD License. # ----------------------------------------------------------------------------- import unittest import numpy as np from vispy.gloo import gl from vispy.gloo.variable import Uniform, Variable, Attribute # ----------------------------------------------------------------------------- class VariableTest(unittest.TestCase): def test_init(self): variable = Variable(None, "A", gl.GL_FLOAT) assert variable._handle == -1 assert variable.name == "A" assert variable.data is None assert variable.gtype == gl.GL_FLOAT assert variable.enabled is True def test_init_wrong_type(self): # with self.assertRaises(TypeError): # v = Variable(None, "A", gl.GL_INT_VEC2) self.assertRaises(TypeError, Variable, None, "A", gl.GL_INT_VEC2) # with self.assertRaises(TypeError): # v = Variable(None, "A", gl.GL_INT_VEC3) self.assertRaises(TypeError, Variable, None, "A", gl.GL_INT_VEC3) # with self.assertRaises(TypeError): # v = Variable(None, "A", gl.GL_INT_VEC4) self.assertRaises(TypeError, Variable, None, "A", gl.GL_INT_VEC4) # with self.assertRaises(TypeError): # v = Variable(None, "A", gl.GL_BOOL_VEC2) self.assertRaises(TypeError, Variable, None, "A", gl.GL_BOOL_VEC2) # with self.assertRaises(TypeError): # v = Variable(None, "A", gl.GL_BOOL_VEC3) self.assertRaises(TypeError, Variable, None, "A", gl.GL_BOOL_VEC3) # with self.assertRaises(TypeError): # v = Variable(None, "A", gl.GL_BOOL_VEC4) self.assertRaises(TypeError, Variable, None, "A", gl.GL_BOOL_VEC4) # ----------------------------------------------------------------------------- class UniformTest(unittest.TestCase): def test_init(self): uniform = Uniform(None, "A", gl.GL_FLOAT) assert uniform._unit == -1 def test_float(self): uniform = Uniform(None, "A", gl.GL_FLOAT) assert uniform.data.dtype == np.float32 assert uniform.data.size == 1 def test_vec2(self): uniform = Uniform(None, "A", gl.GL_FLOAT_VEC2) assert uniform.data.dtype == np.float32 assert uniform.data.size == 2 def test_vec3(self): uniform = Uniform(None, "A", gl.GL_FLOAT_VEC2) assert uniform.data.dtype == np.float32 assert uniform.data.size == 2 def test_vec4(self): uniform = Uniform(None, "A", gl.GL_FLOAT_VEC2) assert uniform.data.dtype == np.float32 assert uniform.data.size == 2 def test_int(self): uniform = Uniform(None, "A", gl.GL_INT) assert uniform.data.dtype == np.int32 assert uniform.data.size == 1 def test_mat2(self): uniform = Uniform(None, "A", gl.GL_FLOAT_MAT2) assert uniform.data.dtype == np.float32 assert uniform.data.size == 4 def test_mat3(self): uniform = Uniform(None, "A", gl.GL_FLOAT_MAT3) assert uniform.data.dtype == np.float32 assert uniform.data.size == 9 def test_mat4(self): uniform = Uniform(None, "A", gl.GL_FLOAT_MAT4) assert uniform.data.dtype == np.float32 assert uniform.data.size == 16 def test_set(self): uniform = Uniform(None, "A", gl.GL_FLOAT_VEC4) uniform.set_data(1) assert (uniform.data == 1).all() uniform.set_data([1, 2, 3, 4]) assert (uniform.data == [1, 2, 3, 4]).all() def test_set_exception(self): uniform = Uniform(None, "A", gl.GL_FLOAT_VEC4) # with self.assertRaises(ValueError): # uniform.set_data([1, 2]) self.assertRaises(ValueError, uniform.set_data, [1, 2]) # with self.assertRaises(ValueError): # uniform.set_data([1, 2, 3, 4, 5]) self.assertRaises(ValueError, uniform.set_data, [1, 2, 3, 4, 5]) # 
----------------------------------------------------------------------------- class AttributeTest(unittest.TestCase): def test_init(self): attribute = Attribute(None, "A", gl.GL_FLOAT) assert attribute.size == 0 def test_set_generic(self): attribute = Attribute(None, "A", gl.GL_FLOAT_VEC4) attribute.set_data(1) assert type(attribute.data) is np.ndarray # @unittest.expectedFailure # def test_set_generic_2(self): # attribute = Attribute(None, "A", gl.GL_FLOAT_VEC4) # attribute.set_data([1, 2, 3, 4]) # assert type(attribute.data) is np.ndarray if __name__ == "__main__": unittest.main()
python
'''
A recursive approach to implementing the Fibonacci series.

This is a BAD approach: the naive recursion recomputes the same
subproblems over and over, so the running time grows exponentially
and takes a ridiculously long time for even moderate n.
'''


def fib_recurr(n):
    if n <= 1:
        return n
    else:
        return fib_recurr(n - 1) + fib_recurr(n - 2)
python
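Since the docstring above only says the naive recursion is slow, a sketch of the standard fix may be useful: cache subproblem results so each value is computed once, dropping the cost from exponential to linear. This uses functools.lru_cache from the standard library; fib_memo is a name introduced here, not part of the original snippet.

from functools import lru_cache


@lru_cache(maxsize=None)
def fib_memo(n):
    # Same recurrence as fib_recurr, but each n is computed only once.
    if n <= 1:
        return n
    return fib_memo(n - 1) + fib_memo(n - 2)


print(fib_memo(50))  # 12586269025, returned effectively instantly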
def create_mapping_with_unk(dico): sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0])) id_to_word = {index + 1: w[0] for (index, w) in enumerate(sorted_items)} word_to_id = {v: k for k, v in id_to_word.items()} id_to_word[0] = "<unk>" word_to_id["<unk>"] = 0 return word_to_id, id_to_word def create_mapping(dico): """ Create a mapping (item to ID / ID to item) from a dictionary. Items are ordered by decreasing frequency. """ sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0])) id_to_item = {i: v[0] for i, v in enumerate(sorted_items)} item_to_id = {v: k for k, v in id_to_item.items()} return item_to_id, id_to_item def lookup_word(word, word_to_lemmas, pretrained): if word in pretrained: return word elif word.lower() in pretrained: return word.lower() elif word in word_to_lemmas: for word in word_to_lemmas[word]: if word in pretrained: return word elif word.lower() in pretrained: return word.lower() return "" def augment_with_pretrained(dictionary, word_to_id, id_to_word, pretrained, word_to_lemmas): """ Augment the dictionary with words that have a pretrained embedding. If `words` is None, we add every word that has a pretrained embedding to the dictionary, otherwise, we only add the words that are given by `words` (typically the words in the development and test sets.) """ # We either add every word in the pretrained file, # or only words given in the `words` list to which # we can assign a pretrained embedding for word in word_to_lemmas: if word not in dictionary: hit_word = lookup_word(word, word_to_lemmas, pretrained) if hit_word != "": dictionary[word] = 0 wid = len(word_to_id) word_to_id[word] = wid id_to_word[wid] = word
python
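To make the ordering convention in create_mapping / create_mapping_with_unk above concrete: items are sorted by decreasing frequency with ties broken alphabetically, and the with_unk variant reserves ID 0 for the "<unk>" token. A small illustrative run with a made-up frequency dictionary:

freq = {"the": 5, "cat": 2, "a": 5}

word_to_id, id_to_word = create_mapping(freq)
print(id_to_word)   # {0: 'a', 1: 'the', 2: 'cat'}

word_to_id, id_to_word = create_mapping_with_unk(freq)
print(id_to_word)   # {1: 'a', 2: 'the', 3: 'cat', 0: '<unk>'}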
""" Scenario: 1 speaker, 2 listeners (one of which is an adversary). Good agents rewarded for proximity to goal, and distance from adversary to goal. Adversary is rewarded for its distance to the goal. """ import numpy as np from multiagent.core import World, Agent, Landmark from multiagent.scenario import BaseScenario import random class CryptoAgent(Agent): def __init__(self): super(CryptoAgent, self).__init__() self.key = None class Scenario(BaseScenario): def make_world(self): world = World() # set any world properties first num_agents = 3 num_adversaries = 1 num_landmarks = 2 world.dim_c = 4 # add agents world.agents = [CryptoAgent() for i in range(num_agents)] for i, agent in enumerate(world.agents): agent.name = 'agent %d' % i agent.collide = False agent.adversary = True if i < num_adversaries else False agent.speaker = True if i == 2 else False agent.movable = False # add landmarks world.landmarks = [Landmark() for i in range(num_landmarks)] for i, landmark in enumerate(world.landmarks): landmark.name = 'landmark %d' % i landmark.collide = False landmark.movable = False # make initial conditions self.reset_world(world) return world def reset_world(self, world): # random properties for agents for i, agent in enumerate(world.agents): agent.color = np.array([0.25, 0.25, 0.25]) if agent.adversary: agent.color = np.array([0.75, 0.25, 0.25]) agent.key = None # random properties for landmarks color_list = [np.zeros(world.dim_c) for i in world.landmarks] for i, color in enumerate(color_list): color[i] += 1 for color, landmark in zip(color_list, world.landmarks): landmark.color = color # set goal landmark goal = np.random.choice(world.landmarks) world.agents[1].color = goal.color world.agents[2].key = np.random.choice(world.landmarks).color for agent in world.agents: agent.goal_a = goal # set random initial states for agent in world.agents: agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p) agent.state.p_vel = np.zeros(world.dim_p) agent.state.c = np.zeros(world.dim_c) for i, landmark in enumerate(world.landmarks): landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p) landmark.state.p_vel = np.zeros(world.dim_p) def benchmark_data(self, agent, world): # returns data for benchmarking purposes return (agent.state.c, agent.goal_a.color) # return all agents that are not adversaries def good_listeners(self, world): return [agent for agent in world.agents if not agent.adversary and not agent.speaker] # return all agents that are not adversaries def good_agents(self, world): return [agent for agent in world.agents if not agent.adversary] # return all adversarial agents def adversaries(self, world): return [agent for agent in world.agents if agent.adversary] def reward(self, agent, world): return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world) def agent_reward(self, agent, world): # Agents rewarded if Bob can reconstruct message, but adversary (Eve) cannot good_listeners = self.good_listeners(world) adversaries = self.adversaries(world) good_rew = 0 adv_rew = 0 for a in good_listeners: if (a.state.c == np.zeros(world.dim_c)).all(): continue else: good_rew -= np.sum(np.square(a.state.c - agent.goal_a.color)) for a in adversaries: if (a.state.c == np.zeros(world.dim_c)).all(): continue else: adv_l1 = np.sum(np.square(a.state.c - agent.goal_a.color)) adv_rew += adv_l1 return adv_rew + good_rew def adversary_reward(self, agent, world): # Adversary (Eve) is rewarded if it can reconstruct original goal rew = 0 if not (agent.state.c == 
np.zeros(world.dim_c)).all(): rew -= np.sum(np.square(agent.state.c - agent.goal_a.color)) return rew def observation(self, agent, world): # goal color goal_color = np.zeros(world.dim_color) if agent.goal_a is not None: goal_color = agent.goal_a.color #print('goal color in obs is {}'.format(goal_color)) # get positions of all entities in this agent's reference frame entity_pos = [] for entity in world.landmarks: entity_pos.append(entity.state.p_pos - agent.state.p_pos) # communication of all other agents comm = [] for other in world.agents: if other is agent or (other.state.c is None) or not other.speaker: continue comm.append(other.state.c) confer = np.array([0]) if world.agents[2].key is None: confer = np.array([1]) key = np.zeros(world.dim_c) goal_color = np.zeros(world.dim_c) else: key = world.agents[2].key prnt = False # speaker if agent.speaker: if prnt: print('speaker') print(agent.state.c) print(np.concatenate([goal_color] + [key] + [confer] + [np.random.randn(1)])) return np.concatenate([goal_color] + [key]) # listener if not agent.speaker and not agent.adversary: if prnt: print('listener') print(agent.state.c) print(np.concatenate([key] + comm + [confer])) return np.concatenate([key] + comm) if not agent.speaker and agent.adversary: if prnt: print('adversary') print(agent.state.c) print(np.concatenate(comm + [confer])) return np.concatenate(comm)
python
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 12:11:02 2018

@author: User
"""


def forever15(n):
    # (((n * 3) + 45) * 2) / 6 simplifies to (6n + 90) / 6 = n + 15,
    # so subtracting n always leaves 15, whatever n was.
    final = (((n * 3) + 45) * 2) // 6 - n
    print(final)


forever15(1550)


def findmin(a, b, c):
    # Incomplete in the original: the pairwise differences are computed
    # but nothing is returned yet.
    first = a - b
    second = b - c
    third = c - a
python
from typing import Generator, Generic, Optional, TypeVar _T = TypeVar("_T") class Ring(Generic[_T]): def __init__(self, value: _T, next_: "Ring[_T]"): self.value = value self.next = next_ def copy(self) -> "Ring[_T]": result = None run = self first = True while first or run != self: first = False result = Ring.create(run.value, result) run = run.next if result is None: raise Exception # Can actually never happen return result.next def __str__(self) -> str: result = str(self.value) run = self.next while run != self: result += str(run.value) run = run.next return result def __repr__(self) -> str: return f"Ring({self.value})" def __contains__(self, item: _T) -> bool: return self.find(item) is not None def __iter__(self) -> Generator["Ring[_T]", None, None]: run = self while run.next != self: yield run run = run.next yield run def __next__(self) -> "Ring[_T]": return self.next def __len__(self) -> int: run = self len = 0 while run.next != self: len += 1 run = run.next return len + 1 def __getitem__(self, item: _T) -> "Ring[_T]": result = self.find(item) if result is None: raise KeyError(f"{item} is not in this ring") return result def append(self, value: _T) -> "Ring[_T]": ring = Ring[_T](value, self.next) self.next = ring return ring def find(self, value: _T) -> Optional["Ring[_T]"]: run = self while run.value != value and run.next != self: run = run.next if run.value == value: return run else: return None def prev(self) -> "Ring[_T]": run = self while run.next != self: run = run.next return run @staticmethod def create(value: _T, prev: Optional["Ring[_T]"]) -> "Ring[_T]": if prev is not None: return prev.append(value) # Mini Ring, points to itself ring: Ring[_T] = Ring[_T](value, None) # type: ignore ring.next = ring return ring
python
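The Ring class above is a circular singly linked list. A short usage sketch based only on its own API (Ring.create to start or extend a ring, append to insert after a node, and find/len/str/prev as defined above):

r1 = Ring.create(1, None)   # a one-element ring pointing at itself
r2 = Ring.create(2, r1)     # append 2 after 1
r3 = Ring.create(3, r2)     # append 3 after 2

print(str(r1))          # "123"
print(len(r1))          # 3
print(2 in r1)          # True
print(r1[3].value)      # 3
print(r1.prev().value)  # 3 -- the node whose .next is r1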
# Generated by Django 3.2.3 on 2021-05-31 04:23

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0005_alter_studentprofile_student'),
    ]

    operations = [
        migrations.AddField(
            model_name='studentprofile',
            name='name',
            field=models.CharField(default='', max_length=200),
        ),
    ]
python
from typing import List


class Solution:
    def maxProduct(self, nums: List[int]) -> int:
        ans = nums[0]
        # Track both the largest and the smallest product ending at the previous
        # index: multiplying by a negative number can turn the smallest into the largest.
        prevMin = nums[0]
        prevMax = nums[0]
        for i in range(1, len(nums)):
            mini = prevMin * nums[i]
            maxi = prevMax * nums[i]
            prevMin = min(nums[i], mini, maxi)
            prevMax = max(nums[i], mini, maxi)
            ans = max(ans, prevMax)
        return ans
python
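Two quick checks of the min/max tracking above, including a case where two negatives combine into the best product:

sol = Solution()
print(sol.maxProduct([2, 3, -2, 4]))  # 6  (subarray [2, 3])
print(sol.maxProduct([-2, 3, -4]))    # 24 (the whole array: -2 * 3 * -4)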
from __future__ import print_function import os.path import time import sys import six import platform from openpype.api import Logger from openpype.api import get_system_settings from .abstract_provider import AbstractProvider from ..utils import time_function, ResumableError log = Logger().get_logger("SyncServer") try: from googleapiclient.discovery import build import google.oauth2.service_account as service_account from googleapiclient import errors from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload except (ImportError, SyntaxError): if six.PY3: six.reraise(*sys.exc_info()) # handle imports from Python 2 hosts - in those only basic methods are used log.warning("Import failed, imported from Python 2, operations will fail.") SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive.readonly'] # for write|delete class GDriveHandler(AbstractProvider): """ Implementation of Google Drive API. As GD API doesn't have real folder structure, 'tree' in memory structure is build in constructor to map folder paths to folder ids, which are used in API. Building of this tree might be expensive and slow and should be run only when necessary. Currently is set to lazy creation, created only after first call when necessary. Configuration for provider is in 'settings/defaults/project_settings/global.json' Settings could be overwritten per project. Example of config: "gdrive": { - site name "provider": "gdrive", - type of provider, label must be registered "credentials_url": "/my_secret_folder/credentials.json", "root": { - could be "root": "/My Drive" for single root "root_one": "/My Drive", "root_two": "/My Drive/different_folder" } } """ CODE = 'gdrive' LABEL = 'Google Drive' FOLDER_STR = 'application/vnd.google-apps.folder' MY_DRIVE_STR = 'My Drive' # name of root folder of regular Google drive CHUNK_SIZE = 2097152 # must be divisible by 256! used for upload chunks def __init__(self, project_name, site_name, tree=None, presets=None): self.active = False self.project_name = project_name self.site_name = site_name self.service = None self.root = None self.presets = presets if not self.presets: log.info("Sync Server: There are no presets for {}.". format(site_name)) return provider_presets = self.presets.get(self.CODE) if not provider_presets: msg = "Sync Server: No provider presets for {}".format(self.CODE) log.info(msg) return cred_path = self.presets[self.CODE].get("credentials_url", {}).\ get(platform.system().lower()) or '' if not os.path.exists(cred_path): msg = "Sync Server: No credentials for gdrive provider " + \ "for '{}' on path '{}'!".format(site_name, cred_path) log.info(msg) return self.service = self._get_gd_service(cred_path) self._tree = tree self.active = True def is_active(self): """ Returns True if provider is activated, eg. has working credentials. 
Returns: (boolean) """ return self.service is not None @classmethod def get_system_settings_schema(cls): """ Returns dict for editable properties on system settings level Returns: (list) of dict """ return [] @classmethod def get_project_settings_schema(cls): """ Returns dict for editable properties on project settings level Returns: (list) of dict """ # {platform} tells that value is multiplatform and only specific OS # should be returned editable = [ # credentials could be overriden on Project or User level { 'key': "credentials_url", 'label': "Credentials url", 'type': 'text' }, # roots could be overriden only on Project leve, User cannot { 'key': "roots", 'label': "Roots", 'type': 'dict' } ] return editable @classmethod def get_local_settings_schema(cls): """ Returns dict for editable properties on local settings level Returns: (dict) """ editable = [ # credentials could be override on Project or User level { 'key': "credentials_url", 'label': "Credentials url", 'type': 'text', 'namespace': '{project_settings}/global/sync_server/sites/{site}/credentials_url/{platform}' # noqa: E501 } ] return editable def get_roots_config(self, anatomy=None): """ Returns root values for path resolving Use only Settings as GDrive cannot be modified by Local Settings Returns: (dict) - {"root": {"root": "/My Drive"}} OR {"root": {"root_ONE": "value", "root_TWO":"value}} Format is importing for usage of python's format ** approach """ # GDrive roots cannot be locally overridden return self.presets['root'] def get_tree(self): """ Building of the folder tree could be potentially expensive, constructor provides argument that could inject previously created tree. Tree structure must be handled in thread safe fashion! Returns: (dictionary) - url to id mapping """ if not self._tree: self._tree = self._build_tree(self.list_folders()) return self._tree def create_folder(self, path): """ Create all nonexistent folders and subfolders in 'path'. Updates self._tree structure with new paths Args: path (string): absolute path, starts with GDrive root, without filename Returns: (string) folder id of lowest subfolder from 'path' """ folder_id = self.folder_path_exists(path) if folder_id: return folder_id parts = path.split('/') folders_to_create = [] while parts: folders_to_create.append(parts.pop()) path = '/'.join(parts) path = path.strip() folder_id = self.folder_path_exists(path) # lowest common path if folder_id: while folders_to_create: new_folder_name = folders_to_create.pop() folder_metadata = { 'name': new_folder_name, 'mimeType': 'application/vnd.google-apps.folder', 'parents': [folder_id] } folder = self.service.files().create( body=folder_metadata, supportsAllDrives=True, fields='id').execute() folder_id = folder["id"] new_path_key = path + '/' + new_folder_name self.get_tree()[new_path_key] = {"id": folder_id} path = new_path_key return folder_id def upload_file(self, source_path, path, server, collection, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. It creates all folders on the path if are not existing. 
Args: source_path (string): path (string): absolute path with or without name of the file overwrite (boolean): replace existing file arguments for saving progress: server (SyncServer): server instance to call update_db on collection (str): name of collection file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name Returns: (string) file_id of created/modified file , throws FileExistsError, FileNotFoundError exceptions """ if not os.path.isfile(source_path): raise FileNotFoundError("Source file {} doesn't exist." .format(source_path)) root, ext = os.path.splitext(path) if ext: # full path target_name = os.path.basename(path) path = os.path.dirname(path) else: target_name = os.path.basename(source_path) target_file = self.file_path_exists(path + "/" + target_name) if target_file and not overwrite: raise FileExistsError("File already exists, " "use 'overwrite' argument") folder_id = self.folder_path_exists(path) if not folder_id: raise NotADirectoryError("Folder {} doesn't exists".format(path)) file_metadata = { 'name': target_name } media = MediaFileUpload(source_path, mimetype='application/octet-stream', chunksize=self.CHUNK_SIZE, resumable=True) try: if not target_file: # update doesnt like parent file_metadata['parents'] = [folder_id] request = self.service.files().create(body=file_metadata, supportsAllDrives=True, media_body=media, fields='id') else: request = self.service.files().update(fileId=target_file["id"], body=file_metadata, supportsAllDrives=True, media_body=media, fields='id') media.stream() log.debug("Start Upload! {}".format(source_path)) last_tick = status = response = None status_val = 0 while response is None: if server.is_representation_paused(representation['_id'], check_parents=True, project_name=collection): raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) if not last_tick or \ time.time() - last_tick >= server.LOG_PROGRESS_SEC: last_tick = time.time() log.debug("Uploaded %d%%." % int(status_val * 100)) server.update_db(collection=collection, new_file_id=None, file=file, representation=representation, site=site, progress=status_val ) status, response = request.next_chunk() except errors.HttpError as ex: if ex.resp['status'] == '404': return False if ex.resp['status'] == '403': # real permission issue if 'has not granted' in ex._get_reason().strip(): raise PermissionError(ex._get_reason().strip()) log.warning("Forbidden received, hit quota. " "Injecting 60s delay.") time.sleep(60) return False raise return response['id'] def download_file(self, source_path, local_path, server, collection, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'local_path'. It creates all folders on the local_path if are not existing. 
By default existing file on 'local_path' will trigger an exception Args: source_path (string): absolute path on provider local_path (string): absolute path with or without name of the file overwrite (boolean): replace existing file arguments for saving progress: server (SyncServer): server instance to call update_db on collection (str): name of collection file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name Returns: (string) file_id of created/modified file , throws FileExistsError, FileNotFoundError exceptions """ remote_file = self.file_path_exists(source_path) if not remote_file: raise FileNotFoundError("Source file {} doesn't exist." .format(source_path)) root, ext = os.path.splitext(local_path) if ext: # full path with file name target_name = os.path.basename(local_path) local_path = os.path.dirname(local_path) else: # just folder, get file name from source target_name = os.path.basename(source_path) local_file = os.path.isfile(local_path + "/" + target_name) if local_file and not overwrite: raise FileExistsError("File already exists, " "use 'overwrite' argument") request = self.service.files().get_media(fileId=remote_file["id"], supportsAllDrives=True) with open(local_path + "/" + target_name, "wb") as fh: downloader = MediaIoBaseDownload(fh, request) last_tick = status = response = None status_val = 0 while response is None: if server.is_representation_paused(representation['_id'], check_parents=True, project_name=collection): raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) if not last_tick or \ time.time() - last_tick >= server.LOG_PROGRESS_SEC: last_tick = time.time() log.debug("Downloaded %d%%." % int(status_val * 100)) server.update_db(collection=collection, new_file_id=None, file=file, representation=representation, site=site, progress=status_val ) status, response = downloader.next_chunk() return target_name def delete_folder(self, path, force=False): """ Deletes folder on GDrive. Checks if folder contains any files or subfolders. In that case raises error, could be overriden by 'force' argument. In that case deletes folder on 'path' and all its children. Args: path (string): absolute path on GDrive force (boolean): delete even if children in folder Returns: None """ folder_id = self.folder_path_exists(path) if not folder_id: raise ValueError("Not valid folder path {}".format(path)) fields = 'nextPageToken, files(id, name, parents)' q = self._handle_q("'{}' in parents ".format(folder_id)) response = self.service.files().list( q=q, corpora="allDrives", includeItemsFromAllDrives=True, supportsAllDrives=True, pageSize='1', fields=fields).execute() children = response.get('files', []) if children and not force: raise ValueError("Folder {} is not empty, use 'force'". format(path)) self.service.files().delete(fileId=folder_id, supportsAllDrives=True).execute() def delete_file(self, path): """ Deletes file from 'path'. Expects path to specific file. Args: path: absolute path to particular file Returns: None """ file = self.file_path_exists(path) if not file: raise ValueError("File {} doesn't exist") self.service.files().delete(fileId=file["id"], supportsAllDrives=True).execute() def list_folder(self, folder_path): """ List all files and subfolders of particular path non-recursively. Args: folder_path (string): absolut path on provider Returns: (list) """ pass @time_function def list_folders(self): """ Lists all folders in GDrive. 
Used to build in-memory structure of path to folder ids model. Returns: (list) of dictionaries('id', 'name', [parents]) """ folders = [] page_token = None fields = 'nextPageToken, files(id, name, parents)' while True: q = self._handle_q("mimeType='application/vnd.google-apps.folder'") response = self.service.files().list( q=q, pageSize=1000, corpora="allDrives", includeItemsFromAllDrives=True, supportsAllDrives=True, fields=fields, pageToken=page_token).execute() folders.extend(response.get('files', [])) page_token = response.get('nextPageToken', None) if page_token is None: break return folders def list_files(self): """ Lists all files in GDrive Runs loop through possibly multiple pages. Result could be large, if it would be a problem, change it to generator Returns: (list) of dictionaries('id', 'name', [parents]) """ files = [] page_token = None fields = 'nextPageToken, files(id, name, parents)' while True: q = self._handle_q("") response = self.service.files().list( q=q, corpora="allDrives", includeItemsFromAllDrives=True, supportsAllDrives=True, fields=fields, pageToken=page_token).execute() files.extend(response.get('files', [])) page_token = response.get('nextPageToken', None) if page_token is None: break return files def folder_path_exists(self, file_path): """ Checks if path from 'file_path' exists. If so, return its folder id. Args: file_path (string): gdrive path with / as a separator Returns: (string) folder id or False """ if not file_path: return False root, ext = os.path.splitext(file_path) if not ext: file_path += '/' dir_path = os.path.dirname(file_path) path = self.get_tree().get(dir_path, None) if path: return path["id"] return False def file_path_exists(self, file_path): """ Checks if 'file_path' exists on GDrive Args: file_path (string): separated by '/', from root, with file name Returns: (dictionary|boolean) file metadata | False if not found """ folder_id = self.folder_path_exists(file_path) if folder_id: return self.file_exists(os.path.basename(file_path), folder_id) return False def file_exists(self, file_name, folder_id): """ Checks if 'file_name' exists in 'folder_id' Args: file_name (string): folder_id (int): google drive folder id Returns: (dictionary|boolean) file metadata, False if not found """ q = self._handle_q("name = '{}' and '{}' in parents" .format(file_name, folder_id)) response = self.service.files().list( q=q, corpora="allDrives", includeItemsFromAllDrives=True, supportsAllDrives=True, fields='nextPageToken, files(id, name, parents, ' 'mimeType, modifiedTime,size,md5Checksum)').execute() if len(response.get('files')) > 1: raise ValueError("Too many files returned for {} in {}" .format(file_name, folder_id)) file = response.get('files', []) if not file: return False return file[0] @classmethod def get_presets(cls): """ Get presets for this provider Returns: (dictionary) of configured sites """ provider_presets = None try: provider_presets = ( get_system_settings()["modules"] ["sync_server"] ["providers"] ["gdrive"] ) except KeyError: log.info(("Sync Server: There are no presets for Gdrive " + "provider."). format(str(provider_presets))) return return provider_presets def _get_gd_service(self, credentials_path): """ Authorize client with 'credentials.json', uses service account. Service account needs to have target folder shared with. Produces service that communicates with GDrive API. 
Returns: None """ service = None try: creds = service_account.Credentials.from_service_account_file( credentials_path, scopes=SCOPES) service = build('drive', 'v3', credentials=creds, cache_discovery=False) except Exception: log.error("Connection failed, " + "check '{}' credentials file".format(credentials_path), exc_info=True) return service def _prepare_root_info(self): """ Prepare info about roots and theirs folder ids from 'presets'. Configuration might be for single or multiroot projects. Regular My Drive and Shared drives are implemented, their root folder ids need to be queried in slightly different way. Returns: (dicts) of dicts where root folders are keys throws ResumableError in case of errors.HttpError """ roots = {} config_roots = self.get_roots_config() try: for path in config_roots.values(): if self.MY_DRIVE_STR in path: roots[self.MY_DRIVE_STR] = self.service.files()\ .get(fileId='root')\ .execute() else: shared_drives = [] page_token = None while True: response = self.service.drives().list( pageSize=100, pageToken=page_token).execute() shared_drives.extend(response.get('drives', [])) page_token = response.get('nextPageToken', None) if page_token is None: break folders = path.split('/') if len(folders) < 2: raise ValueError("Wrong root folder definition {}". format(path)) for shared_drive in shared_drives: if folders[1] in shared_drive["name"]: roots[shared_drive["name"]] = { "name": shared_drive["name"], "id": shared_drive["id"]} if self.MY_DRIVE_STR not in roots: # add My Drive always roots[self.MY_DRIVE_STR] = self.service.files() \ .get(fileId='root').execute() except errors.HttpError: log.warning("HttpError in sync loop, " "trying next loop", exc_info=True) raise ResumableError return roots @time_function def _build_tree(self, folders): """ Create in-memory structure resolving paths to folder id as recursive querying might be slower. Initialized in the time of class initialization. 
Maybe should be persisted Tree is structure of path to id: '/ROOT': {'id': '1234567'} '/ROOT/PROJECT_FOLDER': {'id':'222222'} '/ROOT/PROJECT_FOLDER/Assets': {'id': '3434545'} Args: folders (list): list of dictionaries with folder metadata Returns: (dictionary) path as a key, folder id as a value """ log.debug("build_tree len {}".format(len(folders))) if not self.root: # build only when necessary, could be expensive self.root = self._prepare_root_info() root_ids = [] default_root_id = None tree = {} ending_by = {} for root_name, root in self.root.items(): # might be multiple roots if root["id"] not in root_ids: tree["/" + root_name] = {"id": root["id"]} ending_by[root["id"]] = "/" + root_name root_ids.append(root["id"]) if self.MY_DRIVE_STR == root_name: default_root_id = root["id"] no_parents_yet = {} while folders: folder = folders.pop(0) parents = folder.get("parents", []) # weird cases, shared folders, etc, parent under root if not parents: parent = default_root_id else: parent = parents[0] if folder["id"] in root_ids: # do not process root continue if parent in ending_by: path_key = ending_by[parent] + "/" + folder["name"] ending_by[folder["id"]] = path_key tree[path_key] = {"id": folder["id"]} else: no_parents_yet.setdefault(parent, []).append((folder["id"], folder["name"])) loop_cnt = 0 # break if looped more then X times - safety against infinite loop while no_parents_yet and loop_cnt < 20: keys = list(no_parents_yet.keys()) for parent in keys: if parent in ending_by.keys(): subfolders = no_parents_yet.pop(parent) for folder_id, folder_name in subfolders: path_key = ending_by[parent] + "/" + folder_name ending_by[folder_id] = path_key tree[path_key] = {"id": folder_id} loop_cnt += 1 if len(no_parents_yet) > 0: log.debug("Some folders path are not resolved {}". format(no_parents_yet)) log.debug("Remove deleted folders from trash.") return tree def _get_folder_metadata(self, path): """ Get info about folder with 'path' Args: path (string): Returns: (dictionary) with metadata or raises ValueError """ try: return self.get_tree()[path] except Exception: raise ValueError("Uknown folder id {}".format(id)) def _handle_q(self, q, trashed=False): """ API list call contain trashed and hidden files/folder by default. Usually we dont want those, must be included in query explicitly. Args: q (string): query portion trashed (boolean): False|True Returns: (string) - modified query """ parts = [q] if not trashed: parts.append(" trashed = false ") return " and ".join(parts) if __name__ == '__main__': gd = GDriveHandler('gdrive') print(gd.root) print(gd.get_tree())
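
# A rough usage sketch for the transfer methods above (all paths, the collection
# name and the site name are placeholders; 'server', 'file' and 'repre' stand for
# the sync-server instance and the documents described in the docstrings, which
# would normally be supplied by the calling sync loop):
#
#     gd = GDriveHandler('gdrive')
#     gd.download_file("/My Drive/project/asset/v001/render.exr",
#                      "C:/local/project/asset/v001",
#                      server, "project_db", file, repre, "studio",
#                      overwrite=True)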
python
# coding: utf8 from __future__ import unicode_literals from .stop_words import STOP_WORDS from ..tokenizer_exceptions import BASE_EXCEPTIONS from ..norm_exceptions import BASE_NORMS from ...language import Language from ...attrs import LANG, NORM from ...util import update_exc, add_lookups class CroatianDefaults(Language.Defaults): lex_attr_getters = dict(Language.Defaults.lex_attr_getters) lex_attr_getters[LANG] = lambda text: "hr" lex_attr_getters[NORM] = add_lookups( Language.Defaults.lex_attr_getters[NORM], BASE_NORMS ) tokenizer_exceptions = update_exc(BASE_EXCEPTIONS) stop_words = STOP_WORDS class Croatian(Language): lang = "hr" Defaults = CroatianDefaults __all__ = ["Croatian"]
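
# A minimal usage sketch (assuming the spaCy v2-style layout this module follows,
# where language classes can be instantiated directly to get a blank pipeline):
#
#     nlp = Croatian()
#     doc = nlp("Ovo je jednostavna rečenica.")
#     print([token.text for token in doc])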
python
""" Data related functions and objects. This module contains several parts: - `data_loader` defines a DataLoader objects that behave similar to pandas DataFrame and are used to load data. In addition it provides DataLoader wrappers that implement various transformations on the loaded dataset. - `data_generator` defines a DataGenerator object that takes a DataLoader as input and creates batches of data from it. This submodule also defines a number of wrappers that apply transformation to the generated batches of data. - `data` file defines a number of routines to simplify data handling. """ from .data import load_data, create_data_generators, construct_data_loader __all__ = [ 'load_data', 'create_data_generators', 'construct_data_loader' ]
python
from .ner_labels import NERLabels
from .ner_dataset import NERDataset
from .label_mapper import LabelMapper
from .dataset_tokenizer import DatasetTokenizer

__all__ = ["NERLabels", "NERDataset", "LabelMapper", "DatasetTokenizer"]
python
from django.shortcuts import render, redirect from django.contrib.auth.models import User, auth from django.contrib import messages from django.shortcuts import get_object_or_404 from .models import * from .forms import * from datetime import datetime, timedelta from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, EmptyPage def index(request): return render(request, 'index.html') def register(request): if request.method == 'POST': username = request.POST['username'] email = request.POST['email'] password = request.POST['password'] password2 = request.POST['password2'] if password == password2: if User.objects.filter(email=email).exists(): messages.info(request, 'Email already used!') return redirect('register') elif User.objects.filter(username=username).exists(): messages.info(request, 'Username already used!') return redirect('register') else: user = User.objects.create_user(username=username, email=email, password=password) user.save() return redirect('login') else: messages.info(request, 'Passwords dont match') return redirect('register') else: return render(request, 'register.html') def login(request): if request.method == 'POST': username = request.POST['username'] password = request.POST['password'] user = auth.authenticate(username=username, password=password) if user is not None: auth.login(request, user) return redirect('/') else: messages.info(request, 'User doesnt exists') return redirect('login') else: return render(request, 'login.html') @login_required def logout(request): auth.logout(request) return redirect('/') def rooms(request): free_rooms = Room.objects.filter(is_reserved=False) p = Paginator(free_rooms, 1) page_num = request.GET.get('page', 1) try: page = p.page(page_num) except EmptyPage: page = p.page(1) return render(request, 'rooms.html', {'rooms': page}) def search_rooms(request): searched = request.POST['search_value'] obj = Room.objects.filter(number=searched, is_reserved=False) return render(request, 'search_rooms.html', {'room': obj}) def room(request, pk): room = Room.objects.get(id=pk) comments = Comment.objects.filter(room=room) return render(request, 'room.html', {'room': room, 'comments': comments}) @login_required def reservation(request, pk): obj = get_object_or_404(Room, id=pk) user = request.user if request.method == "POST": form = ReservationForm(request.POST) if form['arrival_date'].value() != form['departure_date'].value(): if form.is_valid(): res = form.save(commit=False) res.user = user obj.is_reserved = True res.room = obj obj.save() res.save() return redirect('/') else: messages.info(request, 'Arrival and departure date are equal') else: form = ReservationForm() return render(request, 'reservation.html', {'form': form, 'room': obj}) @login_required def profile(request): user = request.user reservations = Reservation.objects.all().order_by('-reserve_time') return render(request, 'profile.html', {'reservations': reservations, 'user': user}) @login_required def delete_reservation(request, pk): obj = get_object_or_404(Reservation, id=pk) room = obj.room room.is_reserved = False room.save() obj.delete() return redirect('/profile') @login_required def edit_reservation(request, pk): obj = get_object_or_404(Reservation, id=pk) room = obj.room form = ReservationForm(request.POST or None, instance=obj) if form['arrival_date'].value() != form['departure_date'].value(): if form.is_valid(): res = form.save(commit=False) res.reserve_time = datetime.now() res.save() return redirect('/profile') else: 
messages.info(request, 'Arrival and departure date are equal') return render(request, 'reservation.html', {'form': form, 'room': room}) @login_required def add_comment(request, pk): obj = get_object_or_404(Room, id=pk) user = request.user if request.method == 'POST': form = CommentForm(request.POST) if form['text'].value(): if form['rate'].value(): if form.is_valid(): com = form.save(commit=False) com.user = user com.room = obj com.save() return redirect('/rooms') else: messages.info(request, 'You must rate the room!') else: messages.info(request, 'You should type something!') else: form = CommentForm() return render(request, 'comment.html', {'form': form}) @login_required def show_last_month(request): obj = Reservation.objects.filter(departure_date__gt=datetime.now() - timedelta(days=30)) print(datetime.now() - timedelta(30)) return render(request, 'lastmonth.html', {'objects': obj})
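
# A URLconf sketch matching the redirect() targets and view signatures used above
# (route names and paths are illustrative; the project's real urls.py may differ):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('register/', views.register, name='register'),
#         path('login/', views.login, name='login'),
#         path('logout/', views.logout, name='logout'),
#         path('rooms/', views.rooms, name='rooms'),
#         path('rooms/<int:pk>/', views.room, name='room'),
#         path('rooms/<int:pk>/reserve/', views.reservation, name='reservation'),
#         path('profile/', views.profile, name='profile'),
#     ]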
python
from .accuracy import Accuracy, accuracy __all__ = ['Accuracy', 'accuracy']
python
"""
File name: scenario_modifier
Date created: 03/03/2019
Feature: Enable the user to overwrite the scenario file.
"""
import sys

__author__ = "Alexander Kell"
__copyright__ = "Copyright 2018, Alexander Kell"
__license__ = "MIT"
__email__ = "[email protected]"


def overwrite_scenario_file(scenario_file):
    sys.modules['elecsim'].scenario.scenario_data = scenario_file
python
class User: """Represents a MangaDex User.""" __slots__ = ("id", "username", "roles", "client") def __init__(self, data, client): self.id = data.get("id") _attrs = data.get("attributes") self.username = _attrs.get("username") self.roles = _attrs.get("roles", []) self.client = client
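
# The constructor expects a MangaDex-style payload of the shape read above
# ("id" at the top level, "username"/"roles" under "attributes"); the values
# below are placeholders and 'client' is whatever API client created the object:
#
#     payload = {
#         "id": "some-uuid",
#         "attributes": {"username": "example_user", "roles": ["ROLE_MEMBER"]},
#     }
#     user = User(payload, client)
#     print(user.username, user.roles)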
python
# Authors: Stephane Gaiffas <[email protected]> # Ibrahim Merad <[email protected]> # License: BSD 3 clause """ This module implement the ``GMOM`` class for the geometric median-of-means robust estimator. ``StateGMOM`` is a place-holder for the GMOM estimator containing: gradient: numpy.ndarray A numpy array of shape (n_weights,) containing gradients computed by the `grad` function returned by the `grad_factory` factory function. TODO: fill the missing things in StateCH """ from collections import namedtuple import numpy as np from numba import jit from ._base import Estimator, jit_kwargs from .._utils import np_float @jit(**jit_kwargs) def gmom_njit(xs, tol=1e-4): # from Vardi and Zhang 2000 n_elem, n_dim = xs.shape # TODO : avoid the memory allocations in this function y = np.zeros(n_dim) dists = np.zeros(n_elem) inv_dists = np.zeros(n_elem) xsy = np.zeros_like(xs) for i in range(n_elem): y += xs[i] y /= n_elem eps = 1e-10 delta = 1 niter = 0 while delta > tol: xsy[:] = xs - y dists.fill(0.0) for j in range(n_dim): dists[:] += xsy[:, j] * xsy[:, j] # np.linalg.norm(xsy, axis=1) for i in range(n_elem): dists[i] = np.sqrt(dists[i]) # dists[:] = euclidean_numba1(xs, [y]).flatten() mask = dists < eps nmask = np.logical_not(mask) inv_dists[nmask] = 1 / dists[nmask] # print("pass2") inv_dists[mask] = 0 nb_too_close = mask.sum() ry = np.sqrt( np.sum(np.dot(inv_dists, xsy) ** 2) ) # np.linalg.norm(np.dot(inv_dists, xsy)) if ry == 0: break cst = nb_too_close / ry sum_inv_dists = np.sum(inv_dists) if sum_inv_dists == 0: raise ValueError y_new = ( max(0, 1 - cst) * np.dot(inv_dists, xs) / sum_inv_dists + min(1, cst) * y ) delta = np.sqrt(np.sum((y - y_new) ** 2)) # np.linalg.norm(y - y_new) y = y_new niter += 1 return y, niter * (n_elem + 1) @jit(**jit_kwargs) def gmom_njit2(X, tol=1e-5): n_elem, n_dim = X.shape y = np.zeros(n_dim) for i in range(n_elem): y += X[i] y /= n_elem D = np.zeros((n_elem, 1)) while True: D.fill(0.0) for i in range(n_elem): for j in range(n_dim): D[i] += (X[i, j] - y[j]) ** 2 D[i] = np.sqrt(D[i]) # D = cdist(X, [y]) nonzeros = (D != 0)[:, 0] Dinv = 1 / D[nonzeros] Dinvs = np.sum(Dinv) W = Dinv / Dinvs T = np.sum(W * X[nonzeros], 0) num_zeros = n_elem - np.sum(nonzeros) if num_zeros == 0: y1 = T elif num_zeros == n_elem: return (y, 0) else: R = (T - y) * Dinvs r = np.linalg.norm(R) rinv = 0 if r == 0 else num_zeros / r y1 = max(0, 1 - rinv) * T + min(1, rinv) * y if np.linalg.norm(y - y1) < tol: return (y1, 0) y = y1 StateGMOM = namedtuple( "StateGMOM", [ "block_means", "sample_indices", "grads_sum_block", "gradient", "loss_derivative", "partial_derivative", ], ) class GMOM(Estimator): def __init__(self, X, y, loss, n_classes, fit_intercept, n_samples_in_block): super().__init__(X, y, loss, n_classes, fit_intercept) self.n_samples_in_block = n_samples_in_block if n_samples_in_block <= 0: raise ValueError self.n_blocks = self.n_samples // n_samples_in_block self.last_block_size = self.n_samples % n_samples_in_block if self.last_block_size > 0: self.n_blocks += 1 def get_state(self): return StateGMOM( block_means=np.empty( ( self.n_blocks, self.n_features + int(self.fit_intercept), self.n_classes, ), dtype=np_float, ), sample_indices=np.arange(self.n_samples, dtype=np.uintp), grads_sum_block=np.empty( (self.n_features + int(self.fit_intercept), self.n_classes), dtype=np_float, ), gradient=np.empty( (self.n_features + int(self.fit_intercept), self.n_classes), dtype=np_float, ), loss_derivative=np.empty(self.n_classes, dtype=np_float), 
partial_derivative=np.empty(self.n_classes, dtype=np_float), ) def partial_deriv_factory(self): raise ValueError( "gmom estimator does not support CGD, use mom estimator instead" ) def grad_factory(self): X = self.X y = self.y loss = self.loss deriv_loss = loss.deriv_factory() n_samples_in_block = self.n_samples_in_block n_classes = self.n_classes n_features = self.n_features n_blocks = self.n_blocks last_block_size = self.last_block_size if self.fit_intercept: @jit(**jit_kwargs) def grad(inner_products, state): sample_indices = state.sample_indices block_means = state.block_means gradient = state.gradient # Cumulative sum in the block grads_sum_block = state.grads_sum_block # for i in range(n_samples): # sample_indices[i] = i np.random.shuffle(sample_indices) for j in range(n_features + 1): for k in range(n_classes): grads_sum_block[j, k] = 0.0 # Block counter counter = 0 deriv = state.loss_derivative for i, idx in enumerate(sample_indices): deriv_loss(y[idx], inner_products[idx], deriv) for k in range(n_classes): grads_sum_block[0, k] += deriv[k] for j in range(n_features): grads_sum_block[j + 1, k] += ( X[idx, j] * deriv[k] ) # np.outer(X[idx], deriv) if ((i != 0) and ((i + 1) % n_samples_in_block == 0)) or n_samples_in_block == 1: for j in range(n_features + 1): for k in range(n_classes): block_means[counter, j, k] = ( grads_sum_block[j, k] / n_samples_in_block ) grads_sum_block[j, k] = 0.0 counter += 1 if last_block_size != 0: for j in range(n_features + 1): for k in range(n_classes): block_means[counter, j, k] = ( grads_sum_block[j, k] / last_block_size ) # TODO : possible optimizations in the next line by rewriting gmom_njit with out parameter # and preallocated place holders ... gmom_grad, sc_prods = gmom_njit(block_means.reshape((n_blocks, -1))) gradient[:] = gmom_grad.reshape( block_means.shape[1:] ) return sc_prods return grad else: @jit(**jit_kwargs) def grad(inner_products, state): sample_indices = state.sample_indices block_means = state.block_means gradient = state.gradient # Cumulative sum in the block grads_sum_block = state.grads_sum_block # for i in range(n_samples): # sample_indices[i] = i np.random.shuffle(sample_indices) # Cumulative sum in the block for j in range(n_features): for k in range(n_classes): grads_sum_block[j, k] = 0.0 # Block counter counter = 0 deriv = state.loss_derivative for i, idx in enumerate(sample_indices): deriv_loss(y[idx], inner_products[idx], deriv) for j in range(n_features): for k in range(n_classes): grads_sum_block[j, k] += X[idx, j] * deriv[k] if (i != 0) and ((i + 1) % n_samples_in_block == 0): for j in range(n_features): for k in range(n_classes): block_means[counter, j, k] = ( grads_sum_block[j, k] / n_samples_in_block ) grads_sum_block[j, k] = 0.0 counter += 1 if last_block_size != 0: for j in range(n_features): for k in range(n_classes): block_means[counter, j, k] = ( grads_sum_block[j, k] / last_block_size ) # TODO : possible optimizations in the next line by rewriting gmom_njit with out parameter # and preallocated place holders ... gmom_grad, sc_prods = gmom_njit(block_means.reshape((n_blocks, -1))) gradient[:] = gmom_grad.reshape( block_means.shape[1:] ) return sc_prods return grad
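
# A quick sanity check of the geometric-median helper (a sketch only; gmom_njit is
# an internal routine and the point values below are made up):
#
#     pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [100.0, 100.0]])
#     center, n_ops = gmom_njit(pts)
#     # 'center' stays near the three inlier points despite the outlier at (100, 100),
#     # whereas a plain mean would be dragged towards it.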
python
#!/usr/bin/python3
# -*- coding:utf-8 -*-

from os import listdir
from os.path import splitext, join
import markdown
import yaml
import json


def read(uri):
    with open(uri, 'r') as f:
        return f.read()


def text_to_yml_md(tex):
    # Split a document into its YAML front matter and Markdown body.
    # Only the first '---' separator is honoured, so horizontal rules
    # inside the body do not break the unpacking done in extract_info().
    tex = tex.strip('-')
    sep = '---'
    if sep in tex:
        return tex.split(sep, 1)
    else:
        return (None, tex)


def extract_info(path, mode, keys):
    list_files = listdir(path)
    resp = {}
    for file in list_files:
        n, e = splitext(file)
        if mode == 'yaml':
            info = yaml.safe_load(read(join(path, file)))
        elif mode == 'markdown':
            yml, md = text_to_yml_md(read(join(path, file)))
            info = yaml.safe_load(yml)
        for k in keys:
            if n not in resp:
                resp[n] = {}
            if k in info:
                resp[n][k] = info[k]
    return resp


def main():
    file_dat = '../cfg/data-automatic.js'
    data = {
        'pages': extract_info('../public_html/dat/pages/', 'markdown', ['title']),
        'series': extract_info('../public_html/dat/series/', 'yaml', ['title-es', 'title-en']),
        #'news': extract_info('../public_html/dat/news/', 'markdown', ['title', 'date']),
        #'press': extract_info('../public_html/dat/press/', 'markdown', ['title', 'date']),
    }
    with open(file_dat, 'w') as f:
        json_tex = json.dumps(data)
        f.write(f'const data = {json_tex};')


if __name__ == '__main__':
    main()
python
#!/usr/bin/env python3 # Copyright (c) 2016 Fabian Schuiki # # This script generates GDS layout data for a memory macro. import sys, os, argparse from potstill.macro import Macro from potstill.layout import Layout from potstill.output.gds import make_gds, make_phalanx_input # Parse the command line arguments. parser = argparse.ArgumentParser(prog="potstill make-gds", description="Generate the GDS layout data of a memory macro.") parser.add_argument("NADDR", type=int, help="number of address lines") parser.add_argument("NBITS", type=int, help="number of bits per word") parser.add_argument("-o", "--output", metavar="GDSFILE", type=str, help="name of the output GDS file") parser.add_argument("-p", "--phalanx", action="store_true", help="write Phalanx input file to stdout") args = parser.parse_args() # Calculate the layout. macro = Macro(args.NADDR, args.NBITS) layout = Layout(macro) filename = args.output or (macro.name+".gds") # Dump the input file to stdout if requested. if args.phalanx: sys.stdout.write(make_phalanx_input(layout, filename)) sys.exit(0) # Generate GDS output. make_gds(layout, filename)
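
# Example invocations (a sketch; the default output name is derived from the macro,
# so the exact GDS file name depends on the Macro implementation):
#
#     potstill make-gds 6 16
#     potstill make-gds 6 16 -o macro.gds
#     potstill make-gds 6 16 --phalanx > macro.phalanx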
python
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['EnvironmentVariableArgs', 'EnvironmentVariable'] @pulumi.input_type class EnvironmentVariableArgs: def __init__(__self__, *, key: pulumi.Input[str], value: pulumi.Input[str], locked: Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a EnvironmentVariable resource. """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) if locked is not None: pulumi.set(__self__, "locked", locked) @property @pulumi.getter def key(self) -> pulumi.Input[str]: return pulumi.get(self, "key") @key.setter def key(self, value: pulumi.Input[str]): pulumi.set(self, "key", value) @property @pulumi.getter def value(self) -> pulumi.Input[str]: return pulumi.get(self, "value") @value.setter def value(self, value: pulumi.Input[str]): pulumi.set(self, "value", value) @property @pulumi.getter def locked(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "locked") @locked.setter def locked(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "locked", value) @pulumi.input_type class _EnvironmentVariableState: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, locked: Optional[pulumi.Input[bool]] = None, value: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering EnvironmentVariable resources. """ if key is not None: pulumi.set(__self__, "key", key) if locked is not None: pulumi.set(__self__, "locked", locked) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def locked(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "locked") @locked.setter def locked(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "locked", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) class EnvironmentVariable(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, key: Optional[pulumi.Input[str]] = None, locked: Optional[pulumi.Input[bool]] = None, value: Optional[pulumi.Input[str]] = None, __props__=None): """ ## Example Usage ```python import pulumi import pulumi_checkly as checkly # Simple Enviroment Variable example variable_1 = checkly.EnvironmentVariable("variable-1", key="API_KEY", locked=True, value="loZd9hOGHDUrGvmW") variable_2 = checkly.EnvironmentVariable("variable-2", key="API_URL", value="http://localhost:3000") ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
@overload def __init__(__self__, resource_name: str, args: EnvironmentVariableArgs, opts: Optional[pulumi.ResourceOptions] = None): """ ## Example Usage ```python import pulumi import pulumi_checkly as checkly # Simple Enviroment Variable example variable_1 = checkly.EnvironmentVariable("variable-1", key="API_KEY", locked=True, value="loZd9hOGHDUrGvmW") variable_2 = checkly.EnvironmentVariable("variable-2", key="API_URL", value="http://localhost:3000") ``` :param str resource_name: The name of the resource. :param EnvironmentVariableArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(EnvironmentVariableArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, key: Optional[pulumi.Input[str]] = None, locked: Optional[pulumi.Input[bool]] = None, value: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = EnvironmentVariableArgs.__new__(EnvironmentVariableArgs) if key is None and not opts.urn: raise TypeError("Missing required property 'key'") __props__.__dict__["key"] = key __props__.__dict__["locked"] = locked if value is None and not opts.urn: raise TypeError("Missing required property 'value'") __props__.__dict__["value"] = value super(EnvironmentVariable, __self__).__init__( 'checkly:index/environmentVariable:EnvironmentVariable', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, key: Optional[pulumi.Input[str]] = None, locked: Optional[pulumi.Input[bool]] = None, value: Optional[pulumi.Input[str]] = None) -> 'EnvironmentVariable': """ Get an existing EnvironmentVariable resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _EnvironmentVariableState.__new__(_EnvironmentVariableState) __props__.__dict__["key"] = key __props__.__dict__["locked"] = locked __props__.__dict__["value"] = value return EnvironmentVariable(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def key(self) -> pulumi.Output[str]: return pulumi.get(self, "key") @property @pulumi.getter def locked(self) -> pulumi.Output[Optional[bool]]: return pulumi.get(self, "locked") @property @pulumi.getter def value(self) -> pulumi.Output[str]: return pulumi.get(self, "value")
python
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=line-too-long, too-many-instance-attributes import logging import timeit import inspect from knack.log import get_logger from azure.cli.core.commands.client_factory import get_subscription_id from .telemetry import _track_command_telemetry, _track_run_command_telemetry from .repair_utils import _get_function_param_dict STATUS_SUCCESS = 'SUCCESS' STATUS_ERROR = 'ERROR' VM_REPAIR_RUN_COMMAND = 'vm repair run' class command_helper(object): """ The command helper stores command state data and helper functions for vm-repair commands. It will also execute needed functions at the start and end of commands such as sending telemetry data and displaying progress controller """ def __init__(self, logger, cmd, command_name): """ The command helper object should always be initialized at the start of a command """ # Start timer for custom telemetry self.start_time = timeit.default_timer() # Fetch and store command parameters self.command_params = _get_function_param_dict(inspect.getouterframes(inspect.currentframe())[1].frame) # Logger self.logger = logger # CLI cmd object self.cmd = cmd # Command name self.command_name = command_name # Init script data if command is vm repair run if command_name == VM_REPAIR_RUN_COMMAND: self.script = script_data() self.script.run_id = self.command_params['run_id'] # Return message self.message = '' # Return error message self.error_message = '' # Return Status: STATUS_SUCCESS | STATUS_ERROR self.status = '' # Error stack trace self.error_stack_trace = '' # Return dict self.return_dict = {} # Verbose flag for command self.is_verbose = any(handler.level == logging.INFO for handler in get_logger().handlers) # Begin progress reporting for long running operation if not verbose if not self.is_verbose: self.cmd.cli_ctx.get_progress_controller().begin() self.cmd.cli_ctx.get_progress_controller().add(message='Running') def __del__(self): """ This object will have the same life time as an invoked command. We will run all telemetry and clean-up work through the destructor. 
""" # End long running op for process if not verbose if not self.is_verbose: self.cmd.cli_ctx.get_progress_controller().end() # Track telemetry data elapsed_time = timeit.default_timer() - self.start_time if self.command_name == VM_REPAIR_RUN_COMMAND: _track_run_command_telemetry(self.logger, self.command_name, self.command_params, self.status, self.message, self.error_message, self.error_stack_trace, elapsed_time, get_subscription_id(self.cmd.cli_ctx), self.return_dict, self.script.run_id, self.script.status, self.script.output, self.script.run_time) else: _track_command_telemetry(self.logger, self.command_name, self.command_params, self.status, self.message, self.error_message, self.error_stack_trace, elapsed_time, get_subscription_id(self.cmd.cli_ctx), self.return_dict) def set_status_success(self): """ Set command status to success """ self.status = STATUS_SUCCESS def set_status_error(self): """ Set command status to error """ self.status = STATUS_ERROR def is_status_success(self): return self.status == STATUS_SUCCESS def init_return_dict(self): """ Returns the command return dictionary """ self.return_dict = {} self.return_dict["status"] = self.status self.return_dict["message"] = self.message if not self.is_status_success(): self.return_dict["error_message"] = self.error_message if self.error_message: self.logger.error(self.error_message) if self.message: self.logger.error(self.message) return self.return_dict class script_data(object): """ Stores repair script data. """ def __init__(self): # Unique run-id self.run_id = '' # Script status self.status = '' # Script Output self.output = '' # Script run time self.run_time = None def set_status_success(self): """ Set command status to success """ self.status = STATUS_SUCCESS def set_status_error(self): """ Set command status to error """ self.status = STATUS_ERROR
python
from . import base_api_core class Backup(base_api_core.Core): def __init__(self, ip_address, port, username, password, secure=False, cert_verify=False, dsm_version=2, debug=True): super(Backup, self).__init__(ip_address, port, username, password, secure, cert_verify, dsm_version, debug) def backup_repository_get(self): api_name = 'SYNO.Backup.Repository' info = self.core_list[api_name] api_path = info['path'] req_param = {'version': info['maxVersion'], 'method': 'get'} return self.request_data(api_name, api_path, req_param) def backup_repository_list(self): api_name = 'SYNO.Backup.Repository' info = self.core_list[api_name] api_path = info['path'] req_param = {'version': info['maxVersion'], 'method': 'list'} return self.request_data(api_name, api_path, req_param) def backup_task_list(self): api_name = 'SYNO.Backup.Task' info = self.core_list[api_name] api_path = info['path'] req_param = {'version': info['maxVersion'], 'method': 'list'} return self.request_data(api_name, api_path, req_param) def backup_task_status(self): api_name = 'SYNO.Backup.Task' info = self.core_list[api_name] api_path = info['path'] req_param = {'version': info['maxVersion'], 'method': 'status'} return self.request_data(api_name, api_path, req_param) def backup_task_get(self): api_name = 'SYNO.Backup.Task' info = self.core_list[api_name] api_path = info['path'] req_param = {'version': info['maxVersion'], 'method': 'get'} return self.request_data(api_name, api_path, req_param)
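
# A usage sketch (host, port and credentials are placeholders; each method simply
# wraps the corresponding SYNO.Backup.* API call):
#
#     backup = Backup("192.168.1.10", "5001", "admin", "secret", secure=True)
#     print(backup.backup_repository_list())
#     print(backup.backup_task_status())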
python
""" Copyright (C) 2019 NetApp Inc. All rights reserved. A test module for the recline.repl.shell module """ import asyncio import builtins import pytest import recline from recline.repl import shell @pytest.mark.parametrize("user_input, expected_marker, expected_output", [ ("ut command -arg 2", 2, ""), ("ut command", None, "required: -arg"), ("ut command -arg 5", None, "This is a UT failure"), ("ut command -arg foo", None, "invalid int value"), ("bad command", None, "Unknown command"), ("ut command -arg 2 && ut command -arg 3", 3, ""), ("ut command; ut command -arg 3", 3, "required: -arg"), ("ut command -arg 5 && ut command -arg 2", None, "This is a UT failure"), ("ut command -arg 2 || ut command -arg 1", 2, ""), ("bad command; bad other command || ut command -arg 3", 3, "Unknown command"), ]) def test_shell_execute(user_input, expected_marker, expected_output, capsys): """Test that our shell can run one or more commands on input""" ut_marker = None @recline.command(name="ut command") def ut_command(arg: int): # pylint: disable=unused-variable if arg == 5: raise ValueError("This is a UT failure") nonlocal ut_marker ut_marker = arg shell.execute(user_input) assert ut_marker == expected_marker captured = capsys.readouterr() assert expected_output in captured.out + captured.err @pytest.mark.parametrize("user_input, expected_marker", [ ("ut async command -arg 2", 2), ("ut async command -arg 30", 30), ]) def test_shell_execute_async_command(user_input, expected_marker): """Verify we can run async commands as well""" ut_marker = None @recline.command(name="ut async command") async def ut_command(arg: int): # pylint: disable=unused-variable loops = 0 while loops < arg: loops += 1 await asyncio.sleep(0.001) nonlocal ut_marker ut_marker = arg shell.execute(user_input) assert ut_marker == expected_marker def test_run_startup_exit_command(monkeypatch): """Verify that a command which is marked to run at startup or exit gets run""" startup_command_ran = False recline.commands.START_COMMAND = None def mock_eof(prompt): raise EOFError("UT is finished") monkeypatch.setattr(builtins, "input", mock_eof) @recline.command(atstart=True) def startup(): # pylint: disable=unused-variable nonlocal startup_command_ran startup_command_ran = True with pytest.raises(SystemExit): shell.relax(argv=["ut_program"]) assert startup_command_ran recline.commands.START_COMMAND = None @pytest.mark.parametrize("motd, expected", [ ("This is a simple message", "This is a simple message"), (lambda: "This is a dynamic message", "This is a dynamic message"), ]) def test_run_motd(motd, expected, monkeypatch, capsys): """Verify the MOTD gets printed if one is provided""" def mock_eof(prompt): raise EOFError("UT is finished") monkeypatch.setattr(builtins, "input", mock_eof) with pytest.raises(SystemExit): shell.relax(argv=["ut_program"], motd=motd) captured = capsys.readouterr() assert expected in captured.out def test_run_with_dash_c(): """Verify only a single command is run when -c is passed in""" @recline.command(name="single command") def single_command(): # pylint: disable=unused-variable return 73 assert shell.relax(argv=["ut_program", "-c", "single", "command"]) == 73 def test_run_non_repl(): """Verify that if a program is not trying to be a repl, then we will parse a command from the input and exit """ @recline.command(name="single command") def single_command(): # pylint: disable=unused-variable return 73 assert shell.relax(argv=["ut_program", "single", "command"], repl_mode=False) == 73 def test_run_single_command(): """Verify 
that when single_command is given, the named command is run directly
    and its return value is propagated back to the caller
    """

    @recline.command(name="single command")
    def single_command():  # pylint: disable=unused-variable
        return 73

    assert shell.relax(argv=["ut_program"], single_command="single command") == 73
python
#!/usr/bin/env python #-*- coding:utf-8 -*- ## ## setup.py ## ## Created on: Jan 23, 2018 ## Author: Alexey S. Ignatiev ## E-mail: [email protected] ## # #============================================================================== import os import os.path import contextlib import glob try: from setuptools import setup, Extension HAVE_SETUPTOOLS = True except ImportError: from distutils.core import setup, Extension HAVE_SETUPTOOLS = False import distutils.command.build import distutils.command.install import inspect, os, sys sys.path.insert(0, os.path.join(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])), 'solvers/')) import platform import prepare from pysat import __version__ # #============================================================================== @contextlib.contextmanager def chdir(new_dir): old_dir = os.getcwd() try: os.chdir(new_dir) yield finally: os.chdir(old_dir) # #============================================================================== ROOT = os.path.abspath(os.path.dirname(__file__)) LONG_DESCRIPTION = """ A Python library providing a simple interface to a number of state-of-art Boolean satisfiability (SAT) solvers and a few types of cardinality and pseudo-Boolean encodings. The purpose of PySAT is to enable researchers working on SAT and its applications and generalizations to easily prototype with SAT oracles in Python while exploiting incrementally the power of the original low-level implementations of modern SAT solvers. With PySAT it should be easy for you to implement a MaxSAT solver, an MUS/MCS extractor/enumerator, or any tool solving an application problem with the (potentially multiple) use of a SAT oracle. Details can be found at `https://pysathq.github.io <https://pysathq.github.io>`__. """ # solvers to install #============================================================================== to_install = ['cadical', 'glucose30', 'glucose41', 'lingeling', 'maplechrono', 'maplecm', 'maplesat', 'minicard', 'minisat22', 'minisatgh'] # example scripts to install as standalone executables #============================================================================== scripts = ['fm', 'genhard', 'lbx', 'lsu', 'mcsls', 'models', 'musx', 'rc2'] # we need to redefine the build command to # be able to download and compile solvers #============================================================================== class build(distutils.command.build.build): """ Our custom builder class. """ def run(self): """ Download, patch and compile SAT solvers before building. 
""" # download and compile solvers prepare.do(to_install) # now, do standard build distutils.command.build.build.run(self) # compilation flags for C extensions #============================================================================== compile_flags, cpplib = ['-std=c++11', '-Wall', '-Wno-deprecated'], ['stdc++'] if platform.system() == 'Darwin': compile_flags += ['--stdlib=libc++'] cpplib = ['c++'] elif platform.system() == 'Windows': compile_flags = ['-DNBUILD', '-DNLGLYALSAT' , '/DINCREMENTAL', '-DNLGLOG', '-DNDEBUG', '-DNCHKSOL', '-DNLGLFILES', '-DNLGLDEMA', '/experimental:preprocessor', '-I./zlib'] cpplib = [] # C extensions: pycard and pysolvers #============================================================================== pycard_ext = Extension('pycard', sources=['cardenc/pycard.cc'], extra_compile_args=compile_flags, include_dirs=['cardenc'] , language='c++', libraries=cpplib, library_dirs=[] ) pysolvers_sources = ['solvers/pysolvers.cc'] if platform.system() == 'Windows': with chdir('solvers'): for solver in to_install: with chdir(solver): for filename in glob.glob('*.c*'): pysolvers_sources += ['solvers/%s/%s' % (solver, filename)] for filename in glob.glob('*/*.c*'): pysolvers_sources += ['solvers/%s/%s' % (solver, filename)] libraries = [] library_dirs = [] else: libraries = to_install + cpplib library_dirs = list(map(lambda x: os.path.join('solvers', x), to_install)) pysolvers_ext = Extension('pysolvers', sources=pysolvers_sources, extra_compile_args=compile_flags + \ list(map(lambda x: '-DWITH_{0}'.format(x.upper()), to_install)), include_dirs=['solvers'], language='c++', libraries=libraries, library_dirs=library_dirs ) # finally, calling standard setuptools.setup() (or distutils.core.setup()) #============================================================================== setup(name='python-sat', packages=['pysat', 'pysat.examples'], package_dir={'pysat.examples': 'examples'}, version=__version__, description='A Python library for prototyping with SAT oracles', long_description=LONG_DESCRIPTION, long_description_content_type='text/x-rst; charset=UTF-8', license='MIT', author='Alexey Ignatiev, Joao Marques-Silva, Antonio Morgado', author_email='[email protected], [email protected], [email protected]', url='https://github.com/pysathq/pysat', ext_modules=[pycard_ext, pysolvers_ext], scripts=['examples/{0}.py'.format(s) for s in scripts], cmdclass={'build': build}, install_requires=['six'], extras_require = { 'aiger': ['py-aiger-cnf>=2.0.0'], 'pblib': ['pypblib>=0.0.3'] } )
python
# pylint: disable=missing-module-docstring # -*- coding: utf-8 -*- __short_version__ = '1.6' __release__ = '1.6.0' __description__ = 'Framework for Quart to add swagger generation to routes and restful resources'
python
""" This module contains common code shared by utils/rule_dir_stats.py and utils/rule_dir_diff.py. This code includes functions for walking the output of the utils/rule_dir_json.py script, and filtering functions used in both scripts. """ from __future__ import absolute_import from __future__ import print_function import os from collections import defaultdict from .build_remediations import REMEDIATION_TO_EXT_MAP as REMEDIATION_MAP from .utils import subset_dict def get_affected_products(rule_obj): """ From a rule_obj, return the set of affected products from rule.yml """ return set(rule_obj['products']) def get_all_affected_products(args, rule_obj): """ From a rule_obj, return the set of affected products from rule.yml, and all fixes and checks. If args.strict is set, this function is equivalent to get_affected_products. Otherwise, it includes ovals and fix content based on the values of args.fixes_only and args.ovals_only. """ affected_products = get_affected_products(rule_obj) if args.strict: return affected_products if not args.fixes_only: for product in rule_obj['oval_products']: affected_products.add(product) if not args.ovals_only: for product in rule_obj['remediation_products']: affected_products.add(product) return affected_products def _walk_rule(args, rule_obj, oval_func, remediation_func, verbose_output): """ Walks a single rule and updates verbose_output if visited. Returns visited state as a boolean. Internal function for walk_rules and walk_rules_parallel. """ rule_id = rule_obj['id'] affected_products = get_all_affected_products(args, rule_obj) if not affected_products.intersection(args.products): return False if args.query and rule_id not in args.query: return False if not args.fixes_only: result = oval_func(rule_obj) if result: verbose_output[rule_id]['oval'] = result if not args.ovals_only: for r_type in REMEDIATION_MAP: result = remediation_func(rule_obj, r_type) if result: verbose_output[rule_id][r_type] = result return True def walk_rules(args, known_rules, oval_func, remediation_func): """ Walk a dictionary of known_rules, returning the number of visited rules and the output at each visited rule, conditionally calling oval_func and remediation_func based on the values of args.fixes_only and args.ovals_only. If the result of these functions are not Falsy, set the appropriate output content. The input rule_obj structure is the value of known_rules[rule_id]. The output structure is a dict as follows: { rule_id: { "oval": oval_func(args, rule_obj), "ansible": remediation_func(args, "ansible", rule_obj), "anaconda": remediation_func(args, "anaconda", rule_obj), "bash": remediation_func(args, "bash", rule_obj), "puppet": remediation_func(args, "puppet", rule_obj) }, ... } The arguments supplied to oval_func are args and rule_obj. The arguments supplied to remediation_func are args, the remediation type, and rule_obj. """ affected_rules = 0 verbose_output = defaultdict(lambda: defaultdict(lambda: None)) for rule_id in known_rules: rule_obj = known_rules[rule_id] if _walk_rule(args, rule_obj, oval_func, remediation_func, verbose_output): affected_rules += 1 return affected_rules, verbose_output def walk_rule_stats(rule_output): """ Walk the output of a rule, generating statistics about affected ovals, remediations, and generating verbose output in a stable order. 
Returns a tuple of (affected_ovals, affected_remediations, all_affected_remediations, affected_remediations_type, all_output) """ affected_ovals = 0 affected_remediations = 0 all_affected_remediations = 0 affected_remediations_type = defaultdict(lambda: 0) all_output = [] affected_remediation = False all_remedation = True if 'oval' in rule_output: affected_ovals += 1 all_output.append(rule_output['oval']) for r_type in sorted(REMEDIATION_MAP): if r_type in rule_output: affected_remediation = True affected_remediations_type[r_type] += 1 all_output.append(rule_output[r_type]) else: all_remedation = False if affected_remediation: affected_remediations += 1 if all_remedation: all_affected_remediations += 1 return (affected_ovals, affected_remediations, all_affected_remediations, affected_remediations_type, all_output) def walk_rules_stats(args, known_rules, oval_func, remediation_func): """ Walk a dictionary of known_rules and generate simple aggregate statistics for all visited rules. The oval_func and remediation_func arguments behave according to walk_rules(). Returned values are visited_rules, affected_ovals, affected_remediation, a dictionary containing all fix types and the quantity of affected fixes, and the ordered output of all functions. An effort is made to provide consistently ordered verbose_output by sorting all visited keys and the keys of ssg.build_remediations.REMEDIATION_MAP. """ affected_rules, verbose_output = walk_rules(args, known_rules, oval_func, remediation_func) affected_ovals = 0 affected_remediations = 0 all_affected_remediations = 0 affected_remediations_type = defaultdict(lambda: 0) all_output = [] for rule_id in sorted(verbose_output): rule_output = verbose_output[rule_id] results = walk_rule_stats(rule_output) affected_ovals += results[0] affected_remediations += results[1] all_affected_remediations += results[2] for key in results[3]: affected_remediations_type[key] += results[3][key] all_output.extend(results[4]) return (affected_rules, affected_ovals, affected_remediations, all_affected_remediations, affected_remediations_type, all_output) def walk_rules_parallel(args, left_rules, right_rules, oval_func, remediation_func): """ Walks two sets of known_rules (left_rules and right_rules) with identical keys and returns left_only, right_only, and common_only output from _walk_rule. If the outputted data for a rule when called on left_rules and right_rules is the same, it is added to common_only. Only rules which output different data will have their data added to left_only and right_only respectively. Can assert. 
""" left_affected_rules = 0 right_affected_rules = 0 common_affected_rules = 0 left_verbose_output = defaultdict(lambda: defaultdict(lambda: None)) right_verbose_output = defaultdict(lambda: defaultdict(lambda: None)) common_verbose_output = defaultdict(lambda: defaultdict(lambda: None)) assert set(left_rules) == set(right_rules) for rule_id in left_rules: left_rule_obj = left_rules[rule_id] right_rule_obj = right_rules[rule_id] if left_rule_obj == right_rule_obj: if _walk_rule(args, left_rule_obj, oval_func, remediation_func, common_verbose_output): common_affected_rules += 1 else: left_temp = defaultdict(lambda: defaultdict(lambda: None)) right_temp = defaultdict(lambda: defaultdict(lambda: None)) left_ret = _walk_rule(args, left_rule_obj, oval_func, remediation_func, left_temp) right_ret = _walk_rule(args, right_rule_obj, oval_func, remediation_func, right_temp) if left_ret == right_ret and left_temp == right_temp: common_verbose_output.update(left_temp) if left_ret: common_affected_rules += 1 else: left_verbose_output.update(left_temp) right_verbose_output.update(right_temp) if left_ret: left_affected_rules += 1 if right_ret: right_affected_rules += 1 left_only = (left_affected_rules, left_verbose_output) right_only = (right_affected_rules, right_verbose_output) common_only = (common_affected_rules, common_verbose_output) return left_only, right_only, common_only def walk_rules_diff(args, left_rules, right_rules, oval_func, remediation_func): """ Walk a two dictionary of known_rules (left_rules and right_rules) and generate five sets of output: left_only rules output, right_only rules output, shared left output, shared right output, and shared common output, as a five-tuple, where each tuple element is equivalent to walk_rules on the appropriate set of rules. Does not understand renaming of rule_ids as this would depend on disk content to reflect these differences. Unless significantly more data is added to the rule_obj structure (contents of rule.yml, ovals, remediations, etc.), all information besides 'title' is not uniquely identifying or could be easily updated. """ left_rule_ids = set(left_rules) right_rule_ids = set(right_rules) left_only_rule_ids = left_rule_ids.difference(right_rule_ids) right_only_rule_ids = right_rule_ids.difference(left_rule_ids) common_rule_ids = left_rule_ids.intersection(right_rule_ids) left_restricted = subset_dict(left_rules, left_only_rule_ids) left_common = subset_dict(left_rules, common_rule_ids) right_restricted = subset_dict(right_rules, right_only_rule_ids) right_common = subset_dict(right_rules, common_rule_ids) left_only_data = walk_rules(args, left_restricted, oval_func, remediation_func) right_only_data = walk_rules(args, right_restricted, oval_func, remediation_func) l_c_d, r_c_d, c_d = walk_rules_parallel(args, left_common, right_common, oval_func, remediation_func) left_changed_data = l_c_d right_changed_data = r_c_d common_data = c_d return (left_only_data, right_only_data, left_changed_data, right_changed_data, common_data) def walk_rules_diff_stats(results): """ Takes the results of walk_rules_diff (results) and generates five sets of output statistics: left_only rules output, right_only rules output, shared left output, shared right output, and shared common output, as a five-tuple, where each tuple element is equivalent to walk_rules_stats on the appropriate set of rules. Can assert. 
""" assert len(results) == 5 output_data = [] for data in results: affected_rules, verbose_output = data affected_ovals = 0 affected_remediations = 0 all_affected_remediations = 0 affected_remediations_type = defaultdict(lambda: 0) all_output = [] for rule_id in sorted(verbose_output): rule_output = verbose_output[rule_id] _results = walk_rule_stats(rule_output) affected_ovals += _results[0] affected_remediations += _results[1] all_affected_remediations += _results[2] for key in _results[3]: affected_remediations_type[key] += _results[3][key] all_output.extend(_results[4]) output_data.append((affected_rules, affected_ovals, affected_remediations, all_affected_remediations, affected_remediations_type, all_output)) assert len(output_data) == 5 return tuple(output_data) def filter_rule_ids(all_keys, queries): """ From a set of queries (a comma separated list of queries, where a query is either a rule id or a substring thereof), return the set of matching keys from all_keys. When queries is the literal string "all", return all of the keys. """ if not queries: return set() if queries == 'all': return set(all_keys) # We assume that all_keys is much longer than queries; this allows us to do # len(all_keys) iterations of size len(query_parts) instead of len(query_parts) # queries of size len(all_keys) -- which hopefully should be a faster data access # pattern due to caches but in reality shouldn't matter. Note that we have to iterate # over the keys in all_keys either way, because we wish to check whether query is a # substring of a key, not whether query is a key. # # This does have the side-effect of not having the results be ordered according to # their order in query_parts, so we instead, we intentionally discard order by using # a set. This also guarantees that our results are unique. results = set() query_parts = queries.split(',') for key in all_keys: for query in query_parts: if query in key: results.add(key) return results def missing_oval(rule_obj): """ For a rule object, check if it is missing an oval. """ rule_id = rule_obj['id'] check = len(rule_obj['ovals']) > 0 if not check: return "\trule_id:%s is missing all OVALs" % rule_id def missing_remediation(rule_obj, r_type): """ For a rule object, check if it is missing a remediation of type r_type. """ rule_id = rule_obj['id'] check = len(rule_obj['remediations'][r_type]) > 0 if not check: return "\trule_id:%s is missing %s remediations" % (rule_id, r_type) def two_plus_oval(rule_obj): """ For a rule object, check if it has two or more OVALs. """ rule_id = rule_obj['id'] check = len(rule_obj['ovals']) >= 2 if check: return "\trule_id:%s has two or more OVALs: %s" % (rule_id, ','.join(rule_obj['ovals'])) def two_plus_remediation(rule_obj, r_type): """ For a rule object, check if it has two or more remediations of type r_type. """ rule_id = rule_obj['id'] check = len(rule_obj['remediations'][r_type]) >= 2 if check: return "\trule_id:%s has two or more %s remediations: %s" % \ (rule_id, r_type, ','.join(rule_obj['remediations'][r_type])) def prodtypes_oval(rule_obj): """ For a rule object, check if the prodtypes match between the YAML and the OVALs. 
""" rule_id = rule_obj['id'] rule_products = set(rule_obj['products']) if not rule_products: return oval_products = set() for oval in rule_obj['ovals']: oval_products.update(rule_obj['ovals'][oval]['products']) if not oval_products: return sym_diff = sorted(rule_products.symmetric_difference(oval_products)) check = len(sym_diff) > 0 if check: return "\trule_id:%s has a different prodtypes between YAML and OVALs: %s" % \ (rule_id, ','.join(sym_diff)) def prodtypes_remediation(rule_obj, r_type): """ For a rule object, check if the prodtypes match between the YAML and the remediations of type r_type. """ rule_id = rule_obj['id'] rule_products = set(rule_obj['products']) if not rule_products: return remediation_products = set() for remediation in rule_obj['remediations'][r_type]: remediation_products.update(rule_obj['remediations'][r_type][remediation]['products']) if not remediation_products: return sym_diff = sorted(rule_products.symmetric_difference(remediation_products)) check = len(sym_diff) > 0 and rule_products and remediation_products if check: return "\trule_id:%s has a different prodtypes between YAML and %s remediations: %s" % \ (rule_id, r_type, ','.join(sym_diff)) def product_names_oval(rule_obj): """ For a rule_obj, check the scope of the platforms versus the product name of the OVAL objects. """ rule_id = rule_obj['id'] for oval_name in rule_obj['ovals']: if oval_name == "shared.xml": continue oval_product, _ = os.path.splitext(oval_name) for product in rule_obj['ovals'][oval_name]['products']: if product != oval_product: return "\trule_id:%s has a different product and OVALs names: %s is not %s" % \ (rule_id, product, oval_product) def product_names_remediation(rule_obj, r_type): """ For a rule_obj, check the scope of the platforms versus the product name of the remediations of type r_type. """ rule_id = rule_obj['id'] for r_name in rule_obj['remediations'][r_type]: r_product, _ = os.path.splitext(r_name) if r_product == "shared": continue for product in rule_obj['remediations'][r_type][r_name]['products']: if product != r_product: return "\trule_id:%s has a different product and %s remediation names: %s is not %s" % \ (rule_id, r_type, product, r_product)
python
class Database(Exception): pass class Serialize(Database): def __init__(self, cls, msg="Serialization Failed"): self.cls = cls self.msg = msg def __str__(self) -> str: return f"'{self.cls}' {self.msg}"
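# Minimal usage sketch (not part of the original module): Serialize derives from
# Database, so callers can catch either. The FakeModel name is invented for the example.
if __name__ == '__main__':
    class FakeModel:
        pass

    try:
        raise Serialize(FakeModel.__name__)
    except Database as exc:
        print(exc)  # -> 'FakeModel' Serialization Failed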
python
from sqlalchemy import Column, Integer, String, Sequence, SmallInteger from sgs_schema.declarative_base import Base from sqlalchemy.orm import relationship from sqlalchemy.sql.schema import ForeignKey from sqlalchemy.sql.sqltypes import Float class Produto(Base): __tablename__ = "PRODUTO" id = Column(Integer, Sequence("PRODUTO_ID_GEN"), primary_key=True) codigo = Column(String(20)) codbarra = Column(String(50)) descricao = Column(String(250)) id_categoria = Column(Integer, ForeignKey("CATPRODUTO.id"), nullable=False) categoria = relationship("CategoriaProduto") id_unidade = Column(Integer) #TODO: unity = relationship("ItemUnity") custo = Column(Float) precovenda = Column(Float, default=0) precovenda2 = Column(Float, default=0) precovenda3 = Column(Float, default=0) id_unidade_venda = Column(Float) #TODO: unity_sell = relationship("ItemUnity") vende_sem_estoque = Column(Integer, default=0) #TODO: balanca = None fator_un_venda = Column(Integer, default=1) marca = Column(String(50)) para_revenda = Column(SmallInteger) id_moeda = Column(Integer, default=1) inativo = Column(Integer, default=0) class CategoriaProduto(Base): __tablename__ = "CATPRODUTO" id = Column(Integer, Sequence('id_manager'), primary_key=True) descricao = Column(String(50)) tem_aprovacao = Column(Integer, default=0) id_owner = Column(Integer, ForeignKey("CATPRODUTO.id"))
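# Illustrative sketch only (assumes an in-memory SQLite engine and that Base above is a
# standard SQLAlchemy declarative base); shows creating a category and a product.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    categoria = CategoriaProduto(descricao="Bebidas")
    produto = Produto(codigo="P001", descricao="Agua mineral", categoria=categoria,
                      custo=1.5, precovenda=2.5)
    session.add_all([categoria, produto])
    session.commit()

    print(session.query(Produto).filter_by(codigo="P001").one().descricao)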
python
# SPDX-FileCopyrightText: 2019 Scott Shawcroft for Adafruit Industries # # SPDX-License-Identifier: MIT """ `adafruit_il91874` ================================================================================ CircuitPython `displayio` driver for IL91874-based ePaper displays * Author(s): Scott Shawcroft Implementation Notes -------------------- **Hardware:** * `Adafruit 2.7" Tri-Color ePaper Display Shield <https://www.adafruit.com/product/4229>`_ **Software and Dependencies:** * Adafruit CircuitPython firmware for the supported boards: https://github.com/adafruit/circuitpython/releases """ import displayio __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_IL91874.git" _START_SEQUENCE = ( b"\x04\x00" # Power on b"\x00\x01\xaf" # panel setting b"\x30\x01\x3a" # PLL b"\x01\x05\x03\x00\x2b\x2b\x09" # power setting b"\x06\x03\x07\x07\x17" # booster soft start b"\xf8\x02\x60\xa5" # mystery command in example code b"\xf8\x02\x89\xa5" # mystery command in example code b"\xf8\x02\x90\x00" # mystery command in example code b"\xf8\x02\x93\xa2" # mystery command in example code b"\xf8\x02\x73\x41" # mystery command in example code b"\x82\x01\x12" # VCM DC b"\x50\x01\x87" # CDI setting # Look Up Tables # LUT1 b"\x20\x2c\x00\x00\x00\x1a\x1a\x00\x00\x01\x00\x0a\x0a\x00\x00\x08\x00\x0e\x01\x0e\x01\x10\x00" b"\x0a\x0a\x00\x00\x08\x00\x04\x10\x00\x00\x05\x00\x03\x0e\x00\x00\x0a\x00\x23\x00\x00\x00\x01" # LUTWW b"\x21\x2a\x90\x1a\x1a\x00\x00\x01\x40\x0a\x0a\x00\x00\x08\x84\x0e\x01\x0e\x01\x10\x80\x0a\x0a" b"\x00\x00\x08\x00\x04\x10\x00\x00\x05\x00\x03\x0e\x00\x00\x0a\x00\x23\x00\x00\x00\x01" # LUTBW b"\x22\x2a\xa0\x1a\x1a\x00\x00\x01\x00\x0a\x0a\x00\x00\x08\x84\x0e\x01\x0e\x01\x10\x90\x0a\x0a" b"\x00\x00\x08\xb0\x04\x10\x00\x00\x05\xb0\x03\x0e\x00\x00\x0a\xc0\x23\x00\x00\x00\x01" # LUTWB b"\x23\x2a\x90\x1a\x1a\x00\x00\x01\x40\x0a\x0a\x00\x00\x08\x84\x0e\x01\x0e\x01\x10\x80\x0a\x0a" b"\x00\x00\x08\x00\x04\x10\x00\x00\x05\x00\x03\x0e\x00\x00\x0a\x00\x23\x00\x00\x00\x01" # LUTBB b"\x24\x2a\x90\x1a\x1a\x00\x00\x01\x20\x0a\x0a\x00\x00\x08\x84\x0e\x01\x0e\x01\x10\x10\x0a\x0a" b"\x00\x00\x08\x00\x04\x10\x00\x00\x05\x00\x03\x0e\x00\x00\x0a\x00\x23\x00\x00\x00\x01" b"\x61\x04\x00\x00\x00\x00" # Resolution b"\x16\x80\x00" # PDRF ) _STOP_SEQUENCE = b"\x02\x01\x17" # Power off # pylint: disable=too-few-public-methods class IL91874(displayio.EPaperDisplay): """IL91874 display driver""" def __init__(self, bus, **kwargs): start_sequence = bytearray(_START_SEQUENCE) width = kwargs["width"] height = kwargs["height"] if "rotation" in kwargs and kwargs["rotation"] % 180 != 0: width, height = height, width start_sequence[-7] = (width >> 8) & 0xFF start_sequence[-6] = width & 0xFF start_sequence[-5] = (height >> 8) & 0xFF start_sequence[-4] = height & 0xFF super().__init__( bus, start_sequence, _STOP_SEQUENCE, **kwargs, ram_width=320, ram_height=300, busy_state=False, write_black_ram_command=0x10, black_bits_inverted=True, write_color_ram_command=0x13, refresh_display_command=0x12, always_toggle_chip_select=True, )
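# Hypothetical wiring sketch (pin names and panel size are assumptions that depend on
# the board and breakout in use); follows the usual displayio ePaper pattern of handing
# a FourWire bus to the driver.
if __name__ == "__main__":
    import board

    displayio.release_displays()
    spi = board.SPI()
    epd_bus = displayio.FourWire(
        spi, command=board.D10, chip_select=board.D9, baudrate=1000000
    )
    display = IL91874(epd_bus, width=264, height=176, rotation=90)
    print(display.width, display.height)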
python
# pylint: disable=C0111,R0903 """Print the branch and git status for the currently focused window. Requires: * xcwd * Python module 'pygit2' """ import os import pygit2 import core.module import util.cli class Module(core.module.Module): def __init__(self, config, theme): super().__init__(config, theme, []) self.__error = False def hidden(self): return self.__error def update(self): state = {} self.clear_widgets() try: directory = util.cli.execute("xcwd").strip() directory = self.__get_git_root(directory) repo = pygit2.Repository(directory) self.add_widget(name="git.main", full_text=repo.head.shorthand) for filepath, flags in repo.status().items(): if ( flags == pygit2.GIT_STATUS_WT_NEW or flags == pygit2.GIT_STATUS_INDEX_NEW ): state["new"] = True if ( flags == pygit2.GIT_STATUS_WT_DELETED or flags == pygit2.GIT_STATUS_INDEX_DELETED ): state["deleted"] = True if ( flags == pygit2.GIT_STATUS_WT_MODIFIED or flags == pygit2.GIT_STATUS_INDEX_MODIFIED ): state["modified"] = True self.__error = False if "new" in state: self.add_widget(name="git.new") if "modified" in state: self.add_widget(name="git.modified") if "deleted" in state: self.add_widget(name="git.deleted") except Exception as e: self.__error = True def state(self, widget): return widget.name.split(".")[1] def __get_git_root(self, directory): while len(directory) > 1: if os.path.exists(os.path.join(directory, ".git")): return directory directory = "/".join(directory.split("/")[0:-1]) return "/" # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
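# Typical invocation (the module name is assumed to follow the file name, as with other
# bumblebee-status modules):
#
#   bumblebee-status -m git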
python
from pathlib import Path import typer from spacy.tokens import DocBin import spacy ASSETS_DIR = Path(__file__).parent.parent / "assets" CORPUS_DIR = Path(__file__).parent.parent / "corpus" def read_categories(path: Path): return path.open().read().strip().split("\n") def read_tsv(file_): for line in file_: text, labels, annotator = line.split("\t") yield { "text": text, "labels": [int(label) for label in labels.split(",") if label != ''], "annotator": annotator } def convert_record(nlp, record, categories): """Convert a record from the tsv into a spaCy Doc object.""" doc = nlp.make_doc(record["text"]) # All categories other than the true ones get value 0 doc.cats = {category: 0 for category in categories} # True labels get value 1 for label in record["labels"]: doc.cats[categories[label]] = 1 return doc def main(assets_dir: Path=ASSETS_DIR, corpus_dir: Path=CORPUS_DIR, lang: str="en"): """Convert the GoEmotion corpus's tsv files to spaCy's binary format.""" categories = read_categories(assets_dir / "categories.txt") nlp = spacy.blank(lang) for tsv_file in assets_dir.iterdir(): if not tsv_file.parts[-1].endswith(".tsv"): continue records = read_tsv(tsv_file.open(encoding="utf8")) docs = [convert_record(nlp, record, categories) for record in records] out_file = corpus_dir / tsv_file.with_suffix(".spacy").parts[-1] out_data = DocBin(docs=docs).to_bytes() with out_file.open("wb") as file_: file_.write(out_data) if __name__ == "__main__": typer.run(main)
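# Small sanity-check helper (not part of the original script); call _demo() manually to
# see how one record becomes a Doc with one-hot doc.cats. The category names are invented.
def _demo():
    nlp = spacy.blank("en")
    categories = ["joy", "anger", "neutral"]
    record = {"text": "what a great day", "labels": [0], "annotator": "demo"}
    doc = convert_record(nlp, record, categories)
    print(doc.cats)  # {'joy': 1, 'anger': 0, 'neutral': 0}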
python
# -*- coding: utf-8 -*- from httoop.exceptions import InvalidURI from httoop.messages import Request, Response from httoop.parser import NOT_RECEIVED_YET, StateMachine from httoop.status import ( BAD_REQUEST, HTTP_VERSION_NOT_SUPPORTED, LENGTH_REQUIRED, MOVED_PERMANENTLY, SWITCHING_PROTOCOLS, URI_TOO_LONG, ) from httoop.util import Unicode, _ from httoop.version import ServerHeader, ServerProtocol class ServerStateMachine(StateMachine): Message = Request HTTP2 = None def __init__(self, scheme, host, port): super(ServerStateMachine, self).__init__() self.MAX_URI_LENGTH = float('inf') # 8000 self._default_scheme = scheme self._default_host = host self._default_port = port self.request = None self.response = None def on_message_started(self): super(ServerStateMachine, self).on_message_started() self.response = Response() self.request = self.message self.state.update(dict( method=False, uri=False )) def on_message_complete(self): request = super(ServerStateMachine, self).on_message_complete() response = self.response self.request = None self.response = None return (request, response) def parse_startline(self): state = super(ServerStateMachine, self).parse_startline() if state is NOT_RECEIVED_YET: self._check_uri_max_length(self.buffer) return state def on_startline_complete(self): self.state['method'] = True self.on_method_complete() self.state['uri'] = True self.on_uri_complete() super(ServerStateMachine, self).on_startline_complete() def on_uri_complete(self): super(ServerStateMachine, self).on_uri_complete() self._check_uri_max_length(bytes(self.request.uri)) self.sanitize_request_uri_path() self.validate_request_uri_scheme() self.set_server_response_header() def on_protocol_complete(self): super(ServerStateMachine, self).on_protocol_complete() self.check_request_protocol() self.set_response_protocol() def on_headers_complete(self): self.check_host_header_exists() self.set_request_uri_host() self.check_http2_upgrade() super(ServerStateMachine, self).on_headers_complete() def on_body_complete(self): self.check_message_without_body_containing_data() super(ServerStateMachine, self).on_body_complete() self.check_methods_without_body() def check_request_protocol(self): # check if we speak the same major HTTP version if self.message.protocol > ServerProtocol: # the major HTTP version differs raise HTTP_VERSION_NOT_SUPPORTED('The server only supports HTTP/1.0 and HTTP/1.1.') def set_response_protocol(self): # set appropriate response protocol version self.response.protocol = min(self.message.protocol, ServerProtocol) def _check_uri_max_length(self, uri): if len(uri) > self.MAX_URI_LENGTH: raise URI_TOO_LONG( u'The maximum length of the request is %d' % self.MAX_URI_LENGTH ) def sanitize_request_uri_path(self): path = self.message.uri.path self.message.uri.normalize() if path != self.message.uri.path: raise MOVED_PERMANENTLY(self.message.uri.path.encode('UTF-8')) def validate_request_uri_scheme(self): if self.message.uri.scheme: if self.message.uri.scheme not in ('http', 'https'): # pragma: no cover exc = InvalidURI(_(u'Invalid URL: wrong scheme')) raise BAD_REQUEST(Unicode(exc)) else: self.message.uri.scheme = self._default_scheme self.message.uri.host = self._default_host self.message.uri.port = self._default_port def set_server_response_header(self): self.response.headers.setdefault('Server', ServerHeader) def check_host_header_exists(self): if self.message.protocol >= (1, 1) and 'Host' not in self.message.headers: raise BAD_REQUEST('Missing Host header') def set_request_uri_host(self): if 
'Host' not in self.message.headers: return host = self.message.headers.element('Host') self.message.uri.host = host.host self.message.uri.port = host.port def check_message_without_body_containing_data(self): if self.buffer and 'Content-Length' not in self.message.headers and not self.chunked: # request without Content-Length header but body raise LENGTH_REQUIRED(u'Missing Content-Length header.') def check_methods_without_body(self): if self.message.method in (u'HEAD', u'GET', u'TRACE') and self.message.body: raise BAD_REQUEST('A %s request is considered as safe and MUST NOT contain a request body.' % self.message.method) def check_http2_upgrade(self): def is_http2_upgrade(): connection = self.message.headers.values('Connection') yield 'Upgrade' in connection yield 'HTTP2-Settings' in connection yield 'Upgrade' in self.message.headers yield self.message.headers.element('Upgrade') == 'h2c' yield 'HTTP2-Settings' in self.message.headers yield self.message.headers.element('HTTP2-Settings') if all(is_http2_upgrade()): if self.HTTP2 is None: return self.response.headers['Upgrade'] = 'h2c' self.response.headers['Connection'] = 'Upgrade' self.__class__ = self.HTTP2 raise SWITCHING_PROTOCOLS()
python
""" Test passing exceptions to logs """ import inspect import pytest from .util import check_finished_spans, logger, tracer @pytest.mark.parametrize('stmt,exception', [ ('1 / 0', ZeroDivisionError('division by zero')), ('y = non_existent_variable', NameError("name 'non_existent_variable' is not defined")), ('import non_existent_package', ModuleNotFoundError("No module named 'non_existent_package'")), ]) def test_exception(logger, tracer, stmt, exception): operation_name = 'span_exception' log = { 'event': 'error', 'message': 'Who would cross the Bridge of Death must answer me these questions three, ' 'ere the other side he see.', 'error.object': exception, 'error.kind': exception.__class__, 'stack': f' File "{__file__}", line ' + '{lineno}, in {func}\n exec(stmt)\n File "<string>", ' 'line 1, in <module>\n', } with tracer.start_active_span(operation_name): try: lineno = inspect.currentframe().f_lineno + 1 exec(stmt) except exception.__class__: func = inspect.currentframe().f_code.co_name log['stack'] = log['stack'].format(lineno=lineno, func=func) logger.exception(log['message']) check_finished_spans(tracer=tracer, operation_names_expected=[operation_name], logs_expected={operation_name: [log]})
python
from fastapi import HTTPException from datetime import datetime from .router import Router import models from secrets import token_hex class SessionsRouter(Router): def __init__(self, config, database): super().__init__('/sessions', config, database) def methods(self): @self.router.get('/all') async def get_sessions(key: models.ApiKey): api_key_check = self.check_api_key(key.key, 'super') if api_key_check is not True: raise api_key_check with self.database as cursor: cursor.execute("SELECT id, time, user, token FROM sessions") sessions = [{'id': id, 'time': time, 'user': user, 'token': token} for id, time, user, token in cursor.fetchall()] return sessions @self.router.get('/') async def get_session(key: models.ApiKey, session: models.Session): api_key_check = self.check_api_key(key.key, 'super') if api_key_check is not True: raise api_key_check with self.database as cursor: cursor.execute("SELECT id, user FROM sessions WHERE token=?", (session.token,)) session = cursor.fetchone() if not session: raise HTTPException(404, 'Session does not exist') id, user_id = session cursor.execute("SELECT permission FROM users WHERE id=?", (user_id,)) user = cursor.fetchone() if not user: raise HTTPException(404, 'User does not exist') permission, = user session = {'id': id, 'user': user_id, 'permission': permission} return session @self.router.put('/') async def put_session(key: models.ApiKey, session: models.NewSession): api_key_check = self.check_api_key(key.key, 'super') if api_key_check is not True: raise api_key_check with self.database as cursor: cursor.execute("SELECT id, permission FROM users WHERE nick=? AND password=?", (session.username, session.password)) user = cursor.fetchone() if not user: raise HTTPException(404, "User does not exist") user_id, permission = user cursor.execute("DELETE FROM sessions WHERE user=?", (user_id,)) time = round(datetime.now().timestamp()) token = token_hex(self.config['TOKEN_SECURITY'][permission]) cursor.execute("INSERT INTO sessions (time, user, token) VALUES (?, ?, ?)", (time, user_id, token)) cursor.execute("SELECT id, time, user, token FROM sessions WHERE user=?", (user_id,)) session = cursor.fetchone() return {'id': session[0], 'time': session[1], 'user': session[2], 'token': session[3]}
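# The SQL above implies (roughly) the following table layout; this is an inference from
# the queries, not a schema taken from the original project:
#
#   CREATE TABLE users    (id INTEGER PRIMARY KEY, nick TEXT, password TEXT, permission TEXT);
#   CREATE TABLE sessions (id INTEGER PRIMARY KEY, time INTEGER, user INTEGER, token TEXT);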
python
# Copyright (c) 2020 elParaguayo # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from typing import Any, List, Tuple from libqtile import bar, hook from libqtile.widget import base class WindowCount(base._TextBox): """A simple widget to show the number of windows in the current group.""" orientations = base.ORIENTATION_HORIZONTAL defaults = [ ("font", "sans", "Text font"), ("fontsize", None, "Font pixel size. Calculated if None."), ("fontshadow", None, "font shadow color, default is None(no shadow)"), ("padding", None, "Padding left and right. Calculated if None."), ("foreground", "#ffffff", "Foreground colour."), ("text_format", "{num}", "Format for message"), ("show_zero", False, "Show window count when no windows") ] # type: List[Tuple[str, Any, str]] def __init__(self, text=" ", width=bar.CALCULATED, **config): base._TextBox.__init__(self, text=text, width=width, **config) self.add_defaults(WindowCount.defaults) self._count = 0 def _configure(self, qtile, bar): base._TextBox._configure(self, qtile, bar) self._setup_hooks() self._wincount() def _setup_hooks(self): hook.subscribe.client_killed(self._win_killed) hook.subscribe.client_managed(self._wincount) hook.subscribe.current_screen_change(self._wincount) hook.subscribe.setgroup(self._wincount) def _wincount(self, *args): try: self._count = len(self.qtile.current_group.windows) except AttributeError: self._count = 0 self.update() def _win_killed(self, window): try: self._count = len(self.qtile.current_group.windows) except AttributeError: self._count = 0 if self._count and getattr(window, "group", None): self._count -= 1 self.update() def calculate_length(self): if self.text and (self._count or self.show_zero): return min( self.layout.width, self.bar.width ) + self.actual_padding * 2 else: return 0 def update(self): self.text = self.text_format.format(num=self._count) self.bar.draw() def cmd_get(self): """Retrieve the current text.""" return self.text
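# Hypothetical usage sketch (values are examples only): how the widget is typically
# placed in a bar inside a user's qtile config.py.
def _example_screen():
    from libqtile.config import Screen

    return Screen(
        top=bar.Bar(
            [WindowCount(text_format="win: {num}", show_zero=True)],
            24,
        ),
    )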
python
# Copyright Google Inc. All Rights Reserved. # # Use of this source code is governed by an MIT-style license that can be # found in the LICENSE file at https://angular.io/license """ Public API surface is re-exported here. This API is exported for users building angular from source in downstream projects. The rules from packages/bazel are re-exported here as well as the ng_setup_workspace repository rule needed when building angular from source downstream. Alternately, this API is available from the @angular/bazel npm package if the npm distribution of angular is used in a downstream project. """ load("//packages/bazel:index.bzl", _ng_module = "ng_module", _ng_package = "ng_package", _protractor_web_test = "protractor_web_test", _protractor_web_test_suite = "protractor_web_test_suite") load("//tools:ng_setup_workspace.bzl", _ng_setup_workspace = "ng_setup_workspace") ng_module = _ng_module ng_package = _ng_package protractor_web_test = _protractor_web_test protractor_web_test_suite = _protractor_web_test_suite ng_setup_workspace = _ng_setup_workspace
python
# encoding: utf-8 from __future__ import absolute_import, division, print_function, unicode_literals from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand from django.template import Context, loader from haystack import connections, connection_router, constants from haystack.backends.solr_backend import SolrSearchBackend class Command(BaseCommand): help = "Generates a Solr schema that reflects the indexes." def add_arguments(self, parser): parser.add_argument( "-f", "--filename", help='If provided, directs output to a file instead of stdout.' ) parser.add_argument( "-u", "--using", default=constants.DEFAULT_ALIAS, help='If provided, chooses a connection to work with.' ) def handle(self, **options): """Generates a Solr schema that reflects the indexes.""" using = options.get('using') schema_xml = self.build_template(using=using) if options.get('filename'): self.write_file(options.get('filename'), schema_xml) else: self.print_stdout(schema_xml) def build_context(self, using): backend = connections[using].get_backend() if not isinstance(backend, SolrSearchBackend): raise ImproperlyConfigured("'%s' isn't configured as a SolrEngine)." % backend.connection_alias) content_field_name, fields = backend.build_schema( connections[using].get_unified_index().all_searchfields() ) return Context({ 'content_field_name': content_field_name, 'fields': fields, 'default_operator': constants.DEFAULT_OPERATOR, 'ID': constants.ID, 'DJANGO_CT': constants.DJANGO_CT, 'DJANGO_ID': constants.DJANGO_ID, }) def build_template(self, using): t = loader.get_template('search_configuration/solr.xml') c = self.build_context(using=using) return t.render(c) def print_stdout(self, schema_xml): self.stderr.write("\n") self.stderr.write("\n") self.stderr.write("\n") self.stderr.write("Save the following output to 'schema.xml' and place it in your Solr configuration directory.\n") self.stderr.write("--------------------------------------------------------------------------------------------\n") self.stderr.write("\n") self.stdout.write(schema_xml) def write_file(self, filename, schema_xml): with open(filename, 'w') as schema_file: schema_file.write(schema_xml)
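# Typical invocations, assuming this file is installed as Haystack's build_solr_schema
# management command:
#
#   python manage.py build_solr_schema
#   python manage.py build_solr_schema --filename=schema.xml --using=default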
python
# Generated by Django 3.2.9 on 2021-12-12 10:34 import django.db.models.deletion from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("customers", "0005_auto_20211205_0953"), ] operations = [ migrations.AlterField( model_name="address", name="customer", field=models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, related_name="addresses", to=settings.AUTH_USER_MODEL, verbose_name="Customer", ), ), ]
python
#tree # β”œβ”€β”€ data # β”‚Β Β  β”œβ”€β”€ SRR388226_1.fastq # β”‚Β Β  β”œβ”€β”€ SRR388226_2.fastq # β”‚Β Β  β”œβ”€β”€ SRR388227_1.fastq # β”‚Β Β  β”œβ”€β”€ SRR388227_2.fastq # β”‚Β Β  β”œβ”€β”€ SRR388228_1.fastq # β”‚Β Β  β”œβ”€β”€ SRR388228_2.fastq # β”‚Β Β  β”œβ”€β”€ SRR388229_1.fastq # β”‚Β Β  β”œβ”€β”€ SRR388229_2.fastq # β”‚Β Β  └── SRR.file # β”œβ”€β”€ fastqc_res # β”œβ”€β”€ RSEM_res # └── STAR_res #write for paired-end only import os import subprocess cpu="10" mainPath="/home/disk/fyh/lab_other_work/STAR_test/" fastqc="/home/disk/fyh/tools/FastQC/fastqc" trimmomatic="/home/disk/fyh/tools/Trimmomatic-0.38/trimmomatic-0.38.jar" STAR="/home/disk/fyh/tools/STAR-2.6.0a/bin/Linux_x86_64_static/STAR" RSEM="/home/disk/fyh/tools/RSEM-1.3.1/rsem-calculate-expression" fastq_phred="/home/disk/fyh/tools/scr/fastq_phred.pl" infer_experiment="/home/disk/fyh/tools/RSeQC-2.6.5/scripts/infer_experiment.py" strand_test="/home/disk/fyh/tools/scr/strand.sh" STAR_index="/home/genomewide/RNA-seq_idx/hg38/STAR" RSEM_index="/home/genomewide/RNA-seq_idx/hg38/RSEM/hg38" RefSeq="/home/genomewide/RNA-seq_idx/hg38/hg38_RefSeq.bed" fastqc_res="/home/disk/fyh/lab_other_work/STAR_test/fastqc_res" STAR_res="/home/disk/fyh/lab_other_work/STAR_test/STAR_res" RSEM_res="/home/disk/fyh/lab_other_work/STAR_test/RSEM_res" log_file="/home/disk/fyh/lab_other_work/STAR_test/quantity_log.txt" os.chdir(mainPath+"data") subprocess.Popen("ls > ../SRRfile.list",shell=True).wait() with open(mainPath+"SRRfile.list") as SRRfile: for file in SRRfile: realfile=file.rstrip() if "_1" in realfile: SRRName=realfile[:-8] read1=SRRName+"_1.fastq" read2=SRRName+"_2.fastq" subprocess.Popen(fastqc+" "+read1+" -o "+fastqc_res+" -t "+cpu,shell=True).wait() subprocess.Popen(fastqc+" "+read2+" -o "+fastqc_res+" -t "+cpu,shell=True).wait() subprocess.Popen("unzip "+fastqc_res+"/"+SRRName+"_1_fastqc.zip -d "+fastqc_res,shell=True).wait() subprocess.Popen("unzip "+fastqc_res+"/"+SRRName+"_2_fastqc.zip -d "+fastqc_res,shell=True).wait() subprocess.Popen(fastq_phred+" "+read1+"> "+mainPath+"phred.txt",shell=True).wait() subprocess.Popen('grep "Per base sequence content" '+fastqc_res+'/'+SRRName+'_1_fastqc/summary.txt | cut -f 1 > '+mainPath+'headcrop.txt',shell=True).wait() phred,headcrop="","" with open(mainPath+"phred.txt") as phredFile: phred=phredFile.readlines()[0].rstrip() with open(mainPath+"headcrop.txt") as headcropFile: headcrop=headcropFile.readlines()[0].rstrip() if headcrop=="FAIL" or headcrop=="WARN": subprocess.Popen("java -jar "+trimmomatic+" PE -phred"+phred+" "+read1+" "+read2+" "+read1+".map"+" "+read1+".unmap"+" "+read2+".map"+" "+read2+".unmap HEADCROP:12 SLIDINGWINDOW:5:20",shell=True).wait() else: subprocess.Popen("java -jar "+trimmomatic+" PE -phred"+phred+" "+read1+" "+read2+" "+read1+".map"+" "+read1+".unmap"+" "+read2+".map"+" "+read2+".unmap SLIDINGWINDOW:5:20",shell=True).wait() subprocess.Popen("mkdir "+STAR_res+"/"+SRRName,shell=True).wait() subprocess.Popen(STAR+" --runThreadN "+cpu+" --twopassMode Basic --outSAMstrandField intronMotif --genomeDir "+STAR_index+" --readFilesIn "+read1+".map "+read2+".map --outFileNamePrefix "+STAR_res+"/"+SRRName+"/ --outSAMtype BAM SortedByCoordinate --quantMode GeneCounts TranscriptomeSAM",shell=True).wait() subprocess.Popen(infer_experiment+" -i "+STAR_res+"/"+SRRName+"/Aligned.sortedByCoord.out.bam -r "+RefSeq+" > "+STAR_res+"/"+SRRName+"/strand.txt",shell=True).wait() subprocess.Popen("sh "+strand_test+" "+"../"+STAR_res+"/"+SRRName+"/strand.txt > 
"+mainPath+"strandInfer.txt",shell=True).wait() strand="" with open(mainPath+"strandInfer.txt") as strandFile: strand=strandFile.readlines()[0].rstrip() subprocess.Popen(RSEM+" -p "+cpu+" --bam --paired-end --forward-prob "+strand+" "+STAR_res+"/"+SRRName+"/Aligned.toTranscriptome.out.bam "+RSEM_index+" "+RSEM_res+"/"+SRRName,shell=True).wait() subprocess.Popen("rm -r "+SRRName+"*map "+fastqc_res+"/"+SRRName+"*.fastqc "+fastqc_res+"/"+SRRName+"*.zip "+RSEM_res+"/"+SRRName+".transcript.bam "+RSEM_res+"/"+SRRName+".stat "+STAR_res+"/"+SRRName,shell=True).wait() print("finished!") elif "_2" in realfile: continue
python
# -*- coding: utf-8 -*-

import urllib.request, urllib.parse, urllib.error

print("η»΄εŸΊζ–°ι—»οΌŒθ‡ͺη”±ηš„ζ–°ι—»ζΊ")  # "Wikinews, the free news source"
print(urllib.parse.unquote_plus("http%3A%2F%2Fzh.wikinews.org%2Fwiki%2FWikinews%3A%25E9%25A6%2596%25E9%25A1%25B5"))
python
""" Copyright (c) 2017, Jairus Martin. Distributed under the terms of the MIT License. The full license is in the file LICENSE, distributed with this software. Created on Aug 3, 2017 @author: jrm """ from atom.api import Typed from enamlnative.widgets.scroll_view import ProxyScrollView from .bridge import ObjcMethod, ObjcProperty from .uikit_view import UIView, UiKitView class UIScrollView(UIView): #: Properties contentSize = ObjcProperty('CGSize') #: Added by UIScrollView+AutoResize fitToContents = ObjcMethod() # axis = ObjcProperty('UILayoutConstraintAxis') # #setProgress = ObjcMethod('float', dict(animated='bool')) # addArrangedSubview = ObjcMethod('UIView') # insertArrangedSubview = ObjcMethod('UIView', dict(atIndex='NSInteger')) # removeArrangedSubview = ObjcMethod('UIView') # # UILayoutConstraintAxisHorizontal = 0 # UILayoutConstraintAxisVertical = 1 class UiKitScrollView(UiKitView, ProxyScrollView): """ An UiKit implementation of an Enaml ProxyToolkitObject. """ #: A reference to the toolkit layout created by the proxy. widget = Typed(UIScrollView) # ------------------------------------------------------------------------- # Initialization API # ------------------------------------------------------------------------- def create_widget(self): """ Create the widget """ self.widget = UIScrollView() # def update_frame(self): # """ """ # super # # d = self.declaration # # if not (d.x or d.y or d.width or d.height): # # d.width, d.height = d.parent.width, d.parent.height # # self.frame = (d.x,d.y,d.width,d.height) def init_layout(self): super(UiKitScrollView, self).init_layout() for c in self.children(): if c.frame: self.widget.contentSize = c.frame[-2:] return self.widget.fitToContents() # ------------------------------------------------------------------------- # ProxyScrollView API # ------------------------------------------------------------------------- # def set_frame(self, change): # super(UiKitScrollView, self).set_frame(change) # d = self.declaration # self.widget.contentSize = (d.width, d.height) def set_orientation(self, orientation): #: TODO: Cannot enforce direction that I'm aware of #: (but can lock direction) pass def set_scroll_by(self, delta): raise NotImplementedError def set_scroll_to(self, point): raise NotImplementedError
python
# --------------------------------------------------------- # Tensorflow Utils Implementation # Licensed under The MIT License [see LICENSE for details] # Written by Cheng-Bin Jin # Email: [email protected] # --------------------------------------------------------- import os import logging import functools import tensorflow as tf import tensorflow.contrib.slim as slim from tensorflow.python.training import moving_averages logger = logging.getLogger(__name__) # logger logger.setLevel(logging.INFO) def _init_logger(log_path): formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s') # file handler file_handler = logging.FileHandler(os.path.join(log_path, 'model.log')) file_handler.setFormatter(formatter) file_handler.setLevel(logging.INFO) # stream handler stream_handler = logging.StreamHandler() stream_handler.setFormatter(formatter) # add handlers logger.addHandler(file_handler) logger.addHandler(stream_handler) def padding2d(x, p_h=1, p_w=1, pad_type='REFLECT', name='pad2d'): if pad_type == 'REFLECT': return tf.pad(x, [[0, 0], [p_h, p_h], [p_w, p_w], [0, 0]], 'REFLECT', name=name) def conv2d(x, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, padding='SAME', name='conv2d', is_print=True): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, x.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv2d(x, w, strides=[1, d_h, d_w, 1], padding=padding) biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) # conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) conv = tf.nn.bias_add(conv, biases) if is_print: print_activations(conv) return conv def conv3d(x, output_dim, k_h=5, k_w=5, k_d=5, d_h=2, d_w=2, d_d=2, stddev=0.02, padding='SAME', name='conv3d', is_print=True): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, k_d, x.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv3d(x, w, strides=[1, d_h, d_w, d_d, 1], padding=padding) biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) # conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) conv = tf.nn.bias_add(conv, biases) if is_print: print_activations(conv) return conv def deconv2d(x, k, k_h=3, k_w=3, d_h=2, d_w=2, stddev=0.02, padding_='SAME', output_size=None, name='deconv2d', with_w=False, is_print=True): with tf.variable_scope(name): input_shape = x.get_shape().as_list() # calculate output size h_output, w_output = None, None if not output_size: h_output, w_output = input_shape[1] * 2, input_shape[2] * 2 # output_shape = [input_shape[0], h_output, w_output, k] # error when not define batch_size output_shape = [tf.shape(x)[0], h_output, w_output, k] # conv2d transpose w = tf.get_variable('w', [k_h, k_w, k, input_shape[3]], initializer=tf.random_normal_initializer(stddev=stddev)) deconv = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=[1, d_h, d_w, 1], padding=padding_) biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) deconv = tf.nn.bias_add(deconv, biases) if is_print: print_activations(deconv) if with_w: return deconv, w, biases else: return deconv def upsampling2d(x, size=(2, 2), name='upsampling2d'): with tf.name_scope(name): shape = x.get_shape().as_list() return tf.image.resize_nearest_neighbor(x, size=(size[0] * shape[1], size[1] * shape[2])) def linear(x, output_size, bias_start=0.0, with_w=False, name='fc'): shape = 
x.get_shape().as_list() with tf.variable_scope(name): matrix = tf.get_variable(name="matrix", shape=[shape[1], output_size], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()) bias = tf.get_variable(name="bias", shape=[output_size], initializer=tf.constant_initializer(bias_start)) if with_w: return tf.matmul(x, matrix) + bias, matrix, bias else: return tf.matmul(x, matrix) + bias def norm(x, name, _type, _ops, is_train=True): if _type == 'batch': return batch_norm(x, name=name, _ops=_ops, is_train=is_train) elif _type == 'instance': return instance_norm(x, name=name) elif _type == 'layer': return layer_norm(x, name=name) else: raise NotImplementedError def batch_norm(x, name, _ops, is_train=True): """Batch normalization.""" with tf.variable_scope(name): params_shape = [x.get_shape()[-1]] beta = tf.get_variable('beta', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32)) gamma = tf.get_variable('gamma', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32)) if is_train is True: mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments') moving_mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False) moving_variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False) _ops.append(moving_averages.assign_moving_average(moving_mean, mean, 0.9)) _ops.append(moving_averages.assign_moving_average(moving_variance, variance, 0.9)) else: mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False) variance = tf.get_variable('moving_variance', params_shape, tf.float32, trainable=False) # epsilon used to be 1e-5. Maybe 0.001 solves NaN problem in deeper net. y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 1e-5) y.set_shape(x.get_shape()) return y def instance_norm(x, name='instance_norm', mean=1.0, stddev=0.02, epsilon=1e-5): with tf.variable_scope(name): depth = x.get_shape()[3] scale = tf.get_variable( 'scale', [depth], tf.float32, initializer=tf.random_normal_initializer(mean=mean, stddev=stddev, dtype=tf.float32)) offset = tf.get_variable('offset', [depth], initializer=tf.constant_initializer(0.0)) # calcualte mean and variance as instance mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True) # normalization inv = tf.rsqrt(variance + epsilon) normalized = (x - mean) * inv return scale * normalized + offset # TODO: I'm not sure is it a good implementation of layer normalization... def layer_norm(x, name='layer_norm'): with tf.variable_scope(name): norm_axes = [1, 2, 3] mean, var = tf.nn.moments(x, axes=norm_axes, keep_dims=True) # Assume the 'neurons' axis is the third of norm_axes. This is the case for fully-connected # and BHWC conv layers. n_neurons = x.get_shape().as_list()[norm_axes[2]] offset = tf.get_variable('offset', n_neurons, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32)) scale = tf.get_variable('scale', n_neurons, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32)) # Add broadcasting dims to offset and scale (e.g. 
BCHW conv data) offset = tf.reshape(offset, [1 for _ in range(len(norm_axes)-1)] + [-1]) scale = tf.reshape(scale, [1 for _ in range(len(norm_axes)-1)] + [-1]) result = tf.nn.batch_normalization(x, mean, var, offset, scale, 1e-5) return result def n_res_blocks(x, _ops=None, norm_='instance', is_train=True, num_blocks=6, is_print=False): output = None for idx in range(1, num_blocks+1): output = res_block(x, x.get_shape()[3], _ops=_ops, norm_=norm_, is_train=is_train, name='res{}'.format(idx)) x = output if is_print: print_activations(output) return output # norm(x, name, _type, _ops, is_train=True) def res_block(x, k, _ops=None, norm_='instance', is_train=True, pad_type=None, name=None): with tf.variable_scope(name): conv1, conv2 = None, None # 3x3 Conv-Batch-Relu S1 with tf.variable_scope('layer1'): if pad_type is None: conv1 = conv2d(x, k, k_h=3, k_w=3, d_h=1, d_w=1, padding='SAME', name='conv') elif pad_type == 'REFLECT': padded1 = padding2d(x, p_h=1, p_w=1, pad_type='REFLECT', name='padding') conv1 = conv2d(padded1, k, k_h=3, k_w=3, d_h=1, d_w=1, padding='VALID', name='conv') normalized1 = norm(conv1, name='norm', _type=norm_, _ops=_ops, is_train=is_train) relu1 = tf.nn.relu(normalized1) # 3x3 Conv-Batch S1 with tf.variable_scope('layer2'): if pad_type is None: conv2 = conv2d(relu1, k, k_h=3, k_w=3, d_h=1, d_w=1, padding='SAME', name='conv') elif pad_type == 'REFLECT': padded2 = padding2d(relu1, p_h=1, p_w=1, pad_type='REFLECT', name='padding') conv2 = conv2d(padded2, k, k_h=3, k_w=3, d_h=1, d_w=1, padding='VALID', name='conv') normalized2 = norm(conv2, name='norm', _type=norm_, _ops=_ops, is_train=is_train) # sum layer1 and layer2 output = x + normalized2 return output def identity(x, name='identity', is_print=False): output = tf.identity(x, name=name) if is_print: print_activations(output) return output def avgPoolConv(x, output_dim, filter_size=3, stride=1, name='avgPoolConv', is_print=True): with tf.variable_scope(name): output = avg_pool_2x2(x) output = conv2d(output, output_dim=output_dim, k_h=filter_size, k_w=filter_size, d_h=stride, d_w=stride) if is_print: print_activations(output) return output def convAvgPool(x, output_dim, filter_size=3, stride=1, name='convAvgPool', is_print=True): with tf.variable_scope(name): output = conv2d(x, output_dim=output_dim, k_h=filter_size, k_w=filter_size, d_h=stride, d_w=stride) output = avg_pool_2x2(output) if is_print: print_activations(output) return output def max_pool_2x2(x, name='max_pool'): with tf.name_scope(name): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def avg_pool_2x2(x, name='avg_pool'): with tf.name_scope(name): return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def sigmoid(x, name='sigmoid', is_print=False): output = tf.nn.sigmoid(x, name=name) if is_print: print_activations(output) return output def tanh(x, name='tanh', is_print=False): output = tf.nn.tanh(x, name=name) if is_print: print_activations(output) return output def relu(x, name='relu', is_print=False): output = tf.nn.relu(x, name=name) if is_print: print_activations(output) return output def lrelu(x, leak=0.2, name='lrelu', is_print=False): output = tf.maximum(x, leak*x, name=name) if is_print: print_activations(output) return output def xavier_init(in_dim): # print('in_dim: ', in_dim) xavier_stddev = 1. / tf.sqrt(in_dim / 2.) 
return xavier_stddev def print_activations(t): # print(t.op.name, ' ', t.get_shape().as_list()) logger.info(t.op.name + '{}'.format(t.get_shape().as_list())) def show_all_variables(): model_vars = tf.trainable_variables() slim.model_analyzer.analyze_vars(model_vars, print_info=True) def batch_convert2int(images): # images: 4D float tensor (batch_size, image_size, image_size, depth) return tf.map_fn(convert2int, images, dtype=tf.uint8) def convert2int(image): # transform from float tensor ([-1.,1.]) to int image ([0,255]) return tf.image.convert_image_dtype((image + 1.0) / 2.0, tf.uint8) def res_block_v2(x, k, filter_size, _ops=None, norm_='instance', is_train=True, resample=None, name=None): with tf.variable_scope(name): if resample == 'down': conv_shortcut = functools.partial(avgPoolConv, output_dim=k, filter_size=1) conv_1 = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) conv_2 = functools.partial(convAvgPool, output_dim=k) elif resample == 'up': conv_shortcut = functools.partial(deconv2d, k=k) conv_1 = functools.partial(deconv2d, k=k, k_h=filter_size, k_w=filter_size) conv_2 = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) elif resample is None: conv_shortcut = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) conv_1 = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) conv_2 = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) else: raise Exception('invalid resample value') if (k == x.get_shape().as_list()[3]) and (resample is None): shortcut = x # Identity skip-connection else: shortcut = conv_shortcut(x, name='shortcut') output = x output = norm(output, _type=norm_, _ops=_ops, is_train=is_train, name='norm1') output = relu(output, name='relu1') output = conv_1(output, name='conv1') output = norm(output, _type=norm_, _ops=_ops, is_train=is_train, name='norm2') output = relu(output, name='relu2') output = conv_2(output, name='conv2') return shortcut + output
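# Minimal usage sketch (assumes TensorFlow 1.x, matching the tf.get_variable /
# tf.contrib style used above): a tiny conv -> instance-norm -> lrelu block built
# with the helpers in this module.
def _demo_block():
    tf.reset_default_graph()
    _ops = []
    x = tf.placeholder(tf.float32, [None, 32, 32, 3], name='x')
    h = conv2d(x, output_dim=16, k_h=3, k_w=3, d_h=1, d_w=1, name='conv1')
    h = norm(h, name='norm1', _type='instance', _ops=_ops)
    h = lrelu(h, name='lrelu1')
    return h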
python
import pathlib from django.utils.safestring import mark_safe CSS_PATH = (pathlib.Path(__file__).resolve().parent / 'static' / 'frontend' / 'built' / 'style' / 'email.min.css') def get(): return mark_safe(CSS_PATH.read_text(encoding='utf-8')) # nosec
python
import os

print("If you want to use the service, install:")
print(":django")
print(":vsftpd")
print("[Y] or [N]")
Select_usr = input("Do you want to install django and vsftpd?: ")

if Select_usr == 'Y':
    os.system("pip3 install django")
    os.system("sudo apt install vsftpd")
    print("[*] Services have been installed")
    print("Starting FTP and DJANGO services")
    os.system("python3 start.py")

if Select_usr == 'N':
    exit()
python
""" The proper way to create an uncertain array is by calling :func:`.uarray` """ # Adding numpy arrays to GTC is not an easy exercise. # Our need is to provide convenient containers for uncertain numbers. # We do not try to integrate uncertain numbers in numpy's design. from __future__ import division import warnings from numbers import Number, Real, Complex from math import isnan, isinf from cmath import isnan as cisnan from cmath import isinf as cisinf try: from itertools import izip # Python 2 except ImportError: izip = zip xrange = range import numpy as np from GTC import is_sequence from GTC.linear_algebra import matmul from GTC.core import ( value, uncertainty, variance, dof, cos, sin, tan, acos, asin, atan, atan2, exp, log, log10, sqrt, sinh, cosh, tanh, acosh, asinh, atanh, mag_squared, magnitude, phase, result, ) from GTC.lib import ( UncertainReal, UncertainComplex ) def _isnan(number): val = value(number) if isinstance(val, Real): return isnan(val) elif isinstance(val, Complex): return cisnan(val) else: raise TypeError('cannot calculate isnan of type {}'.format(type(number))) def _isinf(number): val = value(number) if isinstance(val, Real): return isinf(val) elif isinstance(val, Complex): return cisinf(val) else: raise TypeError('cannot calculate isinf of type {}'.format(type(number))) # Note numpy defines its own numeric types, instead of bool, int, # float, complex, that have additional attributes. These types are needed by # functions like `numpy.average`. (Uses `dtype` and `.size` attributes # on the result returned by `mean`, as defined in a subclass if available.) # One way to fix this is to add the required attributes # to all the return values from `UncertainArray` methods. # Another option is to ensure that array elements # are always numpy-compatible and to ensure that all # uncertain number objects are initialised with # a.dtype = np.dtype('O') # a.size = 1 # a.shape = () # # Our use of `dtype=object` for arrays means that numeric # elements are not cast to numpy types when loaded into an array. # To fix this would require iteration through all arrays as they # are being created! #-------------------------------------------------------------------- class UncertainArray(np.ndarray): """An :class:`UncertainArray` can contain elements of type :class:`int`, :class:`float`, :class:`complex`, :class:`.UncertainReal` or :class:`.UncertainComplex`. Do not instantiate this class directly. Use :func:`~.uarray` instead. Base: :class:`numpy.ndarray` .. versionadded:: 1.1 """ def __new__(cls, array, dtype=None, label=None): # The first case allows users to create uarray instances # with a definite numpy number type. This could be done # by wrapping a call to uarray() around an ndarray. # Without this, the type gets converted back to Python. if isinstance(array, np.ndarray): dtype = array.dtype elif dtype is None: dtype = np.dtype('O') obj = np.asarray(array, dtype=dtype).view(cls) obj._label = label return obj def __array_finalize__(self, obj): if obj is None: return self._label = getattr(obj, 'label', None) # numpy looks at type().__name__ when preparing # a string representation of the object. This # change means we see `uarray` not `UncertainArray`. 
self.__class__.__name__ = 'uarray' self._broadcasted_shape = None def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): try: attr = getattr(self, '_' + ufunc.__name__) except AttributeError: # Want to raise a NotImplementedError without nested exceptions # In Python 3 this could be achieved by "raise Exception('...') from None" attr = None if attr is None: raise NotImplementedError( 'The {} function has not been implemented'.format(ufunc) ) if kwargs: warnings.warn('**kwargs, {}, are currently not supported' .format(kwargs), stacklevel=2) case = len(inputs) if case == 1: pass # Must be an UncertainArray elif case == 2: # At least 1 of the inputs must be an UncertainArray # If an input is not an ndarray then convert it to be an ndarray not0 = not isinstance(inputs[0], np.ndarray) if not0 or not isinstance(inputs[1], np.ndarray): # A tuple cannot be modified # This does not create a copy of the items inputs = list(inputs) # convert the input that is not an ndarray convert, keep = (0, 1) if not0 else (1, 0) if isinstance(inputs[convert], (Number, UncertainReal, UncertainComplex)): inputs[convert] = np.full(inputs[keep].shape, inputs[convert], dtype=object) else: inputs[convert] = np.asarray(inputs[convert], dtype=object) self._broadcasted_shape = None if inputs[0].shape != inputs[1].shape: broadcasted = np.broadcast(*inputs) inputs = broadcasted.iters self._broadcasted_shape = broadcasted.shape else: assert False, 'Should not occur: __array_ufunc__ received {} inputs'.format(case) return attr(*inputs) def __repr__(self): # Use the numpy formatting but hide the default dtype np_array_repr = np.array_repr(self) if self.dtype == object: # Truncate string from trailing ',' i = np_array_repr.rfind(',') return np_array_repr[:i] + ')' else: return np_array_repr def __matmul__(self, other): # Implements the protocol used by the '@' operator defined in PEP 465. return matmul(self, other) def __rmatmul__(self, other): # Implements the protocol used by the '@' operator defined in PEP 465. return matmul(other, self) def _matmul(self, *inputs): # np.matmul became a ufunc in version 1.16.0 return matmul(*inputs) def _create_empty(self, inputs=None, dtype=None, order='C'): if dtype is None: dtype = object shape = self.shape if self._broadcasted_shape is None else self._broadcasted_shape a = np.empty(shape, dtype=dtype, order=order) if inputs is None: return a, a.itemset, self.flat if len(inputs) == 1: return a, a.itemset, inputs[0].flat if isinstance(inputs[0], np.ndarray): return a, a.itemset, izip(inputs[0].flat, inputs[1].flat) # then the inputs are already broadcasted iterators return a, a.itemset, izip(*inputs) @property def label(self): """The label that was assigned to the array when it was created. **Example**:: >>> current = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)], label='amps') >>> current.label 'amps' :rtype: :class:`str` """ return self._label @property def real(self): """The result of applying the attribute ``real`` to each element in the array. **Example**:: >>> a = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> a.real uarray([ureal(1.2,0.6,inf), ureal(3.2,1.4,inf), ureal(0.0,0.9,inf)]) :rtype: :class:`UncertainArray` """ arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, item.real) return UncertainArray(arr) @property def imag(self): """The result of applying the attribute ``imag`` to each element in the array. 
**Example**:: >>> a = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> a.imag uarray([ureal(-0.5,0.6,inf), ureal(1.2,0.2,inf), ureal(1.5,0.9,inf)]) :rtype: :class:`UncertainArray` """ arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, item.imag) return UncertainArray(arr) @property def r(self): """The result of applying the attribute ``r`` to each element in the array. **Example**:: >>> a = la.uarray([ucomplex(1.2-0.5j, (1.2, 0.7, 0.7, 2.2)), ... ucomplex(-0.2+1.2j, (0.9, 0.4, 0.4, 1.5))]) >>> a.r uarray([0.43082021842766455, 0.34426518632954817]) :rtype: :class:`UncertainArray` """ arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, item.r) return UncertainArray(arr) @property def x(self): """The result of :func:`~.core.value` for each element in the array. **Example**:: >>> a = la.uarray([0.57, ureal(0.45, 0.12), ucomplex(1.1+0.68j, 0.19)]) >>> a.x uarray([0.57, 0.45, (1.1+0.68j)]) :rtype: :class:`UncertainArray` """ return self.value() def value(self): """The result of :func:`~.core.value` for each element in the array. **Example**:: >>> a = la.uarray([0.57, ureal(0.45, 0.12), ucomplex(1.1+0.68j, 0.19)]) >>> a.value() uarray([0.57, 0.45, (1.1+0.68j)]) :rtype: :class:`UncertainArray` """ # Note: in the future we might allow different `dtype` values. # However, this needs some thought. Should `dtype=float` # return complex numbers as a pair of reals, for example? # What are the most likely use-cases? # :param dtype: The data type of the returned array. # :type dtype: :class:`numpy.dtype` arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, value(item)) return UncertainArray(arr) @property def u(self): """The result of :func:`~.core.uncertainty` for each element in the array. **Example**:: >>> r = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)]) >>> r.u uarray([0.18, 0.12, 0.19]) >>> c = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> c.u uarray([StandardUncertainty(real=0.6, imag=0.6), StandardUncertainty(real=1.4, imag=0.2), StandardUncertainty(real=0.9, imag=0.9)]) :rtype: :class:`UncertainArray` """ return self.uncertainty() def uncertainty(self): """The result of :func:`~.core.uncertainty` for each element in the array. **Example**:: >>> r = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)]) >>> r.uncertainty() uarray([0.18, 0.12, 0.19]) >>> c = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> c.uncertainty() uarray([StandardUncertainty(real=0.6, imag=0.6), StandardUncertainty(real=1.4, imag=0.2), StandardUncertainty(real=0.9, imag=0.9)]) :rtype: :class:`UncertainArray` """ # Note: in the future we might allow different `dtype` values. # However, we need to consider the use-cases carefully. # :param dtype: The data type of the returned array. # :type dtype: :class:`numpy.dtype` arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, uncertainty(item)) return UncertainArray(arr) @property def v(self): """The result of :func:`~.core.variance` for each element in the array. 
**Example**:: >>> r = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)]) >>> r.v uarray([0.0324, 0.0144, 0.0361]) >>> c = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.5, 0.5)), ucomplex(1.5j, 0.9)]) >>> c.v uarray([VarianceCovariance(rr=0.36, ri=0.0, ir=0.0, ii=0.36), VarianceCovariance(rr=2.25, ri=0.0, ir=0.0, ii=0.25), VarianceCovariance(rr=0.81, ri=0.0, ir=0.0, ii=0.81)]) :rtype: :class:`UncertainArray` """ return self.variance() def variance(self): """The result of :func:`~.core.variance` for each element in the array. **Example**:: >>> r = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)]) >>> r.variance() uarray([0.0324, 0.0144, 0.0361]) >>> c = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.5, 0.5)), ucomplex(1.5j, 0.9)]) >>> c.variance() uarray([VarianceCovariance(rr=0.36, ri=0.0, ir=0.0, ii=0.36), VarianceCovariance(rr=2.25, ri=0.0, ir=0.0, ii=0.25), VarianceCovariance(rr=0.81, ri=0.0, ir=0.0, ii=0.81)]) :rtype: :class:`UncertainArray` """ # Note: in the future we might allow different `dtype` values. # However, we need to consider the use-cases carefully. # :param dtype: The data type of the returned array. # :type dtype: :class:`numpy.dtype` arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, variance(item)) return UncertainArray(arr) @property def df(self): """The result of :func:`~.core.dof` for each element in the array. **Example**:: >>> a = la.uarray([ureal(6, 2, df=3), ureal(4, 1, df=4), ureal(5, 3, df=7), ureal(1, 1)]) >>> a.df uarray([3.0, 4.0, 7.0, inf]) :rtype: :class:`UncertainArray` """ return self.dof() def dof(self): """The result of :func:`~.core.dof` for each element in the array. **Example**:: >>> a = la.uarray([ureal(6, 2, df=3), ureal(4, 1, df=4), ureal(5, 3, df=7), ureal(1, 1)]) >>> a.dof() uarray([3.0, 4.0, 7.0, inf]) :rtype: :class:`UncertainArray` """ arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, dof(item)) return UncertainArray(arr) def sensitivity(self, x): """The result of :func:`~.reporting.sensitivity` for each element in the array. :rtype: :class:`UncertainArray` """ # Note, there is a case for introducing `dtype` or some other parameter. # The return types for complex cases may be multivariate. # `_create_empty()` handles only ndarray-like sequences if not isinstance(x, np.ndarray): x = np.asarray(x) arr, itemset, iterator = self._create_empty((self, x)) for i, (y, x) in enumerate(iterator): itemset(i, y.sensitivity(x)) return UncertainArray(arr) def u_component(self, x): """The result of :func:`~.reporting.u_component` for each element in the array. :rtype: :class:`UncertainArray` """ # Note, there is a case for introducing `dtype` or some other parameter. # The return types for complex cases may be multivariate. # `_create_empty()` handles only ndarray-like sequences if not isinstance(x, np.ndarray): x = np.asarray(x) arr, itemset, iterator = self._create_empty((self, x)) for i, (y, x) in enumerate(iterator): itemset(i, y.u_component(x)) return UncertainArray(arr) def conjugate(self): """The result of applying the attribute ``conjugate`` to each element in the array. 
**Example**:: >>> a = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> a.conjugate() uarray([ucomplex((1.2+0.5j), u=[0.6,0.6], r=0.0, df=inf), ucomplex((3.2-1.2j), u=[1.4,0.2], r=0.0, df=inf), ucomplex((0-1.5j), u=[0.9,0.9], r=0.0, df=inf)]) :rtype: :class:`UncertainArray` """ # override this method because I wanted to create a custom __doc__ return self._conjugate() def _conjugate(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, item.conjugate()) return UncertainArray(arr) def _positive(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, +item) return UncertainArray(arr) def _negative(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, -item) return UncertainArray(arr) def _add(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a + b) return UncertainArray(arr) def _subtract(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a - b) return UncertainArray(arr) def _multiply(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a * b) return UncertainArray(arr) def _divide(self, *inputs): return self._true_divide(*inputs) def _true_divide(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a / b) return UncertainArray(arr) def _power(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a ** b) return UncertainArray(arr) def _exp(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, exp(item)) return UncertainArray(arr) def _log(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, log(item)) return UncertainArray(arr) def _log10(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, log10(item)) return UncertainArray(arr) def _sqrt(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, sqrt(item)) return UncertainArray(arr) def _cos(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, cos(item)) return UncertainArray(arr) def _sin(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, sin(item)) return UncertainArray(arr) def _tan(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, tan(item)) return UncertainArray(arr) def _arccos(self, *ignore): return self._acos() def _acos(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, acos(item)) return UncertainArray(arr) def _arcsin(self, *ignore): return self._asin() def _asin(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, asin(item)) return UncertainArray(arr) def _arctan(self, *ignore): return self._atan() def _atan(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, atan(item)) return UncertainArray(arr) def _arctan2(self, *inputs): return self._atan2(inputs[1]) def _atan2(self, 
*inputs): arr, itemset, iterator = self._create_empty((self, inputs[0])) for i, (a, b) in enumerate(iterator): itemset(i, atan2(a, b)) return UncertainArray(arr) def _sinh(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, sinh(item)) return UncertainArray(arr) def _cosh(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, cosh(item)) return UncertainArray(arr) def _tanh(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, tanh(item)) return UncertainArray(arr) def _arccosh(self, *ignore): return self._acosh() def _acosh(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, acosh(item)) return UncertainArray(arr) def _arcsinh(self, *ignore): return self._asinh() def _asinh(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, asinh(item)) return UncertainArray(arr) def _arctanh(self, *ignore): return self._atanh() def _atanh(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, atanh(item)) return UncertainArray(arr) def _square(self, *ignore): return self._mag_squared() def _mag_squared(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, mag_squared(item)) return UncertainArray(arr) def _magnitude(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, magnitude(item)) return UncertainArray(arr) def _phase(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, phase(item)) return UncertainArray(arr) def _intermediate(self, labels): # Default second argument of calling function is `None` if labels is None: arr, itemset, iterator = self._create_empty() for i, x in enumerate(iterator): itemset(i, result(x)) else: # `_create_empty()` handles only ndarray-like sequences if not is_sequence(labels): # Add index notation to the label base labels = [ "{}[{}]".format(labels, i) for i in xrange(self.size) ] labels = np.asarray(labels) arr, itemset, iterator = self._create_empty((self, labels)) for i, (x, lbl) in enumerate(iterator): itemset(i, result(x, lbl)) return UncertainArray(arr) def _equal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a == b) return arr def _not_equal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a != b) return arr def _less(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a < b) return arr def _less_equal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a <= b) return arr def _greater(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a > b) return arr def _greater_equal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a >= b) return arr def _maximum(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): if _isnan(a): itemset(i, a) elif _isnan(b): itemset(i, b) elif a > b: itemset(i, a) else: itemset(i, b) return 
UncertainArray(arr) def _minimum(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): if _isnan(a): itemset(i, a) elif _isnan(b): itemset(i, b) elif a < b: itemset(i, a) else: itemset(i, b) return UncertainArray(arr) def _logical_and(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=object) for i, (a, b) in enumerate(iterator): itemset(i, a and b) return UncertainArray(arr) def _logical_or(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=object) for i, (a, b) in enumerate(iterator): itemset(i, a or b) return UncertainArray(arr) def _logical_xor(self, *inputs): raise TypeError( "Boolean bitwise operations are not defined for `UncertainArray`" ) # arr, itemset, iterator = self._create_empty(inputs, dtype=bool) # for i, (a, b) in enumerate(iterator): # itemset(i, bool(a) ^ bool(b)) # return arr def _logical_not(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, item in enumerate(iterator): itemset(i, not bool(item)) return arr def _isinf(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, item in enumerate(iterator): itemset(i, _isinf(item)) return arr def _isnan(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, item in enumerate(iterator): itemset(i, _isnan(item)) return arr def _isfinite(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, item in enumerate(iterator): itemset(i, not (_isnan(item) or _isinf(item))) return arr def _reciprocal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, item in enumerate(iterator): itemset(i, 1.0/item) return UncertainArray(arr) def _absolute(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, item in enumerate(iterator): itemset(i, abs(item)) return UncertainArray(arr) def copy(self, order='C'): arr, itemset, iterator = self._create_empty(order=order) for i, item in enumerate(iterator): itemset(i, +item) return UncertainArray(arr, label=self.label) def round(self, decimals=0, **kwargs): digits = kwargs.get('digits', decimals) df_decimals = kwargs.get('df_decimals', digits) arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): try: itemset(i, item._round(digits, df_decimals)) except AttributeError: try: itemset(i, round(item, digits)) except TypeError: itemset(i, complex(round(item.real, digits), round(item.imag, digits))) return UncertainArray(arr) def sum(self, *args, **kwargs): raise TypeError( "`sum` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).sum(*args, **kwargs)) def mean(self, *args, **kwargs): raise TypeError( "`mean` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).mean(*args, **kwargs)) def std(self, *args, **kwargs): # If this is to be implemented we need to be clear about # what is calculated. This will not be an uncertain-number # calculation, it will take the values of a sample of uncertain # numbers and evaluate the SD. This will probably be clearer # if the function is in the `type_a` module. # Note we would also want a similar function to calculate # the standard error (ie the type-A uncertainty). raise TypeError( "`std` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).std(*args, **kwargs)) def var(self, *args, **kwargs): # If this is to be implemented we need to be clear about # what is calculated. 
This will not be an uncertain-number # calculation, it will take the values of a sample of uncertain # numbers and evaluate the SD. This will probably be clearer # if the function is in the `type_a` module. # Note we would also want a similar function to calculate # the standard variance (ie the type-A uncertainty squared). raise TypeError( "`var` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).var(*args, **kwargs)) def max(self, *args, **kwargs): raise TypeError( "`max` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).max(*args, **kwargs)) def min(self, *args, **kwargs): raise TypeError( "`min` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).min(*args, **kwargs)) def trace(self, *args, **kwargs): raise TypeError( "`trace` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).trace(*args, **kwargs)) def cumprod(self, *args, **kwargs): # numpy catches ``TypeError`` and uses its # internal implementation of this method raise RuntimeError( "`cumprod` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).cumprod(*args, **kwargs)) def cumsum(self, *args, **kwargs): # numpy catches ``TypeError`` and uses its # internal implementation of this method raise RuntimeError( "`cumsum` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).cumsum(*args, **kwargs)) def prod(self, *args, **kwargs): raise TypeError( "`prod` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).prod(*args, **kwargs)) def ptp(self, *args, **kwargs): raise TypeError( "`ptp` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).ptp(*args, **kwargs)) def any(self, *args, **kwargs): raise TypeError( "`any` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self, dtype=bool).any(*args, **kwargs)) def all(self, *args, **kwargs): raise TypeError( "`all` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self, dtype=bool).all(*args, **kwargs)) # Allows pickle to understand the class name 'uarray' uarray = UncertainArray
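# Added usage sketch, following the doctest conventions used in the class above
# (assumes `ureal` and `la.uarray` are available exactly as in those examples;
# the exact repr shown is illustrative, not verified output):
#
#     >>> a = la.uarray([ureal(1.0, 0.1), ureal(2.0, 0.2)])
#     >>> b = la.uarray([ureal(3.0, 0.3), ureal(4.0, 0.4)])
#     >>> (a + b).x        # elementwise addition, then the values
#     uarray([4.0, 6.0])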
python
'''
The default translation file removes all the attributes with empty values
'''

def filterTags(attrs):
    if not attrs:
        return
    tags = {}
    for k, v in attrs.iteritems():
        if v:
            tags.update({k: v})
    return tags
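# Added usage sketch (Python 2, matching the iteritems call above): attributes
# with empty values are dropped, non-empty ones pass through unchanged, e.g.
#
#     filterTags({'name': 'Main St', 'ref': ''})   # -> {'name': 'Main St'}
#     filterTags({})                               # -> None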
python
import os
import unittest


def resolve_runfile(path):
    if os.getenv('RUNFILES_MANIFEST_ONLY') != "1":
        return os.path.join(os.environ['TEST_SRCDIR'], path)
    manifest = os.getenv('RUNFILES_MANIFEST_FILE')
    with open(manifest) as f:
        for line in f.readlines():
            if line.split()[0] == path:
                return line.split()[1]
    # Raising a string is invalid in Python 3; raise a proper exception instead.
    raise Exception("Cannot find %s in manifest %s" % (path, manifest))


class CheckVersionTest(unittest.TestCase):
    BZL_PATH = 'build_bazel_rules_nodejs/internal/common/check_version.bzl'

    def setUp(self):
        self.globals = {}
        exec(open(resolve_runfile(self.BZL_PATH)).read(), self.globals)

    def testVersionComparison(self):
        result = self.globals['check_version']('1.2.2', '1.2.3')
        self.assertIs(result, False)

    def testVersionRangeWithin(self):
        result = self.globals['check_version_range']('1.2.2', '1.2.1', '1.2.3')
        self.assertIs(result, True)

    def testVersionOutOfLowRange(self):
        result = self.globals['check_version_range']('1.2.0', '1.2.1', '1.2.3')
        self.assertIs(result, False)

    def testVersionOutOfHighRange(self):
        result = self.globals['check_version_range']('1.2.4', '1.2.1', '1.2.3')
        self.assertIs(result, False)

    def testNotAlphaComparison(self):
        result = self.globals['check_version']('1.12.3', '1.2.1')
        self.assertIs(result, True)

    def testReleaseCandidate(self):
        result = self.globals['check_version']('0.8.0rc2', '0.8.0')
        self.assertIs(result, True)


if __name__ == '__main__':
    unittest.main()
python
import tensorflow as tf import sys sys.path.append('./ext/voxelmorph/') sys.path.append('./ext/neurite-master/') sys.path.append('./ext/pynd-lib/') sys.path.append('./ext/pytools-lib/') from voxelmorph.tf.losses import Grad, NCC, NonSquareNCC loss_object = tf.keras.losses.MeanSquaredError() # used for GAN + def. reg. loss_object_NCC = NCC(win=[9]*3) # used for registration loss_object_NonSquareNCC = NonSquareNCC(win=[9]*3) # not used in paper # ---------------------------------------------------------------------------- # Generator losses @tf.function def total_variation3D(ypred): """ Not used in paper. Calculates anisotropic total variation for a 3D image ypred. """ pixel_dif1 = ypred[:, 1:, :, :, :] - ypred[:, :-1, :, :, :] pixel_dif2 = ypred[:, :, 1:, :, :] - ypred[:, :, :-1, :, :] pixel_dif3 = ypred[:, :, :, 1:, :] - ypred[:, :, :, :-1, :] tot_var = ( tf.reduce_mean(tf.math.abs(pixel_dif1)) + tf.reduce_mean(tf.math.abs(pixel_dif2)) + tf.reduce_mean(tf.math.abs(pixel_dif3)) ) return tf.reduce_mean(tot_var) @tf.function def generator_loss( disc_opinion_fake_local, disp_ms, disp, moved_atlases, fixed_images, epoch, sharp_atlases, loss_wts, start_step=0, reg_loss_type='NCC', ): """Loss function for Generator: Args: disc_opinion_fake_local: tf float Local feedback from discriminator. disp_ms: tf float Moving average of displacement fields. disp: tf float Displacement fields. moved_atlases: tf float Moved template images. fixed_images: tf float Target images. epoch: int Training step. sharp_atlases: tf float Generated Template image. loss_wts: list List of regularization weights for gan loss, deformation, and TV. start_step: int Training step to start training adversarial component. """ lambda_gan, lambda_reg, lambda_tv = loss_wts # If training registration only, without GAN loss. # Need to do this, otherwise graph detaches: if epoch >= start_step: gan_loss = loss_object( tf.ones_like(disc_opinion_fake_local), disc_opinion_fake_local, ) if lambda_tv > 0.0: # never happens as TV loss not used in paper tv_loss = total_variation3D(sharp_atlases) else: tv_loss = 0.0 else: gan_loss = 0.0 tv_loss = 0.0 # Similarity terms: if reg_loss_type == 'NCC': similarity_loss = tf.reduce_mean( loss_object_NCC.loss(moved_atlases, fixed_images), ) elif reg_loss_type == 'NonSquareNCC': # Not used in paper. similarity_loss = tf.reduce_mean( loss_object_NonSquareNCC.loss(moved_atlases, fixed_images), ) # smoothness terms: smoothness_loss = tf.reduce_mean( Grad('l2').loss(tf.zeros_like(disp), disp), ) # magnitude terms: magnitude_loss = loss_object(tf.zeros_like(disp), disp) moving_magnitude_loss = loss_object(tf.zeros_like(disp_ms), disp_ms) # Choose between registration only or reg+gan training: if epoch < start_step: total_gen_loss = ( (lambda_reg * smoothness_loss) + (0.01 * lambda_reg * magnitude_loss) + (lambda_reg * moving_magnitude_loss) + 1*similarity_loss ) else: total_gen_loss = ( lambda_gan*gan_loss + (lambda_reg * smoothness_loss) + (0.01 * lambda_reg * magnitude_loss) + (lambda_reg * moving_magnitude_loss) + 1*similarity_loss + lambda_tv*tv_loss ) return ( total_gen_loss, gan_loss, smoothness_loss, magnitude_loss, similarity_loss, moving_magnitude_loss, tv_loss, ) # ---------------------------------------------------------------------------- # Discriminator losses @tf.function def discriminator_loss( disc_opinion_real_local, disc_opinion_fake_local, ): """Loss function for Generator: Args: disc_opinion_fake_local: tf float Local feedback from discriminator on moved templates. 
disc_opinion_real_local: tf float Local feedback from discriminator on real fixed images. """ gan_fake_loss = loss_object( tf.zeros_like(disc_opinion_fake_local), disc_opinion_fake_local, ) gan_real_loss = loss_object( tf.ones_like(disc_opinion_real_local), disc_opinion_real_local, ) total_loss = 0.5*(gan_fake_loss + gan_real_loss) return total_loss
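# Added usage sketch (illustrative only, not from the original repository):
# the discriminator loss is an LSGAN-style mean-squared error on the patch
# outputs, so it can be exercised with dummy tensors of any matching shape:
#
#     fake_opinion = tf.zeros((1, 4, 4, 4, 1))
#     real_opinion = tf.ones((1, 4, 4, 4, 1))
#     d_loss = discriminator_loss(real_opinion, fake_opinion)  # scalar tf.Tensor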
python
import time
import datetime
import shutil
import os
import sys

sys.path.insert(
    0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))

import emdee

print(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))

# In this case we want to LOAD the results of a previous emdee run to pick up where
# we left off. With mode set to 'load' and loc pointing to the subdirectory containing
# a previous set of results (LOG.txt, last_lnprob.txt, etc...), an Emdee class is
# populated with the loaded data and is ready to continue iterating. Changes to the
# number of walkers or the parameters (and bounds) should not be made at this point.
# This is primarily for continuing runs that may have crashed, or completed successfully
# but haven't reached burn in (if running in small chunks locally, for example).
emdeeClass = emdee.Emdee(mode='load', loc='example_output')
emdeeClass.PrintParams()  # Just to check, for example

# As before, we just run another batch of iterations picking up from where the previous
# run that we loaded had left off.
emdeeClass.GoMCMC(100)

print(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
python
############################################################################### # # file: typing.py # # Purpose: refer to module documentation for details # # Note: This file is part of Termsaver application, and should not be used # or executed separately. # ############################################################################### # # Copyright 2012 Termsaver # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # ############################################################################### """ A helper class used for screens that require more dynamic output to users. See additional information in the class itself. The helper class available here is: * `TypingHelperBase` """ # # Python built-in modules # import sys import time # # Internal modules # from termsaverlib.screen.helper import ScreenHelperBase from termsaverlib import constants class TypingHelperBase(ScreenHelperBase): """ This helper class gives functionality to screens to print out information in a more interactive way, simulating a typing writer machine, based on two main speed control properties: * `delay`: defines the delay for printing out characters of a string * `line_delay`: defines the delay for printing out new lines within a string (sometimes, setting different proportions make a lot of a difference) If no values are defined by the screen itself, default values should be used. The `delay` is set in `constants.Settings.CHAR_DELAY_SECONDS`, and the `line_delay` is 10 times the value of delay. To use this screen helper is pretty straightforward, just call the method: * `typing_print`: this will print the specified text string using the speed controls `delay` and `line_delay`. """ delay = None """ Defines the character printing delay, to give a cool visual of a typing machine. This value is measured in seconds, and default marks are defined in `constants.Settings.CHAR_DELAY_SECONDS`. """ line_delay = None """ Defines the delay imposed to every new line prior to char printing. By default, its value is 10x the `delay`. """ def typing_print(self, text): """ Prints text with standard output to allow side-by-side printing, and give the impression of a typing writer machine. The speed is controlled by properties of this class: `delay` and `line_delay`. Arguments: * text: the text to be printed in typing style Notes: * This also supports new lines (\n) * blank spaces, due to its lack of meaning, are ignored for speed limiting, so they will be flushed all at once. """ # set defaults if self.delay is None: self.delay = constants.Settings.CHAR_DELAY_SECONDS if self.line_delay is None: self.line_delay = 10 * self.delay splitText = text.split('\n') for line in splitText: for char in line: sys.stdout.write(char) # only pause if it is not a blank space if char != ' ': time.sleep(self.delay) sys.stdout.flush() # need to re-print the line removed from the split sys.stdout.write('\n') time.sleep(self.line_delay) # specific pause for new lines
python
from .runners import Noun # ?
python
class Value:
    def __get__(self, instance, instance_type):
        return self.amount

    def __set__(self, instance, value):
        self.amount = value - instance.commission * value


class Account:
    amount = Value()

    def __init__(self, commission):
        self.commission = commission


"""
new_account = Account(0.1)
new_account.amount = 100
print(new_account.amount)  # 90
"""

# Teacher's solution:
class Value2:
    def __init__(self):
        self.amount = 0

    def __get__(self, obj, obj_type):
        return self.amount

    def __set__(self, obj, value):
        self.amount = value - value * obj.commission
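# Added illustration: both descriptors above store ``amount`` on the descriptor
# object itself, so every Account instance ends up sharing a single value.
# A minimal instance-local sketch (assuming the same commission semantics):
class ValuePerInstance:
    def __get__(self, instance, instance_type):
        return instance.__dict__.get('_amount', 0)

    def __set__(self, instance, value):
        instance.__dict__['_amount'] = value - instance.commission * value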
python
""" 395. Longest Substring with At Least K Repeating Characters This question is listed as a medium question under sliding window category. But sliding window approach is too complex and maybe a hard problem for that case, simple approach is doing a dfs. But it is expensive. The following solution is not optimal but works. Time complexity -> O(N) for stack iteration, O(N) for set operation, O(N) for count operation. ==> O(N3) where N is the length of string """ class Solution: def longestSubstring(self, s: str, k: int) -> int: stack = [s] max_len = 0 while stack: tmp = stack.pop() for key in set(tmp): if tmp.count(key) < k: parts = tmp.split(key) stack.extend(parts) break else: max_len = max(max_len, len(tmp)) return max_len
python
from sys import argv from pathlib import Path from datetime import date from time import strftime import json # Logging (console) def print_log(message): """Simple logging function: Adds timestamp before message""" print(strftime("%H:%M:%S") + ": " + message) # Basic structures def set_date(date_=None): """Provides the processing date""" if not date_: return date.today().strftime("%y-%m-%d") else: return date_ def get_categories(): """Provides the basic categories of data - confirmed: Confirmed cases - deaths: Deaths - recovered: Recovered cases - active: Active cases (confirmed - deaths - recovered) """ return ["confirmed", "deaths", "recovered", "active"] def get_variants(category): """Provides the different data variants""" variants = [ "cum", "cum_rel_popmio", "cum_rel_pop100k", "diff", "diff_rel_popmio", "diff_rel_pop100k", "diff_ma1w", "diff_rel_popmio_ma1w", "diff_rel_pop100k_ma1w", "diff_rel_active", ] if category == "active": return variants return variants[:-1] # Web-related information def get_feed_url(category): """Provides the data urls of John Hopkins University's GitHub project (confirmed, deaths, recovered) """ with get_settings_file_path("urls").open("r") as file: return json.load(file)[category] # Paths and files def get_dir_path(key, date_=None): """Sets up the directory structure used in the rest of the application: - script_path/settings: For settings (json-files with parameters) - output_path/data/dte/feed: For the raw downloaded data - output_path/data/dte: For the prepared data - output_path/plots/dte: For the generated plots """ # Determine settings directory: Subdirectory of the directory in which the # script is located, named "settings" if key == "settings": return Path(argv[0]).parent / key # Determine the output directory: Either stored in the "output_dir.json"- # file located in the settings directory or the directory in which the # script is located path = Path(argv[0]).parent if get_settings_file_path("output_dir").exists(): with get_settings_file_path("output_dir").open("r") as file: settings = json.load(file) if settings["OUTPUT_DIR"] != "": path = Path(settings["OUTPUT_DIR"]) # Output directories if key in ["base_data", "base_plots"]: path = path / key[5:] elif key in ["data", "plots"]: path = get_dir_path("base" + "_" + key) / date_ elif key == "feed": path = get_dir_path("data", date_) / key path.mkdir(parents=True, exist_ok=True) return path def get_settings_file_path(key): """Provides path to the settings files (json-files stored in the folder ../settings, containing some basic parameters and definitions) """ return get_dir_path("settings").joinpath(key + ".json") def get_feed_file_path(date_, category): """Provides paths to the CSV-files used for saving the downloaded data: dir_base/dte/data/feed_(confirmed/deaths/recovered).csv """ return get_dir_path("feed", date_).joinpath(category + ".csv") def get_data_file_path(date_, name="data", file_format="json"): """Provides the path to the prepared csv/json-files from day dte containing the data for category cat and variant var """ return get_dir_path("data", date_) / f"{name}.{file_format}" def get_plot_file_path(date_, base, *args): """Provides the path to the plot-file generated from day dte-data, defined by the categories and variants specified in *args """ filename = base for arg in args: filename += "_" + arg filename += ".png" path = get_dir_path("plots", date_).joinpath(base) path.mkdir(parents=True, exist_ok=True) return path.joinpath(filename) def get_region(region, subregion="-"): """Provides 
lists of countries organized in regions (e.g. Europe, middle, south, east, north, ...). Definitions are stored in the settings file regions.json in the folder ../settings. """ with get_settings_file_path("regions").open("r") as file: return json.load(file)[region][subregion]
python
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2012 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # # This software consists of contributions made by many individuals, # listed below: # # Author: Pablo OrduΓ±a <[email protected]> # from __future__ import print_function, unicode_literals import os import time from collections import OrderedDict from weblab.util import data_filename import flask_admin def weblab_httpd_config_generate(directory): print("Generating HTTPd configuration files... ", end='') result = httpd_config_generate(directory) print("[done]") return result def httpd_config_generate(directory): debugging_variables = {} execfile(os.path.join(directory, 'debugging.py'), debugging_variables) ports = debugging_variables.get('PORTS', {}).get('json') base_url = debugging_variables.get('BASE_URL', '') if base_url in ('','/'): base_url = '' static_directories = OrderedDict() #{ # url path : disk path # } static_directories[base_url + '/weblab/client'] = data_filename('weblab/core/static/oldclient').replace('\\','/') # \ => / for Windows flask_admin_static = os.path.join(os.path.dirname(flask_admin.__file__), 'static') static_directories[base_url + '/weblab/admin/static'] = flask_admin_static.replace('\\','/') # TODO: Avoid repeated paths static_directories[base_url + '/weblab/instructor/static'] = data_filename('weblab/admin/web/static').replace('\\','/') static_directories[base_url + '/weblab/profile/static'] = data_filename('weblab/admin/web/static').replace('\\','/') static_directories[base_url + '/weblab/web/static'] = data_filename('weblab/core/static').replace('\\','/') static_directories[base_url + '/weblab/static'] = data_filename('weblab/core/static').replace('\\','/') static_directories[base_url + '/weblab/gwt/weblabclientlab'] = data_filename('war/weblabclientlab').replace('\\','/') static_directories[base_url + '/weblab/web/pub'] = os.path.abspath(os.path.join(directory, 'pub')).replace('\\','/') files = {} apache_contents = _apache_generation(directory, base_url, ports, static_directories) files['apache'] = _set_contents(directory, 'httpd/apache_weblab_generic.conf', apache_contents) simple_httpd_contents = _simple_httpd_generation(directory, base_url, ports, static_directories) files['simple'] = _set_contents(directory, 'httpd/simple_server_config.py', simple_httpd_contents) # TODO: support nginx return files def _set_contents(directory, filename, new_contents): original_path = os.path.join(directory, filename) destination_path = os.path.join(directory, filename + "-backup-" + time.strftime("%Y-%m-%d_%H-%M-%S")) if os.path.exists(original_path): original_contents = open(original_path).read() open(destination_path, 'w').write(original_contents) open(original_path, 'w').write(new_contents) return os.path.abspath(original_path) def _apache_generation(directory, base_url, ports, static_directories): apache_conf = ( "\n" """<LocationMatch (.*)nocache\.js$>\n""" """ Header Set Cache-Control "max-age=0, no-store"\n""" """</LocationMatch>\n""" """\n""" """<Files *.cache.*>\n""" """ Header Set Cache-Control "max-age=2592000"\n""" """</Files>\n""" """\n""" """# Apache redirects the regular paths to the particular directories \n""" # """RedirectMatch ^%(root)s$ %(root)s/weblab/\n""" # """RedirectMatch ^%(root)s/$ %(root)s/weblab/\n""" """RedirectMatch ^%(root)s/weblab$ %(root)s/weblab/\n""" """RedirectMatch ^%(root)s/weblab/client/$ 
%(root)s/weblab/client/index.html\n""" """\n""") for static_url, static_directory in static_directories.items(): apache_conf += """Alias %(static_url)s %(static_directory)s\n""" % dict(static_url=static_url, static_directory=static_directory) apache_conf += ( """\n""" """<Location %(root)s/weblab/>\n""" """ <IfModule authz_core_module>\n""" """ Require all granted\n""" """ </IfModule>\n""" """\n""" """ <IfModule !authz_core_module>\n""" """ Order allow,deny\n""" """ Allow from All\n""" """ </IfModule>\n""" """</Location>\n""" """\n""" """<Directory "%(directory)s">\n""" """ Options Indexes FollowSymLinks\n""" """\n""" """ <IfModule authz_core_module>\n""" """ Require all granted\n""" """ </IfModule>\n""" """\n""" """ <IfModule !authz_core_module>\n""" """ Order allow,deny\n""" """ Allow from All\n""" """ </IfModule>\n""" """</Directory>\n""" """\n""") previous = [] for static_directory in static_directories.values(): if static_directory in previous: continue previous.append(static_directory) apache_conf += ("""<Directory "%(static_directory)s">\n""" """ Options Indexes FollowSymLinks\n""" """\n""" """ <IfModule authz_core_module>\n""" """ Require all granted\n""" """ </IfModule>\n""" """\n""" """ <IfModule !authz_core_module>\n""" """ Order allow,deny\n""" """ Allow from All\n""" """ </IfModule>\n""" """</Directory>\n""" """\n""") % dict(static_directory=static_directory) apache_conf += ( """# Apache redirects the requests retrieved to the particular server, using a stickysession if the sessions are based on memory\n""" """ProxyPreserveHost On\n""" """ProxyVia On\n""" """\n""") for static_url, static_directory in static_directories.items(): apache_conf += """ProxyPass %(static_url)s !\n""" % dict(static_url=static_url) apache_conf += ( """\n""" """ProxyPass %(root)s/weblab/ balancer://%(root-no-slash)s_weblab_cluster/ stickysession=weblabsessionid lbmethod=bybusyness\n""" """ProxyPassReverse %(root)s/weblab/ balancer://%(root-no-slash)s_weblab_cluster/ stickysession=weblabsessionid\n""" "\n") apache_conf += "\n" apache_conf += """<Proxy balancer://%(root-no-slash)s_weblab_cluster>\n""" for pos, port in enumerate(ports): d = { 'port' : port, 'route' : 'route%s' % (pos+1), 'root' : '%(root)s' } apache_conf += """ BalancerMember http://localhost:%(port)s/weblab route=%(route)s\n""" % d apache_conf += """</Proxy>\n""" apache_img_dir = '/client/images' apache_root_without_slash = base_url[1:] if base_url.startswith('/') else base_url server_conf_dict = { 'root' : base_url, 'root-no-slash' : apache_root_without_slash.replace('/','_'), 'directory' : os.path.abspath(directory).replace('\\','/'), 'war_path' : data_filename('war').replace('\\','/') } apache_conf = apache_conf % server_conf_dict apache_conf_path = os.path.join('', 'apache_weblab_generic.conf') return apache_conf def _simple_httpd_generation(directory, base_url, ports, static_directories): proxy_paths = [ ('%(root)s$', 'redirect:%(root)s/weblab/'), ('%(root)s/$', 'redirect:%(root)s/weblab/'), ('%(root)s/weblab/client$', 'redirect:%(root)s/weblab/client/index.html'), ] for key, directory in static_directories.items(): proxy_paths.append((key, 'file:{0}'.format(directory))) proxy_path = "proxy-sessions:weblabsessionid:" for pos, port in enumerate(ports): d = { 'port' : port, 'route' : 'route%s' % (pos+1), 'root' : '%(root)s' } proxy_path += '%(route)s=http://localhost:%(port)s/weblab/,' % d proxy_paths.append(('%(root)s/weblab/', proxy_path)) proxy_paths.append(('%(root)s/weblab', 'redirect:%(root)s/weblab/')) proxy_paths.append(('', 
'redirect:%(root)s/weblab/')) if base_url in ('','/'): root = '' else: root = base_url apache_img_dir = '/client/images' server_conf_dict = { 'root' : root, 'directory' : os.path.abspath(directory).replace('\\','/') } proxy_paths = eval(repr(proxy_paths) % server_conf_dict) proxy_paths_str = "PATHS = [ \n" for proxy_path in proxy_paths: proxy_paths_str += " %s,\n" % repr(proxy_path) proxy_paths_str += "]\n" return proxy_paths_str if __name__ == '__main__': httpd_config_generate("/tmp/foo")
python
""" Copyright (c) 2020 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ import argparse import shlex import subprocess import sys import config import global_vars from deamon import Daemon from detector.tools.slow_sql import diagnosing from global_vars import * from utils import check_time_legality, check_port_occupancy, check_collector, check_db_alive sys.path.append(CURRENT_DIRNAME) __version__ = '1.0.0' __description__ = 'Anomaly-detection: a time series forecast and anomaly detection tool.' __epilog__ = """ epilog: the 'a-detection.conf' and 'metric_task.conf' will be read when the program is running, the location of them is: a-detection.conf: {detection}. metric_config: {metric_config}. """.format(detection=CONFIG_PATH, metric_config=METRIC_CONFIG_PATH) def usage(): return """ python main.py start [--role {{agent,collector,monitor}}] # start local service. python main.py stop [--role {{agent,collector,monitor}}] # stop local service. python main.py start [--user USER] [--host HOST] [--project-path PROJECT_PATH] [--role {{agent,collector,monitor}}] # start the remote service. python main.py stop [--user USER] [--host HOST] [--project-path PROJECT_PATH] [--role {{agent,collector, monitor}}] # stop the remote service. python main.py deploy [--user USER] [--host HOST] [--project-path PROJECT_PATH] # deploy project in remote host. python main.py diagnosis [--query] [--start_time] [--finish_time] # rca for slow SQL. python main.py show_metrics # display all monitored metrics(can only be executed on 'detector' machine). python main.py forecast [--metric-name METRIC_NAME] [--period] [--freq] [--forecast-method {{auto_arima, fbprophet}}] [--save-path SAVE_PATH] # forecast future trend of metric(can only be executed on 'detector' machine). """ def parse_args(): parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__description__, usage=usage(), epilog=__epilog__) parser.add_argument('mode', choices=['start', 'stop', 'deploy', 'show_metrics', 'forecast', 'diagnosis']) parser.add_argument('--user', help="User of remote server.") parser.add_argument('--host', help="IP of remote server.") parser.add_argument('--project-path', help="Project location in remote server.") parser.add_argument('--role', choices=['agent', 'collector', 'monitor'], help="Run as 'agent', 'collector', 'monitor'. " "Notes: ensure the normal operation of the openGauss in agent.") parser.add_argument('--metric-name', help="Metric name to be predicted, if this parameter is not provided, " "all metric in database will be predicted.") parser.add_argument('--query', help="target sql for RCA") parser.add_argument('--start_time', help="start time of query") parser.add_argument('--finish_time', help="finish time of query") parser.add_argument('--period', default=1, help="Forecast periods of metric, it should be integer" "notes: the specific value should be determined to the trainnig data." 
"if this parameter is not provided, the default value '100S' will be used.") parser.add_argument('--freq', default='S', help="forecast gap, time unit: " "S: Second, " "M: Minute, " "H: Hour, " "D: Day, " "W: Week. ") parser.add_argument('--forecast-method', default='auto_arima', help="Forecast method, default method is 'auto_arima'," "if want to use 'fbprophet', you should install fbprophet first.") parser.add_argument('--save-path', help='Save the results to this path using csv format, if this parameter is not provided,' ', the result wil not be saved.') parser.add_argument('-v', '--version', action='version') parser.version = __version__ return parser.parse_args() def forecast(args): from prettytable import PrettyTable from detector.algorithm import get_fcst_alg from detector.service.storage.sqlite_storage import SQLiteStorage from utils import StdStreamSuppressor display_table = PrettyTable() display_table.field_names = ['Metric name', 'Date range', 'Minimum', 'Maximum', 'Average'] database_dir = config.get('database', 'database_dir') if not args.forecast_method: forecast_alg = get_fcst_alg('auto_arima')() else: forecast_alg = get_fcst_alg(args.forecast_method)() def forecast_metric(name, train_ts, save_path=None): with StdStreamSuppressor(): forecast_alg.fit(timeseries=train_ts) dates, values = forecast_alg.forecast( period=int(args.period) + 1, freq=args.freq) date_range = "{start_date}~{end_date}".format(start_date=dates[0], end_date=dates[-1]) display_table.add_row( [name, date_range, min(values), max(values), sum(values) / len(values)] ) if save_path: if not os.path.exists(os.path.dirname(save_path)): os.makedirs(os.path.dirname(save_path)) with open(save_path, mode='w') as f: for date, value in zip(dates, values): f.write(date + ',' + str(value) + '\n') for database in os.listdir(database_dir): with SQLiteStorage(os.path.join(database_dir, database)) as db: table_rows = db.get_table_rows('os_exporter') timeseries = db.get_timeseries(table='os_exporter', field=args.metric_name, period=table_rows) forecast_metric(args.metric_name, timeseries, args.save_path) print(display_table.get_string()) def slow_sql_rca(args): from prettytable import PrettyTable from detector.service.storage.sqlite_storage import SQLiteStorage from utils import input_sql_processing, remove_comment if not args.query: print('Error: no query input!') return user_query = args.query.split(';')[0] start_time = args.start_time finish_time = args.finish_time if start_time and not check_time_legality(start_time): print("error time format '{time}', using: {date_format}.".format(time=start_time, date_format=global_vars.DATE_FORMAT)) return if finish_time and not check_time_legality(finish_time): print("error time format '{time}', using: {date_format}.".format(time=finish_time, date_format=global_vars.DATE_FORMAT)) return database_dir = os.path.realpath(config.get('database', 'database_dir')) display_table = PrettyTable() display_table.field_names = ['database', 'start time', 'finish time', 'rca', 'suggestion'] display_table.align = 'l' for database in os.listdir(database_dir): if 'journal' in database: continue try: database_path = os.path.join(database_dir, database) with SQLiteStorage(database_path) as db: if start_time and finish_time: results = db.fetch_all_result( "select query, start_time, finish_time from wdr where start_time " "between '{start_time}' and '{finish_time}';".format( start_time=start_time, finish_time=finish_time)) elif start_time: results = db.fetch_all_result( "select query, start_time, finish_time 
from wdr where start_time >= '{margin_time}';".format( margin_time=start_time)) elif finish_time: results = db.fetch_all_result( "select query, start_time, finish_time from wdr where finish_time <= '{margin_time}';".format( margin_time=finish_time)) else: current_time = int(time.time()) # If not input start_time and finish_time, then default search for 12 hours of historical data. margin_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(current_time - 43200)) results = db.fetch_all_result( "select query, start_time, finish_time from wdr where start_time >= '{margin_time}';".format( margin_time=margin_time)) if not results: continue for wdr_query, start_time, finish_time in results: try: processed_wdr_query = input_sql_processing(wdr_query).replace(' ', '') processed_user_query = input_sql_processing(user_query).replace(' ', '') if processed_user_query == processed_wdr_query: user_query = remove_comment(user_query) diagnose_result = diagnosing.diagnose_user(db, user_query, start_time) start_time, finish_time = diagnose_result[0], diagnose_result[1] rca_ana = "" suggestion_ana = "" if not diagnose_result[2:]: rca_ana = "the query has no slow features or its syntax is incorrect." suggestion_ana = "please check the query threshold, check the log, and analyze the reason." else: index = 1 for rca, suggestion in diagnose_result[2:]: rca_ana = rca_ana + "{index}: {rca}\n".format(index=index, rca=rca) suggestion_ana = suggestion_ana + "{index}: {suggestion}\n".format(index=index, suggestion=suggestion) index += 1 display_table.add_row([database, start_time, finish_time, rca_ana, suggestion_ana]) except Exception as e: # Prevent unknown accidents from causing the program to stop continue except Exception as e: print(str(e)) return print(display_table.get_string()) def deploy(args): print('Please input the password of {user}@{host}: '.format(user=args.user, host=args.host)) command = 'sh start.sh --deploy {host} {user} {project_path}' \ .format(user=args.user, host=args.host, project_path=args.project_path) if subprocess.call(shlex.split(command), cwd=BIN_PATH) == 0: print("\nExecute successfully.") else: print("\nExecute unsuccessfully.") def show_metrics(): from prettytable import PrettyTable from detector.service.storage.sqlite_storage import SQLiteStorage display_table = PrettyTable() display_table.field_names = ['Metric name', 'Current rows'] database_dir = config.get('database', 'database_dir') for database in os.listdir(database_dir): with SQLiteStorage(os.path.join(database_dir, database)) as db: table = 'os_exporter' fields = db.get_all_fields(table) rows = db.get_table_rows(table) for field in fields: display_table.add_row([field, rows]) print(display_table.get_string()) def manage_local_service(args): daemon = Daemon() daemon.set_stdout(os.devnull).set_stderr(os.devnull) if args.role == 'collector': from detector.service import service_main daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/collector.pid')) daemon.set_function(service_main) elif args.role == 'monitor': from detector.metric_detector import detector_main daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/detector.pid')) daemon.set_function(detector_main) elif args.role == 'agent': from agent.metric_agent import agent_main pre_check = check_collector() and check_db_alive(port=config.get('agent', 'db_port')) if args.mode == 'start' and not pre_check: print('FATAL: Agent process failed to start.', file=sys.stderr, flush=True) return daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/agent.pid')) 
daemon.set_function(agent_main) else: print('FATAL: incorrect parameter.') print(usage()) return if args.mode == 'start': if args.role == 'collector': listen_port = config.get('server', 'listen_port') check_port_occupancy(listen_port) daemon.start() else: daemon.stop() def manage_remote_service(args): print('Please input the password of {user}@{host}: '.format(user=args.user, host=args.host)) if args.mode == 'start': command = "sh start.sh --start_remote_service {host} {user} {project_path} {role}" \ .format(user=args.user, host=args.host, role=args.role, project_path=args.project_path) else: command = "sh stop.sh --stop_remote_service {host} {user} {project_path} {role}" \ .format(user=args.user, host=args.host, role=args.role, project_path=args.project_path) if subprocess.call(shlex.split(command), cwd=BIN_PATH) == 0: print("\nExecute successfully.") else: print("\nExecute unsuccessfully.") def main(): args = parse_args() if args.mode in ('start', 'stop') and all((args.user, args.host, args.project_path, args.role)): manage_remote_service(args) elif args.mode in ('start', 'stop') and args.role and not any((args.user, args.host, args.project_path)): manage_local_service(args) elif args.mode == 'deploy' and all((args.user, args.host, args.project_path)): deploy(args) elif args.mode == 'show_metrics': show_metrics() elif args.mode == 'forecast': forecast(args) elif args.mode == 'diagnosis': slow_sql_rca(args) else: print("FATAL: incorrect parameter.") print(usage()) return -1 if __name__ == '__main__': main()
python
#!/usr/bin/env python

from setuptools import setup, find_packages
import os
import shutil
from subprocess import check_output

setup(
    name="array_neutron_lbaas",
    description="Array vADC OpenStack Neutron LBaaS Device Driver",
    long_description=open("README.md").read(),
    version="1.0.0",
    url="https://www.arraynetworks.com.cn",
    packages=find_packages(),
    scripts=[
        "scripts/array_lbaas_config_generator",
        "scripts/array_lbaas_init_db",
        "scripts/array_lbaas_init_network",
        "scripts/array_lbaas_tenant_customization"
    ],
    data_files=[
        ("/etc/neutron/conf.d/neutron-server", ["conf/array_vapv_lbaas.conf"]),
        ("/etc/dhcp/octavia/", ["conf/dhclient.conf"])
    ],
    license="Apache Software License",
    platforms=["Linux"],
    classifiers=[
        "Intended Audience :: Information Technology",
        "Intended Audience :: System Administrators",
        "Environment :: OpenStack",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7"
    ]
)
python
# from typing import Optional # from discord import Embed # from discord.utils import get # from discord.ext.menus import MenuPages, ListPageSource # from discord.ext.commands import Cog, command # def syntax(command): # cmd_and_aliases = "|".join([str(command), *command.aliases]) # params = [] # for key, value in command.params.items(): # if key not in ("self", "ctx"): # params.append(f"[{key}]" if "NoneType" in str(value) else f"<{key}>") # params = " ".join(params) # return f"```{cmd_and_aliases} {params}```" # class HelpMenu(ListPageSource): # def __init__(self, ctx, data): # self.ctx = ctx # super().__init__(data, per_page=5) # async def write_page(self, menu, fields=[]): # offset = (menu.current_page * self.per_page) + 1 # len_data = len(self.entries) # embed = Embed( # title="Help", # description="Welcome to the boNo help dialog!", # colour=self.ctx.author.colour, # ) # embed.set_thumbnail(url=self.ctx.guild.me.avatar_url) # embed.set_footer( # text=f"{offset:,} - {min(len_data, offset+self.per_page-1):,} of {len_data:,} commands." # ) # for name, value in fields: # embed.add_field(name=name, value=value, inline=False) # return embed # async def format_page(self, menu, entries): # fields = [] # for entry in entries: # fields.append((entry.brief or "No Description", syntax(entry))) # return await self.write_page(menu, fields) # class Help(Cog): # def __init__(self, bot): # self.bot = bot # self.bot.remove_command("help") # async def cmd_help(self, ctx, command): # embed = Embed( # title=f"Help with `{command}`", # description=syntax(command), # colour=ctx.author.colour, # ) # embed.add_field(name="Command Description", value=command.help) # await ctx.send(embed=embed) # @command(name="help") # async def show_help(self, ctx, cmd: Optional[str]): # """ # Helps to know commands better. # """ # if cmd is None: # menu = MenuPages( # source=HelpMenu(ctx, list(self.bot.commands)), # clear_reactions_after=True, # delete_message_after=True, # ) # await menu.start(ctx) # else: # if (command := get(self.bot.commands, name=cmd)) : # await self.cmd_help(ctx, command) # else: # await ctx.send( # "I don't follow that command. Perhaps that's an alias or not a command at all." 
# ) # @Cog.listener() # async def on_ready(self): # if not self.bot.ready: # self.bot.cogs_ready.ready_up("help") # def setup(bot): # bot.add_cog(Help(bot)) from discord.ext import commands from discord.ext.commands import Cog from utils.util import Pag class Help(Cog): def __init__(self, bot): self.bot = bot self.bot.remove_command("help") self.cmds_per_page = 6 def get_command_signature(self, command: commands.Command, ctx: commands.Context): aliases = "|".join(command.aliases) cmd_invoke = f"[{command.name}|{aliases}]" if command.aliases else command.name full_invoke = command.qualified_name.replace(command.name, "") signature = f"{ctx.prefix}{full_invoke}{cmd_invoke}" return signature async def return_filtered_commands(self, walkable, ctx): filtered = [] for c in walkable.walk_commands(): try: if c.hidden: continue elif c.parent: continue await c.can_run(ctx) filtered.append(c) except commands.CommandError: continue return self.return_sorted_commands(filtered) def return_sorted_commands(self, commandList): return sorted(commandList, key=lambda x: x.name) async def setup_help_pag(self, ctx, entity=None, title=None): entity = entity or self.bot title = title or self.bot.description pages = [] if isinstance(entity, commands.Command): filtered_commands = ( list(set(entity.all_commands.values())) if hasattr(entity, "all_commands") else [] ) filtered_commands.insert(0, entity) else: filtered_commands = await self.return_filtered_commands(entity, ctx) for i in range(0, len(filtered_commands), self.cmds_per_page): next_commands = filtered_commands[i : i + self.cmds_per_page] commands_entry = "" for cmd in next_commands: desc = cmd.short_doc or cmd.description signature = self.get_command_signature(cmd, ctx) subcommand = "Has subcommands" if hasattr(cmd, "all_commands") else "" commands_entry += ( f"β€’ **__{cmd.name}__**\n```\n{signature}\n```\n{desc}\n" if isinstance(entity, commands.Command) else f"β€’ **__{cmd.name}__**\n{desc}\n {subcommand}\n" ) pages.append(commands_entry) await Pag(title=title, color=0xCE2029, entries=pages, length=1).start(ctx) @commands.command( name="help", aliases=["h", "commands"], description="The help command. Duh!" ) async def help_command(self, ctx, *, entity=None): if not entity: await self.setup_help_pag(ctx) else: cog = self.bot.get_cog(entity) if cog: await self.setup_help_pag(ctx, cog, f"{cog.qualified_name}'s commands") else: command = self.bot.get_command(entity) if command: await self.setup_help_pag(ctx, command, command.name) else: await ctx.send("Entity not found.") @commands.Cog.listener() async def on_ready(self): if not self.bot.ready: self.bot.cogs_ready.ready_up("help") print("cog ready") def setup(bot): bot.add_cog(Help(bot))
python
import heapq
from typing import List


## heap
# Time: O(NlogK)
class Solution:
    def findKthLargest(self, nums: List[int], k: int) -> int:
        return heapq.nlargest(k, nums)[-1]


## sort
# Time: O(NlogN)
class Solution:
    def findKthLargest(self, nums: List[int], k: int) -> int:
        nums.sort()
        return nums[-k]
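# Added usage sketch: either variant returns the k-th largest element, e.g.
if __name__ == "__main__":
    assert Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2) == 5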
python
from typing import Any

from .metaf_base import DataDescriptor


class Direction(DataDescriptor):
    def __init__(self, name: str):
        super().__init__(name)

    def _handler(self, value):
        return float(value)


class Speed(DataDescriptor):
    def __init__(self, name: str):
        super().__init__(name)

    def _handler(self, value):
        return float(value)


class Wind:
    __direction = Direction("direction")
    __speed = Speed("speed")
    __gust = Speed("gust")

    def __init__(self, group: str):
        self.__group = group
        self.__direction = group[:3]
        self.__speed = group[3:5]
        self.__gust = group[6:8]

    @property
    def direction_in_degrees(self):
        return self.__direction

    @property
    def direction_in_radians(self):
        return self.__direction * 3.14 / 180

    @property
    def speed_in_mps(self):
        return self.__speed

    @property
    def speed_in_kph(self):
        return self.__speed * 3.6

    @property
    def gust(self):
        return self.__gust
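# Added usage sketch with a hypothetical METAR wind group "27015G25KT",
# assuming DataDescriptor provides the usual __get__/__set__ pair that feeds
# assigned values through _handler (so the sliced digits become floats):
#
#     wind = Wind("27015G25KT")
#     wind.direction_in_degrees   # -> 270.0
#     wind.speed_in_kph           # -> 15.0 * 3.6
#     wind.gust                   # -> 25.0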
python
import csv
import numpy as np
from scipy import signal
import copy


def getCsv(txtFileName='seventeenth.txt'):
    with open(txtFileName) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=' ')
        return list(csv_reader)


def parseCharacter(character):
    value = 1 if character == '#' else 0
    return value


def parseInput(csvFile):
    return [[parseCharacter(character) for character in (list(row[0]))] for row in csvFile]


def prepareInitialArray(input, plannedIterationSteps):
    inputArray = np.array(input)
    inputArrayShape = list(np.shape(inputArray))
    initialArrayShapeXAxis = inputArrayShape[0] + 2 * plannedIterationSteps
    initialArrayShapeYAxis = inputArrayShape[1] + 2 * plannedIterationSteps
    initialArrayShapeZAxis = 1 + 2 * plannedIterationSteps
    initialArrayShape = [initialArrayShapeXAxis, initialArrayShapeYAxis, initialArrayShapeZAxis]
    initialArray = np.zeros(initialArrayShape)
    initialArray[plannedIterationSteps:plannedIterationSteps + inputArrayShape[0],
                 plannedIterationSteps:plannedIterationSteps + inputArrayShape[1],
                 plannedIterationSteps] = inputArray
    return initialArray


def determineConfiguration(initialState, iterationSteps):
    recentState = copy.deepcopy(initialState)
    summationFilter = np.ones((3, 3, 3))
    summationFilter[1, 1, 1] = 0
    for counter in range(iterationSteps):
        summationArray = signal.convolve(recentState, summationFilter, 'same', 'direct')
        sumIsThree = summationArray == 3
        sumIsNotTwoOrThree = np.logical_not(np.logical_or(summationArray == 2, summationArray == 3))
        recentState[np.logical_and(recentState == 0, sumIsThree)] = 1
        recentState[np.logical_and(recentState == 1, sumIsNotTwoOrThree)] = 0
    return recentState


csvFile = getCsv()
providedInput = parseInput(csvFile)
iterationSteps = 6
initialArray = prepareInitialArray(providedInput, iterationSteps)
finalState = determineConfiguration(initialArray, iterationSteps)
print(np.sum(finalState))
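# Added illustration (not part of the original puzzle solution): the same
# convolution trick on a tiny 2D grid, showing how a ones-kernel with the
# centre zeroed yields per-cell live-neighbour counts in a single call.
demoGrid = np.array([[0, 1, 0],
                     [0, 1, 0],
                     [0, 1, 0]])
demoKernel = np.ones((3, 3))
demoKernel[1, 1] = 0
demoCounts = signal.convolve(demoGrid, demoKernel, 'same', 'direct')
print(demoCounts[1, 1])  # the centre cell has 2 live neighbours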
python
from gi.repository import Gtk import asyncio import threading class ThreadLoop(threading.Thread): def __init__(self, loop): threading.Thread.__init__(self) self.loop = loop def run(self): print("starting Thread") self.loop.run_forever() print("Ending Thread") class ClientProtocol(asyncio.Protocol): def __init__(self, text_buf, loop): self.text_buf = text_buf self.loop = loop self.trasport = None def connection_made(self, transport): self.transport = transport def data_received(self, data): iter_end = self.text_buf.get_end_iter() self.text_buf.insert(iter_end, "\n{}".format(data.decode())) def connection_lost(self, exc): iter_end = self.text_buf.get_end_iter() self.text_buf.insert(iter_end, "\n disconnected") self.transport.close() print("transport has closed") #print(dir(self.loop)) print("self.loop.stop()") print(self.loop.stop()) def send_msg(self, message): self.transport.write(message.encode()) class Handler: def __init__(self, window, text_entry, text_box): self.window = window self.text_entry = text_entry self.text_box = text_box self.text_buf = self.text_box.get_buffer() self.window.connect('delete-event', self.quit) self.loop = None def _send_msg(self, msg): self.transport.write(msg.encode()) @property def _can_send_msg(self): result = False if self.loop: if self.loop.is_running(): result = True else: self.loop = None return result def connect_to_server(self, address=('127.0.0.1', 3333)): self.loop = asyncio.get_event_loop() coro = self.loop.create_connection(lambda: ClientProtocol( self.text_buf, self.loop), '127.0.0.1', 3333) self.transport, self.protocol = self.loop.run_until_complete(coro) self.thread = ThreadLoop(self.loop) self.thread.start() def connect_button_clicked(self, widget): print("connect button clicked") if not self._can_send_msg: self.connect_to_server() def send_button_clicked(self, widget): print("sending") text = self.text_entry.get_text() # end_iter = self.text_buf.get_end_iter() if self._can_send_msg: self._send_msg(text) def quit(self, *args): print("quit!!!!") print(args) if self._can_send_msg: self._send_msg("/disconnect") Gtk.main_quit() builder = Gtk.Builder() builder.add_from_file("chat_test.glade") window = builder.get_object("window1") text_entry = builder.get_object("text_entry") text_box = builder.get_object("textbox") builder.connect_signals(Handler(window, text_entry, text_box)) window.show_all() Gtk.main()
python
#coding: utf-8 if __name__ == '__main__': st = "data/mult/result" print st t = open("data/resultX.csv","w") t.write("Id,Tags\n") a = 0 for i in range(21): print i f = open(st+str(i)+".csv") h = f.readlines() temp = 0 print len(h) for line in h: t.write(line) a += 1 temp += 1 print "temp",temp print "total",a
python
"""Sweep tests""" import pytest import wandb def test_create_sweep(live_mock_server, test_settings): live_mock_server.set_ctx({"resume": True}) sweep_config = { "name": "My Sweep", "method": "grid", "parameters": {"parameter1": {"values": [1, 2, 3]}}, } sweep_id = wandb.sweep(sweep_config) assert sweep_id == "test" def test_minmax_validation(): api = wandb.apis.InternalApi() sweep_config = { "name": "My Sweep", "method": "random", "parameters": {"parameter1": {"min": 0, "max": 1}}, } filled = api.api._validate_config_and_fill_distribution(sweep_config) assert "distribution" in filled["parameters"]["parameter1"] assert "int_uniform" == filled["parameters"]["parameter1"]["distribution"] sweep_config = { "name": "My Sweep", "method": "random", "parameters": {"parameter1": {"min": 0.0, "max": 1.0}}, } filled = api.api._validate_config_and_fill_distribution(sweep_config) assert "distribution" in filled["parameters"]["parameter1"] assert "uniform" == filled["parameters"]["parameter1"]["distribution"] sweep_config = { "name": "My Sweep", "method": "random", "parameters": {"parameter1": {"min": 0.0, "max": 1}}, } with pytest.raises(ValueError): api.api._validate_config_and_fill_distribution(sweep_config)
python
import os import signal from abc import ABCMeta, abstractmethod from multiprocessing import Pool from django.conf import settings import pymei import solr DEFAULT_MIN_GRAM = 2 DEFAULT_MAX_GRAM = 10 class AbstractMEIConverter: __metaclass__ = ABCMeta TYPE = "cantusdata_music_notation" def __init__(self, file_name, siglum_slug, manuscript_id, min_gram=DEFAULT_MIN_GRAM, max_gram=DEFAULT_MAX_GRAM): self.file_name = file_name self.siglum_slug = siglum_slug self.manuscript_id = manuscript_id self.min_gram = min_gram self.max_gram = max_gram self.doc = pymei.documentFromFile(str(file_name), False).getMeiDocument() self.page_number = getPageNumber(file_name) solrconn = solr.SolrConnection(settings.SOLR_SERVER) self.image_uri = getImageURI(file_name, manuscript_id, solrconn) @classmethod def convert(cls, directory, siglum_slug, id, processes=None, **options): mei_files = cls._get_file_list(directory) if processes == 0: processed = cls._process_in_sequence(mei_files, siglum_slug, id, **options) else: processed = cls._process_in_parallel(mei_files, siglum_slug, id, processes=processes, **options) return mei_files, processed @classmethod def _get_file_list(cls, directory): """Generate a list of files to process""" mei_files = [] for root, dirs, files in os.walk(directory): # Skip .git directories try: git_index = dirs.index('.git') except ValueError: pass else: del dirs[git_index] for f in files: if f.startswith("."): continue if os.path.splitext(f)[1] == '.mei': mei_files.append(os.path.join(root, f)) mei_files.sort() return mei_files @classmethod def _process_in_sequence(cls, mei_files, siglum_slug, id, **options): for file_name in mei_files: ngrams = cls.process_file(file_name, siglum_slug, id, **options) yield file_name, ngrams @classmethod def _process_in_parallel(cls, mei_files, siglum_slug, id, processes, **options): pool = Pool(initializer=init_worker, processes=processes) args = ((cls, file_name, siglum_slug, id, options) for file_name in mei_files) return pool.imap(process_file_in_worker, args) @classmethod def process_file(cls, file_name, siglum_slug, id, **options): inst = cls(file_name, siglum_slug, id, **options) return inst.process() @abstractmethod def process(self): raise NotImplementedError('process()') def init_worker(): # Allow KeyboardInterrupt to propagate to the parent process signal.signal(signal.SIGINT, signal.SIG_IGN) def process_file_in_worker(params): cls, file_name, siglum_slug, id, options = params ngrams = list(cls.process_file(file_name, siglum_slug, id, **options)) return file_name, ngrams def getPageNumber(ffile): """ Extract the page number from the file name :param ffile: :return: image URI as a string """ return str(ffile).split('_')[-1].split('.')[0] def getImageURI(ffile, manuscript_id, solrconn): """ Extract the page number from the file name and get the corresponding image URI from Solr :param ffile: :param manuscript_id: :param solrconn: :return: image URI as a string """ # Send the value of the folio name to Solr and get the corresponding URI folio_name = getPageNumber(ffile) composed_request = u'type:"cantusdata_folio" AND manuscript_id:{0} AND number:{1}' \ .format(manuscript_id, folio_name) result = solrconn.query(composed_request, rows=1, fields=['image_uri']) return result.results[0]['image_uri']
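A minimal sketch of a concrete subclass, assuming nothing beyond the abstract interface defined above; the single placeholder document it yields is illustrative and not the project's real n-gram output.

# Hypothetical illustration: a trivial concrete converter that emits one
# placeholder document per page instead of real n-grams from the MEI tree.
class DummyMEIConverter(AbstractMEIConverter):
    def process(self):
        yield {
            'type': self.TYPE,
            'siglum_slug': self.siglum_slug,
            'manuscript_id': self.manuscript_id,
            'folio': self.page_number,
            'image_uri': self.image_uri,
        }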
python
import sys import atlednolispe_settings # private_password from .base import * DEBUG = False DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': atlednolispe_settings.DATABASE_NAME, 'USER': atlednolispe_settings.USER, 'PASSWORD': atlednolispe_settings.PASSWORD, 'HOST': atlednolispe_settings.HOST, 'PORT': '3306', 'CONN_MAX_AGE': 60, # like connect pool } } THEME_DIR = 'themes' THEME_TYPE = 'html5up' THEME = os.path.join(THEME_DIR, THEME_TYPE) SITE_PACKAGES = [s_p for s_p in sys.path if s_p.endswith('site-packages')][0] TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'templates', THEME), os.path.join(SITE_PACKAGES, 'xadmin/templates'), os.path.join(SITE_PACKAGES, 'crispy_forms/templates'), os.path.join(SITE_PACKAGES, 'reversion/templates'), os.path.join(SITE_PACKAGES, 'ckeditor/templates'), os.path.join(SITE_PACKAGES, 'ckeditor_uploader/templates'), os.path.join(SITE_PACKAGES, 'rest_framework/templates'), ], 'APP_DIRS': False, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], 'libraries': { 'filters': 'templatetags.filters' } }, }, ] STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), os.path.join(SITE_PACKAGES, 'rest_framework/static'), ] CACHES = { "default": { "BACKEND": "django_redis.cache.RedisCache", "LOCATION": "redis://127.0.0.1:6379/1", "OPTIONS": { "CLIENT_CLASS": "django_redis.client.DefaultClient", "PARSER_CLASS": "redis.connection.HiredisParser", } } } CKEDITOR_CONFIGS = { 'awesome_ckeditor': { # set the name of the config 'toolbar': 'Full', 'height': 300, # 'width': 1200, 'tabSpaces': 4, }, } DEFAULT_FILE_STORAGE = 'blog.storage.MyStorage' # django-debug-toolbar & silk if DEBUG: TEMPLATES[0]['DIRS'] += [ os.path.join(SITE_PACKAGES, 'debug_toolbar/templates'), os.path.join(SITE_PACKAGES, 'silk/templates'), ] INSTALLED_APPS += [ 'debug_toolbar', 'silk', ] MIDDLEWARE += [ 'debug_toolbar.middleware.DebugToolbarMiddleware', 'silk.middleware.SilkyMiddleware', ] INTERNAL_IPS = ['127.0.0.1'] # debug-toolbar SILKY_PYTHON_PROFILER = True else: ALLOWED_HOSTS = [ # required if DEBUG = False atlednolispe_settings.ALLOWED_HOST1, '127.0.0.1', ]
python
from xml.dom.minidom import Document, parseString from xml.parsers.expat import ExpatError import pytest from sunpy.util import xml def test_xml_to_dict1(): """ should return dict of xml string. """ source_xml = "<outer>\ <inner1>one</inner1>\ <inner2>two</inner2>\ </outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': {u'inner2': u'two', u'inner1': u'one'}} assert xml_dict == expected_dict def test_xml_to_dict2(): """ should return dict of xml string and if a tag is duplicated it takes the last one. """ source_xml = "<outer>\ <inner1>one-one</inner1>\ <inner1>one-two</inner1>\ <inner2>two-one</inner2>\ <inner2>two-two</inner2>\ </outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': {u'inner2': u'two-two', u'inner1': u'one-two'}} assert xml_dict == expected_dict def test_xml_to_dict3(): """ should return dict of xml string with empty value if there are no inner elements. """ source_xml = "<outer/>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': ''} assert xml_dict == expected_dict def test_xml_to_dict4(): """ should return dict of xml string with empty value if there are no inner elements. """ source_xml = "<outer></outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': ''} assert xml_dict == expected_dict def test_xml_to_dict5(): """ should return dict of xml string with 2 layer nesting. """ source_xml = "<outer>\ <mid1>\ <inner1>one-one</inner1>\ </mid1>\ <mid2>\ <inner2>two-one</inner2>\ </mid2>\ </outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': {u'mid2': {u'inner2': u'two-one'}, u'mid1': {u'inner1': u'one-one'}}} assert xml_dict == expected_dict def test_xml_to_dict6(): """ should return dict of xml string with 2 layer nesting and if a tag is duplicated it takes the last one. """ source_xml = "<outer>\ <mid>\ <inner1>one-one</inner1>\ </mid>\ <mid>\ <inner2>two-one</inner2>\ </mid>\ </outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': {u'mid': {u'inner2': u'two-one'}}} assert xml_dict == expected_dict def test_xml_to_dict7(): """ should raise TypeError when passed None. """ assert pytest.raises(TypeError, xml.xml_to_dict, None) def test_xml_to_dict8(): """ should raise TypeError when passed non string. """ assert pytest.raises(TypeError, xml.xml_to_dict, 9) def test_xml_to_dict9(): """ should raise ExpatError when passed empty string. """ assert pytest.raises(ExpatError, xml.xml_to_dict, "") def test_xml_to_dict10(): """ should raise ExpatError when passed space. """ assert pytest.raises(ExpatError, xml.xml_to_dict, " ") def test_get_node_text1(): """ should raise NotTextNodeError if there is a non text node. """ doc = Document() outer = doc.createElement("outer") doc.appendChild(outer) pytest.raises(xml.NotTextNodeError, xml.get_node_text, doc) def test_get_node_text2(): """ should return empty string for a node with no child nodes. """ assert xml.get_node_text(Document()) == "" def test_get_node_text3(): """ should return node text. """ node = parseString("<outer>one</outer>") text_node = node.childNodes[0] assert xml.get_node_text(text_node) == "one" def test_get_node_text4(): """ should raise AttributeError when sent None. """ assert pytest.raises(AttributeError, xml.get_node_text, None) def test_get_node_text5(): """ should raise AttributeError when sent wrong type. """ assert pytest.raises(AttributeError, xml.get_node_text, "wrong type") def test_node_to_dict1(): """ should return dict of node. 
""" doc = Document() outer = doc.createElement("outer") doc.appendChild(outer) inner1 = doc.createElement("inner1") inner2 = doc.createElement("inner2") outer.appendChild(inner1) outer.appendChild(inner2) inner1_text = doc.createTextNode("one") inner2_text = doc.createTextNode("two") inner1.appendChild(inner1_text) inner2.appendChild(inner2_text) expected_dict = {'outer': {'inner2': 'two', 'inner1': 'one'}} xml_dict = xml.node_to_dict(doc) assert xml_dict == expected_dict def test_node_to_dict2(): """ should return dict of node double nested. """ doc = Document() outer = doc.createElement("outer") doc.appendChild(outer) mid1 = doc.createElement("mid1") outer.appendChild(mid1) mid2 = doc.createElement("mid2") outer.appendChild(mid2) inner1 = doc.createElement("inner1") inner2 = doc.createElement("inner2") mid1.appendChild(inner1) mid2.appendChild(inner2) inner1_text = doc.createTextNode("one") inner2_text = doc.createTextNode("two") inner1.appendChild(inner1_text) inner2.appendChild(inner2_text) expected_dict = {'outer': {'mid2': {'inner2': 'two'}, 'mid1': {'inner1': 'one'}}} xml_dict = xml.node_to_dict(doc) assert xml_dict == expected_dict def test_node_to_dict3(): """ should return empty dict when sent empty doc. """ expected_dict = {} xml_dict = xml.node_to_dict(Document()) assert xml_dict == expected_dict def test_node_to_dict4(): """ should raise AttributeError when sent wrong type. """ assert pytest.raises(AttributeError, xml.node_to_dict, 9) def test_node_to_dict5(): """ should raise AttributeError when sent None. """ assert pytest.raises(AttributeError, xml.node_to_dict, None) def test_with_multiple_children_in_list(): """ Setting the 'multiple' attribute of parent node should put child nodes in a list. """ def getChild(lst_of_children, key, value): for child in lst_of_children: if child[key] == value: return child raise ValueError("No children with key {0} set to {1} found.".format(key, value)) source = '''<?xml version="1.0" encoding="UTF-8"?> <Config> <Name>With multiple children</Name> <Children multiple="true"> <Child> <Name>First Child</Name> <Value>Value 1</Value> </Child> <Child> <Name>Second Child</Name> <Value>Value 2</Value> </Child> </Children> </Config>''' expected = {'Config': {'Children': [{'Name': 'First Child', 'Value': 'Value 1'}, {'Name': 'Second Child', 'Value': 'Value 2'}], 'Name': 'With multiple children'}} actual = xml.xml_to_dict(source) assert len(expected['Config']) == len(actual['Config']) assert expected['Config']['Name'] == actual['Config']['Name'] assert len(actual['Config']['Children']) == 2 # As the child dictionaries are in lists we cannot be certain what order # they are in. Test individualy. expected_children = expected['Config']['Children'] actual_children = actual['Config']['Children'] expected_first_child = getChild(expected_children, key='Name', value='First Child') actual_first_child = getChild(actual_children, key='Name', value='First Child') assert expected_first_child == actual_first_child expected_second_child = getChild(expected_children, key='Name', value='Second Child') actual_second_child = getChild(actual_children, key='Name', value='Second Child') assert expected_second_child == actual_second_child
python
"""Interface of RLAlgorithm.""" import abc class RLAlgorithm(abc.ABC): """Base class for all the algorithms. Note: If the field sampler_cls exists, it will be by Trainer.setup to initialize a sampler. """ # pylint: disable=too-few-public-methods @abc.abstractmethod def train(self, trainer): """Obtain samplers and start actual training for each epoch. Args: trainer (Trainer): Trainer is passed to give algorithm the access to trainer.step_epochs(), which provides services such as snapshotting and sampler control. """
python
import data import copy, logging import numpy as np def minimize_states_and_actions_to_iterate(): logging.info("Minimizing states and actions to iterate for each engine type...") for engine_subtype in data.engine_subtypes: num_working_engines = data.engines_info[engine_subtype]['NUM_WORKING_ENGINES'] current_state = data.engines_info[engine_subtype]['CURRENT_STATE'][:] if num_working_engines > 3: data.states_by_subtype[engine_subtype] = get_unique_list_of_lists(minimize_states(current_state, num_working_engines))[:] data.actions_by_subtype[engine_subtype] = minimize_actions(current_state, num_working_engines) else: data.states_by_subtype[engine_subtype] = get_unique_list_of_lists(data.all_possible_states[num_working_engines])[:] data.actions_by_subtype[engine_subtype] = data.all_possible_actions[num_working_engines][:] logging.info("The number of states and actions to iterate have been minimized.") def get_unique_list_of_lists(a_list): unique_list_of_lists = [] for l in a_list: if l not in unique_list_of_lists: unique_list_of_lists.append(l) return unique_list_of_lists def minimize_states(current_state, num_working_engines): max_num_engines_currently_at_any_hub = max(current_state) all_states = data.all_possible_states[num_working_engines] states_minimized = [] if max_num_engines_currently_at_any_hub > 1: # If at least one hub currently has more than 1 engine num_hubs_with_max_num_engines = current_state.count(max_num_engines_currently_at_any_hub) if num_hubs_with_max_num_engines > 1: # If more than one hub currently has more than 1 engine indices_of_hubs_with_max_num_engines = [i for i, num in enumerate(current_state) if num == max_num_engines_currently_at_any_hub] indices_of_hubs_with_max_num_engines.sort() for state in all_states: # For every possible state being considered state_to_edit = state[:] num_engines_at_hubs_with_max_num_engines = [] for i in reversed(indices_of_hubs_with_max_num_engines): num_engines_at_hubs_with_max_num_engines.append(state_to_edit.pop(i)) # If at least 1 engine is at each hub with maximum number of engines allowed AND all other hubs have 3 or less engines if all(num >= 1 for num in num_engines_at_hubs_with_max_num_engines) and (max(state_to_edit) <= 3): states_minimized.append(state) else: # If one hub currently has more than 1 engine index_of_hub_with_max_num_engines = current_state.index(max_num_engines_currently_at_any_hub) for state in all_states: # For every possible state being considered state_to_edit = state[:] num_at_hub_with_max_num_engines = state_to_edit.pop(index_of_hub_with_max_num_engines) # If at least 1 engine is at hub with maximum number of engines allowed AND all other hubs have 3 or less engines if (num_at_hub_with_max_num_engines >= 1) and (max(state_to_edit) <= 3): states_minimized.append(state) else: # If there is max 1 engine currently at any hub for state in all_states: if max(state) <= 3: # If no more than 3 engines are at any one hub for the new state states_minimized.append(state) return states_minimized def minimize_actions(current_state, num_working_engines): all_actions = data.all_possible_actions[num_working_engines][:] actions_minimized = [] for action in all_actions: current_state_to_edit = current_state[:] valid = True for engine_from in range(7): for engine_to in range(7): if valid: num_engines_to_move = action[engine_from][engine_to] # If the current index indicates engines are moved from one hub to another if num_engines_to_move > 0: num_engines_at_current_hub = current_state_to_edit[engine_from] # If the number of 
engines at the hub to move engines from is equal to zero if num_engines_at_current_hub == 0: valid = False # The action is not valid # If the number of engines to move from the hub is greater than the number of engines at the hub elif num_engines_to_move > num_engines_at_current_hub: valid = False # The action is not valid else: # Edit the current state to reflect the engines being moved from the hub current_state_to_edit[engine_from] -= num_engines_to_move if valid: actions_minimized.append(action) actions_minimized = np.array(actions_minimized) return actions_minimized def validate_removal_and_engine_info(): for engine_subtype in data.engine_subtypes: assert (engine_subtype in data.aos_cost), "No AOS cost was provided for " + engine_subtype + " in the removal_info file. Please provide ALL info for this engine subtype in the removal_info file." assert (data.aos_cost[engine_subtype] > 0), "AOS cost for " + engine_subtype + " is not set to a positive value. Please provide a positive value indicating the expected AOS cost for this engine type in the removal_info file." assert (engine_subtype in data.engines_info), "No engine data was provided for " + engine_subtype + " in the engine_info file. Please provide ALL info for this engine subtype in the engine_info file." assert (data.engines_info[engine_subtype]['TOTAL_NUM_ENGINES'] <= 5), "The program is limited to running only for engine types with 5 or less total engines. The " + engine_subtype + " has more than 5 engines." total_engines = data.engines_info[engine_subtype]['NUM_WORKING_ENGINES'] + data.engines_info[engine_subtype]['NUM_BROKEN_ENGINES_ATL'] + data.engines_info[engine_subtype]['NUM_BROKEN_ENGINES_MSP'] assert (data.engines_info[engine_subtype]['TOTAL_NUM_ENGINES'] == total_engines), "The total number of engines does not equal the sum of engines working, engines broken at ATL, and engines broken at MSP for the " + engine_subtype + ". Make sure the value in the TOTAL_NUM_ENGINES column is equal to the sum of values in the TOTAL_NUM_WORKING, NUM_BROKEN_ATL, and NUM_BROKEN_MSP columns." assert (data.engines_info[engine_subtype]['NUM_WORKING_ENGINES'] == sum(data.engines_info[engine_subtype]['CURRENT_STATE'])), "The number of working engines does not equal the sum of engines currently at each hub for the " + engine_subtype + ". Make sure the value in the TOTAL_NUM_WORKING column is equal to the sum of values in the NUM_WORKING columns for each hub." def validate_engine_subtype_data(): pass
python
import csv, sys

if len(sys.argv) != 6:
    print("format: python3 join_csv.py OUT-FILE FILE-1 KEY-INDEX-1 FILE-2 KEY-INDEX-2")
    exit()

key1 = int(sys.argv[3])
key2 = int(sys.argv[5])

# Read both input files up front so the rows can be joined in memory
# (a csv.reader can only be iterated once).
with open(sys.argv[2], newline='') as file1:
    rows1 = list(csv.reader(file1, delimiter=",", quotechar='"'))
with open(sys.argv[4], newline='') as file2:
    rows2 = list(csv.reader(file2, delimiter=",", quotechar='"'))

# Open the output file once and write every joined row to it.
with open(sys.argv[1], 'w', newline='') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for a in rows1:
        for aa in rows2:
            if a[key1] == aa[key2]:
                spamwriter.writerow(a + aa[key2 + 1:])
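A hedged usage sketch; the file names and key columns below are placeholders.

# Hypothetical invocation, joining two CSVs on their first column:
#   python3 join_csv.py joined.csv left.csv 0 right.csv 0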
python
#!/usr/local/bin/python3

import boto3
from botocore.client import Config
from botocore.vendored.requests.exceptions import ReadTimeout
import traceback
import json
import sys
from run_cumulus_task import run_cumulus_task
import requests, zipfile, io

client = boto3.client('stepfunctions', region_name = 'us-east-1')
lambda_client = boto3.client('lambda', region_name = 'us-east-1')


def handler(function, event, context):
    """handler that is provided to aws lambda"""
    return run_cumulus_task(function, event, context, {})


def get_lambda_function(lambda_arn):
    lambda_function = lambda_client.get_function(FunctionName=lambda_arn)
    lambda_code_url = lambda_function['Code']['Location']
    r = requests.get(lambda_code_url)
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall('.')
    module_str, function_str = lambda_function['Configuration']['Handler'].split('.')
    task = __import__(module_str)
    return getattr(task, function_str)


def step_function_handler(handler, activity_arn, lambda_arn):
    """
    This function polls AWS Step Functions for new activities
    and runs the process function for each message in activities
    """
    print('ics querying for task from %s' % activity_arn)

    # poll for new activities
    try:
        response = client.get_activity_task(activityArn=activity_arn)
        print('Received an activity. Processing it')
    except ReadTimeout:
        return
    except Exception as e:
        print('Activity Read error (%s). Trying again.' % str(e))
        return

    task_token = response.get('taskToken', None)
    output = None

    if task_token:
        try:
            function = get_lambda_function(lambda_arn)
            input = json.loads(response.get('input', '{}'))
            output = json.dumps(handler(function=function, event=input, context={}))
            return client.send_task_success(taskToken=task_token, output=output)
        except Exception as e:
            err = str(e)
            print("Exception when running task: %s" % err)
            tb = traceback.format_exc()
            # truncate long error messages so they stay within the Step Functions limit
            err = (err[:252] + ' ...') if len(err) > 252 else err
            client.send_task_failure(taskToken=task_token, error=err, cause=tb)
    else:
        print('No activity found')


def poll(activity_arn, lambda_arn):
    config = Config(read_timeout=70)
    print('outside of the loop')

    # loop forever
    while True:
        step_function_handler(handler, activity_arn, lambda_arn)
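A hedged sketch of how the poller might be started; both ARNs below are placeholders, not real resources.

# Hypothetical entry point with placeholder ARNs.
if __name__ == '__main__':
    example_activity_arn = 'arn:aws:states:us-east-1:123456789012:activity:example-activity'
    example_lambda_arn = 'arn:aws:lambda:us-east-1:123456789012:function:example-task'
    poll(example_activity_arn, example_lambda_arn)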
python
#####################################################################
#                                                                   #
#   SkillsFuture IBM Cloud Function Example 2                       #
#   This example is used to show how to get data from Discovery    #
#   and return it to Watson Assistant.                              #
#                                                                   #
#   input JSON: { "text": "What is Barn Town?"}                     #
#                                                                   #
#   WL IBM - 17 July 2019                                           #
#                                                                   #
#####################################################################

import os
import sys

try:
    from ibm_cloud import DiscoveryV1
except ImportError:
    from watson_developer_cloud import DiscoveryV1


def MakeReturnMessage(results):
    messageback = "Here are some answers from search:<br>\n"
    counter = 0
    for aresult in results:
        counter = counter + 1
        messageback = messageback + "<b>" + str(counter) + "</b> " + aresult["text"] + "<br>\n"
    return messageback


def main(dict):
    #create defaults for our variable
    text = ""

    #first, lets deconstruct the input variable
    if "text" in dict:
        text = dict["text"]

    #then create the discovery object, please choose the right version.
    discovery = ""
    if 'username' in dict:
        discovery = DiscoveryV1(version=dict['version'],
                                url=dict['url'],
                                username=dict['username'],
                                password=dict['password'])
    elif 'iam_apikey' in dict:
        discovery = DiscoveryV1(version=dict['version'],
                                url=dict['url'],
                                iam_apikey=dict['iam_apikey'])
    else:
        return { 'text': 'Error: Disc. Creds not specified!' }

    #query discovery
    get_disc = discovery.query(dict['envid'],
                               dict['colid'],
                               natural_language_query=text,
                               count=3)

    #get results
    get_results = get_disc.get_result()['results']

    #make the output message
    messageback = ""
    if len(get_results) > 0:
        messageback = MakeReturnMessage(get_results)
    else:
        messageback = "I am sorry, there are no results from search. Please try another question"

    #craft the output
    result = {"text":messageback}
    return result
python
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains tests for the logical behaviour of the tac negotiation skill.""" import copy from pathlib import Path from unittest.mock import patch from aea.decision_maker.gop import GoalPursuitReadiness, OwnershipState, Preferences from aea.helpers.preference_representations.base import ( linear_utility, logarithmic_utility, ) from aea.test_tools.test_skill import BaseSkillTestCase from packages.fetchai.skills.tac_control.helpers import ( determine_scaling_factor, generate_utility_params, ) from packages.fetchai.skills.tac_negotiation.dialogues import FipaDialogue from packages.fetchai.skills.tac_negotiation.strategy import Strategy from tests.conftest import ROOT_DIR class TestLogical(BaseSkillTestCase): """Logical Tests for tac negotiation.""" path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "tac_negotiation") @classmethod def setup(cls): """Setup the test class.""" tac_dm_context_kwargs = { "goal_pursuit_readiness": GoalPursuitReadiness(), "ownership_state": OwnershipState(), "preferences": Preferences(), } super().setup(dm_context_kwargs=tac_dm_context_kwargs) cls.register_as = "both" cls.search_for = "both" cls.is_contract_tx = False cls.ledger_id = "some_ledger_id" cls.location = {"longitude": 0.1270, "latitude": 51.5194} cls.search_radius = 5.0 cls.service_key = "tac_service" cls.strategy = Strategy( register_as=cls.register_as, search_for=cls.search_for, is_contract_tx=cls.is_contract_tx, ledger_id=cls.ledger_id, location=cls.location, service_key=cls.service_key, search_radius=cls.search_radius, name="strategy", skill_context=cls._skill.skill_context, ) cls.sender = "some_sender_address" cls.counterparty = "some_counterparty_address" cls.mocked_currency_id = "12" cls.mocked_currency_amount = 2000000 cls.mocked_amount_by_currency_id = { cls.mocked_currency_id: cls.mocked_currency_amount } cls.mocked_good_ids = ["13", "14", "15", "16", "17", "18", "19", "20", "21"] cls.mocked_good_quantities = [5, 7, 4, 3, 5, 4, 3, 5, 6] cls.mocked_quantities_by_good_id = dict( zip(cls.mocked_good_ids, cls.mocked_good_quantities) ) cls.mocked_ownership_state = ( cls._skill.skill_context.decision_maker_handler_context.ownership_state ) cls.mocked_ownership_state.set( cls.mocked_amount_by_currency_id, cls.mocked_quantities_by_good_id ) cls.exchange_params_by_currency_id = {cls.mocked_currency_id: 1.0} cls.utility_params_by_good_id = generate_utility_params( [cls._skill.skill_context.agent_address], cls.mocked_good_ids, determine_scaling_factor(cls.mocked_currency_amount), )[cls._skill.skill_context.agent_address] cls.mocked_preferences = ( cls._skill.skill_context.decision_maker_handler_context.preferences ) cls.mocked_preferences.set( exchange_params_by_currency_id=cls.exchange_params_by_currency_id, 
utility_params_by_good_id=cls.utility_params_by_good_id, ) @staticmethod def _calculate_score(preferences, ownership_state): """Calculate the score given a preferences and an ownership_state object.""" goods_score = logarithmic_utility( preferences.utility_params_by_good_id, ownership_state.quantities_by_good_id, ) money_score = linear_utility( preferences.exchange_params_by_currency_id, ownership_state.amount_by_currency_id, ) return goods_score + money_score def test_generated_proposals_increase_score_seller(self): """Test whether the proposals generated by _generate_candidate_proposals method of the Strategy class actually increases agent's score where role is seller.""" # setup is_searching_for_sellers = True # operation with patch.object( self.skill.skill_context.transactions, "ownership_state_after_locks", return_value=self.mocked_ownership_state, ) as mock_ownership: actual_proposals = self.strategy._generate_candidate_proposals( is_searching_for_sellers ) # after mock_ownership.assert_any_call(is_seller=is_searching_for_sellers) current_score = self._calculate_score( self.mocked_preferences, self.mocked_ownership_state ) for proposal in actual_proposals: # applying proposal on a new ownership_state terms = self.strategy.terms_from_proposal( proposal, self.sender, self.counterparty, FipaDialogue.Role.SELLER ) new_ownership_state = copy.copy(self.mocked_ownership_state) new_ownership_state.apply_delta( terms.amount_by_currency_id, terms.quantities_by_good_id ) # new score new_score = self._calculate_score( self.mocked_preferences, new_ownership_state ) assert new_score >= current_score def test_generated_proposals_increase_score_buyer(self): """Test whether the proposals generated by _generate_candidate_proposals method of the Strategy class actually increases agent's score where role is buyer.""" # setup is_searching_for_sellers = False # operation with patch.object( self.skill.skill_context.transactions, "ownership_state_after_locks", return_value=self.mocked_ownership_state, ) as mock_ownership: actual_proposals = self.strategy._generate_candidate_proposals( is_searching_for_sellers ) # after mock_ownership.assert_any_call(is_seller=is_searching_for_sellers) current_score = self._calculate_score( self.mocked_preferences, self.mocked_ownership_state ) for proposal in actual_proposals: # applying proposal on a new ownership_state terms = self.strategy.terms_from_proposal( proposal, self.sender, self.counterparty, FipaDialogue.Role.BUYER ) new_ownership_state = copy.copy(self.mocked_ownership_state) new_ownership_state.apply_delta( terms.amount_by_currency_id, terms.quantities_by_good_id ) # new score new_score = self._calculate_score( self.mocked_preferences, new_ownership_state ) assert new_score >= current_score
python
import requests import shutil import csv import os def writeToFile(directory, filename, filecontent): if directory: try: os.mkdir(directory) except: pass else: directory = "" with open(os.path.join(directory, filename), 'wb') as f: filecontent.raw.decode_content = True shutil.copyfileobj(filecontent.raw, f) directory = "data" instituicoesURL = "https://sisu-api.apps.mec.gov.br/api/v1/oferta/instituicoes" response = requests.get(instituicoesURL).json() instituicoes = [r["co_ies"] for r in response] baseURL = "https://sisu.mec.gov.br/static/listagem-alunos-aprovados-portal/" baseFilename = "listagem-alunos-aprovados-ies-{}-{}.csv" for i, instituicao in enumerate(instituicoes): termoAdesaoURL = "https://sisu-api.apps.mec.gov.br/api/v1/oferta/instituicao/{}".format(instituicao) response = requests.get(termoAdesaoURL).json() termoAdesao = response["0"]["co_termo_adesao"] filename = baseFilename.format(instituicao, termoAdesao) url = baseURL + filename file = requests.get(url, stream=True) if file.status_code != 200: print("[{}/{}] [ERROR {}] {}".format(i+1, len(instituicoes), file.status_code, filename)) else: writeToFile(directory, filename, file) print("[{}/{}] Saved to '{}'".format(i+1, len(instituicoes), filename))
python
NAMES = ["cmd_insensetive"] ANSWER = "You used `cmd_insensetive` command!"
python
# # PySNMP MIB module AC-LAG-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AC-LAG-MIB # Produced by pysmi-0.3.4 at Wed May 1 11:09:20 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # AcSlotNumber, acPport, AcPortNumber, AcOpStatus, AcNodeId, AcAdminStatus = mibBuilder.importSymbols("APPIAN-SMI-MIB", "AcSlotNumber", "acPport", "AcPortNumber", "AcOpStatus", "AcNodeId", "AcAdminStatus") OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion") NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup") ObjectIdentity, Integer32, ModuleIdentity, NotificationType, Gauge32, Unsigned32, Counter32, Bits, IpAddress, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, iso, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "ModuleIdentity", "NotificationType", "Gauge32", "Unsigned32", "Counter32", "Bits", "IpAddress", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "iso", "MibIdentifier") TextualConvention, MacAddress, TruthValue, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "MacAddress", "TruthValue", "DisplayString") acLagMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8)) if mibBuilder.loadTexts: acLagMIB.setLastUpdated('0002231600Z') if mibBuilder.loadTexts: acLagMIB.setOrganization('Appian Communications, Inc.') if mibBuilder.loadTexts: acLagMIB.setContactInfo(' David Ward') if mibBuilder.loadTexts: acLagMIB.setDescription('The Appian Communications Link Aggregation module for managing IEEE Std 802.3ad.') lagMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1)) class LacpKey(TextualConvention, Integer32): description = 'The Actor or Partner Key value.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535) class LacpState(TextualConvention, Bits): description = 'The Actor and Partner State values from the LACPDU.' status = 'current' namedValues = NamedValues(("lacpActivity", 0), ("lacpTimeout", 1), ("aggregation", 2), ("synchronization", 3), ("collecting", 4), ("distributing", 5), ("defaulted", 6), ("expired", 7)) class ChurnState(TextualConvention, Integer32): description = 'The state of the Churn Detection machine.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3)) namedValues = NamedValues(("noChurn", 1), ("churn", 2), ("churnMonitor", 3)) class PortList(TextualConvention, OctetString): description = "Each octet within this value specifies a set of eight ports, with the first octet specifying ports 1 through 8, the second octet specifying ports 9 through 16, etc. Within each octet, the most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port. Thus, each port of the bridge is represented by a single bit within the value of this object. 
If that bit has a value of '1' then that port is included in the set of ports; the port is not included if its bit has a value of '0'." status = 'current' class AcAggInstanceIndex(TextualConvention, Integer32): description = 'An instance of an aggregation group within this OSAP which is within the range of (1..64).' status = 'current' subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 64) class AcAggInstanceValue(TextualConvention, Integer32): description = 'An instance of an aggregation group within this OSAP which is within the range of (1..64). A value of zero indicates the aggregator instance has not been determined.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 64) acDot3adAgg = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1)) acDot3adAggPort = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2)) acDot3adTablesLastChanged = MibScalar((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 3), TimeTicks()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adTablesLastChanged.setStatus('current') if mibBuilder.loadTexts: acDot3adTablesLastChanged.setDescription('This object indicates the time of the most recent change to the acDot3adAggTable, acDot3adAggPortListTable, or acDot3adAggPortTable.') acDot3adAggTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1), ) if mibBuilder.loadTexts: acDot3adAggTable.setReference('IEEE 802.3 Subclause 30.7.1') if mibBuilder.loadTexts: acDot3adAggTable.setStatus('current') if mibBuilder.loadTexts: acDot3adAggTable.setDescription('A table that contains information about every Aggregator that is associated with this System.') acDot3adAggEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggInstanceIndex")) if mibBuilder.loadTexts: acDot3adAggEntry.setStatus('current') if mibBuilder.loadTexts: acDot3adAggEntry.setDescription('A list of the Aggregator parameters. 
This is indexed by OSAP node ID - supporting one Aggregator per OSAP.') acDot3adAggNodeIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 1), AcNodeId()) if mibBuilder.loadTexts: acDot3adAggNodeIdIndex.setStatus('current') if mibBuilder.loadTexts: acDot3adAggNodeIdIndex.setDescription('The node id is the id for this specific node in the OSAP ring.') acDot3adAggInstanceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 2), AcAggInstanceIndex()) if mibBuilder.loadTexts: acDot3adAggInstanceIndex.setStatus('current') if mibBuilder.loadTexts: acDot3adAggInstanceIndex.setDescription('The instance of this aggregator within this OSAP.') acDot3adAggMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 3), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggMACAddress.setReference('IEEE 802.3 Subclause 30.7.1.1.9') if mibBuilder.loadTexts: acDot3adAggMACAddress.setStatus('current') if mibBuilder.loadTexts: acDot3adAggMACAddress.setDescription('A 6-octet read-only value carrying the individual MAC address assigned to the Aggregator.') acDot3adAggActorSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggActorSystemPriority.setReference('IEEE 802.3 Subclause 30.7.1.1.5') if mibBuilder.loadTexts: acDot3adAggActorSystemPriority.setStatus('current') if mibBuilder.loadTexts: acDot3adAggActorSystemPriority.setDescription("A 2-octet read-write value indicating the priority value associated with the Actor's System ID.") acDot3adAggActorSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 5), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggActorSystemID.setReference('IEEE 802.3 Subclause 30.7.1.1.4') if mibBuilder.loadTexts: acDot3adAggActorSystemID.setStatus('current') if mibBuilder.loadTexts: acDot3adAggActorSystemID.setDescription("A 6-octet read-write MAC address value used as a unique identifier for the System that contains this Aggregator. NOTE-From the perspective of the Link Aggregation mechanisms described in Clause 43, only a single combination of Actor's System ID and System Priority are considered, and no distinction is made between the values of these parameters for an Aggregator and the port(s) that are associated with it; i.e., the protocol is described in terms of the operation of aggregation within a single System. However, the managed objects provided for the Aggregator and the port both allow management of these parameters. The result of this is to permit a single piece of equipment to be configured by management to contain more than one System from the point of view of the operation of Link Aggregation. 
This may be of particular use in the configuration of equipment that has limited aggregation capability (see 43.6).") acDot3adAggAggregateOrIndividual = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 6), TruthValue()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggAggregateOrIndividual.setReference('IEEE 802.3 Subclause 30.7.1.1.6') if mibBuilder.loadTexts: acDot3adAggAggregateOrIndividual.setStatus('current') if mibBuilder.loadTexts: acDot3adAggAggregateOrIndividual.setDescription("A read-only Boolean value indicating whether the Aggregator represents an Aggregate (`TRUE') or an Individual link (`FALSE').") acDot3adAggActorAdminKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 7), LacpKey()).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggActorAdminKey.setReference('IEEE 802.3 Subclause 30.7.1.1.7') if mibBuilder.loadTexts: acDot3adAggActorAdminKey.setStatus('current') if mibBuilder.loadTexts: acDot3adAggActorAdminKey.setDescription('The current administrative value of the Key for the Aggregator. The administrative Key value may differ from the operational Key value for the reasons discussed in 43.6.2. This is a 16-bit, read-write value. The meaning of particular Key values is of local significance.') acDot3adAggActorOperKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 8), LacpKey()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggActorOperKey.setReference('IEEE 802.3 Subclause 30.7.1.1.8') if mibBuilder.loadTexts: acDot3adAggActorOperKey.setStatus('current') if mibBuilder.loadTexts: acDot3adAggActorOperKey.setDescription('The current operational value of the Key for the Aggregator. The administrative Key value may differ from the operational Key value for the reasons discussed in 43.6.2. This is a 16-bit read-only value. The meaning of particular Key values is of local significance.') acDot3adAggPartnerSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 9), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPartnerSystemID.setReference('IEEE 802.3 Subclause 30.7.1.1.10') if mibBuilder.loadTexts: acDot3adAggPartnerSystemID.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPartnerSystemID.setDescription('A 6-octet read-only MAC address value consisting of the unique identifier for the current protocol Partner of this Aggregator. A value of zero indicates that there is no known Partner. If the aggregation is manually configured, this System ID value will be a value assigned by the local System.') acDot3adAggPartnerSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPartnerSystemPriority.setReference('IEEE 802.3 Subclause 30.7.1.1.11') if mibBuilder.loadTexts: acDot3adAggPartnerSystemPriority.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPartnerSystemPriority.setDescription("A 2-octet read-only value that indicates the priority value associated with the Partner's System ID. 
If the aggregation is manually configured, this System Priority value will be a value assigned by the local System.") acDot3adAggPartnerOperKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 11), LacpKey()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPartnerOperKey.setReference('IEEE 802.3 Subclause 30.7.1.1.12') if mibBuilder.loadTexts: acDot3adAggPartnerOperKey.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPartnerOperKey.setDescription("The current operational value of the Key for the Aggregator's current protocol Partner. This is a 16-bit read-only value. If the aggregation is manually configured, this Key value will be a value assigned by the local System.") acDot3adAggCollectorMaxDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggCollectorMaxDelay.setReference('IEEE 802.3 Subclause 30.7.1.1.32') if mibBuilder.loadTexts: acDot3adAggCollectorMaxDelay.setStatus('current') if mibBuilder.loadTexts: acDot3adAggCollectorMaxDelay.setDescription('The value of this 16-bit read-write attribute defines the maximum delay, in tens of microseconds, that may be imposed by the Frame Collector between receiving a frame from an Aggregator Parser, and either delivering the frame to its MAC Client or discarding the frame (see 43.2.3.1.1).') acDot3adAggPortListTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 2), ) if mibBuilder.loadTexts: acDot3adAggPortListTable.setReference('IEEE 802.3 Subclause 30.7.1.1.30') if mibBuilder.loadTexts: acDot3adAggPortListTable.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortListTable.setDescription('A table that contains a list of all the ports associated with each Aggregator.') acDot3adAggPortListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 2, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggInstanceIndex")) if mibBuilder.loadTexts: acDot3adAggPortListEntry.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortListEntry.setDescription('A list of the ports associated with a given Aggregator. This is indexed by OSAP node ID - supporting one Aggregator per OSAP.') acDot3adAggPortListPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 2, 1, 1), PortList()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortListPorts.setReference('IEEE 802.3 Subclause 30.7.1.1.30') if mibBuilder.loadTexts: acDot3adAggPortListPorts.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortListPorts.setDescription('The complete set of ports currently associated with this Aggregator. Each bit set in this list represents an Actor Port member of this Link Aggregation.') acDot3adAggPortTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1), ) if mibBuilder.loadTexts: acDot3adAggPortTable.setReference('IEEE 802.3 Subclause 30.7.2') if mibBuilder.loadTexts: acDot3adAggPortTable.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortTable.setDescription('A table that contains Link Aggregation Control configuration information about every Aggregation Port associated with this device. 
A row appears in this table for each physical port.') acDot3adAggPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggPortNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortSlotIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortPortIndex")) if mibBuilder.loadTexts: acDot3adAggPortEntry.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortEntry.setDescription('A list of Link Aggregation Control configuration parameters for each Aggregation Port on this device.') acDot3adAggPortNodeIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 1), AcNodeId()) if mibBuilder.loadTexts: acDot3adAggPortNodeIdIndex.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortNodeIdIndex.setDescription('The node id is the id for this specific node in the OSAP ring.') acDot3adAggPortSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 2), AcSlotNumber()) if mibBuilder.loadTexts: acDot3adAggPortSlotIndex.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortSlotIndex.setDescription('The slot number within the chassis where this module entry resides.') acDot3adAggPortPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 3), AcPortNumber()) if mibBuilder.loadTexts: acDot3adAggPortPortIndex.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPortIndex.setDescription('The port number on the module which represents this instance of an Ethernet access port.') acDot3adAggPortActorSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortActorSystemPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.2') if mibBuilder.loadTexts: acDot3adAggPortActorSystemPriority.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortActorSystemPriority.setDescription("A 2-octet read-write value used to define the priority value associated with the Actor's System ID.") acDot3adAggPortActorSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 5), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortActorSystemID.setReference('IEEE 802.3 Subclause 30.7.2.1.3') if mibBuilder.loadTexts: acDot3adAggPortActorSystemID.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortActorSystemID.setDescription('A 6-octet read-only MAC address value that defines the value of the System ID for the System that contains this Aggregation Port.') acDot3adAggPortActorAdminKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 6), LacpKey()).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortActorAdminKey.setReference('IEEE 802.3 Subclause 30.7.2.1.4') if mibBuilder.loadTexts: acDot3adAggPortActorAdminKey.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortActorAdminKey.setDescription('The current administrative value of the Key for the Aggregation Port. This is a 16-bit read-write value. 
The meaning of particular Key values is of local significance.') acDot3adAggPortActorOperKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 7), LacpKey()).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortActorOperKey.setReference('IEEE 802.3 Subclause 30.7.2.1.5') if mibBuilder.loadTexts: acDot3adAggPortActorOperKey.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortActorOperKey.setDescription('The current operational value of the Key for the Aggregation Port. This is a 16-bit read-only value. The meaning of particular Key values is of local significance.') acDot3adAggPortPartnerAdminSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.6') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemPriority.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemPriority.setDescription("A 2-octet read-write value used to define the administrative value of priority associated with the Partner's System ID. The assigned value is used, along with the value of aAggPortPartnerAdminSystemID, aAggPortPartnerAdminKey, aAggPortPartnerAdminPort, and aAggPortPartnerAdminPortPriority, in order to achieve manually configured aggregation.") acDot3adAggPortPartnerOperSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.7') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemPriority.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemPriority.setDescription("A 2-octet read-only value indicating the operational value of priority associated with the Partner's System ID. The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminSystemPriority if there is no protocol Partner.") acDot3adAggPortPartnerAdminSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 10), MacAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemID.setReference('IEEE 802.3 Subclause 30.7.2.1.8') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemID.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemID.setDescription("A 6-octet read-write MACAddress value representing the administrative value of the Aggregation Port's protocol Partner's System ID. The assigned value is used, along with the value of aAggPortPartnerAdminSystemPriority, aAggPortPartnerAdminKey, aAggPortPartnerAdminPort, and aAggPortPartnerAdminPortPriority, in order to achieve manually configured aggregation.") acDot3adAggPortPartnerOperSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 11), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemID.setReference('IEEE 802.3 Subclause 30.7.2.1.9') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemID.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemID.setDescription("A 6-octet read-only MACAddress value representing the current value of the Aggregation Port's protocol Partner's System ID. A value of zero indicates that there is no known protocol Partner. 
The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminSystemID if there is no protocol Partner.") acDot3adAggPortPartnerAdminKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 12), LacpKey()).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminKey.setReference('IEEE 802.3 Subclause 30.7.2.1.10') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminKey.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminKey.setDescription('The current administrative value of the Key for the protocol Partner. This is a 16-bit read-write value. The assigned value is used, along with the value of aAggPortPartnerAdminSystemPriority, aAggPortPartnerAdminSystemID, aAggPortPartnerAdminPort, and aAggPortPartnerAdminPortPriority, in order to achieve manually configured aggregation.') acDot3adAggPortPartnerOperKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 13), LacpKey()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortPartnerOperKey.setReference('IEEE 802.3 Subclause 30.7.2.1.11') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperKey.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperKey.setDescription('The current operational value of the Key for the protocol Partner. The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminKey if there is no protocol Partner. This is a 16-bit read-only value.') acDot3adAggPortSelectedAggID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 14), AcAggInstanceValue()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortSelectedAggID.setReference('IEEE 802.3 Subclause 30.7.2.1.12') if mibBuilder.loadTexts: acDot3adAggPortSelectedAggID.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortSelectedAggID.setDescription('The identifier value of the Aggregator that this Aggregation Port has currently selected. Zero indicates that the Aggregation Port has not selected an Aggregator, either because it is in the process of detaching from an Aggregator or because there is no suitable Aggregator available for it to select. This value is read-only.') acDot3adAggPortAttachedAggID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 15), AcAggInstanceValue()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortAttachedAggID.setReference('IEEE 802.3 Subclause 30.7.2.1.13') if mibBuilder.loadTexts: acDot3adAggPortAttachedAggID.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortAttachedAggID.setDescription('The identifier value of the Aggregator that this Aggregation Port is currently attached to. Zero indicates that the Aggregation Port is not currently attached to an Aggregator. This value is read-only.') acDot3adAggPortActorPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortActorPort.setReference('IEEE 802.3 Subclause 30.7.2.1.14') if mibBuilder.loadTexts: acDot3adAggPortActorPort.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortActorPort.setDescription('The port number locally assigned to the Aggregation Port. The port number is communicated in LACPDUs as the Actor_Port. 
This value is read-only.') acDot3adAggPortActorPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortActorPortPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.15') if mibBuilder.loadTexts: acDot3adAggPortActorPortPriority.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortActorPortPriority.setDescription('The priority value assigned to this Aggregation Port. This 16-bit value is read-write.') acDot3adAggPortPartnerAdminPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPort.setReference('IEEE 802.3 Subclause 30.7.2.1.16') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPort.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPort.setDescription('The current administrative value of the port number for the protocol Partner. This is a 16-bit read-write value. The assigned value is used, along with the value of aAggPortPartnerAdminSystemPriority, aAggPortPartnerAdminSystemID, aAggPortPartnerAdminKey, and aAggPortPartnerAdminPortPriority, in order to achieve manually configured aggregation.') acDot3adAggPortPartnerOperPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPort.setReference('IEEE 802.3 Subclause 30.7.2.1.17') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPort.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPort.setDescription("The operational port number assigned to this Aggregation Port by the Aggregation Port's protocol Partner. The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminPort if there is no protocol Partner. This 16-bit value is read-only.") acDot3adAggPortPartnerAdminPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPortPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.18') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPortPriority.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPortPriority.setDescription('The current administrative value of the port priority for the protocol Partner. This is a 16-bit read-write value. The assigned value is used, along with the value of aAggPortPartnerAdminSystemPriority, aAggPortPartnerAdminSystemID, aAggPortPartnerAdminKey, and aAggPortPartnerAdminPort, in order to achieve manually configured aggregation.') acDot3adAggPortPartnerOperPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPortPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.19') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPortPriority.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPortPriority.setDescription('The priority value assigned to this Aggregation Port by the Partner. 
The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminPortPriority if there is no protocol Partner. This 16-bit value is read-only.') acDot3adAggPortActorAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 22), LacpState()).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortActorAdminState.setReference('IEEE 802.3 Subclause 30.7.2.1.20') if mibBuilder.loadTexts: acDot3adAggPortActorAdminState.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortActorAdminState.setDescription('A string of 8 bits, corresponding to the administrative values of Actor_State (43.4.2) as transmitted by the Actor in LACPDUs. The first bit corresponds to bit 0 of Actor_State (LACP_Activity), the second bit corresponds to bit 1 (LACP_Timeout), the third bit corresponds to bit 2 (Aggregation), the fourth bit corresponds to bit 3 (Synchronization), the fifth bit corresponds to bit 4 (Collecting), the sixth bit corresponds to bit 5 (Distributing), the seventh bit corresponds to bit 6 (Defaulted), and the eighth bit corresponds to bit 7 (Expired). These values allow administrative control over the values of LACP_Activity, LACP_Timeout and Aggregation. This attribute value is read-write.') acDot3adAggPortActorOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 23), LacpState()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortActorOperState.setReference('IEEE 802.3 Subclause 30.7.2.1.21') if mibBuilder.loadTexts: acDot3adAggPortActorOperState.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortActorOperState.setDescription('A string of 8 bits, corresponding to the current operational values of Actor_State as transmitted by the Actor in LACPDUs. The bit allocations are as defined in 30.7.2.1.20. This attribute value is read-only.') acDot3adAggPortPartnerAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 24), LacpState()).setMaxAccess("readwrite") if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminState.setReference('IEEE 802.3 Subclause 30.7.2.1.22') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminState.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminState.setDescription('A string of 8 bits, corresponding to the current administrative value of Actor_State for the protocol Partner. The bit allocations are as defined in 30.7.2.1.20. This attribute value is read-write. The assigned value is used in order to achieve manually configured aggregation.') acDot3adAggPortPartnerOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 25), LacpState()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortPartnerOperState.setReference('IEEE 802.3 Subclause 30.7.2.1.23') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperState.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortPartnerOperState.setDescription('A string of 8 bits, corresponding to the current values of Actor_State in the most recently received LACPDU transmitted by the protocol Partner. The bit allocations are as defined in 30.7.2.1.20. In the absence of an active protocol Partner, this value may reflect the manually configured value aAggPortPartnerAdminState. 
This attribute value is read-only.') acDot3adAggPortAggregateOrIndividual = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 26), TruthValue()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortAggregateOrIndividual.setReference('IEEE 802.3 Subclause 30.7.2.1.24') if mibBuilder.loadTexts: acDot3adAggPortAggregateOrIndividual.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortAggregateOrIndividual.setDescription("A read-only Boolean value indicating whether the Aggregation Port is able to Aggregate (`TRUE') or is only able to operate as an Individual link (`FALSE').") acDot3adAggPortStatsTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2), ) if mibBuilder.loadTexts: acDot3adAggPortStatsTable.setReference('IEEE 802.3 Subclause 30.7.3') if mibBuilder.loadTexts: acDot3adAggPortStatsTable.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsTable.setDescription('A table that contains Link Aggregation information about every port that is associated with this device. A row appears in this table for each physical port.') acDot3adAggPortStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggPortNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortSlotIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortPortIndex")) if mibBuilder.loadTexts: acDot3adAggPortStatsEntry.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsEntry.setDescription('A list of Link Aggregation Control Protocol statistics for each port on this device.') acDot3adAggPortStatsLACPDUsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 1), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsRx.setReference('IEEE 802.3 Subclause 30.7.3.1.2') if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsRx.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsRx.setDescription('The number of valid LACPDUs received on this Aggregation Port. This value is read-only.') acDot3adAggPortStatsMarkerPDUsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsRx.setReference('IEEE 802.3 Subclause 30.7.3.1.3') if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsRx.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsRx.setDescription('The number of valid Marker PDUs received on this Aggregation Port. This value is read-only.') acDot3adAggPortStatsMarkerResponsePDUsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsRx.setReference('IEEE 802.3 Subclause 30.7.3.1.4') if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsRx.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsRx.setDescription('The number of valid Marker Response PDUs received on this Aggregation Port. 
This value is read-only.') acDot3adAggPortStatsUnknownRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortStatsUnknownRx.setReference('IEEE 802.3 Subclause 30.7.3.1.5') if mibBuilder.loadTexts: acDot3adAggPortStatsUnknownRx.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsUnknownRx.setDescription('The number of frames received that either: - carry the Slow Protocols Ethernet Type value (43B.4), but contain an unknown PDU, or: - are addressed to the Slow Protocols group MAC Address (43B.3), but do not carry the Slow Protocols Ethernet Type. This value is read-only.') acDot3adAggPortStatsIllegalRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortStatsIllegalRx.setReference('IEEE 802.3 Subclause 30.7.3.1.6') if mibBuilder.loadTexts: acDot3adAggPortStatsIllegalRx.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsIllegalRx.setDescription('The number of frames received that carry the Slow Protocols Ethernet Type value (43B.4), but contain a badly formed PDU or an illegal value of Protocol Subtype (43B.4). This value is read-only.') acDot3adAggPortStatsLACPDUsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 6), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsTx.setReference('IEEE 802.3 Subclause 30.7.3.1.7') if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsTx.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsTx.setDescription('The number of LACPDUs transmitted on this Aggregation Port. This value is read-only.') acDot3adAggPortStatsMarkerPDUsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 7), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsTx.setReference('IEEE 802.3 Subclause 30.7.3.1.8') if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsTx.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsTx.setDescription('The number of Marker PDUs transmitted on this Aggregation Port. This value is read-only.') acDot3adAggPortStatsMarkerResponsePDUsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsTx.setReference('IEEE 802.3 Subclause 30.7.3.1.9') if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsTx.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsTx.setDescription('The number of Marker Response PDUs transmitted on this Aggregation Port. This value is read-only.') acDot3adAggPortDebugTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3), ) if mibBuilder.loadTexts: acDot3adAggPortDebugTable.setReference('IEEE 802.3 Subclause 30.7.4') if mibBuilder.loadTexts: acDot3adAggPortDebugTable.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugTable.setDescription('A table that contains Link Aggregation debug information about every port that is associated with this device. 
A row appears in this table for each physical port.') acDot3adAggPortDebugEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggPortNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortSlotIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortPortIndex")) if mibBuilder.loadTexts: acDot3adAggPortDebugEntry.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugEntry.setDescription('A list of the debug parameters for a port.') acDot3adAggPortDebugRxState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("currentRx", 1), ("expired", 2), ("defaulted", 3), ("initialize", 4), ("lacpDisabled", 5), ("portDisabled", 6)))).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugRxState.setReference('IEEE 802.3 Subclause 30.7.4.1.2') if mibBuilder.loadTexts: acDot3adAggPortDebugRxState.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugRxState.setDescription("This attribute holds the value `currentRx' if the Receive state machine for the Aggregation Port is in the CURRENT state, `expired' if the Receive state machine is in the EXPIRED state, `defaulted' if the Receive state machine is in the DEFAULTED state, `initialize' if the Receive state machine is in the INITIALIZE state, `lacpDisabled' if the Receive state machine is in the LACP_DISABLED state, or `portDisabled' if the Receive state machine is in the PORT_DISABLED state. This value is read-only.") acDot3adAggPortDebugLastRxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 2), TimeTicks()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugLastRxTime.setReference('IEEE 802.3 Subclause 30.7.4.1.3') if mibBuilder.loadTexts: acDot3adAggPortDebugLastRxTime.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugLastRxTime.setDescription('The value of aTimeSinceSystemReset (F.2.1) when the last LACPDU was received by this Aggregation Port. This value is read-only.') acDot3adAggPortDebugMuxState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("detached", 1), ("waiting", 2), ("attached", 3), ("collecting", 4), ("distributing", 5), ("collectingDistributing", 6)))).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugMuxState.setReference('IEEE 802.3 Subclause 30.7.4.1.4') if mibBuilder.loadTexts: acDot3adAggPortDebugMuxState.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugMuxState.setDescription("This attribute holds the value `detached' if the Mux state machine (43.4.14) for the Aggregation Port is in the DETACHED state, `waiting' if the Mux state machine is in the WAITING state, `attached' if the Mux state machine for the Aggregation Port is in the ATTACHED state, `collecting' if the Mux state machine for the Aggregation Port is in the COLLECTING state, `distributing' if the Mux state machine for the Aggregation Port is in the DISTRIBUTING state, and `collectingDistributing' if the Mux state machine for the Aggregation Port is in the COLLECTING_DISTRIBUTING state. 
This value is read-only.") acDot3adAggPortDebugMuxReason = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 4), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugMuxReason.setReference('IEEE 802.3 Subclause 30.7.4.1.5') if mibBuilder.loadTexts: acDot3adAggPortDebugMuxReason.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugMuxReason.setDescription('A human-readable text string indicating the reason for the most recent change of Mux machine state. This value is read-only.') acDot3adAggPortDebugActorChurnState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 5), ChurnState()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnState.setReference('IEEE 802.3 Subclause 30.7.4.1.6') if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnState.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnState.setDescription("The state of the Actor Churn Detection machine (43.4.17) for the Aggregation Port. A value of `noChurn' indicates that the state machine is in either the NO_ACTOR_CHURN or the ACTOR_CHURN_MONITOR state, and `churn' indicates that the state machine is in the ACTOR_CHURN state. This value is read-only.") acDot3adAggPortDebugPartnerChurnState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 6), ChurnState()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnState.setReference('IEEE 802.3 Subclause 30.7.4.1.7') if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnState.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnState.setDescription("The state of the Partner Churn Detection machine (43.4.17) for the Aggregation Port. A value of `noChurn' indicates that the state machine is in either the NO_PARTNER_CHURN or the PARTNER_CHURN_MONITOR state, and `churn' indicates that the state machine is in the PARTNER_CHURN state. This value is read-only.") acDot3adAggPortDebugActorChurnCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 7), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnCount.setReference('IEEE 802.3 Subclause 30.7.4.1.8') if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnCount.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnCount.setDescription('Count of the number of times the Actor Churn state machine has entered the ACTOR_CHURN state. This value is read-only.') acDot3adAggPortDebugPartnerChurnCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnCount.setReference('IEEE 802.3 Subclause 30.7.4.1.9') if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnCount.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnCount.setDescription('Count of the number of times the Partner Churn state machine has entered the PARTNER_CHURN state. 
This value is read-only.') acDot3adAggPortDebugActorSyncTransitionCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 9), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugActorSyncTransitionCount.setReference('IEEE 802.3 Subclause 30.7.4.1.10') if mibBuilder.loadTexts: acDot3adAggPortDebugActorSyncTransitionCount.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugActorSyncTransitionCount.setDescription("Count of the number of times the Actor's Mux state machine (43.4.15) has entered the IN_SYNC state. This value is read-only.") acDot3adAggPortDebugPartnerSyncTransitionCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 10), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerSyncTransitionCount.setReference('IEEE 802.3 Subclause 30.7.4.1.11') if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerSyncTransitionCount.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerSyncTransitionCount.setDescription("Count of the number of times the Partner's Mux state machine (43.4.15) has entered the IN_SYNC state. This value is read-only.") acDot3adAggPortDebugActorChangeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 11), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugActorChangeCount.setReference('IEEE 802.3 Subclause 30.7.4.1.12') if mibBuilder.loadTexts: acDot3adAggPortDebugActorChangeCount.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugActorChangeCount.setDescription("Count of the number of times the Actor's perception of the LAG ID for this Aggregation Port has changed. This value is read-only.") acDot3adAggPortDebugPartnerChangeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 12), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChangeCount.setReference('IEEE 802.3 Subclause 30.7.4.1.13') if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChangeCount.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChangeCount.setDescription("Count of the number of times the Partner's perception of the LAG ID (see 43.3.6.1) for this Aggregation Port has changed. 
This value is read-only.") acDot3adAggConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2)) acDot3adAggGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1)) acDot3adAggCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 2)) acDot3adAggGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 1)).setObjects(("AC-LAG-MIB", "acDot3adAggActorSystemID"), ("AC-LAG-MIB", "acDot3adAggActorSystemPriority"), ("AC-LAG-MIB", "acDot3adAggAggregateOrIndividual"), ("AC-LAG-MIB", "acDot3adAggActorAdminKey"), ("AC-LAG-MIB", "acDot3adAggMACAddress"), ("AC-LAG-MIB", "acDot3adAggActorOperKey"), ("AC-LAG-MIB", "acDot3adAggPartnerSystemID"), ("AC-LAG-MIB", "acDot3adAggPartnerSystemPriority"), ("AC-LAG-MIB", "acDot3adAggPartnerOperKey"), ("AC-LAG-MIB", "acDot3adAggCollectorMaxDelay")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): acDot3adAggGroup = acDot3adAggGroup.setStatus('current') if mibBuilder.loadTexts: acDot3adAggGroup.setDescription('A collection of objects providing information about an aggregation.') acDot3adAggPortListGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 2)).setObjects(("AC-LAG-MIB", "acDot3adAggPortListPorts")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): acDot3adAggPortListGroup = acDot3adAggPortListGroup.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortListGroup.setDescription('A collection of objects providing information about every port in an aggregation.') acDot3adAggPortGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 3)).setObjects(("AC-LAG-MIB", "acDot3adAggPortActorSystemPriority"), ("AC-LAG-MIB", "acDot3adAggPortActorSystemID"), ("AC-LAG-MIB", "acDot3adAggPortActorAdminKey"), ("AC-LAG-MIB", "acDot3adAggPortActorOperKey"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminSystemPriority"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperSystemPriority"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminSystemID"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperSystemID"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminKey"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperKey"), ("AC-LAG-MIB", "acDot3adAggPortSelectedAggID"), ("AC-LAG-MIB", "acDot3adAggPortAttachedAggID"), ("AC-LAG-MIB", "acDot3adAggPortActorPort"), ("AC-LAG-MIB", "acDot3adAggPortActorPortPriority"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminPort"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperPort"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminPortPriority"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperPortPriority"), ("AC-LAG-MIB", "acDot3adAggPortActorAdminState"), ("AC-LAG-MIB", "acDot3adAggPortActorOperState"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminState"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperState"), ("AC-LAG-MIB", "acDot3adAggPortAggregateOrIndividual")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): acDot3adAggPortGroup = acDot3adAggPortGroup.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortGroup.setDescription('A collection of objects providing information about every port in an aggregation.') acDot3adAggPortStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 4)).setObjects(("AC-LAG-MIB", "acDot3adAggPortStatsLACPDUsRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsMarkerPDUsRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsMarkerResponsePDUsRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsUnknownRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsIllegalRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsLACPDUsTx"), ("AC-LAG-MIB", "acDot3adAggPortStatsMarkerPDUsTx"), ("AC-LAG-MIB", "acDot3adAggPortStatsMarkerResponsePDUsTx")) if 
getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): acDot3adAggPortStatsGroup = acDot3adAggPortStatsGroup.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortStatsGroup.setDescription('A collection of objects providing information about every port in an aggregation.') acDot3adAggPortDebugGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 5)).setObjects(("AC-LAG-MIB", "acDot3adAggPortDebugRxState"), ("AC-LAG-MIB", "acDot3adAggPortDebugLastRxTime"), ("AC-LAG-MIB", "acDot3adAggPortDebugMuxState"), ("AC-LAG-MIB", "acDot3adAggPortDebugMuxReason"), ("AC-LAG-MIB", "acDot3adAggPortDebugActorChurnState"), ("AC-LAG-MIB", "acDot3adAggPortDebugPartnerChurnState"), ("AC-LAG-MIB", "acDot3adAggPortDebugActorChurnCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugPartnerChurnCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugActorSyncTransitionCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugPartnerSyncTransitionCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugActorChangeCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugPartnerChangeCount")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): acDot3adAggPortDebugGroup = acDot3adAggPortDebugGroup.setStatus('current') if mibBuilder.loadTexts: acDot3adAggPortDebugGroup.setDescription('A collection of objects providing debug information about every aggregated port.') acDot3adTablesLastChangedGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 1, 6)).setObjects(("AC-LAG-MIB", "acDot3adTablesLastChanged")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): acDot3adTablesLastChangedGroup = acDot3adTablesLastChangedGroup.setStatus('current') if mibBuilder.loadTexts: acDot3adTablesLastChangedGroup.setDescription('A collection of objects providing information about the time of changes to the configuration of aggregations and their ports.') acDot3adAggCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 2, 1)).setObjects(("AC-LAG-MIB", "acDot3adAggGroup"), ("AC-LAG-MIB", "acDot3adAggPortGroup"), ("AC-LAG-MIB", "acDot3adTablesLastChangedGroup"), ("AC-LAG-MIB", "acDot3adAggPortListGroup"), ("AC-LAG-MIB", "acDot3adAggPortStatsGroup"), ("AC-LAG-MIB", "acDot3adAggPortDebugGroup")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): acDot3adAggCompliance = acDot3adAggCompliance.setStatus('current') if mibBuilder.loadTexts: acDot3adAggCompliance.setDescription('The compliance statement for device support of Link Aggregation.') mibBuilder.exportSymbols("AC-LAG-MIB", acDot3adAggPortAggregateOrIndividual=acDot3adAggPortAggregateOrIndividual, acDot3adAggNodeIdIndex=acDot3adAggNodeIdIndex, acDot3adAggPortStatsMarkerPDUsTx=acDot3adAggPortStatsMarkerPDUsTx, acDot3adAggPortActorSystemID=acDot3adAggPortActorSystemID, acDot3adAggPortDebugMuxReason=acDot3adAggPortDebugMuxReason, acDot3adAggPortTable=acDot3adAggPortTable, acDot3adAggPartnerSystemPriority=acDot3adAggPartnerSystemPriority, acDot3adAggPortActorPort=acDot3adAggPortActorPort, acDot3adAggPortActorAdminKey=acDot3adAggPortActorAdminKey, acDot3adAggTable=acDot3adAggTable, PYSNMP_MODULE_ID=acLagMIB, acDot3adAggPortDebugActorChangeCount=acDot3adAggPortDebugActorChangeCount, acDot3adAggPortDebugGroup=acDot3adAggPortDebugGroup, acDot3adAggPortDebugMuxState=acDot3adAggPortDebugMuxState, acDot3adAggPortDebugActorChurnState=acDot3adAggPortDebugActorChurnState, acDot3adAggGroup=acDot3adAggGroup, acDot3adAggPortActorOperKey=acDot3adAggPortActorOperKey, acDot3adAggPortPortIndex=acDot3adAggPortPortIndex, acDot3adAggInstanceIndex=acDot3adAggInstanceIndex, 
acDot3adAggPortStatsTable=acDot3adAggPortStatsTable, acDot3adAggPortStatsMarkerResponsePDUsTx=acDot3adAggPortStatsMarkerResponsePDUsTx, acDot3adAggActorSystemID=acDot3adAggActorSystemID, acDot3adAggPortActorPortPriority=acDot3adAggPortActorPortPriority, acDot3adAggPortDebugLastRxTime=acDot3adAggPortDebugLastRxTime, acDot3adAggPortDebugActorSyncTransitionCount=acDot3adAggPortDebugActorSyncTransitionCount, acDot3adAggCompliances=acDot3adAggCompliances, acDot3adAggActorSystemPriority=acDot3adAggActorSystemPriority, acDot3adAggCompliance=acDot3adAggCompliance, acDot3adAggPortActorOperState=acDot3adAggPortActorOperState, ChurnState=ChurnState, AcAggInstanceIndex=AcAggInstanceIndex, acDot3adAggPortPartnerAdminPort=acDot3adAggPortPartnerAdminPort, acDot3adAggPortPartnerOperState=acDot3adAggPortPartnerOperState, acDot3adAggPortSlotIndex=acDot3adAggPortSlotIndex, acDot3adAggPortPartnerAdminKey=acDot3adAggPortPartnerAdminKey, acLagMIB=acLagMIB, lagMIBObjects=lagMIBObjects, acDot3adAggPort=acDot3adAggPort, acDot3adAggPortPartnerAdminSystemID=acDot3adAggPortPartnerAdminSystemID, LacpState=LacpState, acDot3adAggPortPartnerOperPort=acDot3adAggPortPartnerOperPort, acDot3adAggConformance=acDot3adAggConformance, acDot3adAggPortStatsEntry=acDot3adAggPortStatsEntry, acDot3adAggPortDebugPartnerChurnCount=acDot3adAggPortDebugPartnerChurnCount, acDot3adAggPortDebugPartnerChangeCount=acDot3adAggPortDebugPartnerChangeCount, acDot3adAggPortPartnerOperKey=acDot3adAggPortPartnerOperKey, acDot3adAggAggregateOrIndividual=acDot3adAggAggregateOrIndividual, acDot3adAggActorOperKey=acDot3adAggActorOperKey, acDot3adAggPartnerOperKey=acDot3adAggPartnerOperKey, AcAggInstanceValue=AcAggInstanceValue, acDot3adAggPortDebugPartnerSyncTransitionCount=acDot3adAggPortDebugPartnerSyncTransitionCount, acDot3adAggPortAttachedAggID=acDot3adAggPortAttachedAggID, acDot3adAggEntry=acDot3adAggEntry, acDot3adAggPortStatsLACPDUsTx=acDot3adAggPortStatsLACPDUsTx, acDot3adAggPortDebugActorChurnCount=acDot3adAggPortDebugActorChurnCount, acDot3adAggPortNodeIdIndex=acDot3adAggPortNodeIdIndex, acDot3adAggPortListGroup=acDot3adAggPortListGroup, acDot3adAggPortPartnerOperPortPriority=acDot3adAggPortPartnerOperPortPriority, acDot3adAggPortPartnerAdminPortPriority=acDot3adAggPortPartnerAdminPortPriority, acDot3adAggPortStatsGroup=acDot3adAggPortStatsGroup, acDot3adAggPortGroup=acDot3adAggPortGroup, acDot3adAggPortPartnerOperSystemID=acDot3adAggPortPartnerOperSystemID, acDot3adAggPortListEntry=acDot3adAggPortListEntry, acDot3adAggPortPartnerAdminState=acDot3adAggPortPartnerAdminState, acDot3adAggPortPartnerOperSystemPriority=acDot3adAggPortPartnerOperSystemPriority, acDot3adAggPortEntry=acDot3adAggPortEntry, acDot3adAggPortDebugEntry=acDot3adAggPortDebugEntry, acDot3adTablesLastChanged=acDot3adTablesLastChanged, acDot3adAggGroups=acDot3adAggGroups, acDot3adAggPartnerSystemID=acDot3adAggPartnerSystemID, PortList=PortList, acDot3adAggCollectorMaxDelay=acDot3adAggCollectorMaxDelay, acDot3adTablesLastChangedGroup=acDot3adTablesLastChangedGroup, acDot3adAggPortStatsUnknownRx=acDot3adAggPortStatsUnknownRx, acDot3adAggPortSelectedAggID=acDot3adAggPortSelectedAggID, LacpKey=LacpKey, acDot3adAggPortActorSystemPriority=acDot3adAggPortActorSystemPriority, acDot3adAggPortStatsMarkerPDUsRx=acDot3adAggPortStatsMarkerPDUsRx, acDot3adAggPortStatsIllegalRx=acDot3adAggPortStatsIllegalRx, acDot3adAggMACAddress=acDot3adAggMACAddress, acDot3adAggPortActorAdminState=acDot3adAggPortActorAdminState, acDot3adAggPortListPorts=acDot3adAggPortListPorts, 
acDot3adAggPortDebugTable=acDot3adAggPortDebugTable, acDot3adAggPortDebugRxState=acDot3adAggPortDebugRxState, acDot3adAgg=acDot3adAgg, acDot3adAggActorAdminKey=acDot3adAggActorAdminKey, acDot3adAggPortListTable=acDot3adAggPortListTable, acDot3adAggPortDebugPartnerChurnState=acDot3adAggPortDebugPartnerChurnState, acDot3adAggPortPartnerAdminSystemPriority=acDot3adAggPortPartnerAdminSystemPriority, acDot3adAggPortStatsLACPDUsRx=acDot3adAggPortStatsLACPDUsRx, acDot3adAggPortStatsMarkerResponsePDUsRx=acDot3adAggPortStatsMarkerResponsePDUsRx)
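# A minimal usage sketch (not part of the generated module above): assuming the
# module has been saved as AC-LAG-MIB.py in a directory of compiled pysnmp MIBs
# (the path '/opt/mibs/compiled' is a placeholder) and that its imported
# dependencies are also available, it can be loaded and its symbols resolved.
from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
mib_builder.addMibSources(builder.DirMibSource('/opt/mibs/compiled'))
mib_builder.loadModules('AC-LAG-MIB')

# importSymbols returns the objects registered by exportSymbols() above.
(actorPort,) = mib_builder.importSymbols('AC-LAG-MIB', 'acDot3adAggPortActorPort')
print(actorPort.getName())  # (1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 16)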
python
import torch import torch.optim as optim device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print(device) import sys sys.path.append("PATH") from Models.MATCH.MATCH import MATCH from Models.MATCH.functions import (get_tensors, augment, format_output, CE_loss) from Models.metrics import (AUC, Brier) from Simulation.data_simulation_base import simulate_JM_base from Simulation.data_simulation_nonPH import simulate_JM_nonPH import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler pd.options.mode.chained_assignment = None import pickle import time start = time.time() n_sim = 2 I = 1000 obstime = [0,1,2,3,4,5,6,7,8,9,10] landmark_times = [1,2,3,4,5] pred_windows = [1,2,3] AUC_array = np.zeros((n_sim, len(landmark_times), len(pred_windows))) iAUC_array = np.zeros((n_sim, len(landmark_times))) true_AUC_array = np.zeros((n_sim, len(landmark_times), len(pred_windows))) true_iAUC_array = np.zeros((n_sim, len(landmark_times))) BS_array = np.zeros((n_sim, len(landmark_times), len(pred_windows))) iBS_array = np.zeros((n_sim, len(landmark_times))) true_BS_array = np.zeros((n_sim, len(landmark_times), len(pred_windows))) true_iBS_array = np.zeros((n_sim, len(landmark_times))) for i_sim in range(n_sim): if i_sim % 10 == 0: print("i_sim:",i_sim) np.random.seed(i_sim) data_all = simulate_JM_base(I=I, obstime=obstime, opt="none", seed=i_sim) data = data_all[data_all.obstime < data_all.time] ## split train/test random_id = range(I) #np.random.permutation(range(I)) train_id = random_id[0:int(0.7*I)] test_id = random_id[int(0.7*I):I] train_data = data[data["id"].isin(train_id)] test_data = data[data["id"].isin(test_id)] ## Scale data using Min-Max Scaler minmax_scaler = MinMaxScaler(feature_range=(-1,1)) train_data.loc[:,["X1","X2","Y1","Y2","Y3"]] = minmax_scaler.fit_transform(train_data.loc[:,["X1","X2","Y1","Y2","Y3"]]) test_data.loc[:,["X1","X2","Y1","Y2","Y3"]] = minmax_scaler.transform(test_data.loc[:,["X1","X2","Y1","Y2","Y3"]]) train_long, train_base, train_mask, e_train, t_train, train_obs_time = get_tensors(train_data.copy()) # for BS ## Train model torch.manual_seed(0) out_len = 4 model = MATCH(3,2, out_len) model = model.train() optimizer = optim.Adam(model.parameters()) n_epoch = 25 batch_size = 32 test_long, test_base, test_mask, e_test, t_test, test_obs_time = get_tensors(test_data.copy()) test_long, test_base, test_mask, e_test, t_test, subjid_test = augment( test_long, test_base, test_mask, e_test, t_test) loss_values = [] loss_test = [] for epoch in range(n_epoch): running_loss = 0 train_id = np.random.permutation(train_id) for batch in range(0, len(train_id), batch_size): optimizer.zero_grad() indices = train_id[batch:batch+batch_size] batch_data = train_data[train_data["id"].isin(indices)] batch_long, batch_base, batch_mask, batch_e, batch_t, obs_time = get_tensors(batch_data.copy()) batch_long, batch_base, batch_mask, batch_e, batch_t, subjid = augment( batch_long, batch_base, batch_mask, batch_e, batch_t) if len(indices)>1: #drop if last batch size is 1 yhat_surv = torch.softmax(model(batch_long, batch_base, batch_mask), dim=1) s_filter, e_filter = format_output(obs_time, batch_mask, batch_t, batch_e, out_len) loss = CE_loss(yhat_surv, s_filter, e_filter) loss.backward() optimizer.step() running_loss += loss yhat_surv_test = torch.softmax(model(test_long, test_base, test_mask), dim=1) s_filter_t, e_filter_t = format_output(test_obs_time, test_mask, t_test, e_test, out_len) loss_t = CE_loss(yhat_surv_test, 
s_filter_t, e_filter_t) loss_test.append(loss_t.tolist()) loss_values.append(running_loss.tolist()) plt.plot((loss_values-np.min(loss_values))/(np.max(loss_values)-np.min(loss_values)), 'b-') plt.plot((loss_test-np.min(loss_test))/(np.max(loss_test)-np.min(loss_test)), 'g-') for LT_index, LT in enumerate(landmark_times): pred_times = [x+LT for x in pred_windows] # Only keep subjects with survival time > landmark time tmp_data = test_data.loc[test_data["time"]>LT,:] tmp_id = np.unique(tmp_data["id"].values) tmp_all = data_all.loc[data_all["id"].isin(tmp_id),:] # Only keep longitudinal observations <= landmark time tmp_data = tmp_data.loc[tmp_data["obstime"]<=LT,:] true_prob_tmp = tmp_all.loc[tmp_all["predtime"].isin(pred_times), ["true"]].values.reshape(-1,len(pred_times)) true_prob_LT = tmp_all.loc[tmp_all["predtime"]==LT, ["true"]].values true_prob_tmp = true_prob_tmp / true_prob_LT tmp_long, tmp_base, tmp_mask, e_tmp, t_tmp, obs_time = get_tensors(tmp_data.copy()) model = model.eval() surv_pred = torch.softmax(model(tmp_long, tmp_base, tmp_mask), dim=1) surv_pred = surv_pred.detach().numpy() surv_pred = surv_pred[:,::-1].cumsum(axis=1)[:,::-1] surv_pred = surv_pred[:,1:(out_len+1)] auc, iauc = AUC(surv_pred, e_tmp.numpy(), t_tmp.numpy(), np.array(pred_times)) AUC_array[i_sim, LT_index, :] = auc iAUC_array[i_sim, LT_index] = iauc auc, iauc = AUC(true_prob_tmp, np.array(e_tmp), np.array(t_tmp), np.array(pred_times)) true_AUC_array[i_sim, LT_index, :] = auc true_iAUC_array[i_sim, LT_index] = iauc bs, ibs = Brier(surv_pred, e_tmp.numpy(), t_tmp.numpy(), e_train.numpy(), t_train.numpy(), LT, np.array(pred_windows)) BS_array[i_sim, LT_index, :] = bs iBS_array[i_sim, LT_index] = ibs bs, ibs = Brier(true_prob_tmp, e_tmp.numpy(), t_tmp.numpy(), e_train.numpy(), t_train.numpy(), LT, np.array(pred_windows)) true_BS_array[i_sim, LT_index, :] = bs true_iBS_array[i_sim, LT_index] = ibs np.set_printoptions(precision=3) print("AUC:",np.nanmean(AUC_array, axis=0)) print("iAUC:",np.mean(iAUC_array, axis=0)) print("True AUC:",np.nanmean(true_AUC_array, axis=0)) print("True iAUC:",np.mean(true_iAUC_array, axis=0)) print("BS:\n", np.mean(BS_array, axis=0)) print("iBS:",np.mean(iBS_array, axis=0)) print("True BS:\n", np.mean(true_BS_array, axis=0)) print("True iBS:",np.mean(true_iBS_array, axis=0)) end = time.time() print("total time:", (end-start)/60) ''' ## save results results = {"AUC":AUC_array, "iAUC":iAUC_array, "True_AUC":true_AUC_array, "True_iAUC":true_iAUC_array, "BS":BS_array, "iBS":iBS_array, "True_BS":true_BS_array, "True_iBS":true_iBS_array} outfile = open('MATCH_results.pickle', 'wb') pickle.dump(results, outfile) outfile.close() ''' ''' ## read results infile = open('MATCH_results.pickle', 'rb') results = pickle.load(infile) infile.close '''
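# A small self-contained illustration (not part of the original script) of the
# reverse-cumulative-sum step used above, which turns the softmax output over
# discrete event-time bins into survival probabilities S(t).
import numpy as np

bin_probs = np.array([[0.10, 0.20, 0.30, 0.25, 0.15]])  # P(event in bin k); rows sum to 1
surv = bin_probs[:, ::-1].cumsum(axis=1)[:, ::-1]        # S(t_k) = sum over bins j >= k
print(surv)  # [[1.   0.9  0.7  0.4  0.15]] -- starts at 1 and is non-increasing
# The script then drops the first column (surviving past time 0 is certain),
# keeping surv[:, 1:(out_len + 1)] as the predicted survival curve.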
python
# coding=utf-8
import ctypes
import time

import jsonpath
import progressbar
import requests
import requests.packages.urllib3

headers1 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 5.1; rv:24.0) Gecko/20100101 Firefox/24.0'
}
headers2 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.69 '
                  'Safari/537.36 TheWorld 6 '
}

url1 = "https://api.github.com/repos/way-zer/ScriptAgent4MindustryExt/releases/latest"
url2 = "https://api.github.com/repos/Anuken/Mindustry/releases/latest"

# Fetch the plugin release once and reuse the parsed JSON instead of issuing
# three identical requests.
release1 = requests.get(url1, headers=headers1).json()
assets1 = release1['assets']
tag1 = release1['tag_name']

name1 = jsonpath.jsonpath(assets1, "$..name")
down1 = jsonpath.jsonpath(assets1, "$..browser_download_url")

# Indexing a list with a list of indices raises TypeError; collect the
# matching asset names and download URLs with list comprehensions instead.
zipname = [x for x in name1 if x.rfind('zip') != -1]
jarname = [x for x in name1 if x.rfind('jar') != -1]
zipdown = [x for x in down1 if x.rfind('zip') != -1]
jardown = [x for x in down1 if x.rfind('jar') != -1]

updata1 = release1['body']
# Show the plugin release notes in a message box (title translated from Chinese).
ctypes.WinDLL("user32.dll").MessageBoxW(0, updata1, "Plugin update notice", 0)
time.sleep(3)

# Fetch the Mindustry core release once as well.
release2 = requests.get(url2, headers=headers2).json()
tag2 = release2['tag_name']
assets2 = release2['assets']

name2 = jsonpath.jsonpath(assets2, "$..name")
down2 = jsonpath.jsonpath(assets2, "$..browser_download_url")

mdtname = [x for x in name2 if x.find('M') != -1]
sername = [x for x in name2 if x.find('server') != -1]
mdtdown = [x for x in down2 if x.find('M') != -1]
serdown = [x for x in down2 if x.find('server') != -1]

updata2 = release2['body']
# Show the core release notes in a message box (title translated from Chinese).
ctypes.WinDLL("user32.dll").MessageBoxW(0, updata2, "Core update notice", 0)


def DownLoad(save, url):
    """Stream `url` to the local path `save`, showing a progress bar."""
    requests.packages.urllib3.disable_warnings()
    response = requests.request("GET", url, stream=True)
    total_length = int(response.headers.get("Content-Length"))
    with open(save, 'wb') as f:
        widgets = ['Progress: ', progressbar.Percentage(), ' ',
                   progressbar.Bar(marker='#', left='[', right=']'), ' ',
                   progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]
        pbar = progressbar.ProgressBar(widgets=widgets, maxval=total_length).start()
        downloaded = 0
        for chunk in response.iter_content(chunk_size=1024):  # 1-byte chunks are far too slow
            if chunk:
                f.write(chunk)
                f.flush()
                downloaded += len(chunk)
                # update() expects the absolute progress value, not an increment
                pbar.update(min(downloaded, total_length))
        pbar.finish()
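# Illustrative invocation (an assumption, not present in the original script):
# download the first plugin jar discovered above into the working directory.
if __name__ == "__main__":
    if jarname and jardown:
        DownLoad(jarname[0], jardown[0])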
python