import torch
import torch.nn.functional as F
import numpy as np
from scipy import stats
from sklearn.cluster import MiniBatchKMeans


class GMMOutput(torch.nn.Module):
    """Base module for Gaussian-mixture outputs (shared sampling and loss logic)."""

    def __init__(self, n_components):
        super(GMMOutput, self).__init__()
        self.components = n_components

    def sample(self, x):
        """Draw one sample per input row from the predicted mixture."""
        amount = x.shape[0]
        pis, mus, sigmas = self.forward(x)
        pis = pis.detach().numpy()
        mus = mus.detach().numpy()
        sigmas = sigmas.detach().numpy()
        samples = np.zeros((amount, 2))
        to_choose_from = np.arange(self.components)
        for j, (weights, means, std_devs) in enumerate(zip(pis, mus, sigmas)):
            index = np.random.choice(to_choose_from, p=weights)
            samples[j, 1] = stats.norm.rvs(means[index], std_devs[index], size=1)
            samples[j, 0] = x[j]
            if j == amount - 1:
                break
        return samples

    @staticmethod
    def maploss(pi_mu_sigma, y, reduce=True, entropy_reg=True, alpha=2):
        """Negative log-likelihood (MAP) loss with optional entropy regularization."""
        pi, mu, sigma = pi_mu_sigma
        m = torch.distributions.Normal(loc=mu, scale=sigma)
        log_prob_y = m.log_prob(y)  # log p(y | theta)
        lp = torch.log(pi)
        log_prob_pi_y = log_prob_y + lp
        loss = -torch.logsumexp(log_prob_pi_y, dim=1)  # log(sum_i exp(x_i))
        if entropy_reg:
            entropy = -torch.sum(lp * pi, dim=1) / pi.shape[1]
            loss = loss - entropy * alpha
        if reduce:
            loss = torch.mean(loss)
            if loss.isnan():
                print("pi" + str(pi) + ":mu" + str(mu) + ":sigma" + str(sigma))
            return loss
        return loss

    @staticmethod
    def emloss(pi_mu_sigma, y, reduce=True, entropy_reg=True, alpha=2):
        """EM-style loss: responsibility-weighted joint log-probability."""
        pi, mu, sigma = pi_mu_sigma
        m = torch.distributions.Normal(loc=mu, scale=sigma)
        log_prob_y = m.log_prob(y)  # log p(y | theta)
        lp = torch.log(pi)
        log_prob_pi_y = log_prob_y + lp
        ai = F.softmax(log_prob_pi_y, dim=1)  # responsibilities (E-step)
        loss = -torch.sum(ai * log_prob_pi_y, dim=1)
        if entropy_reg:
            entropy = -torch.sum(lp * pi, dim=1) / pi.shape[1]
            loss = loss - entropy * alpha
        if reduce:
            return torch.mean(loss)
        return loss

    @staticmethod
    def loss(pi_mu_sigma, y, reduce=True, entropy_reg=False, loss_type="EM", alpha=2):
        if loss_type == "EM":
            return GMMOutput.emloss(pi_mu_sigma, y, reduce, entropy_reg, alpha=alpha)
        elif loss_type == "MAP":
            return GMMOutput.maploss(pi_mu_sigma, y, reduce, entropy_reg, alpha=alpha)
        else:
            raise Exception("Loss not implemented yet")

    def forward(self, X_train):
        return None


class GMM(GMMOutput):
    """Standalone Gaussian mixture with learnable weights, means and scales."""

    def __init__(self, n_components, pre=True, dim=1):
        super(GMM, self).__init__(n_components)
        self.pis = torch.nn.Parameter(torch.zeros((dim, self.components)))
        self.mus = torch.nn.Parameter(torch.randn(dim, n_components) * 2)
        self.sigmas = torch.nn.Parameter(torch.randn((dim, self.components)) ** 2 + 1)
        self.pre = pre  # if True, initialize means with k-means before training

    def forward(self, X_train):
        pi = F.softmax(self.pis, dim=1)
        mu = self.mus
        sigma = torch.nn.ELU()(self.sigmas) + 1.00001  # keep scales strictly positive
        return pi, mu, sigma

    def fit(self, scm, features, lr=1e-3, loss_type="EM", batch=248, epochs=2000,
            entropy_reg=False, m_step_iter=10, alpha=2):
        if self.pre:
            km = MiniBatchKMeans(self.components)
            km.fit(scm._sample(batch)[features])
            cls = km.cluster_centers_
            self.mus = torch.nn.Parameter(torch.tensor(cls.T, dtype=torch.float32))
        optim = torch.optim.AdamW([self.pis, self.mus, self.sigmas], lr=lr)
        lossap = []
        if loss_type == "MAP":
            m_step_iter = 1
        for i in range(epochs):
            smps = scm._sample(batch)
            X_train = smps[features]
            for _ in range(m_step_iter):
                pi_mu_sigma = self.forward(X_train)
                energy = GMMOutput.loss(pi_mu_sigma, X_train, entropy_reg=entropy_reg,
                                        loss_type=loss_type, alpha=alpha)
                optim.zero_grad()
                energy.backward()
                optim.step()
            lossap.append(energy.detach().item())
        return lossap


class MDN(GMMOutput):
    """Mixture density network: a feed-forward net that outputs mixture parameters."""

    def __init__(self, n_hidden, n_components, act=torch.nn.LeakyReLU()):
        super(MDN, self).__init__(n_components)
        nh = len(n_hidden)
        l = []
        for i in range(1, nh - 1):
            l.append(torch.nn.Linear(n_hidden[i - 1], n_hidden[i]))
            l.append(act)
        l = l + [torch.nn.Linear(n_hidden[nh - 2], n_hidden[nh - 1]), act]
        self.z_h = torch.nn.Sequential(*l)
        self.z_pi = torch.nn.Linear(n_hidden[-1], n_components)
        self.z_mu = torch.nn.Linear(n_hidden[-1], n_components)
        self.z_sigma = torch.nn.Linear(n_hidden[-1], n_components)

    def forward(self, x, show=False):
        z_h = self.z_h(x)
        if show:
            print(z_h)
        pi = F.softmax(self.z_pi(z_h), -1)
        mu = self.z_mu(z_h)
        sigma = torch.nn.ELU()(self.z_sigma(z_h)) + 1.00001
        # sigma = torch.exp(self.z_sigma(z_h))
        return pi, mu, sigma

    def predict(self, X_train):
        pi, mu, _ = self.forward(X_train)
        return torch.einsum("ij,ij->i", pi, mu).detach().numpy()

    def fit(self, scm, features="X", labels="Y", lr=1e-3, batch=248, epoch=300,
            loss_type="EM", m_step_iter=10, alpha=2, reg=False):
        optim = torch.optim.AdamW(self.parameters(), lr=lr)
        lossap = []
        if loss_type == "MAP":
            m_step_iter = 1
        for i in range(epoch):
            smps = scm._sample(batch)
            X_train = smps[features]
            Y_train = smps[labels]
            for _ in range(m_step_iter):
                y_h = self.forward(X_train)
                energy = GMMOutput.loss(y_h, Y_train, reduce=True, loss_type=loss_type,
                                        entropy_reg=reg, alpha=alpha)
                optim.zero_grad()
                energy.backward()
                optim.step()
            lossap.append(energy.detach().item())
        return lossap
python
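A minimal usage sketch for the MDN class above. The `scm` object passed to `fit()` is not shown in this file; the sketch assumes it only needs a `_sample(batch)` method returning a mapping with feature and label tensors, so the toy sampler below is a hypothetical stand-in, not part of the original code base.

import torch

class ToySampler:
    def _sample(self, batch):
        # heteroscedastic toy data: noise grows with |x|
        x = torch.rand(batch, 1) * 4 - 2
        y = torch.sin(3 * x) + 0.3 * torch.randn_like(x) * (x.abs() + 0.5)
        return {"X": x, "Y": y}

mdn = MDN(n_hidden=[1, 32, 32], n_components=3)
losses = mdn.fit(ToySampler(), features="X", labels="Y", epoch=200, loss_type="EM")
print(losses[-1])                      # final training loss
print(mdn.predict(torch.zeros(5, 1)))  # mixture-mean prediction at x = 0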
import numpy as np

from .estimator import Estimator


class Adaline(Estimator):
    def __init__(self, learning_rate, activation_function, loss_function,
                 loss_variation_tolerance):
        super().__init__()
        self.learning_rate = learning_rate
        self.activation_function = activation_function
        self.loss_function = loss_function
        self.loss_variation_tolerance = loss_variation_tolerance

    def train(self, x, d):
        k = len(x)
        w = np.random.rand(len(x[0]))
        epoch = 0
        while True:
            mse_before = self.loss_function(x, d, w)
            if epoch == 0:
                print(f'Epoch: {epoch}\tWeights: {w}\tLoss: {mse_before:.5f}')
                self.plot_data_x.append(epoch)
                self.plot_data_y.append(mse_before)
            for i in range(k):
                v = np.dot(np.transpose(w), x[i])
                w = np.add(w, np.multiply(x[i], self.learning_rate * (d[i] - v)))
            epoch = epoch + 1
            mse_after = self.loss_function(x, d, w)
            print(f'Epoch: {epoch}\tWeights: {w}\tLoss: {mse_after:.5f}')
            self.plot_data_x.append(epoch)
            self.plot_data_y.append(mse_after)
            if abs(mse_after - mse_before) <= self.loss_variation_tolerance:
                break
        return w

    def predict(self, x, w):
        v = np.dot(np.transpose(w), x)
        y = self.activation_function(v)
        return y

    def evaluate(self, x, d, w):
        total = len(x)
        correct = 0
        for i in range(len(x)):
            y = self.predict(x[i], w)
            if y == d[i]:
                correct = correct + 1
        accuracy = correct / total
        print(f'Accuracy: {accuracy * 100:.5f}% ({accuracy:.5f})')
        return accuracy
python
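A hedged sketch of helper functions matching the signatures the Adaline class above expects (`loss_function(x, d, w)` and `activation_function(v)`). The names are illustrative and not taken from the original repository; the `Estimator` base class is assumed to provide the `plot_data_x`/`plot_data_y` lists used in `train`.

import numpy as np

def mse_loss(x, d, w):
    # mean squared error of the linear combiner over the whole training set
    errors = [d[i] - np.dot(w, x[i]) for i in range(len(x))]
    return float(np.mean(np.square(errors)))

def signum(v):
    # bipolar step activation applied after training for classification
    return 1.0 if v >= 0.0 else -1.0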
from os import environ, path from telebot import TeleBot from RPG.bot_classes.game import Game # Импортирует все состояния игры from RPG.consts.game_states import MAIN_MENU, INVENTORY, INVENTORY_INFO, CREATE_PLAYER_MENU, PLAYER_PROFILE, \ CABIN, CAPTAIN_BRIDGE, CARGO_HOLD, COMPUTER, CREATE_SPACESHIP_MENU, ESTRAD_PORT, ESTRAD_SECURITY_SOLDIER, \ ESTRAD_COLONY, ESTRAD_TRADER, EQUIPMENT, ESTRAD_TRADER_TRADE_MENU, ESTRAD_TRADER_BUY, ESTRAD_TRADER_SELL, \ ESTRAD_FOREST_ENTRY, EQUIPMENT_WEAPON_INFO, EQUIPMENT_ARMOR_INFO, FIGHT_SYSTEM_PLAYER_TURN, \ FIGHT_SYSTEM_WEAPON_USE, ESTRAD_FOREST_FIELD, FIGHT_SYSTEM_AIM_SHOT_MENU, ESTRAD_BAR, ESTRAD_FOREST_LAKE, JOURNAL from RPG.saves.data import db_session from RPG.saves.data.games import DBGame token = environ.get('TOKEN') # Получает токен бота из конфигурации bot = TeleBot(token) db_session.global_init(path.join(path.dirname(__file__), './saves/db/games.db')) session = db_session.create_session() games = {} for game in session.query(DBGame).all(): games[game.chat_id] = Game(bot, game.chat_id, game.player_name, game.spaceship_name, game.current_location, game.state, game.player_inventory, game.player_money, game.player_hp, game.player_armor, game.player_weapon, game.player_armor_set, game.player_laser_ammo, game.fight_system_enemy, game.player_quest_items, game.fight_system_max_action_points, game.fight_system_action_points, None) for game_id in games: games[game_id].games = games @bot.message_handler(content_types=['text']) # Текстовый обработчик для состояний игры def text_handle(message): if message.chat.id in games: game = games[message.chat.id] if message.text == '/credits': # Вывод благодарностей)) game.bot.send_message(message.chat.id, 'Создатель - Кирилл Ковалёв\n' 'Огромная благодарность за помощь в тестировании проекта, развитие ' 'его концепции и создание его дизайна выражается Полине Литвинкович ' 'и Виктору Ладейщикову.') elif game.state == CREATE_PLAYER_MENU: # Регистрация пользователя, выбор имени и названия корабля game.player_creation_menu.handle(message) elif game.state == CREATE_SPACESHIP_MENU: game.spaceship_creation_menu.handle(message) elif game.state == MAIN_MENU: # Главное меню game.main_menu.handle(message) elif game.state == INVENTORY: # Инвентарь bot.send_message(message.chat.id, 'Не-а, здесь так нельзя.') elif game.state == INVENTORY_INFO: game.inventory_item_info.handle(message) elif game.state == PLAYER_PROFILE: # Профиль игрока game.player_profile.handle(message) elif game.state == JOURNAL: # Журнал заданий game.journal.handle(message) elif game.state == EQUIPMENT: # Снаряжение игрока game.equipment.handle(message) elif game.state == EQUIPMENT_WEAPON_INFO: game.equipment_weapon_info.handle(message) elif game.state == EQUIPMENT_ARMOR_INFO: game.equipment_armor_info.handle(message) elif game.state == FIGHT_SYSTEM_PLAYER_TURN: # Боевая система game.fight_system.player_turn.handle(message) elif game.state == FIGHT_SYSTEM_WEAPON_USE: game.fight_system.weapon_use_menu.handle(message) elif game.state == FIGHT_SYSTEM_AIM_SHOT_MENU: game.fight_system.aim_shot_menu.handle(message) elif game.state == CABIN: # Локация "Космический корабль" game.spaceship.cabin.handle(message) elif game.state == CAPTAIN_BRIDGE: game.spaceship.captain_bridge.handle(message) elif game.state == CARGO_HOLD: game.spaceship.cargo_hold.handle(message) elif game.state == COMPUTER: game.spaceship.computer.handle(message) elif game.state == ESTRAD_PORT: # Локация "Эстрад" game.estrad.port.handle(message) elif game.state == ESTRAD_SECURITY_SOLDIER: 
game.estrad.security_soldier.handle(message) elif game.state == ESTRAD_COLONY: # Локация "Эстрад.Колония" game.estrad.colony.handle(message) elif game.state == ESTRAD_BAR: game.estrad.colony.bar.handle(message) elif game.state == ESTRAD_TRADER: game.estrad.colony.trader.handle(message) elif game.state == ESTRAD_TRADER_TRADE_MENU: game.estrad.colony.trader.trade_menu.handle(message) elif game.state == ESTRAD_TRADER_BUY: bot.send_message(message.chat.id, 'Не-а, здесь так нельзя.') elif game.state == ESTRAD_TRADER_SELL: bot.send_message(message.chat.id, 'Не-а, здесь так нельзя.') elif game.state == ESTRAD_FOREST_ENTRY: # Локация "Эстрад.Лес" game.estrad.forest.entry.handle(message) elif game.state == ESTRAD_FOREST_FIELD: game.estrad.forest.field.handle(message) elif game.state == ESTRAD_FOREST_LAKE: game.estrad.forest.lake.handle(message) game.save(session) # Сохранение игры в базу данных elif message.text == '/start': # Обработчик команды /start, если игра ещё не начата games[message.chat.id] = Game(bot, message.chat.id, None, None, 'Личная каюта', CREATE_PLAYER_MENU, '', 500, 60, 0, '', '', 0, '', None, 1, 1, games) games[message.chat.id].player_creation_menu.start(message) game = games[message.chat.id] game.save(session) @bot.callback_query_handler(func=lambda call: True) # Call обработчик для состояний игры def callback_handle(call): game = games[call.message.chat.id] if game.state == INVENTORY: # Инвентарь game.inventory.handle(call) elif game.state == ESTRAD_TRADER_BUY: # Торговец из локации "Эстрад.Колония" game.estrad.colony.trader.trade_menu.handle_buy(call) elif game.state == ESTRAD_TRADER_SELL: game.estrad.colony.trader.trade_menu.handle_sell(call) game.save(session) bot.polling(none_stop=True)
python
import datetime import pandas as pd import numpy as np from rest_framework.generics import get_object_or_404 from rest_framework.response import Response from rest_framework.views import APIView from analytics.events.utils.dataframe_builders import SupplementEventsDataframeBuilder, SleepActivityDataframeBuilder, \ ProductivityLogEventsDataframeBuilder from betterself.utils.api_utils import get_api_value_formatted from constants import VERY_PRODUCTIVE_TIME_LABEL from betterself.utils.date_utils import get_current_date_years_ago from events.models import SupplementLog, SleepLog, DailyProductivityLog from supplements.models import Supplement class SupplementAnalyticsMixin(object): @classmethod def _get_analytics_dataframe(cls, user, supplement_uuid): supplement = get_object_or_404(Supplement, uuid=supplement_uuid, user=user) supplement_series = cls._get_daily_supplement_events_series_last_year(user, supplement) sleep_series = cls._get_sleep_series_last_year(user) productivity_series = cls._get_productivity_series_last_year(user) # if either sleep or productivity are empty, create an empty series that is timezone # aware (hence, matching the supplement index) if sleep_series.empty: sleep_series = pd.Series(index=supplement_series.index) if productivity_series.empty: productivity_series = pd.Series(index=supplement_series.index) dataframe_details = { 'supplement': supplement_series, 'sleep': sleep_series, 'productivity': productivity_series } dataframe = pd.DataFrame(dataframe_details) return dataframe @staticmethod def _get_daily_supplement_events_series_last_year(user, supplement): # TODO - This may serve better as a supplement fetcher mixin """ :param user: :param supplement: :return: TimeSeries data of how many of that particular supplement was taken that day """ start_date = get_current_date_years_ago(1) supplement_events = SupplementLog.objects.filter(user=user, supplement=supplement, time__date__gte=start_date) builder = SupplementEventsDataframeBuilder(supplement_events) try: series = builder.get_flat_daily_dataframe()[supplement.name] except KeyError: # KeyError means it doesn't exist, so create an index that can be used for everything else date_range_index = pd.date_range(start=start_date, end=datetime.date.today(), tz=user.pytz_timezone) series = pd.Series(index=date_range_index) return series @staticmethod def _get_sleep_series_last_year(user): """ :param user: :return: Series data of how much sleep that person has gotten minutes """ start_date = get_current_date_years_ago(1) sleep_events = SleepLog.objects.filter(user=user, start_time__date__gte=start_date) builder = SleepActivityDataframeBuilder(sleep_events) series = builder.get_sleep_history_series() # anytime sleep is actually set at zero, the value should be NaN series[series == 0] = np.NaN return series @staticmethod def _get_productivity_series_last_year(user): start_date = get_current_date_years_ago(1) logs = DailyProductivityLog.objects.filter(user=user, date__gte=start_date) builder = ProductivityLogEventsDataframeBuilder(logs) try: series = builder.get_flat_daily_dataframe()[VERY_PRODUCTIVE_TIME_LABEL] except KeyError: return pd.Series() return series class SupplementAnalyticsSummary(APIView, SupplementAnalyticsMixin): def get(self, request, supplement_uuid): dataframe = self._get_analytics_dataframe(request.user, supplement_uuid) supplement_series = dataframe['supplement'] # i find a week is generally the best analysis to use for correlation, otherwise # you have odd days like sunday when everyone is lazy and mondays 
when everyone is trying # to do as much as possible interfering with correlations dataframe_rolling_week = dataframe.rolling(window=7, min_periods=1).sum() supplement_correlation_series = dataframe_rolling_week.corr()['supplement'] # TODO - What should happen if any of these results are null / none? productivity_correlation_value = supplement_correlation_series['productivity'] sleep_correlation_value = supplement_correlation_series['sleep'] most_taken_value = supplement_series.max() # there are multi possibilities that the most caffeine was ever drank most_taken_dates = supplement_series[supplement_series == most_taken_value].index most_taken_dates = [item.isoformat() for item in most_taken_dates] # order by time because we don't really care about create time, rather the time the event is representing supplement = get_object_or_404(Supplement, uuid=supplement_uuid, user=request.user) try: creation_date = SupplementLog.objects.filter(supplement=supplement).order_by('time').first().time. \ isoformat() except AttributeError: # no creation_date found creation_date = None results = [ get_api_value_formatted( 'productivity_correlation', productivity_correlation_value, 'Productivity Correlation' ), get_api_value_formatted( 'sleep_correlation', sleep_correlation_value, 'Sleep Correlation' ), get_api_value_formatted( 'most_taken', most_taken_value, 'Most Servings Taken (1 Day)' ), get_api_value_formatted( 'most_taken_dates', most_taken_dates, 'Most Taken Dates', data_type='list-datetime' ), get_api_value_formatted( 'creation_date', creation_date, 'Date of First Use', data_type='string-datetime' ), ] return Response(results) class SupplementSleepAnalytics(APIView, SupplementAnalyticsMixin): def get(self, request, supplement_uuid): dataframe = self._get_analytics_dataframe(request.user, supplement_uuid) index_of_supplement_taken_at_least_once = dataframe['supplement'].dropna().index dataframe_of_supplement_taken_at_least_once = dataframe.ix[index_of_supplement_taken_at_least_once] supplement_series = dataframe_of_supplement_taken_at_least_once['supplement'] most_taken_value = supplement_series.max() most_taken_dates = supplement_series[supplement_series == most_taken_value].index most_taken_dataframe = dataframe_of_supplement_taken_at_least_once.ix[most_taken_dates] results = [] most_taken_sleep_mean = most_taken_dataframe['sleep'].max() most_taken_sleep_mean = get_api_value_formatted( 'most_taken_sleep_mean', most_taken_sleep_mean, 'Mean Time Slept ({} Servings)'.format( most_taken_value)) results.append(most_taken_sleep_mean) most_taken_sleep_median = most_taken_dataframe['sleep'].median() most_taken_sleep_median = get_api_value_formatted( 'most_taken_sleep_median', most_taken_sleep_median, 'Median Time Slept ({} Servings)'.format( most_taken_value)) results.append(most_taken_sleep_median) dates_where_no_supplement_taken = dataframe['supplement'].isnull() dataframe_of_no_supplement_taken = dataframe.ix[dates_where_no_supplement_taken] median_sleep_taken_once = dataframe_of_supplement_taken_at_least_once['sleep'].median() median_sleep_taken_once = get_api_value_formatted( 'median_sleep_taken_once', median_sleep_taken_once, 'Median Time Slept (Min 1 Serving)') results.append(median_sleep_taken_once) mean_sleep_taken_once = dataframe_of_supplement_taken_at_least_once['sleep'].mean() mean_sleep_taken_once = get_api_value_formatted( 'mean_sleep_taken_once', mean_sleep_taken_once, 'Mean Time Slept (Min 1 Serving)') results.append(mean_sleep_taken_once) mean_sleep_no_supplement = 
dataframe_of_no_supplement_taken['sleep'].mean() mean_sleep_no_supplement = get_api_value_formatted( 'mean_sleep_no_supplement', mean_sleep_no_supplement, 'Mean Time Slept (0 Servings)') results.append(mean_sleep_no_supplement) median_sleep_of_no_supplement = dataframe_of_no_supplement_taken['sleep'].median() median_sleep_of_no_supplement = get_api_value_formatted( 'median_sleep_of_no_supplement', median_sleep_of_no_supplement, 'Median Time Slept (0 Servings)') results.append(median_sleep_of_no_supplement) return Response(results) class SupplementProductivityAnalytics(APIView, SupplementAnalyticsMixin): def get(self, request, supplement_uuid): dataframe = self._get_analytics_dataframe(request.user, supplement_uuid) index_of_supplement_taken_at_least_once = dataframe['supplement'].dropna().index dataframe_of_supplement_taken_at_least_once = dataframe.ix[index_of_supplement_taken_at_least_once] dates_where_no_supplement_taken = dataframe['supplement'].isnull() dataframe_of_no_supplement_taken = dataframe.ix[dates_where_no_supplement_taken] results = [] productivity_series_with_supplement = dataframe_of_supplement_taken_at_least_once['productivity'] productivity_series_without_supplement = dataframe_of_no_supplement_taken['productivity'] # no point if productivity_series_with_supplement.dropna().empty: return Response(results) most_productive_time_with_supplement_raw = productivity_series_with_supplement.max() most_productive_time_with_supplement = get_api_value_formatted( 'most_productive_time_with_supplement', most_productive_time_with_supplement_raw, 'Most Productive Time (Min 1 Serving)') results.append(most_productive_time_with_supplement) most_productive_date_with_supplement = productivity_series_with_supplement.idxmax() most_productive_date_with_supplement = get_api_value_formatted( 'most_productive_date_with_supplement', most_productive_date_with_supplement, 'Most Productive Date', 'string-datetime') results.append(most_productive_date_with_supplement) least_productive_time_with_supplement = productivity_series_with_supplement.min() least_productive_time_with_supplement = get_api_value_formatted( 'least_productive_time_with_supplement', least_productive_time_with_supplement, 'Least Productive Time (Min 1 Serving)') results.append(least_productive_time_with_supplement) least_productive_date_with_supplement = productivity_series_with_supplement.idxmin() least_productive_date_with_supplement = get_api_value_formatted( 'least_productive_date_with_supplement', least_productive_date_with_supplement, 'Least Productive Date', 'string-datetime') results.append(least_productive_date_with_supplement) median_productive_time_with_supplement = productivity_series_with_supplement.median() median_productive_time_with_supplement = get_api_value_formatted( 'median_productive_time_with_supplement', median_productive_time_with_supplement, 'Median Productive Time (Min 1 Serving)') results.append(median_productive_time_with_supplement) mean_productive_time_with_supplement = productivity_series_with_supplement.mean() mean_productive_time_with_supplement = get_api_value_formatted( 'mean_productive_time_with_supplement', mean_productive_time_with_supplement, 'Mean Productive Time (Min 1 Serving)') results.append(mean_productive_time_with_supplement) median_productive_time_without_supplement = productivity_series_without_supplement.median() median_productive_time_without_supplement = get_api_value_formatted( 'median_productive_time_without_supplement', median_productive_time_without_supplement, 'Median 
Productive Time (0 Servings)') results.append(median_productive_time_without_supplement) mean_productive_time_without_supplement = productivity_series_without_supplement.mean() mean_productive_time_without_supplement = get_api_value_formatted( 'mean_productive_time_without_supplement', mean_productive_time_without_supplement, 'Mean Productive Time (0 Servings)') results.append(mean_productive_time_without_supplement) return Response(results) class SupplementDosageAnalytics(APIView, SupplementAnalyticsMixin): def get(self, request, supplement_uuid): dataframe = self._get_analytics_dataframe(request.user, supplement_uuid) index_of_supplement_taken_at_least_once = dataframe['supplement'].dropna().index dataframe_of_supplement_taken_at_least_once = dataframe.ix[index_of_supplement_taken_at_least_once] results = [] mean_serving_size_last_365_days = dataframe['supplement'].fillna(0).mean() mean_serving_size_last_365_days = get_api_value_formatted( 'mean_serving_size_last_365_days', mean_serving_size_last_365_days, 'Mean Serving Size (All Days)') results.append(mean_serving_size_last_365_days) median_serving_size = dataframe_of_supplement_taken_at_least_once['supplement'].median() median_serving_size = get_api_value_formatted( 'median_serving_size', median_serving_size, 'Median Serving Size (Min 1 Serving)') results.append(median_serving_size) mean_serving_size = dataframe_of_supplement_taken_at_least_once['supplement'].mean() mean_serving_size = get_api_value_formatted( 'mean_serving_size', mean_serving_size, 'Mean Serving Size (Min 1 Serving)') results.append(mean_serving_size) return Response(results)
python
#!flask/bin/python # imports here import click from datetime import datetime from flask import abort, Flask, g, jsonify, request from info import info import os import sqlite3 ### app instantiation ### app = Flask(__name__) app.config.update({ 'JSON_SORT_KEYS':False, 'DATABASE':os.path.join(app.root_path, 'posts.db'), }) ### cli commands ### @app.cli.command('initdb') def init_db(): db = get_db() with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() click.echo('db started') ### database stuff ### def connect_db(): r=sqlite3.connect(app.config['DATABASE']) r.row_factory=sqlite3.Row return r def get_db(): if not hasattr(g, 'sqlite_db'): g.sqlite_db=connect_db() return g.sqlite_db ### routing ### @app.route('/') def index(): return jsonify(info) # this is not the best place to put this # the function returns based on localtime def get_timestamp(date,dateformat="%d-%m-%Y"): try: return datetime.strptime(date,dateformat).timestamp() except ValueError: abort(400) @app.route('/posts/', methods=['GET']) def posts_endpoint(): db=get_db() query='select title, author, ups, num_comments from post ' constraints=[] start_date=request.args.get('start_date') end_date=request.args.get('end_date') order=request.args.get('order') if start_date: constraints.append( 'timestamp > '+str(get_timestamp(start_date))) if end_date: constraints.append( 'timestamp < '+str(get_timestamp(end_date))) if len(constraints) > 0: query+='where '+' and '.join(constraints) if order=='ups': query+=' order by ups desc' elif order=='comments': query+=' order by num_comments desc' return jsonify([ {'title':t,'author':a,'ups':u,'comments':c} for t,a,u,c in db.execute(query) ]) @app.route('/authors/', methods=['GET']) def authors_endpoint(): db=get_db() query='select author,'+\ ' sum(ups) as total_ups,'+\ ' sum(num_comments) as total_comments'+\ ' from post'+\ ' group by author' order=request.args.get('order') if order=='ups': query+=' order by ups desc' elif order=='comments': query+=' order by num_comments desc' return jsonify([ {'author':a,'total_ups':u,'total_comments':c} for a,u,c in db.execute(query) ]) ### error handling ### @app.errorhandler(404) def page_not_found(error): return jsonify( { 'error':'this end point is not yet implemented', 'code':error.code, }) @app.errorhandler(400) def bad_request(error): return jsonify( { 'error':'double check the query parameters', 'code':error.code, }) ### teardown ### @app.teardown_appcontext def close_db(error): if hasattr(g, 'sqlite_db'): g.sqlite_db.close() ### just in case ### if __name__ == '__main__': app.run()
python
from __future__ import division from __future__ import print_function import os import random import logging from tqdm import tqdm import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable as Var import sys # IMPORT CONSTANTS from learning.treelstm.config import parse_args from learning.treelstm.dataset import QGDataset from learning.treelstm.model import DASimilarity, SimilarityTreeLSTM from learning.treelstm.trainer import Trainer from learning.treelstm.vocab import Vocab import learning.treelstm.Constants as Constants def testmain(one_dataset): global args args = parse_args() # global logger logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) if args.sparse and args.wd != 0: logger.error('Sparsity and weight decay are incompatible, pick one!') exit() logger.debug(args) args.data = 'learning/treelstm/data/lc_quad/' args.save = 'learning/treelstm/checkpoints/' torch.manual_seed(args.seed) random.seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) torch.backends.cudnn.benchmark = True if not os.path.exists(args.save): os.makedirs(args.save) dataset_vocab_file = "D:/downloads/QA/learning/treelstm/data/lc_quad/dataset.vocab" vocab = Vocab(filename=dataset_vocab_file, data=[Constants.PAD_WORD, Constants.UNK_WORD, Constants.BOS_WORD, Constants.EOS_WORD]) similarity = DASimilarity(args.mem_dim, args.hidden_dim, args.num_classes) # if args.sim == "cos": # similarity = CosSimilarity(1) # else: # similarity = DASimilarity(args.mem_dim, args.hidden_dim, args.num_classes, dropout=True) # initialize model, criterion/loss_function, optimizer model = SimilarityTreeLSTM( vocab.size(), args.input_dim, args.mem_dim, similarity, args.sparse) criterion = nn.KLDivLoss() # nn.HingeEmbeddingLoss() if args.cuda: model.cuda(), criterion.cuda() else: torch.set_num_threads(4) logger.info("number of available cores: {}".format(torch.get_num_threads())) if args.optim == 'adam': optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd) elif args.optim == 'adagrad': optimizer = optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.wd) elif args.optim == 'sgd': optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wd) checkpoint_filename = "D:\\downloads\\QA\\learning\\treelstm\\learning\\treelstm\\checkpoints\\lc_quad,epoch=15,train_loss=0.2348909229040146.pt" checkpoint = torch.load(checkpoint_filename) model.load_state_dict(checkpoint['model']) args.epochs = 1 # create trainer object for training and testing trainer = Trainer(args, model, criterion, optimizer) loss, dev_pred = trainer.test(one_dataset) return loss,dev_pred if __name__ == "__main__": testmain()
python
#!python3
# Code Challenge 02 - Word Values Part II - a simple game
# http://pybit.es/codechallenge02.html

import itertools
import random

from data import DICTIONARY, LETTER_SCORES, POUCH

NUM_LETTERS = 7


def draw_letters():
    """Pick NUM_LETTERS letters randomly. Hint: use stdlib random"""
    draw = random.sample(POUCH, NUM_LETTERS)
    return draw


def input_word(draw):
    """Ask player for a word and validate against draw.
    Use _validation(word, draw) helper."""
    word = input("Enter your word: ")
    if _validation(word, draw):
        return word


def _validation(word, draw):
    """Validations: 1) only use letters of draw, 2) valid dictionary word"""
    for w in word.upper():
        if w not in draw:
            print(f'Letter {w} not in draw')
            return False
        elif draw.count(w) < word.upper().count(w):
            print(f'Letter {w} used too many times')
            return False
    return True


# From challenge 01:
def calc_word_value(word):
    """Calc a given word value based on Scrabble LETTER_SCORES mapping"""
    return sum(LETTER_SCORES.get(char.upper(), 0) for char in word)


# Below 2 functions pass through the same 'draw' argument (smell?).
# Maybe you want to abstract this into a class?
# get_possible_dict_words and _get_permutations_draw would be instance methods.
# 'draw' would be set in the class constructor (__init__).
def get_possible_dict_words(draw):
    """Get all possible words from draw which are valid dictionary words.
    Use the _get_permutations_draw helper and DICTIONARY constant"""
    words = []
    permuts = _get_permutations_draw(draw)
    for p in permuts:
        if p.lower() in DICTIONARY:
            words.append(p)
    return words


def _get_permutations_draw(draw):
    """Helper for get_possible_dict_words to get all permutations of draw letters.
    Hint: use itertools.permutations"""
    permuts = list(''.join(h) for h in
                   (x for l in range(1, 8) for x in itertools.permutations(draw, l)))
    return permuts


# From challenge 01:
def max_word_value(words):
    """Calc the max value of a collection of words"""
    return max(words, key=calc_word_value)


def main():
    """Main game interface calling the previously defined methods"""
    draw = draw_letters()
    print('Letters drawn: {}'.format(', '.join(draw)))
    word = input_word(draw)
    word_score = calc_word_value(word)
    print('Word chosen: {} (value: {})'.format(word, word_score))
    possible_words = get_possible_dict_words(draw)
    max_word = max_word_value(possible_words)
    max_word_score = calc_word_value(max_word)
    print('Optimal word possible: {} (value: {})'.format(max_word, max_word_score))
    game_score = word_score / max_word_score * 100
    print('You scored: {:.1f}'.format(game_score))


if __name__ == "__main__":
    main()
python
# Simple text histogram: prints one dash per value falling into each fixed-size bin.
d = [3, 22, 99, 68, 34, 17, 45, 66, 58, 89, 73, 12, 92, 1, 5, 26, 91, 32, 86]
print(d, '\n')

p = len(d)
bin_size = int(input('Choose the bin_size (e.g. 9) '))

for i in range(min(d), max(d), bin_size + 1):
    print("{:>4} - {:<4}".format(i, i + bin_size), ' ', end='')
    for j in range(0, p):
        if i <= d[j] <= i + bin_size:
            print('-', end=' ')
    print('\n')
python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# OS.FreeBSD.get_vlans
# ---------------------------------------------------------------------
# Copyright (C) 2007-2011 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
"""
"""
import re

from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetvlans import IGetVlans


class Script(BaseScript):
    name = "OS.FreeBSD.get_vlans"
    interface = IGetVlans
    rx_vlan = re.compile(
        r"^\tvlan: (?P<vlanid>[1-9]\d*) parent interface: \S+", re.MULTILINE
    )

    def execute(self):
        r = []
        for match in self.rx_vlan.finditer(self.cli("ifconfig -v", cached=True)):
            r += [{"vlan_id": int(match.group("vlanid"))}]
        return r
python
import os
import pathlib

import kubectl

version = open(
    os.path.join(pathlib.Path(__file__).parent.absolute(), "../release")
).read(1024)
# version = "0.9.7"

test_namespace = "test"

clickhouse_template = "templates/tpl-clickhouse-stable.yaml"
# clickhouse_template = "templates/tpl-clickhouse-19.11.yaml"
# clickhouse_template = "templates/tpl-clickhouse-20.1.yaml"
# clickhouse_template = "templates/tpl-clickhouse-20.3.yaml"

clickhouse_version = kubectl.get_ch_version(clickhouse_template)
python
from django.apps import AppConfig


class ListingsConfig(AppConfig):
    name = 'listings'
    verbose_name = "User Listings"
python
import logging

from dojo.models import Test_Type

PARSERS = {}
# TODO remove that
SCAN_SONARQUBE_API = 'SonarQube API Import'


def register(parser_type):
    for scan_type in parser_type().get_scan_types():
        parser = parser_type()
        if scan_type.endswith('detailed'):
            parser.set_mode('detailed')
        register_parser(scan_type, parser)


def register_parser(scan_type, parser):
    logging.debug(f"register scan_type:{scan_type} with parser:{parser}")
    # check double registration or registration with an existing key
    if scan_type in PARSERS:
        raise ValueError(f"Try to register an existing parser '{scan_type}'")
    PARSERS[scan_type] = parser


def import_parser_factory(file, test, active, verified, scan_type=None):
    """Return a parser by the scan type

    This function exists only for backward compatibility
    """
    if scan_type in PARSERS:
        # create dynamically in DB
        test_type, created = Test_Type.objects.get_or_create(name=scan_type)
        if created:
            test_type.save()
        return PARSERS[scan_type]
    else:
        raise ValueError(f'Unknown Test Type {scan_type}')


def get_choices():
    res = list()
    for key in PARSERS:
        res.append((key, PARSERS[key].get_label_for_scan_types(key)))
    return tuple(res)


def requires_file(scan_type):
    if scan_type is None or scan_type not in PARSERS:
        return False
    # FIXME switch to method of the parser
    # parser = PARSERS[scan_type]
    return scan_type != SCAN_SONARQUBE_API


def handles_active_verified_statuses(scan_type):
    # FIXME switch to method of the parser
    # parser = PARSERS[scan_type]
    return scan_type in [
        'Generic Findings Import', SCAN_SONARQUBE_API, 'Qualys Scan'
    ]


import os
from inspect import isclass
from pkgutil import iter_modules
from pathlib import Path
from importlib import import_module

# iterate through the modules in the current package
package_dir = Path(__file__).resolve().parent
for (path, module_name, _) in iter_modules([package_dir]):
    # check if it's a submodule
    if os.path.isdir(os.path.join(package_dir, module_name)):
        try:
            # import the module and iterate through its attributes
            module = import_module(f"dojo.tools.{module_name}.parser")
            for attribute_name in dir(module):
                attribute = getattr(module, attribute_name)
                if isclass(attribute) and attribute_name.lower() == module_name.replace("_", "") + 'parser':
                    register(attribute)
        except Exception:
            logging.exception(f"failed to load {module_name}")
python
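A hypothetical example of a parser that the discovery loop above would pick up: a file at dojo/tools/example_tool/parser.py whose class name, lowercased, equals the module name (underscores removed) plus "parser". Only `get_scan_types` and `get_label_for_scan_types` are implied by the registry code shown; the `get_findings` method and everything else here is illustrative, not taken from the project.

class ExampleToolParser(object):
    def get_scan_types(self):
        return ["Example Tool Scan"]

    def get_label_for_scan_types(self, scan_type):
        return scan_type

    def get_findings(self, file, test):
        # parse the uploaded report and return finding objects
        return []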
from django.views import generic


class HomePage(generic.TemplateView):
    template_name = "home.html"


class FAQPage(generic.TemplateView):
    template_name = "faq.html"
python
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints


class Attention(Layer):
    def __init__(
        self,
        step_dim=65,
        W_regularizer=None,
        b_regularizer=None,
        W_constraint=None,
        b_constraint=None,
        bias=True,
        **kwargs
    ):
        self.supports_masking = True
        self.init = initializers.get("glorot_uniform")
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        self.W = self.add_weight(
            (input_shape[-1],),
            initializer=self.init,
            name="{}_W".format(self.name),
            regularizer=self.W_regularizer,
            constraint=self.W_constraint,
        )
        self.features_dim = input_shape[-1]
        if self.bias:
            self.b = self.add_weight(
                (input_shape[1],),
                initializer="zero",
                name="{}_b".format(self.name),
                regularizer=self.b_regularizer,
                constraint=self.b_constraint,
            )
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        return None

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        eij = K.reshape(
            K.dot(
                K.reshape(x, (-1, features_dim)),
                K.reshape(self.W, (features_dim, 1))
            ),
            (-1, step_dim),
        )
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        a = K.exp(eij)
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim


def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.
        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.
        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
python
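A hedged usage sketch wiring the Attention layer and the f1 metric above into a small Keras model. The vocabulary size, sequence length, and layer widths are made up for illustration; the only real constraint is that the sequence length fed into the model matches the step_dim passed to Attention.

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

maxlen = 65  # must match step_dim passed to Attention
model = Sequential([
    Embedding(input_dim=20000, output_dim=128, input_length=maxlen),
    LSTM(64, return_sequences=True),   # (batch, maxlen, 64) sequence output
    Attention(step_dim=maxlen),        # collapses the sequence to one vector
    Dense(1, activation="sigmoid"),
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=[f1])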
from .habitica_object import HabiticaObject

import attrdict


class Group(HabiticaObject):
    def __init__(self, id_str):
        """A group/party in Habitica."""
        assert False, "Not done yet!"
python
"""Revert revision foreign key Revision ID: 83f49fddbcb6 Revises: 55e1f2f5d706 Create Date: 2020-05-19 12:25:02.795675 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "83f49fddbcb6" down_revision = "55e1f2f5d706" branch_labels = None depends_on = None def upgrade(): op.add_column( "activity", sa.Column( "revised_by_id", sa.INTEGER(), autoincrement=False, nullable=True ), ) op.execute( """ UPDATE activity a set revised_by_id = a2.id FROM activity a2 where a.id = a2.revisee_id; """ ) op.drop_constraint( "activity_revisee_id_fkey", "activity", type_="foreignkey" ) op.create_foreign_key( "activity_revised_by_id_fkey", "activity", "activity", ["revised_by_id"], ["id"], ) op.create_index( "ix_activity_revised_by_id", "activity", ["revised_by_id"], unique=False, ) op.drop_index(op.f("ix_activity_revisee_id"), table_name="activity") op.drop_column("activity", "revisee_id") # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column( "activity", sa.Column( "revisee_id", sa.INTEGER(), autoincrement=False, nullable=True ), ) op.drop_constraint(None, "activity", type_="foreignkey") op.create_foreign_key( "activity_revisee_id_fkey", "activity", "activity", ["revisee_id"], ["id"], ) op.create_index( "ix_activity_revisee_id", "activity", ["revisee_id"], unique=False ) op.drop_index(op.f("ix_activity_revised_by_id"), table_name="activity") op.drop_column("activity", "revised_by_id") # ### end Alembic commands ###
python
from __future__ import division

import numpy as np
from scipy import stats

from ..errors import InvalidParamsError
from ..utils import one_row_params_array
from .base import UncertaintyBase


class NormalUncertainty(UncertaintyBase):
    id = 3
    description = "Normal uncertainty"

    @classmethod
    def validate(cls, params):
        if np.isnan(params['scale']).sum() or (params['scale'] <= 0).sum():
            raise InvalidParamsError(
                "Real, positive scale (sigma) values are required"
                " for normal uncertainties."
            )
        if np.isnan(params['loc']).sum():
            raise InvalidParamsError(
                "Real loc (mu) values are required for normal uncertainties."
            )

    @classmethod
    def random_variables(cls, params, size, seeded_random=None):
        if not seeded_random:
            seeded_random = np.random
        return seeded_random.normal(
            params['loc'],
            params['scale'],
            size=(size, params.shape[0])
        ).T

    @classmethod
    def cdf(cls, params, vector):
        vector = cls.check_2d_inputs(params, vector)
        results = np.zeros(vector.shape)
        for row in range(params.shape[0]):
            results[row, :] = stats.norm.cdf(
                vector[row, :],
                loc=params['loc'][row],
                scale=params['scale'][row]
            )
        return results

    @classmethod
    def ppf(cls, params, percentages):
        percentages = cls.check_2d_inputs(params, percentages)
        results = np.zeros(percentages.shape)
        for row in range(percentages.shape[0]):
            results[row, :] = stats.norm.ppf(
                percentages[row, :],
                loc=params['loc'][row],
                scale=params['scale'][row]
            )
        return results

    @classmethod
    @one_row_params_array
    def statistics(cls, params):
        return {
            'mean': float(params['loc']),
            'mode': float(params['loc']),
            'median': float(params['loc']),
            'lower': float(params['loc'] - 2 * params['scale']),
            'upper': float(params['loc'] + 2 * params['scale']),
        }

    @classmethod
    @one_row_params_array
    def pdf(cls, params, xs=None):
        if xs is None:
            if np.isnan(params['minimum']):
                lower = params['loc'] - params['scale'] * \
                    cls.standard_deviations_in_default_range
            else:
                lower = params['minimum']
            if np.isnan(params['maximum']):
                upper = params['loc'] + params['scale'] * \
                    cls.standard_deviations_in_default_range
            else:
                upper = params['maximum']
            xs = np.arange(
                lower,
                upper,
                (upper - lower) / cls.default_number_points_in_pdf
            )
        ys = stats.norm.pdf(xs, params['loc'], params['scale'])
        return xs, ys.reshape(ys.shape[1])
python
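A minimal usage sketch for NormalUncertainty.random_variables. Based on the field accesses above, the params argument is assumed to be a NumPy structured array with 'loc' and 'scale' fields, one row per parameter; the dtype built here is an illustration of that assumption.

import numpy as np

params = np.zeros(2, dtype=[('loc', float), ('scale', float)])
params['loc'] = [1.0, 5.0]
params['scale'] = [0.1, 2.0]

draws = NormalUncertainty.random_variables(params, size=1000)
print(draws.shape)  # (2, 1000): one row of samples per parameter row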
from .global_var import * ## Python C-like struct s2 ## from dataclasses import dataclass # Queue for FIFO from queue import SimpleQueue # To save current time from time import time # Random replacement from random import choice #---------------------------# @dataclass class PAGE: #{{{ index: int# page index virtual_a: str # Adress time: float # Enter time #}}} #---------------------------# class Memory(object) : #------------------------------------------------------------------------------------------------# def __init__( self, size, page_size, ALGORITHM ): #{{{ """ Memory Constructor """ self.m_size = size # available memory size self.page = page_size # page size self.ALGORITHM = ALGORITHM # Chosen replacement algorithm self.Wop =0 # write operations count self.Rop =0 # read operations count self.pg_fault =0 # page faults count self.access_count =0 # memory access count self.slot_Space = int( int(size)/int(page_size) ) # memory space # Selecting data structure according with replacement algorithm if ALGORITHM == PRA_LRU : self.slot = dict() # Dictionary to save time else: self.slot = [] * self.slot_Space # Memory size #}}} #------------------------------------------------------------------------------------------------# def parser( self, filepath ): #{{{ """ Parsing file with adresses """ self.filename = filepath # Opening file in read mode try: log_file = open( filepath, "r" ) except IOError: print( "Não foi possível abrir o arquivo, certifique-se de fornecer o caminho certo!\n") virtual_adress = log_file.readlines() # saving all adresses and your op's adresses = [] * len(virtual_adress) # parse only the adress # Counting W's and R's for _line_ in virtual_adress : adresses.append(_line_.split()[0]) if( _line_.split()[1].upper() == 'W' ): self.Wop += 1 else: self.Rop += 1 return adresses #}}} #------------------------------------------------------------------------------------------------# def _search_in_virtual_( self, adress ): #{{{ """ Search adress in memory slot """ # Memory access: search adress self.access_count += 1 # Dictionary case # In this case the data is a struct page and key is time. if self.ALGORITHM == PRA_LRU : for _value_ in self.slot.values(): if adress == _value_.virtual_a: return True else: # List of dataclasses case for _adress_ in self.slot : if _adress_.virtual_a == adress : return True # if loop has finished then the adress doesn't exist in memory # It's a page fault! 
self.pg_fault += 1 return False #}}} #------------------------------------------------------------------------------------------------# def FIFO( self, CACHE ): #{{{ """ First In First Out replacement """ # Queue of entrance first_order = SimpleQueue() virtualIndex =0 for i in range( len(CACHE) ): # controlling memory access if virtualIndex == self.slot_Space: if self._search_in_virtual_( CACHE[i] ) is not True : # the adress doesn't exist in memory == page fault # Put in memory in place of the first one entered in memory first_out_page = first_order.get() # removing self.slot[first_out_page.index] = PAGE( first_out_page.index, CACHE[i], time() ) # access memory: replace self.access_count += 1 # adding the page in fifo queue first_order.put( self.slot[first_out_page.index] ) #print memory status in terminal self.printer(True, CACHE[i]) else: #print memory status in terminal self.printer(False, CACHE[i]) continue else: # Empty memory if self._search_in_virtual_( CACHE[i] ) is not True : # the adress doesn't exist in memory == page fault # Put in memory self.slot.insert( virtualIndex, PAGE( virtualIndex, CACHE[i], time() ) ) # adding the page in fifo queue first_order.put( self.slot[virtualIndex] ) # increment memory index virtualIndex += 1 # access memory: replace self.access_count += 1 #print memory status in terminal self.printer(True, CACHE[i]) else: #print memory status in terminal self.printer(False, CACHE[i]) continue #}}} #------------------------------------------------------------------------------------------------# def LRU( self, CACHE ): #{{{ """ Least Recently Used replacement """ # To take time here we used time() to return the current time since epoch for i in range( len(CACHE) ): if i >= self.slot_Space : if self._search_in_virtual_( CACHE[i] ) is not True : # saving least page index least_one_key = min( self.slot.keys() ) least_one_index = self.slot[least_one_key].index # removing least page self.slot.pop( least_one_key ) # adding new page newTime = time() self.slot[newTime] = PAGE( least_one_index, CACHE[i], newTime ) # access memory: replace self.access_count += 1 #print memory status in terminal self.printer(True, CACHE[i]) else: # the adress already exist! # so we need update your time reference for epoch in self.slot.values(): if CACHE[i] == epoch.virtual_a : oldTime = epoch.time # saving new reference time newTime = time() # updating time self.slot[newTime] = self.slot.pop(oldTime) self.slot[newTime].time = newTime #print memory status in terminal self.printer(False, CACHE[i]) else: if self._search_in_virtual_( CACHE[i] ) is not True : # We need to know the least referenced to make replacement # So the DS used is is dict with epoch time as key's newTime = time() self.slot[newTime] = PAGE( i+1, CACHE[i], newTime ) # access memory: replace self.access_count += 1 #print memory status in terminal self.printer(True, CACHE[i]) else: # the adress already exist! 
# so we need update your time reference for epoch in self.slot.values(): if CACHE[i] == epoch.virtual_a : oldTime = epoch.time # saving new reference time newTime = time() # updating time self.slot[newTime] = self.slot.pop(oldTime) self.slot[newTime].time = newTime #print memory status in terminal self.printer(False, CACHE[i]) #------------------------------------------------------------------------------------------------# def RANDOM( self, CACHE ): #{{{ """ Random replacement """ for i in range( len(CACHE) ): if i >= self.slot_Space: if self._search_in_virtual_( CACHE[i] ) is not True : # Random choice to out chosen_random = choice(self.slot) # Subscript the chosen self.slot[chosen_random.index] = PAGE( chosen_random.index, CACHE[i], time() ) # access memory: replace self.access_count += 1 # Print memory status in terminal self.printer(True, CACHE[i]) else: #print memory status in terminal self.printer(False, CACHE[i]) continue else: if self._search_in_virtual_( CACHE[i] ) is not True : self.slot.append( PAGE( i, CACHE[i], time() ) ) # access memory: replace self.access_count += 1 #print memory status in terminal self.printer(True, CACHE[i]) else: #print memory status in terminal self.printer(False, CACHE[i]) continue #------------------------------------------------------------------------------------------------# def simulate( self, CACHE ): #{{{ """ Execute simulation """ if self.ALGORITHM == PRA_FIFO: self.FIFO( CACHE ) elif self.ALGORITHM == PRA_LRU: self.LRU( CACHE ) else: self.RANDOM( CACHE ) #}}} #------------------------------------------------------------------------------------------------# def printer( self, FLAG, adress ): #{{{ """ Print Memory status """ if self.ALGORITHM == PRA_LRU: for _key_ in self.slot.keys(): print( self.slot[_key_] ) print("\033[93mTIME UPDATED\033[0m") if FLAG: print("\033[92mREPLACED\033[0m") else: print("\033[94mALREADY EXIST\033[0m") print("ADRESS: {}".format(adress)) else: for slot in self.slot: print( slot ) if FLAG: print("\033[92mREPLACED\033[0m") else: print("\033[94mALREADY EXIST\033[0m") print("ADRESS: {}".format(adress)) print("\n") #{{{ #------------------------------------------------------------------------------------------------# def report( self ): #{{{ """ Print data in terminal """ print('-------- Dados sobre a simulação ---------') print("""\033[96mArquivo de entrada {}\nTamanho da memória {} KB\nTamanho da página: {} KB Tecnica de reposição: {}\nPáginas lidas: {}\nPáginas escritas: {}\nAcessos a memória: {} Page faults: {}\033[0m""".format( self.filename, self.m_size, self.page, self.ALGORITHM.upper(), self.Rop, self.Wop, self.access_count, self.pg_fault ) ) print('------------------------------------------') #}}}
python
"""Generalized Pauli matrices.""" import numpy as np from toqito.matrices import shift from toqito.matrices import clock def gen_pauli(k_1: int, k_2: int, dim: int) -> np.ndarray: r""" Produce generalized Pauli operator [WikGenPaul]_. Generates a :code:`dim`-by-:code:`dim` unitary operator. More specifically, it is the operator :math:`X^k_1*Z^k_2`, where :math:`X` and :math:`Z` are the "shift" and "clock" operators that naturally generalize the Pauli X and Z operators. These matrices span the entire space of :code:`dim`-by-:code:`dim` matrices as :code:`k_1` and :code:`k_2` range from 0 to :code:`dim-1`, inclusive. Note that the generalized Pauli operators are also known by the name of "discrete Weyl operators". [WatrousLec6]_ Examples ========== The generalized Pauli operator for :code:`k_1 = 1`, :code:`k_2 = 0` and :code:`dim = 2` is given as the standard Pauli-X matrix .. math:: G_{1, 0, 2} = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}. This can be obtained in :code:`toqito` as follows. >>> from toqito.matrices import gen_pauli >>> dim = 2 >>> k_1 = 1 >>> k_2 = 0 >>> gen_pauli(k_1, k_2, dim) [[0.+0.j, 1.+0.j], [1.+0.j, 0.+0.j]]) The generalized Pauli matrix :code:`k_1 = 1`, :code:`k_2 = 1`, and :code:`dim = 2` is given as the standard Pauli-Y matrix .. math:: G_{1, 1, 2} = \begin{pmatrix} 0 & -1 \\ 1 & 0 \end{pmatrix}. This can be obtained in :code:`toqito` as follows.` >>> from toqito.matrices import gen_pauli >>> dim = 2 >>> k_1 = 1 >>> k_2 = 1 >>> gen_pauli(k_1, k_2, dim) [[ 0.+0.0000000e+00j, -1.+1.2246468e-16j], [ 1.+0.0000000e+00j, 0.+0.0000000e+00j]]) References ========== .. [WikGenPaul] Wikipedia: Generalizations of Pauli matrices https://en.wikipedia.org/wiki/Generalizations_of_Pauli_matrices .. [WatrousLec6] Lecture 6: Further remarks on measurements and channels https://cs.uwaterloo.ca/~watrous/LectureNotes/CS766.Fall2011/06.pdf :param k_1: (a non-negative integer from 0 to :code:`dim-1` inclusive). :param k_2: (a non-negative integer from 0 to :code:`dim-1` inclusive). :param dim: (a positive integer indicating the dimension). :return: A generalized Pauli operator. """ gen_pauli_x = shift(dim) gen_pauli_z = clock(dim) gen_pauli_w = np.linalg.matrix_power(gen_pauli_x, k_1) @ np.linalg.matrix_power( gen_pauli_z, k_2 ) return gen_pauli_w
python
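A short sanity-check sketch for gen_pauli: the dim = 2 cases reduce to the familiar Pauli operators (Y only up to a global phase), and the operators stay unitary in higher dimensions. The specific values checked follow directly from the docstring above.

import numpy as np
from toqito.matrices import gen_pauli

x = gen_pauli(1, 0, 2)
z = gen_pauli(0, 1, 2)
print(np.allclose(x, [[0, 1], [1, 0]]))        # shift at dim 2 is Pauli-X
print(np.allclose(z, [[1, 0], [0, -1]]))       # clock at dim 2 is Pauli-Z

g = gen_pauli(2, 1, 3)
print(np.allclose(g.conj().T @ g, np.eye(3)))  # unitary for higher dimensions too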
"""base classes to be inherited from for various purposes""" from abc import ABC from abc import abstractmethod import argparse from typing import List, Type from ec2mc.validate import validate_perms class CommandBase(ABC): """base class for most ec2mc command classes to inherit from""" _module_postfix = "_cmd" def __init__(self, cmd_args): pass @abstractmethod def main(self, cmd_args): """overridden by child class to implement command's functionality""" pass @classmethod def add_documentation(cls, argparse_obj): """initialize child's argparse entry and help""" return argparse_obj.add_parser(cls.cmd_name(), help=cls.cmd_doc()) def blocked_actions(self, cmd_args) -> List[str]: """return list of denied IAM actions needed for child's main""" return [] @classmethod def cmd_name(cls) -> str: """return child class' file name to use as argparse command name""" name_str = cls.__module__.rsplit('.', 1)[-1] if not name_str.endswith(cls._module_postfix): raise ImportError(f"{name_str} module name must end with " f"\"{cls._module_postfix}\".") return name_str[:-len(cls._module_postfix)] @classmethod def cmd_doc(cls) -> str: """return first line of main method's docstring""" docstring = cls.main.__doc__ if docstring is not None: return docstring.strip().splitlines()[0] raise NotImplementedError(f"{cls.__name__}'s main missing docstring.") class ParentCommand(CommandBase): """base class for command which just acts as parent for other commands""" _module_postfix = "_cmds" _sub_commands: List[Type[CommandBase]] def __init__(self, cmd_args): self._chosen_cmd = next(cmd(cmd_args) for cmd in self._sub_commands if cmd.cmd_name() == cmd_args.subcommand) def main(self, cmd_args): """Execute chosen subcommand""" self._chosen_cmd.main(cmd_args) @classmethod def add_documentation(cls, argparse_obj): """set up argparse for command and all of its subcommands""" cmd_parser = super().add_documentation(argparse_obj) subcommands = cmd_parser.add_subparsers( title="subcommands", metavar="<subcommand>", dest="subcommand") subcommands.required = True for sub_command in cls._sub_commands: sub_command.add_documentation(subcommands) def blocked_actions(self, cmd_args) -> List[str]: """pass along selected subcommand's denied IAM actions""" return self._chosen_cmd.blocked_actions(cmd_args) class ComponentSetup(ABC): """base class for aws_setup component checking/uploading/deleting""" describe_actions: List[str] upload_actions: List[str] delete_actions: List[str] def __init__(self, config_aws_setup): pass @abstractmethod def check_component(self): """check if AWS already has component, and if it is up to date""" pass @abstractmethod def notify_state(self, component_info): """print the component's status relative to AWS""" pass @abstractmethod def upload_component(self, component_info): """create component on AWS if not present, update if present""" pass @abstractmethod def delete_component(self): """remove component from AWS if present""" pass @classmethod @abstractmethod def blocked_actions(cls, sub_command: str) -> List[str]: """check whether IAM user is allowed to perform actions on component Should be overridden by child classes in the following fashion: @classmethod def blocked_actions(cls, sub_command): cls.describe_actions = [] cls.upload_actions = [] cls.delete_actions = [] return super().blocked_actions(sub_command) """ needed_actions = cls.describe_actions if sub_command == "upload": needed_actions.extend(cls.upload_actions) elif sub_command == "delete": needed_actions.extend(cls.delete_actions) return 
validate_perms.blocked(actions=needed_actions) class ProperIndentParser(argparse.ArgumentParser): """Use formatter_class that properly indents help in subparsers""" def __init__(self, *args, **kwargs): formatter_class = lambda prog: ProperIndentFormatter(prog) argparse.ArgumentParser.__init__( self, *args, **kwargs, formatter_class=formatter_class) class ProperIndentFormatter(argparse.HelpFormatter): """Corrected _max_action_length for the indenting of subactions Source: https://stackoverflow.com/a/32891625/2868017 """ def add_argument(self, action): if action.help is not argparse.SUPPRESS: # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] current_indent = self._current_indent for subaction in self._iter_indented_subactions(action): # compensate for the indent that will be added indent_chg = self._current_indent - current_indent added_indent = "x"*indent_chg invocations.append(added_indent + get_invocation(subaction)) # update the maximum item length invocation_length = max([len(s) for s in invocations]) action_length = invocation_length + self._current_indent self._action_max_length = max( self._action_max_length, action_length) # add the item to the list self._add_item(self._format_action, [action])
python
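The CommandBase/ParentCommand classes above derive the argparse command name from the defining module's file name (which must end in "_cmd") and pull the help text from the first line of main's docstring. A hypothetical child command might look like the sketch below; the module name ping_cmd.py, the import path, and the extra flag are invented for illustration.

from typing import List

# assumed import path for the base classes shown above
from ec2mc.utils.base_classes import CommandBase


class Ping(CommandBase):
    """hypothetical command assumed to live in a ping_cmd.py module"""

    def main(self, cmd_args):
        """respond with a trivial message (illustration only)"""
        print("PONG" if cmd_args.loud else "pong")

    @classmethod
    def add_documentation(cls, argparse_obj):
        cmd_parser = super().add_documentation(argparse_obj)
        cmd_parser.add_argument(
            "--loud", action="store_true", help="shout the response")

    def blocked_actions(self, cmd_args) -> List[str]:
        # this toy command needs no IAM actions, so nothing can be blocked
        return []

Because the module name would be ping_cmd.py, cmd_name() resolves to "ping" and cmd_doc() picks up the first line of main's docstring for the argparse help text.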
try: import unzip_requirements except ImportError: pass import json, os, sys, re import base64 import boto3 from botocore.signers import RequestSigner from kubernetes import client from kubernetes.client import ApiClient, Configuration from kubernetes.config.kube_config import KubeConfigLoader def get_bearer_token(cluster_id): """ Get the AWS token for the user. This is from this lovely code base: https://github.com/kubernetes-sigs/aws-iam-authenticator#api-authorization-from-outside-a-cluster """ STS_TOKEN_EXPIRES_IN = 60 session = boto3.session.Session() client = session.client('sts') service_id = client.meta.service_model.service_id signer = RequestSigner( service_id, region, 'sts', 'v4', session.get_credentials(), session.events ) params = { 'method': 'GET', 'url': 'https://sts.{}.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15'.format(region), 'body': {}, 'headers': { 'x-k8s-aws-id': cluster_id }, 'context': {} } signed_url = signer.generate_presigned_url( params, region_name=region, expires_in=STS_TOKEN_EXPIRES_IN, operation_name='' ) base64_url = base64.urlsafe_b64encode(signed_url.encode('utf-8')).decode('utf-8') # remove any base64 encoding padding: return 'k8s-aws-v1.' + re.sub(r'=*', '', base64_url) # normal headers we return when things are good. headers = { "Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": True } cluster = os.getenv('CLUSTER', 'matomo') region = os.getenv('REGION', 'us-west-2') def formatted_error(message, statusCode=400): print("error:" , message) return { "statusCode": statusCode, "headers": headers, "body": json.dumps({"error": message}) } class DecimalEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, decimal.Decimal): return int(obj) return super(DecimalEncoder, self).default(obj) def serializer(obj): """Default JSON serializer.""" import calendar, datetime if isinstance(obj, datetime.datetime): if obj.utcoffset() is not None: obj = obj - obj.utcoffset() millis = int( calendar.timegm(obj.timetuple()) * 1000 + obj.microsecond / 1000 ) return millis raise TypeError('Not sure how to serialize %s' % (obj,)) def make_config(): """ List kubernetes deployments in the cluster. 
""" eks_client = boto3.client('eks') cluster_details = eks_client.describe_cluster(name=cluster) #print(json.dumps(cluster_details, indent=4, sort_keys=True, default=serializer)) conn = { "name": cluster_details['cluster']['name'], "endpoint": cluster_details['cluster']['endpoint'], "ca": cluster_details['cluster']['certificateAuthority']['data'], } token = get_bearer_token(conn['name']) #print("Token: ", token) #print("ca is: ", conn['ca']) kube_config = { "contexts": [ { "name": conn['name'], "context" : { "cluster": conn['name'], "user": "aws_user", } } ], "clusters" : [ { "name" : conn['name'], "cluster": { "server": conn['endpoint'], "certificate-authority-data": conn['ca'] } } ], "users" : [ { "name": "aws_user", "user": { "token": token } } ] } return conn['name'], kube_config def list_deployments(event, context): context, kube_config = make_config() loader = KubeConfigLoader(config_dict=kube_config, active_context=context) config = Configuration() loader.load_and_set(config) apiClient = ApiClient(configuration=config) v1 = client.CoreV1Api(apiClient) pods = [] try: ret = v1.list_pod_for_all_namespaces(watch=False) for i in ret.items: pods.append({"ip": i.status.pod_ip, "namespace": i.metadata.namespace, "name": i.metadata.name}) except client.rest.ApiException as e: formatted_error(str(e)) return { "statusCode": 200, "headers": headers, "body": json.dumps({"pods": pods}, cls=DecimalEncoder, default=serializer) } def create_deployments(event, context): """ Create a Kubernetes deployment. """ return formatted_error("Not yet implemented.") if __name__ == "__main__": list_deployments(None, None)
python
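get_bearer_token above presigns an STS GetCallerIdentity request and packs the URL into the "k8s-aws-v1." token format, stripping the base64 padding. The helper below is not part of the code above; it is a small debugging sketch that simply reverses that encoding so the presigned URL (and its X-Amz-Signature parameters) can be inspected.

import base64

def decode_eks_token(token: str) -> str:
    # Reverse the 'k8s-aws-v1.' encoding used by get_bearer_token above.
    assert token.startswith('k8s-aws-v1.')
    payload = token[len('k8s-aws-v1.'):]
    # restore the base64 padding that was stripped during encoding
    padding = '=' * (-len(payload) % 4)
    return base64.urlsafe_b64decode(payload + padding).decode('utf-8')

# decode_eks_token(get_bearer_token('matomo')) should print back an
# https://sts.<region>.amazonaws.com/?Action=GetCallerIdentity&... URL.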
# -*- coding: utf-8 -*- from .utils import TestUtils from .ticker import TestTicker from .visuals import TestVisuals from .figure import TestFigure from .dates import TestDates #-----------------------------------------------------------------------------
python
from smexperiments import api_types def test_parameter_str_string(): param = api_types.TrialComponentParameterValue("kmeans", None) param_str = str(param) assert "kmeans" == param_str def test_parameter_str_number(): param = api_types.TrialComponentParameterValue(None, 2.99792458) param_str = str(param) assert "2.99792458" == param_str def test_parameter_str_none(): param = api_types.TrialComponentParameterValue(None, None) param_str = str(param) assert "" == param_str
python
import socket def validate_ip4 (address): try: socket.inet_aton(address) ip4_address = address except (socket.error, TypeError): ip4_address = None return ip4_address def validate_ip6 (address): try: socket.inet_pton(socket.AF_INET6, address) ip6_address = address except (socket.error, TypeError): ip6_address = None return ip6_address def invalidate (address): return None class ProxyProtocol: ip_validators = { 'TCP4' : validate_ip4, 'TCP6' : validate_ip6, 'UNKNOWN' : invalidate } def parseRequest (self, header): if '\r\n' in header: proxy_line, http_request = header.split('\r\n', 1) else: proxy_line, http_request = '', None try: magic, fproto, source, destination, sport, dport = proxy_line.split(' ') except ValueError: proxy_line, http_request = '', None magic, fproto, source, destination, sport, dport = None, None, None, None, None, None if magic != 'PROXY': # We don't care about parsing the source or destination ports http_request = None source, destination = None, None validate = self.ip_validators.get(fproto, invalidate) source_addr = validate(source) dest_addr = validate(destination) # pylint: disable=W0612 return source_addr, http_request
python
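ProxyProtocol.parseRequest above accepts a raw request that may be prefixed with a PROXY protocol v1 line and returns the validated client address together with the remaining HTTP request. A short usage sketch, assuming the class above is importable; the header strings are made up for illustration.

parser = ProxyProtocol()

# a well-formed PROXY line followed by the actual HTTP request
header = 'PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\nGET / HTTP/1.1\r\n\r\n'
source, request = parser.parseRequest(header)
print(source)   # '192.168.0.1'
print(request)  # 'GET / HTTP/1.1\r\n\r\n'

# anything that does not start with a valid PROXY line yields (None, None)
print(parser.parseRequest('GET / HTTP/1.1\r\n\r\n'))  # (None, None)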
# Generated by Django 3.0.2 on 2020-03-04 20:08 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('userprofile', '0030_skill'), ] operations = [ migrations.AddField( model_name='profile', name='skills', field=models.ManyToManyField(blank=True, to='userprofile.Skill'), ), ]
python
import os import sys from typing import List import numpy as np import scipy as sp import scipy.stats from utilities.plotting import Plot def main(): figure_num = int(sys.argv[1]) for_print = bool(int(sys.argv[2])) def load_and_plot(dir: str, plot: Plot, name: str): series, means, confidences = load(dir) plot.plot_evaluations(series, means, confidences, name) if figure_num == 0: plot = Plot("Mean evaluation grade", for_print, small=True) load_and_plot("results/s0-q-1-1k/collected", plot, "k=1") plot.save("figure0", "report") elif figure_num == 1: plot = Plot("Mean evaluation grade", for_print, small=True) load_and_plot("results/cmac-1-Wed-Dec-07-02-38/collected", plot, "k=1") load_and_plot("results/cmac-2-Wed-Dec-07-02-37/collected ", plot, "k=2") load_and_plot("results/cmac-3-Wed-Dec-07-02-37/collected", plot, "k=3") plot.save("figure1", "report") elif figure_num == 2: plot = Plot("Mean evaluation grade", for_print, small=True) load_and_plot("results/cmac-1-inv-Wed-Dec-07-02-38/collected", plot, "k=1") load_and_plot("results/cmac-2-inv-Wed-Dec-07-02-38/collected ", plot, "k=2") plot.save("figure2", "report") def load(dir: str): trials = get_trials(dir) rewards_by_step = extract_data(trials) means = [] confidences = [] for rewards in rewards_by_step: mean, confidence = mean_confidence_interval(rewards) means.append(mean) confidences.append(confidence) series = [i * 100 for i in range(0, len(means))] return series, means, confidences def mean_confidence_interval(data, confidence=0.90): a = 1.0 * np.array(data) n = len(a) m, se = np.mean(a), scipy.stats.sem(a) h = se * sp.stats.t._ppf((1 + confidence) / 2., n - 1) return m, h * 2 def extract_data(paths: List[str]) -> List[List[float]]: rewards_by_episode = [[] for i in range(0, 1000)] for path in paths: episodes, rewards, _, _ = np.loadtxt(path, delimiter=",").T i = 0 for (steps, reward) in zip(episodes, rewards): rewards_by_episode[i].append(reward) i += 1 rewards_by_episode = [episode for episode in rewards_by_episode if len(episode) > 0] return rewards_by_episode[0:min(200, len(rewards_by_episode))] def get_trials(dir: str) -> List[str]: dir = dir.strip() return [os.path.join(dir, name) for name in os.listdir(dir) if os.path.isfile(os.path.join(dir, name)) and not name.startswith(".") and name.endswith(".csv")] main()
python
import utils as util import tensorflow as tf import numpy as np def forecast_model(series, time,forecastDays): split_time=2555 time_train=time[:split_time] x_train=series[:split_time] split_time_test=3285 time_valid=time[split_time:split_time_test] x_valid=series[split_time:split_time_test] time_test=time[split_time_test:] x_test=series[split_time_test:] window_size=30 batch_size=32 shuffle_buffer_size=1000 tf.keras.backend.clear_session() tf.random.set_seed(51) np.random.seed(51) train_set = util.windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size) valid_set=util.windowed_dataset(x_valid,window_size,batch_size,shuffle_buffer_size) model = tf.keras.models.Sequential([ tf.keras.layers.Conv1D(filters=60, kernel_size=5, strides=1, padding="causal", activation="relu", input_shape=[None, 1]), tf.keras.layers.LSTM(60, return_sequences=True), tf.keras.layers.LSTM(60, return_sequences=True), tf.keras.layers.Dense(30, activation="relu"), tf.keras.layers.Dense(10, activation="relu"), tf.keras.layers.Dense(1), tf.keras.layers.Lambda(lambda x: x * 400) ]) optimizer = tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9) model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) history = model.fit(train_set,validation_data=(valid_set),epochs=5) rnn_forecast = util.model_forecast(model, series[..., np.newaxis], window_size) rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0] mae=tf.keras.metrics.mean_absolute_error(x_test, rnn_forecast[:365]).numpy() accuracy=100-mae return (accuracy,mae,rnn_forecast[:forecastDays])
python
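The forecasting script above relies on util.windowed_dataset and util.model_forecast, which are not shown in this file. The sketch below is a guess at what those helpers typically look like in this style of TensorFlow time-series code (sliding windows over a 1-D series); the actual utilities module may differ.

import tensorflow as tf

def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    # Slide a window over the series and emit (window, next_value) training pairs.
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size + 1))
    ds = ds.shuffle(shuffle_buffer)
    ds = ds.map(lambda w: (w[:-1], w[-1]))
    return ds.batch(batch_size).prefetch(1)

def model_forecast(model, series, window_size):
    # Run the trained model over every window of the series and collect predictions.
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size))
    ds = ds.batch(32).prefetch(1)
    return model.predict(ds)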
# terrascript/provider/hashicorp/template.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:28:21 UTC) import terrascript class template(terrascript.Provider): """terraform-provider-template""" __description__ = "terraform-provider-template" __namespace__ = "hashicorp" __name__ = "template" __source__ = "https://github.com/hashicorp/terraform-provider-template" __version__ = "2.2.0" __published__ = "2020-10-08T16:16:33Z" __tier__ = "official" __all__ = ["template"]
python
import itertools from typing import List, Tuple from card_utils import deck from card_utils.deck.utils import ( rank_partition, suit_partition, ranks_to_sorted_values ) from card_utils.games.gin.deal import new_game def deal_new_game(): """ shuffle up and deal each player 7 cards, put one card in the discard list, and put remaining cards in deck :return: (dict) { 'p1_hand': [str], 'p2_hand': [str], 'discard': [str], 'deck': [str] } """ return new_game(n_cards=7) def sorted_hand_points(hand): """ :param hand: ([str]) list of cards :return: ([str], int) """ runs_3, runs_4 = get_runs(hand) sets_3, sets_4 = get_sets(hand) melds_3 = runs_3 + sets_3 melds_4 = runs_4 + sets_4 sorted_hand = sort_cards_by_rank(hand) hand_points_ = sum_points_by_ranks(hand) if len(hand) == 8: hand_points_ -= max(deck.rank_to_value[r] for r, _ in hand) if len(melds_3 + melds_4) == 0: return sorted_hand, hand_points_ for meld_3, meld_4 in itertools.product(melds_3, melds_4): cards_in_meld = {*meld_3, *meld_4} if len(cards_in_meld) == 7: # if there is a non-intersecting 3-meld and 4-meld, # then you have 0 points and win remaining_cards = list(set(hand) - set(cards_in_meld)) return meld_4 + meld_3 + remaining_cards, 0 for meld in melds_3 + melds_4: hand_without_meld = [card for card in hand if card not in meld] # print(hand, hand_without_meld, meld) meld_points = sum_points_by_ranks(hand_without_meld) if len(hand) == 8: meld_points -= max(deck.rank_to_value[r] for r, _ in hand_without_meld) if meld_points < hand_points_: sorted_hand = meld + sort_cards_by_rank(hand_without_meld) hand_points_ = min(hand_points_, meld_points) return sorted_hand, hand_points_ def rank_straights(ranks, straight_length, aces_high=True, aces_low=True, suit=''): """ :param ranks: ([str]) e.g. ['A', '2', '7', 'T', 'J', 'Q', 'K'] :param straight_length: (int) e.g. 5 :param aces_high: (bool) :param aces_low: (bool) :param suit: (str) optional: inject a suit in the final returned value :return: ([[str]]) list of list of straights, each with length straight_length e.g. [['T','J','Q','K','A']] or [['Th', 'Jh', 'Qh', 'Kh', 'Ah']] """ if len(ranks) < straight_length: # don't waste our time if its impossible to make a straight return [] if suit not in {'', *deck.suits}: raise ValueError( f'rank_straights: suit parameter must either be ' f'the empty string "" or one of {deck.suits}' ) values = ranks_to_sorted_values(ranks, aces_high=aces_high, aces_low=aces_low) values_in_a_row = 0 num_values = len(values) last_value = values[0] straights = [] for ii, value in enumerate(values[1:]): if last_value + 1 == value: values_in_a_row += 1 else: values_in_a_row = 0 if values_in_a_row >= straight_length - 1: straights.append([ f'{deck.value_to_rank[v]}{suit}' for v in range(value - straight_length + 1, value + 1) ]) if num_values + values_in_a_row < straight_length + ii: # exit early if there aren't enough cards left # to complete a straight return straights last_value = value return straights def get_runs(hand): """ cleaner but slower (!?) 
method to get runs :param hand: ([str]) :return: ([[str]], [[str]]) """ suit_to_ranks = suit_partition(hand) runs_3, runs_4 = [], [] for suit, ranks in suit_to_ranks.items(): runs_3.extend(rank_straights(ranks, 3, True, True, suit=suit)) runs_4.extend(rank_straights(ranks, 4, True, True, suit=suit)) return runs_3, runs_4 def get_sets(hand): """ :param hand: ([str]) :return: ([[str]], [[str]]) """ rank_to_suits = rank_partition(hand) sets_3, sets_4 = [], [] for rank, suits in rank_to_suits.items(): if len(suits) == 4: sets_4.append([f'{rank}{s}' for s in suits]) sets_3.extend([ [f'{rank}{s}' for s in suit_combo] for suit_combo in itertools.combinations(suits, 3) ]) elif len(suits) == 3: sets_3.append([f'{rank}{s}' for s in suits]) return sets_3, sets_4 def get_melds(hand) -> Tuple: """ :param hand: ([str]) :return: ([[str], [str]]) """ runs_3, runs_4 = get_runs(hand) sets_3, sets_4 = get_sets(hand) return runs_3 + sets_3, runs_4 + sets_4 def are_two_distinct_3_melds(melds_3: List[List]): """ :param melds_3: ([[str]]) :return: (bool) """ if len(melds_3) < 2: return False for m1, m2 in itertools.combinations(melds_3, 2): if len({*m1, *m2}) == 6: return True return False def sum_points_by_ranks(hand): """ :param hand: ([str]) :return: (int) """ return sum(deck.rank_to_value[r] for r, _ in hand) def sort_cards_by_rank(cards): """ :param cards: ([str]) :return: ([str]) """ return sorted(cards, key=lambda c: deck.rank_to_value[c[0]]) def sort_hand(hand): """ :param hand: ([str]) :return: ([str]) """ sorted_hand, _ = sorted_hand_points(hand) return sorted_hand def hand_points(hand): """ :param hand: ([str]) :return: (int) """ _, points = sorted_hand_points(hand) return points
python
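The helpers above work on two-character card strings (rank then suit, e.g. '7d'). A small usage sketch, assuming the card_utils package above is importable; the expected outputs in the comments follow from the docstrings rather than from running the code.

# straights of length 3 inside the heart ranks A, 2, 3, 4
print(rank_straights(['A', '2', '3', '4'], straight_length=3, suit='h'))
# expected: [['Ah', '2h', '3h'], ['2h', '3h', '4h']]

hand = ['2h', '3h', '4h', '7s', '7d', '7c', 'Kd']
melds_3, melds_4 = get_melds(hand)
print(melds_3)  # the 2-3-4 of hearts run plus the set of sevens
print(melds_4)  # [] -- no four-card melds in this hand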
"""\ Setup Kubernetes on cloud """ import logging import os import sys sys.path.append(os.path.abspath("../..")) import main def start(config, machines): """Setup Kubernetes on cloud VMs using Ansible. Args: config (dict): Parsed configuration machines (list(Machine object)): List of machine objects representing physical machines """ logging.info("Start Kubernetes cluster on VMs") processes = [] # Setup cloud controller command = [ "ansible-playbook", "-i", config["home"] + "/.continuum/inventory_vms", config["home"] + "/.continuum/cloud/control_install.yml", ] processes.append(machines[0].process(command, output=False)) # Setup cloud worker command = [ "ansible-playbook", "-i", config["home"] + "/.continuum/inventory_vms", config["home"] + "/.continuum/cloud/install.yml", ] processes.append(machines[0].process(command, output=False)) # Check playbooks for process in processes: logging.debug( "Check output for Ansible command [%s]" % (" ".join(process.args)) ) output = [line.decode("utf-8") for line in process.stdout.readlines()] error = [line.decode("utf-8") for line in process.stderr.readlines()] main.ansible_check_output((output, error))
python
class Player(object): """A class used to represent a poker player. Attributes: name: name of the player stack: amount of money the player has hand: two Cards """ def __init__(self, name, stack, hand): """Inits Player with name, stack, and two cards that will compose their hand""" self.name = name self.stack = stack self.hand = hand # TODO fold, bet, receive(?)
python
import re import requests from hashlib import sha1 from urllib.parse import urlsplit from apphelpers.rest.hug import user_id from app.libs import asset as assetlib from app.libs import publication as publicationlib from app.models import AssetRequest, asset_request_statuses from app.models import moderation_policies, groups, SYSTEM_USER_ID def create(url, title, requester: user_id): domain = urlsplit(url).netloc publication = publicationlib.get_by_domain(domain) if publication is None: publication_id = publicationlib.create(name=domain, domain=domain) else: publication_id = publication['id'] # asset ids are hashes generated from URLs. Idea is client doesn't need to # query server to find id for certain asset. Client can generate the id # itself from the asset url (provided it knows the hashing technique used) asset_id = sha1(bytes(url, 'utf8')).hexdigest() if not exists(asset_id): asset = AssetRequest.create( id=asset_id, url=url, title=title, publication=publication_id, requester=requester ) return asset_id create.groups_required = [groups.requester.value, groups.moderator.value] def create_and_approve(url, title, requester: user_id): asset_id = create(url, title, requester) approve(asset_id, approver=requester) return asset_id create_and_approve.groups_required = [groups.moderator.value] def get(id): asset_request = AssetRequest.select().where(AssetRequest.id == id).first() return asset_request.to_dict() if asset_request else None get.groups_required = [groups.moderator.value] def exists(id): return bool(AssetRequest.get_or_none(AssetRequest.id == id)) def list_(page=1, size=20): asset_requests = AssetRequest.select().order_by(AssetRequest.created.desc()).paginate(page, size) return [asset_request.to_dict() for asset_request in asset_requests] list_.groups_required = [groups.moderator.value] def update(id, mod_data): updatables = ('url', 'requester') update_dict = dict((k, v) for (k, v) in list(mod_data.items()) if k in updatables) update_dict['status'] = asset_request_statuses.pending.value AssetRequest.update(**update_dict).where(AssetRequest.id == id).execute() update.groups_required = [groups.moderator.value] def approve(id, approver: user_id, open_till=None, moderation_policy=None): mod_data = {'approver': approver, 'status': asset_request_statuses.accepted.value} AssetRequest.update(**mod_data).where(AssetRequest.id == id).execute() asset_request = get(id) assetlib.create_or_replace( id=id, url=asset_request['url'], title=asset_request['title'], publication=asset_request['publication'], moderation_policy=moderation_policy or moderation_policies.default.value, open_till=open_till ) approve.groups_required = [groups.moderator.value] def reject(id, approver: user_id): mod_data = {'approver': approver, 'status': asset_request_statuses.rejected.value} AssetRequest.update(**mod_data).where(AssetRequest.id == id).execute() reject.groups_required = [groups.moderator.value] def cancel(id, approver: user_id): asset_request = get(id) if asset_request['status'] == asset_request_statuses.accepted.value: raise Exception('not possible') mod_data = {'approver': approver, 'status': asset_request_statuses.cancelled.value} AssetRequest.update(**mod_data).where(AssetRequest.id == id).execute() cancel.groups_required = [groups.moderator.value, groups.requester.value]
python
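The comment in create() above explains that asset ids are content-addressable: the id is simply the SHA-1 hex digest of the asset URL, so any client can derive the id locally without asking the server. A minimal illustration (the URL is made up):

from hashlib import sha1

url = 'https://example.com/articles/42'
asset_id = sha1(bytes(url, 'utf8')).hexdigest()
print(asset_id)  # every client hashing the same URL gets the same 40-character id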
model = dict( type='LiteFlowNet', encoder=dict( type='NetC', in_channels=3, pyramid_levels=[ 'level1', 'level2', 'level3', 'level4', 'level5', 'level6' ], out_channels=(32, 32, 64, 96, 128, 192), strides=(1, 2, 2, 2, 2, 2), num_convs=(1, 3, 2, 2, 1, 1), conv_cfg=None, norm_cfg=None, act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None), decoder=dict( type='NetE', in_channels=dict(level5=128, level6=192), corr_channels=dict(level5=49, level6=49), sin_channels=dict(level5=258, level6=386), rin_channels=dict(level5=131, level6=195), feat_channels=64, mfeat_channels=(128, 64, 32), sfeat_channels=(128, 64, 32), rfeat_channels=(128, 128, 64, 64, 32, 32), patch_size=dict(level5=3, level6=3), corr_cfg=dict( level5=dict(type='Correlation', max_displacement=3), level6=dict(type='Correlation', max_displacement=3)), warp_cfg=dict(type='Warp', align_corners=True, use_mask=True), flow_div=20., conv_cfg=None, norm_cfg=None, act_cfg=dict(type='LeakyReLU', negative_slope=0.1), scaled_corr=False, regularized_flow=True, extra_training_loss=False, flow_loss=dict( type='MultiLevelEPE', weights=dict(level6=0.32, level5=0.08), p=2, reduction='sum'), init_cfg=None), init_cfg=dict( type='Kaiming', nonlinearity='leaky_relu', layer=['Conv2d', 'ConvTranspose2d'], mode='fan_in', bias=0), # model training and testing settings train_cfg=dict(), test_cfg=dict(), )
python
# Copyright 2021 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data Source resources for version 1 of the Timesketch API.""" import logging from flask import jsonify from flask import request from flask import abort from flask_restful import Resource from flask_login import login_required from flask_login import current_user from timesketch.api.v1 import resources from timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST from timesketch.lib.definitions import HTTP_STATUS_CODE_FORBIDDEN from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND from timesketch.models import db_session from timesketch.models.sketch import Sketch from timesketch.models.sketch import Timeline from timesketch.models.sketch import DataSource logger = logging.getLogger('timesketch.datasource_api') class DataSourceListResource(resources.ResourceMixin, Resource): """Resource for listing DataSources associated with a Sketch.""" @login_required def get(self, sketch_id): """Handles GET request to the resource. Args: sketch_id (int): Identifier for the Sketch the datasource belongs to. Returns: A list of JSON representations of the data sources. """ sketch = Sketch.query.get_with_acl(sketch_id) if not sketch: abort( HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.') if sketch.get_status.status == 'archived': abort( HTTP_STATUS_CODE_BAD_REQUEST, 'Unable to fetch data sources from an archived sketch.') number_of_timelines = 0 data_sources = [] for timeline in sketch.active_timelines: number_of_timelines += 1 for data_source in timeline.datasources: data_sources.append(data_source) schema = { 'meta': { 'number_of_timelines': number_of_timelines, 'number_of_sources': len(data_sources) }, 'objects': data_sources, } return jsonify(schema) @login_required def post(self, sketch_id): """Handles POST request to the resource. Args: sketch_id (int): Identifier for the Sketch the datasource belongs to. 
Returns: A datasource in JSON (instance of flask.wrappers.Response) """ sketch = Sketch.query.get_with_acl(sketch_id) if not sketch: abort( HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.') if sketch.get_status.status == 'archived': abort( HTTP_STATUS_CODE_BAD_REQUEST, 'Unable to fetch data sources from an archived sketch.') if not sketch.has_permission(current_user, 'write'): abort( HTTP_STATUS_CODE_FORBIDDEN, ( 'User does not have sufficient write access to ' 'to the sketch.')) form = request.json if not form: form = request.data timeline_id = form.get('timeline_id') if not timeline_id: abort( HTTP_STATUS_CODE_BAD_REQUEST, 'Unable to create a data source without a timeline ' 'identifier.') timeline = Timeline.query.get(timeline_id) if not timeline: abort( HTTP_STATUS_CODE_NOT_FOUND, 'No timeline found with this ID.') if timeline not in sketch.active_timelines: abort( HTTP_STATUS_CODE_NOT_FOUND, 'The timeline is not part of the active timelines in ' 'the sketch.') datasource = DataSource( timeline=timeline, user=current_user, provider=form.get('provider', 'N/A'), context=form.get('context', 'N/A'), file_on_disk='', file_size=0, original_filename=form.get('original_filename', ''), data_label=form.get('data_label', 'data') ) timeline.datasources.append(datasource) db_session.add(datasource) db_session.add(timeline) db_session.commit() return self.to_json(datasource, status_code=HTTP_STATUS_CODE_CREATED) class DataSourceResource(resources.ResourceMixin, Resource): """Resource for accessing data sources.""" def _verify_sketch_and_datasource(self, sketch_id, datasource_id): """Verify and abort if unable to proceed. This function aborts if the ACLs on the sketch are not sufficient and the data source does not belong to the sketch in question. """ sketch = Sketch.query.get_with_acl(sketch_id) if not sketch: abort( HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.') if sketch.get_status.status == 'archived': abort( HTTP_STATUS_CODE_BAD_REQUEST, 'Unable to fetch data sources from an archived sketch.') data_source = DataSource.query.get(datasource_id) if not data_source: abort( HTTP_STATUS_CODE_NOT_FOUND, 'No DataSource found with this ID.') if data_source.timeline.sketch.id != sketch.id: abort( HTTP_STATUS_CODE_BAD_REQUEST, 'Data Source does not match the Sketch ID.') @login_required def get(self, sketch_id, datasource_id): """Handles GET request to the resource. Args: sketch_id (int): Identifier for the Sketch the datasource belongs to. datasource_id (int): Identifier for the datasource. Returns: A JSON representation of the data source. """ self._verify_sketch_and_datasource(sketch_id, datasource_id) data_source = DataSource.query.get(datasource_id) return self.to_json(data_source) @login_required def post(self, sketch_id, datasource_id): """Handles POST request to the resource. Args: sketch_id (int): Identifier for the Sketch the datasource belongs to. datasource_id (int): Identifier for the datasource. Returns: A sketch in JSON (instance of flask.wrappers.Response) """ self._verify_sketch_and_datasource(sketch_id, datasource_id) data_source = DataSource.query.get(datasource_id) changed = False form = request.json if not form: form = request.data provider = form.get('provider') if provider: changed = True data_source.provider = provider context = form.get('context') if context: changed = True data_source.context = context if changed: db_session.add(data_source) db_session.commit() return self.to_json(data_source)
python
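The POST handler above expects a JSON body containing at least timeline_id, with optional provider, context, original_filename and data_label fields, and replies with the created data source and a 201 status. Below is a client-side sketch using the requests library; the host, the sketch/timeline ids, and the exact URL layout are assumptions based on the resource names, and authentication (Timesketch uses session cookies or tokens) is omitted.

import requests

# assumed endpoint layout for the DataSourceListResource above
url = 'https://timesketch.example.com/api/v1/sketches/1/datasource/'
payload = {
    'timeline_id': 3,
    'provider': 'Plaso',
    'context': 'workstation triage upload',
}
response = requests.post(url, json=payload)  # auth/session handling omitted
print(response.status_code)  # expected 201 when the data source is created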
""" Codemonk link: https://www.hackerearth.com/problem/algorithm/lonely-monk-code-monk-ebca6e4a/ Being alone in the new world, Monk was little afraid and wanted to make some friends. So he decided to go the famous dance club of that world, i.e "DS Club" and met a very beautiful array A of N integers, but for some reasons she was very sad. Being asked by Monk, she told him that she wants to find out the total number of sub arrays in it, having their sum even. In order to impress her, Monk wants to solve this problem for her. Input - Output: First line of input consists of integer N. Next line will consists of N integers. Print the total number of sub arrays of this array with even sum. Sample input: 5 2 5 4 4 4 Sample Output: 7 """ """ The implementation of this problem is very easy but the thought is quite more hard. We can solve the problem in linear time. We just have to think that if we subtract or add 2 even numbers we get an even number and the same goes for adding or subtracting 2 odd numbers, we once again get an even number. We are going to keep the cumulative sum and each time we end up in an even or odd number we are going to add +1 the amount of even of odd sums up to that point. Before we do that, if we end to an even number we add the amount of even numbers up to that point and we do the same if we end up to an odd number. Final complexity: O(N) """ n = int(input()) array = list(map(int, input().split())) current = 0 ans = 0 odd = 0 even = 1 for i in range(n): current += array[i] temp = current % 2 if temp == 0: ans += even even += 1 else: ans += odd odd += 1 print(ans)
python
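As a cross-check of the prefix-parity argument above, a quadratic brute force that counts the even-sum subarrays directly gives the same answer on the sample input (illustration only):

def even_sum_subarrays_bruteforce(arr):
    # O(N^2) reference: enumerate every subarray and test its sum directly.
    count = 0
    for start in range(len(arr)):
        total = 0
        for end in range(start, len(arr)):
            total += arr[end]
            if total % 2 == 0:
                count += 1
    return count

print(even_sum_subarrays_bruteforce([2, 5, 4, 4, 4]))  # 7, matching the sample output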
#!/usr/bin/python n = int(input()) matrix = [] res = [] for _ in range(n): matrix.append([int(i) for i in input().split()]) for i in range(2 * n): for j in range(n): if 0 <= i - j < n: res.append(matrix[i - j][j]) print(' '.join(map(str, res)))
python
""" Run PCA using the covariance matrix estimated with empirical Bayes """ import numpy as np import scanpy.api as sc import simplesc if __name__ == '__main__': data_path = '/netapp/home/mincheol/parameter_estimation/inteferon_data/' adata = sc.read(data_path + 'interferon.raw.h5ad') estimator = simplesc.SingleCellEstimator( adata=adata, group_label='cell', n_umis_column='n_counts', num_permute=10000, p=0.1) x_pca = estimator.pca() np.save(data_path + 'x_pca_all.npy', x_pca)
python
import os, sys; sys.path.insert(0, os.path.join("..", "..")) from pattern.en import sentiment, polarity, subjectivity, positive # Sentiment analysis (or opinion mining) attempts to determine if # a text is objective or subjective, positive or negative. # The sentiment analysis lexicon bundled in Pattern focuses on adjectives. # It contains adjectives that occur frequently in customer reviews, # hand-tagged with values for polarity and subjectivity. # polarity() measures positive vs. negative, as a number between -1.0 and +1.0. # subjectivity() measures objective vs. subjective, as a number between 0.0 and 1.0. # sentiment() returns a tuple of (polarity, subjectivity) for a given string. for word in ("amazing", "horrible", "public"): print word, sentiment(word) print print sentiment( "The movie attempts to be surreal by incorporating time travel and various time paradoxes," "but it's presented in such a ridiculous way it's seriously boring.") # The input string can also be a Synset, or a parsed Sentence, Text, Chunk or Word. # positive() returns True if the string's polarity >= threshold. # The threshold can be lowered or raised, # but overall for strings with multiple words +0.1 yields the best results. print print "good:", positive("good", threshold=0.1) print " bad:", positive("bad") print # You can also do sentiment analysis in Dutch, it works exactly the same: #from pattern.nl import sentiment as sentiment_nl #print "In Dutch:" #print sentiment_nl("Een onwijs spannend goed boek!") # You can also use Pattern with SentiWordNet. # You can get SentiWordNet at: http://sentiwordnet.isti.cnr.it/ # Put the file "SentiWordNet*.txt" in pattern/en/wordnet/ # You can then use Synset.weight() and wordnet.sentiwordnet: #from pattern.en import wordnet, ADJECTIVE #print wordnet.synsets("horrible", pos=ADJECTIVE)[0].weight # Yields a (polarity, subjectivity)-tuple. #print wordnet.sentiwordnet["horrible"]
python
import numpy as np import tensorflow as tf class MNIST: """MNIST dataset wrapper. Attributes: x_train: np.ndarray, [B, 28, 28, 1], dataset for training. x_test: np.ndarray, [B, 28, 28, 1], dataset for testing. y_train: np.ndarray, [B], label for training, 0 ~ 9. y_test: np.ndarray, [B], label for testing, 0 ~ 9. """ def __init__(self): mnist = tf.keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() self.x_train = x_train[..., None].astype(np.float32) / 127.5 - 1. self.x_test = x_test[..., None].astype(np.float32) / 127.5 - 1. self.y_train, self.y_test = y_train, y_test def rawdata(self, train=True): """Raw dataset pair. Args: train: bool, whether training mode or not. Returns: (np.ndarray, np.ndarray), [[B, 28, 28], [B]], dataset and label pair. """ return (self.x_train, self.y_train) \ if train else (self.x_test, self.y_test) def datasets(self, bsize=128, bufsiz=10000, padding=None, flatten=False, condition=False, train=True): """Image dataset. Args: bsize: int, batch size. bufsiz: int, buffer size for shuffle. padding: int, pad side or not. flatten: bool, whether flatten image or not. condition: bool, whether add condition or not. train: bool, whether training mode or not. Returns: tf.data.Dataset, tensorflow dataset object, Iterable[tf.Tensor=[B, 28, 28]], iterable. """ x, y = self.rawdata(train) if padding is not None: x = np.pad( x, [[0, 0], [padding, padding], [padding, padding], [0, 0]], 'constant', constant_values=-1) if flatten: x = x.reshape(x.shape[0], -1) if condition: x = np.concatenate([x, np.eye(10)[y]], axis=-1) elif condition: _, height, width, _ = x.shape cond = np.eye(10)[y] cond = np.tile(cond[:, None, None], [1, height, width, 1]) x = np.concatenate([x, cond], axis=-1) return tf.data.Dataset.from_tensor_slices(x) \ .shuffle(bufsiz) \ .batch(bsize)
python
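A short usage sketch for the MNIST wrapper above (it downloads the data through tf.keras on first use); the batch shapes in the comments follow from the flags handled in datasets():

mnist = MNIST()

# plain image batches: shape [128, 28, 28, 1]
for batch in mnist.datasets(bsize=128):
    print(batch.shape)
    break

# flattened, label-conditioned batches: shape [128, 28 * 28 + 10]
for batch in mnist.datasets(bsize=128, flatten=True, condition=True):
    print(batch.shape)
    break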
import os import unittest import json from cloudsplaining.scan.managed_policy_detail import ManagedPolicyDetails from cloudsplaining.scan.group_details import GroupDetailList from cloudsplaining.scan.role_details import RoleDetailList from cloudsplaining.scan.user_details import UserDetailList from cloudsplaining.scan.authorization_details import AuthorizationDetails example_authz_details_file = os.path.abspath( os.path.join( os.path.dirname(__file__), os.path.pardir, "files", "example-authz-details.json", ) ) with open(example_authz_details_file) as f: contents = f.read() auth_details_json = json.loads(contents) class TestActionLinks(unittest.TestCase): def test_infrastructure_modification_actions(self): policy_details = ManagedPolicyDetails(auth_details_json.get("Policies")) infra_mod_actions = sorted(policy_details.all_infrastructure_modification_actions) self.assertTrue(len(infra_mod_actions) > 3000) def test_group_details_infra_mod_actions(self): group_details_json_input = auth_details_json["GroupDetailList"] policy_details = ManagedPolicyDetails(auth_details_json.get("Policies")) group_detail_list = GroupDetailList(group_details_json_input, policy_details) results = group_detail_list.all_infrastructure_modification_actions_by_inline_policies print(json.dumps(results, indent=4)) expected_results = [ "s3:GetObject", "s3:PutObjectAcl" ] self.assertListEqual(results, expected_results) self.assertTrue(len(results) >= 2) def test_role_details_infra_mod_actions(self): role_details_json_input = auth_details_json["RoleDetailList"] policy_details = ManagedPolicyDetails(auth_details_json.get("Policies")) role_detail_list = RoleDetailList(role_details_json_input, policy_details) results = role_detail_list.all_infrastructure_modification_actions_by_inline_policies expected_results = [ "ec2:AssociateIamInstanceProfile", "ec2:DisassociateIamInstanceProfile", "iam:AddRoleToInstanceProfile", "iam:CreateAccessKey", "iam:CreateInstanceProfile", "iam:PassRole", "s3:GetObject", "secretsmanager:GetSecretValue" ] print(json.dumps(results, indent=4)) self.assertListEqual(results, expected_results) def test_user_details_infra_mod_actions(self): user_details_json_input = auth_details_json["UserDetailList"] policy_details = ManagedPolicyDetails(auth_details_json.get("Policies")) group_details_json_input = auth_details_json["GroupDetailList"] group_detail_list = GroupDetailList(group_details_json_input, policy_details) user_detail_list = UserDetailList( user_details=user_details_json_input, policy_details=policy_details, all_group_details=group_detail_list ) results = user_detail_list.all_infrastructure_modification_actions_by_inline_policies expected_results = [ "s3:GetObject", "s3:PutObject", "s3:PutObjectAcl" ] print(json.dumps(results, indent=4)) self.assertListEqual(results, expected_results) def test_authorization_files_action_links(self): authorization_details = AuthorizationDetails(auth_details_json) results = authorization_details.links """ # It will look like this, but : { "a4b:AssociateContactWithAddressBook": "https://docs.aws.amazon.com/a4b/latest/APIReference/API_AssociateContactWithAddressBook.html", "a4b:AssociateDeviceWithRoom": "https://docs.aws.amazon.com/a4b/latest/APIReference/API_AssociateDeviceWithRoom.html", ... } """ print(len(results.keys())) self.assertTrue(len(results.keys()) > 3500) print(json.dumps(results, indent=4))
python
"""This module defines some handy :py:class:`Importable` elements. An ``Importable`` is usually composed of two different parts: * A *natural key* used to identify *the same* element across different systems. This is the only required component for an ``Importable``. * An optional set of properties that form *the contents*. The data in this properties is carried across systems in the process of syncing the elements. Two elements that are *the same* and have *equal contents* are said to be *in sync*. For example an element representing an online video can use the value of the streaming URL to be its natural key. The contents of the element can be formed from a view counter and the video title. In this scenario changes on the video title and view counter can be detected and carried across systems thus keeping elements which are the same in sync. Changes to the video URL will make the video element lose any correspondence with elements belonging to other systems. """ __all__ = ['Importable', 'RecordingImportable'] class _AutoContent(type): """ >>> class MockImportable(Importable): ... __content_attrs__ = 'attr' # doctest:+IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: >>> class MockImportable(Importable): ... __content_attrs__ = 123 # doctest:+IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: """ def __new__(cls, name, bases, d): _magic_name = '__content_attrs__' if _magic_name not in d: return type.__new__(cls, name, bases, d) ca = d[_magic_name] # XXX: py3 if isinstance(ca, basestring): raise ValueError( '%s must be an iterable not a string.' % _magic_name ) try: ca = frozenset(ca) except TypeError: raise ValueError('%s must be iterable.' % _magic_name) def __init__(self, *args, **kwargs): update_kwargs = {} for content_attr in self._content_attrs: try: update_kwargs[content_attr] = kwargs.pop(content_attr) except KeyError: pass # All arguments are optional self._update(update_kwargs) super(klass, self).__init__(*args, **kwargs) def __repr__(self): attrs = [] for attr_name in self._content_attrs: try: attr_value = getattr(self, attr_name) except AttributeError: continue attrs.append('%s=%r' % (attr_name, attr_value)) if attrs: cls_name = self.__class__.__name__ return '%s(%r, %s)' % ( cls_name, self._natural_key, ', '.join(attrs) ) return super(klass, self).__repr__() d['__init__'] = __init__ d.setdefault('__repr__', __repr__) d['__slots__'] = frozenset(d.get('__slots__', [])) | ca d['_content_attrs'] = ca klass = type.__new__(cls, name, bases, d) return klass class Importable(object): """A default implementation representing an importable element. This class is intended to be specialized in order to provide the element content and to override its behaviour if needed. The :py:meth:`sync` implementation in this class doesn't keep track of changed values. For such an implementation see :py:class:`RecordingImportable`. ``Importable`` instances are hashable and comparable based on the *natural_key* value. Because of this the *natural_key* must also be hashable and should implement equality and less then operators: >>> i1 = Importable(0) >>> i2 = Importable(0) >>> hash(i1) == hash(i2) True >>> i1 == i2 True >>> not i1 < i2 True ``Importable`` elements can access the *natural_key* value used on instantiation trough the ``natural_key`` property: >>> i = Importable((123, 'abc')) >>> i.natural_key (123, 'abc') Listeners can register to observe an ``Importable`` element for changes. 
Every time the content attributes change with a value that is not equal to the previous one all registered listeners will be notified: >>> class MockImportable(Importable): ... _content_attrs = ['a', 'b'] >>> i = MockImportable(0) >>> notifications = [] >>> i.register(lambda x: notifications.append(x)) >>> i.a = [] >>> i.b = 'b' >>> i.b = 'bb' >>> len(notifications) 3 >>> notifications[0] is notifications[1] is notifications[2] is i True >>> notifications = [] >>> l = [] >>> i.a = l >>> len(notifications) 0 >>> i.a is l True There is also a shortcut for defining new ``Importable`` classes other than using inheritance by setting ``__content_attrs__`` to an iterable of attribute names. This will automatically create a constructor for your class that accepts all values in the list as keyword arguments. It also sets ``_content_attrs`` and ``__slots__`` to include this values and generates a ``__repr__`` for you. This method however may not fit all your needs, in that case subclassing ``Importable`` is still your best option. One thing to keep in mind is that it's not possible to dinamicaly change ``_content_attrs`` for instances created from this class because of the ``__slots__`` usage. >>> class MockImportable(Importable): ... __content_attrs__ = ['a', 'b'] >>> MockImportable(0) MockImportable(0) >>> MockImportable(0, a=1, b=('a', 'b')) MockImportable(0, a=1, b=('a', 'b')) >>> i = MockImportable(0, a=1) >>> i.b = 2 >>> i.a, i.b (1, 2) >>> i.update(a=100, b=200) True """ __metaclass__ = _AutoContent __slots__ = ('_listeners', '_natural_key') _content_attrs = frozenset([]) _sentinel = object() def __init__(self, natural_key, *args, **kwargs): self._listeners = [] self._natural_key = natural_key super(Importable, self).__init__(*args, **kwargs) @property def natural_key(self): return self._natural_key def __setattr__(self, attr, value): is_different = False if attr in self._content_attrs: is_different = getattr(self, attr, object()) != value super(Importable, self).__setattr__(attr, value) if is_different: self._notify() def update(self, **kwargs): """Update multiple content attrtibutes and fire a single notification. Multiple changes to the element content can be grouped in a single call to :py:meth:`update`. This method should return ``True`` if at least one element differed from the original values or else ``False``. >>> class MockImportable(Importable): ... _content_attrs = ['a', 'b'] >>> i = MockImportable(0) >>> i.register(lambda x: notifications.append(x)) >>> notifications = [] >>> i.update(a=100, b=200) True >>> len(notifications) 1 >>> notifications[0] is i True >>> notifications = [] >>> i.update(a=100, b=200) False >>> len(notifications) 0 Trying to call update using keywords that are not present in ``_content_attrs`` souhld raise ``ValueError``: >>> i.update(c=1) # doctest:+IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: """ content_attrs = self._content_attrs for attr_name, value in kwargs.items(): if attr_name not in content_attrs: raise ValueError( 'Attribute %s is not part of the element content.' 
% attr_name ) has_changed = self._update(kwargs) if has_changed: self._notify() return has_changed def _update(self, attrs): has_changed = False super_ = super(Importable, self) for attr_name, value in attrs.items(): if not has_changed: current_value = getattr(self, attr_name, self._sentinel) # object() sentinel will also be different if current_value != value: has_changed = True super_.__setattr__(attr_name, value) return has_changed def sync(self, other): """Puts this element in sync with the *other*. The default implementation uses ``_content_attrs`` to search for the attributes that need to be synced between the elements and it copies the values of each attribute it finds from the *other* element in this one. By default the ``self._content_attrs`` is an empty list so no synchronization will take place: >>> class MockImportable(Importable): ... pass >>> i1 = MockImportable(0) >>> i2 = MockImportable(0) >>> i1.a, i1.b = 'a1', 'b1' >>> i2.a, i2.b = 'a2', 'b2' >>> has_changed = i1.sync(i2) >>> i1.a 'a1' >>> class MockImportable(Importable): ... _content_attrs = ['a', 'b', 'x'] >>> i1 = MockImportable(0) >>> i2 = MockImportable(0) >>> i1.a, i1.b = 'a1', 'b1' >>> i2.a, i2.b = 'a2', 'b2' >>> has_changed = i1.sync(i2) >>> i1.a, i1.b ('a2', 'b2') If no synchronization was needed (i.e. the content of the elements were equal) this method should return ``False``, otherwise it should return ``True``: >>> i1.sync(i2) False >>> i1.a = 'a1' >>> i1.sync(i2) True If the sync mutated this element all listeners should be notified. See :py:meth:`register`: >>> i1.a = 'a1' >>> notifications = [] >>> i1.register(lambda x: notifications.append(x)) >>> has_changed = i1.sync(i2) >>> len(notifications) 1 >>> notifications[0] is i1 True All attributes that can't be found in the *other* element are skipped: >>> i1._content_attrs = ['a', 'b', 'c'] >>> has_changed = i1.sync(i2) >>> hasattr(i1, 'c') False """ has_changed = self._sync(self._content_attrs, other) if has_changed: self._notify() return has_changed def _sync(self, content_attrs, other): attrs = {} for attr in content_attrs: try: that = getattr(other, attr) except AttributeError: continue else: attrs[attr] = that return self._update(attrs) def register(self, listener): """Register a callable to be notified when ``sync`` changes data. This method should raise an ``ValueError`` if *listener* is not a callable: >>> i = Importable(0) >>> i.register(1) # doctest:+IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: Same listener can register multiple times: >>> notifications = [] >>> listener = lambda x: notifications.append(x) >>> i.register(listener) >>> i.register(listener) >>> i._notify() >>> notifications[0] is notifications[1] is i True """ if not callable(listener): raise ValueError('Listener is not callable: %s' % listener) self._listeners.append(listener) def is_registered(self, listener): """Check if the listener is already registered. 
>>> i = Importable(0) >>> a = lambda x: None >>> i.is_registered(a) False >>> i.register(a) >>> i.is_registered(a) True """ return listener in self._listeners def _notify(self): """Sends a notification to all listeners passing this element.""" for listener in self._listeners: listener(self) def __hash__(self): return hash(self._natural_key) def __eq__(self, other): """ >>> Importable(0) == None False """ try: return self._natural_key == other.natural_key except AttributeError: return NotImplemented def __lt__(self, other): """ >>> Importable(0) < None False """ try: return self._natural_key < other.natural_key except AttributeError: return NotImplemented def __repr__(self): """ >>> Importable((1, 'a')) Importable((1, 'a')) >>> class MockImportable(Importable): pass >>> MockImportable('xyz') MockImportable('xyz') """ cls_name = self.__class__.__name__ return '%s(%r)' % (cls_name, self._natural_key) class _Original(Importable): def copy(self, content_attrs, other): self.__dict__.clear() self._sync(content_attrs, other) class RecordingImportable(Importable): """Very similar to :py:class:`Importable` but tracks changes. This class records the original values that the attributes had before any change introduced by attribute assignment or call to ``update`` and ``sync``. Just as in :py:class:`Importable` case you can define new classes using ``__content_attrs__`` as a shortcut. >>> class MockImportable(RecordingImportable): ... __content_attrs__ = ['a', 'b'] >>> MockImportable(0) MockImportable(0) >>> MockImportable(0, a=1, b=('a', 'b')) MockImportable(0, a=1, b=('a', 'b')) >>> i = MockImportable(0, a=1) >>> i.b = 2 >>> i.a, i.b (1, 2) >>> i.update(a=100, b=200) True >>> i.orig.a 1 """ __slots__ = ('_original', ) def __init__(self, *args, **kwargs): super(RecordingImportable, self).__init__(*args, **kwargs) self._original = _Original(self.natural_key) self.reset() @property def orig(self): """An object that can be used to access the elements original values. The object has all the attributes that this element had when it was instantiated or last time when :py:meth:`reset` was called. >>> class MockImportable(RecordingImportable): ... _content_attrs = ['a'] >>> i = MockImportable(0) >>> hasattr(i.orig, 'a') False >>> i.a = 'a' >>> i.reset() >>> i.a 'a' >>> i.orig.a 'a' >>> i.a = 'aa' >>> i.a 'aa' >>> i.orig.a 'a' >>> del i.a >>> i.reset() >>> hasattr(i.orig, 'a') False """ return self._original def reset(self): """Create a snapshot of the current values. >>> class MockImportable(RecordingImportable): ... _content_attrs = ['a'] >>> i = MockImportable(0) >>> hasattr(i.orig, 'a') False >>> i.a = 'a' >>> i.reset() >>> i.a = 'aa' >>> i.orig.a 'a' >>> i.reset() >>> i.orig.a 'aa' """ self._original.copy(self._content_attrs, self)
python
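The module docstring above uses an online video as its running example: the streaming URL is the natural key and the title plus view counter are the content. With the __content_attrs__ shortcut described above, that example maps onto a few lines; the class name, URL, and field values here are illustrative, and the module is written for Python 2, where the __metaclass__ hook applies.

class Video(RecordingImportable):
    __content_attrs__ = ['title', 'views']


local = Video('https://example.com/v/1', title='Old title', views=10)
remote = Video('https://example.com/v/1', title='New title', views=42)

# same natural key, so the two objects represent *the same* element...
assert local == remote

# ...but they are not *in sync* until the content is copied across
changed = local.sync(remote)
assert changed and local.title == 'New title' and local.views == 42

# the snapshot taken at construction time is still available for change detection
assert local.orig.title == 'Old title'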
import rosnode import subprocess import time import os ros_nodes = rosnode.get_node_names() if not '/robot_state_publisher' in ros_nodes: os.system('ifconfig eth0 192.168.0.2') command='roslaunch sick_tim sick_tim571_2050101.launch' process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) time.sleep(10)
python
#!/usr/bin/env python3 """ Requires: python-mnist numpy sklearn """ import sys sys.path.insert(0, 'src/') import mnist import numpy as np from numpy.linalg import norm as l21_norm from sklearn.metrics.cluster import normalized_mutual_info_score as nmi import os np.random.seed(int(os.environ.get('seed', '42'))) print('Using seed:', os.environ.get('seed', '42')) epsilon = 0.03 gamma = .1 / 30 / epsilon # np.random.seed(42) # Download t10k_* from http://yann.lecun.com/exdb/mnist/ # Change to directory containing unzipped MNIST data mndata = mnist.MNIST('data/MNIST-10K/') def welsch_func(x): result = (1 - np.exp(- epsilon * x ** 2)) / epsilon return result from basics.ours._numba import E, solve_U, update_V def target(U, V, X): return E(U, V, X, gamma, epsilon) def NMI(U): return nmi(labels, np.argmax(U, axis=1)) if __name__ == '__main__': images, labels = mndata.load_testing() ndim = 784 N = size = len(labels) C = 10 X = np.array(images).reshape((size, ndim)) / 255 t = 0 V = np.random.random((C, ndim)) U = np.ones((size, C)) * .1 / (C - 1) for i in range(size): xi = np.repeat(X[i, :].reshape((1, ndim)), C, axis=0) U[i, np.argmin(l21_norm(xi - V, axis=1))] = .9 S = np.ones((size, C)) delta_U = 10 while delta_U > 0.1: print('-------------') print('== t = ', t) delta_U = 100 old_V = V.copy() new_V = update_V(old_V, U, X, epsilon) delta_V = l21_norm(new_V - V) V = new_V new_U = solve_U(X, V, old_V, gamma, epsilon) delta_U = l21_norm(U - new_U) U = new_U print('DELTA V', delta_V) print('DELTA U', delta_U) print('NMI', NMI(U)) print(target(U, V, X)) t += 1
python
#!/usr/bin/python # # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest.mock import patch from google.datacatalog_connectors.vertica import scrape class MetadataScraperTest(unittest.TestCase): __SCRAPE_PACKAGE = 'google.datacatalog_connectors.vertica.scrape' @patch( '{}.metadata_scraper.vertica_python.connect'.format(__SCRAPE_PACKAGE)) def test_create_rdbms_connection_should_provide_connection_info( self, mock_connect): # noqa: E125 scraper = scrape.MetadataScraper() connection_args = { 'host': 'test-host', 'user': 'test-user', 'pass': 'test-pass' } scraper._create_rdbms_connection(connection_args) expected_connection_info = { 'host': 'test-host', 'port': 5433, 'user': 'test-user', 'password': 'test-pass', 'connection_timeout': 5 } mock_connect.assert_called_with(**expected_connection_info)
python
from kafka import KafkaConsumer from kafka import KafkaProducer import time import os import json print('Pinger Demo 1.0') kafka_url = str(os.environ['KAFKA_URL']) kafka_port = int(os.environ['KAFKA_PORT']) kafka_address = kafka_url + ':' + str(kafka_port) consumer = None while True: try: consumer = KafkaConsumer( 'pongs', bootstrap_servers=kafka_address, group_id='pingers') break except Exception: time.sleep(1) producer = KafkaProducer( value_serializer=lambda v: json.dumps(v).encode('utf-8'), bootstrap_servers=kafka_address) time.sleep(2) producer.send('pings', {'message' : 'This is a ping!'}) time.sleep(1) print('Waiting for messages') for msg in consumer: time.sleep(1) print('Received ' + str(msg.value)) print('Writing Ping!') producer.send('pings', {'message' : 'This is a ping!'})
python
import tkinter.filedialog as tk import pandas as pd class Dados(): def __init__(self): super().__init__() def importarDados(self): file_name = tk.askopenfilename(filetypes=(('csv files', '*.csv'), ('csv files', '*.csv'))) return file_name def abrirArquivoCsv(self,file_name): df = pd.read_csv(file_name) if df.shape[1] == 1: df = pd.read_csv(file_name,sep = ";") return df
python
#!/usr/bin/python import sys import zlib import time import os import requests import re #import requests-futures from baseconv import base62 from etaprogress.progress import ProgressBar def main(): PROGRAM_NAME = "zbing" if len(sys.argv) != 3: print("USAGE: python "+PROGRAM_NAME+".py <URL> <length>") print("For example: python "+PROGRAM_NAME+".py http://pastebing.ns.agency/raw/2uKYCmrAg 1") else: # init # https://stackoverflow.com/a/16060908 URL = str(sys.argv[1]) brute_len = int(sys.argv[2]) SLEEP_TIME = 1/1000 logname = PROGRAM_NAME+"_"+str(int(time.time()))+".log" log = open(logname, "a+") cookies = { "zid": "z5214048", "token": "28adf547358c1e2f1da1d204e5409a0731727ce2dc533e2c340139fbd09f7a01", "session": "eyJ1c2VybmFtZSI6Im1hbWEifQ.D35zOQ.uM2R-fEJvlCqQc5RguOovYZyF1E" } work = brute_len bar = ProgressBar(work) # print for user print(PROGRAM_NAME+" initialised") print("URL: "+URL) print("len: "+str(brute_len)) print("stdout will be logged to "+logname) print("the bruteforce will start in 3s") # allow user to change mind time.sleep(2) #payload i = 0 for i in range(work+1): k=i+1 k_string = ("=")*k #k_string = k_string.zfill(brute_len) #decorating r = requests.get(URL+k_string, cookies=cookies) txt = r.text # https://docs.python.org/3/library/zlib.html SAYS #'An Adler-32 checksum is almost as reliable as a CRC32 but can be computed much more quickly' #'Changed in version 3.0: Always returns an unsigned value' => GOOD txt_hash = toHexCustom(zlib.adler32(txt.encode('utf-8'))) # write to payload listings f_payload = open("pay_"+txt_hash+".txt", "a+") f_payload.write(k_string+"\n") f_payload.close() # if no transcription => first time resp encountered if not(os.path.isfile("plain_"+txt_hash+".txt")): # write to plaintext transcription f_plain = open("plain_"+txt_hash+".txt", "w+", encoding="utf-8") f_plain.write(txt) f_plain.close() # now log stuff whatToLog = "[N]"+k_string+"; New hash found! "+txt_hash+" ("+str(r.status_code)+")" log.write(whatToLog+"\n") print(whatToLog) # if hash already encountered else: # boring log, what else to do whatToLog = "[B]"+k_string+": "+txt_hash+" ("+str(r.status_code)+")" log.write(whatToLog+"\n") print(whatToLog) bar.numerator = i print(str(bar)) #sys.stdout.flush() #myCoolTitle = PROGRAM_NAME+" "+k_string #os.system("title "+myCoolTitle) #https://stackoverflow.com/a/10229529 #time.sleep(SLEEP_TIME/1000) #payload (for-loop) over whatToLog = "[F] Fin" log.write(whatToLog+"\n") print(whatToLog) log.close() def toHexCustom(dec): return str(hex(dec).split('x')[-1]) if __name__ == '__main__': main()
python
import pathlib
import unittest

from deep_hipsc_tracking import pipeline


# Tests

class TestPipelineStages(unittest.TestCase):

    def test_stages_exist(self):
        cls = pipeline.ImagePipeline
        exp = [
            'write_config_file',
            'extract_frames',
            'ensemble_detect_cells',
            'track_cells',
            'mesh_cells',
        ]
        self.assertEqual(cls.pipeline_stages, exp)
        for stage in cls.pipeline_stages:
            self.assertTrue(hasattr(cls, stage))

    def test_can_instantiate_class(self):
        basedir = pathlib.Path('fake')
        obj = pipeline.ImagePipeline(basedir)
        self.assertEqual(obj.script_dir.name, 'scripts')
        self.assertEqual(obj.log_file, basedir / 'deep_tracking.log')
        self.assertEqual(obj.config_file, basedir / 'deep_tracking.ini')
python
# -*- coding: utf-8 -*- """ Created on Thu Aug 20 13:39:18 2020 @author: Administrator """ from __future__ import division import time import torch import torch.nn as nn from torch.autograd import Variable import numpy as np import cv2 # from models_nolambda import * from models_nolambda_focallossw import * from utils.parse_config import * from preprocess import prep_image, inp_to_image, letterbox_image from utils.utils_mulanchor import * import pandas as pd import random import pickle as pkl import argparse from PIL import Image def get_test_input(input_dim, CUDA): img = cv2.imread("dog-cycle-car.png") img = cv2.resize(img, (input_dim[1], input_dim[0])) # resize: w h img_ = img[:,:,::-1].transpose((2,0,1)) img_ = img_[np.newaxis,:,:,:]/255.0 img_ = torch.from_numpy(img_).float() img_ = Variable(img_) if CUDA: img_ = img_.cuda() return img_ def prep_image(img, inp_dim): """ Prepare image for inputting to the neural network. Returns a Variable """ orig_im = img dim = orig_im.shape[1], orig_im.shape[0] # w h img = (letterbox_image(orig_im, (inp_dim[1], inp_dim[0]))) # orig_im 352 608 img_ = img[:, :, ::-1].transpose((2, 0, 1)).copy() img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0) return img_, orig_im, dim def write(x, img, color_dict): c1 = tuple(x[1:3].int()) c2 = tuple(x[3:5].int()) cls = int(x[-1]) label = "{0}".format(classes[cls]) #color = random.choice(colors) color = color_dict[str(cls)] if cls <= 22: cv2.rectangle(img, c1, c2,color, 2) t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0] c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4 cv2.rectangle(img, c1, c2,color, -1) cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1); return img def output(img, CUDA, model, device,num_classes,confidence=0.05, nms_thesh=0.02,inp_dim=[416,416]): img = np.array(img) img, orig_im, dim = prep_image(img, inp_dim) # resize img im_dim = torch.FloatTensor(dim).repeat(1, 2) # tensor([[512., 256., 512., 256.]]) if CUDA: im_dim = im_dim.cuda() img = img.cuda() with torch.no_grad(): output = model(Variable(img)).to(device) output = write_results(output, confidence, num_classes, nms=True, nms_conf=nms_thesh) im_dim = im_dim.repeat(output.size(0), 1) # tensor([[512., 256., 512., 256.], # [512., 256., 512., 256.], # [512., 256., 512., 256.]], device='cuda:0') scaling_factor_w = torch.min(inp_dim[1] / im_dim, 1)[0].view(-1, 1) scaling_factor_h = torch.min(inp_dim[0] / im_dim, 1)[0].view(-1, 1) output[:, [1, 3]] -= (inp_dim[1] - scaling_factor_w * im_dim[:, 0].view(-1, 1))/2 output[:, [2, 4]] -= (inp_dim[0] - scaling_factor_w * im_dim[:, 1].view(-1, 1))/2 output[:, [1, 3]] /= scaling_factor_w output[:, [2, 4]] /= scaling_factor_w for i in range(output.shape[0]): output[i, [1, 3]] = torch.clamp(output[i, [1, 3]], 0.0, im_dim[i, 0]) output[i, [2, 4]] = torch.clamp(output[i, [2, 4]], 0.0, im_dim[i, 1]) output_dicts = [] for i in range(output.shape[0]): if output[i, -1] == 0.0: output_dict ={'car_box': np.array(output[i, 1: 5].detach().cpu()), 'armor_box': np.array([])} output_dicts.append(output_dict) for i in range(output.shape[0]): if output[i, -1] != 0.0: for j in range(len(output_dicts)): box1 = np.array(output[i, 1: 5].detach().cpu()) box2 = output_dicts[j]['car_box'] b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] inter_rect_x1 = max(b1_x1, b2_x1) inter_rect_y1 = max(b1_y1, b2_y1) inter_rect_x2 = min(b1_x2, b2_x2) inter_rect_y2 = min(b1_y2, b2_y2) # 
Intersection area inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, abs(inter_rect_x2 - inter_rect_x1 + 1)) * np.clip( inter_rect_y2 - inter_rect_y1 + 1, 0, abs(inter_rect_y2 - inter_rect_y1 + 1) ) # clamp: 将input中的元素限制在[min,max]范围内并返回一个Tensor torch.clamp(input,min,max,out=None) # Union Area b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1) b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1) iou = inter_area / (b1_area + b2_area - inter_area + 1e-16) if iou > 0.01: output_dicts[j]['armor_box'] = np.append(output_dicts[j]['armor_box'], np.array(output[i, 1: 5].detach().cpu())).reshape((-1, 4)) return output_dicts # return np.array(output[:, 1: 5].detach().cpu()), np.array(output[:, -1].detach().cpu()) ''' # 注: # 输出为一个list, list中包含每辆车的字典, 目前字典中有两个key值, 'car_box'与'armor_box', car_box为一维数组, armor_box为二维数组. # 'armor_box'为[]时没有匹配到对应的装甲板. 匹配的iou阈值可以调整. # 格式如: # [ # { # 'car_box': array([468.017 , 86.88042, 526.57666, 138.35327], dtype=float32), # 'armor_box': array([], dtype=float64) # }, # { # 'car_box': array([382.3557 , 167.36795, 459.72476, 228.34549], dtype=float32), # 'armor_box': array( # [[394.31442261, 204.36643982, 415.21707153, 218.80717468], # [442.17236328, 205.49127197, 459.47769165, 221.09608459]] # ) # }, # { # 'car_box': array([ 63.237453, 135.55783 , 137.73201 , 192.92749 ], dtype=float32), # 'armor_box': array([[112.04547119, 166.20730591, 128.70788574, 178.04029846]]) # } # ] # 在程序中调用时,注释下一句 img = Image.open(img),直接将图片输入到output函数中即可 ''' #print(output(Image.open('/media/xuer/Seagate Slim Drive/camera_raw_morning_0814/10000/camera_raw_left/12-2020-08-14_09_56_11.jpg'))) # position, label = output((Image.open('/media/xuer/Seagate Slim Drive/camera_raw/8000_exposure/0-2020-08-09_21_29_05.jpg'))) # print(position, label)
python
#!/usr/bin/env python from scipy import * from scipy import weave from scipy import linalg from pylab import * import sys def ReadKlist(fklist, ReadBS=False): fk = open(fklist,'r') data = fk.readlines() nkp = [line[:3]=='END' for line in data].index(True) if data[nkp][:3]!='END': print 'wrong klist ', fklist kp=[] for i in range(nkp): kp.append( map(int, [data[i][10:15], data[i][15:20], data[i][20:25], data[i][25:30]]) ) if (ReadBS): BS = [map(float,line.split()) for line in data[nkp+1:nkp+4]] BSI = matrix(array(BS).T).I return (array(kp), array(BS), array(BSI)) else: return array(kp) class K_index: def __init__(self, BSI, kps): self.BSI = BSI self.SCALE = kps[0][3] self.ind1={} for ik,k in enumerate(kps): wik = tuple(map(int, dot(BSI,k[:3]))) self.ind1[wik] = ik def __call__(self, ik): wik = tuple(map(int, dot(self.BSI,ik[:3])%self.SCALE)) return self.ind1[wik] def CheckPPHermisity(): for ik in range(nkp): CA=zeros((norb*norb,norb*norb),dtype=float) for i1 in range(norb): for i2 in range(norb): for i3 in range(norb): for i4 in range(norb): CA[findex2(i1,i2,norb,norb),findex2(i3,i4,norb,norb)] = Chi0PP[ik,i1,i2,i3,i4] if sum(abs(CA-transpose(CA)))>1e-3: print 'ERROR' ei,ev=linalg.eigh(CA) print ik, ei.tolist() def CheckTimeReversal(): for ik in range(nkp): for i1 in range(norb): for i2 in range(norb): for i3 in range(norb): for i4 in range(norb): imk = k_index(-kps[ik]) diff = GammaPH[ik,i1,i2,i3,i4]-GammaPH[imk,i3,i4,i1,i2] if abs(diff)>1e-3: print 'DIFF-1=', ik, i1, i2, i3, i4, GammaPH[ik,i1,i2,i3,i4], GammaPH[imk,i3,i4,i1,i2] diff = GammaPH[ik,i1,i2,i3,i4]-GammaPH[ik,i2,i1,i4,i3] if abs(diff)>1e-3: print 'DIFF-2=', ik, i1, i2, i3, i4, GammaPH[ik,i1,i2,i3,i4], GammaPH[ik,i2,i1,i4,i3] def findex3(i1,i2,i3,n1,n2,n3): return (i1*n2+i2)*n3+i3 def findex2(i1,i2,n1,n2): return i1*n2+i2 if __name__ == '__main__': if len(sys.argv)<2: print 'ERROR : need input filename' print 'The input file should contain: ' print 'case.klist # filename with k-list' print 'Qlist.dat # filename with Qlist' print 'rmesh.dat # real axis mesh' print 'G_k1r_ # file with real axis k-dependent Grens function' print 'G_local1r_ # file with real axis local Grens function' print 'chi0_real. # name of the Bubble on real axis' print 'G_k1i_ # imaginary axis k-dependent Greens function' print 'G_local1i_ # imaginary axis local Greens function' print 'tvertex.dat # ctqmc local vertex function' print '100 # inverse temperature for bose function in Sq(omega)' sys.exit(1) fin = open(sys.argv[1], 'r') fin.next() # case.klist fQlist = fin.next().split()[0] # case.qlist #fin.next() # rmesh.dat #fin.next() # G_k1r_ #fin.next() # G_local1r_ #fin.next() # chi0_real. 
fin.next() # G_k1i_ fin.next() # G_local1i_ fvertex = fin.next().split()[0] # tvertex.dat fin.close() fi=open(fvertex) fi.next() # comment # beta, Nvfl, nomv, nOm nom beta = float(fi.next().split()[0]) fi.close() print 'beta=', beta print 'fQlist=', fQlist fileC0 = 'Chi0pp.dat' fileGpm = 'Gpm.dat' fileGmm = 'Gmm.dat' (kps, BS, BSI) = ReadKlist(fQlist,True) k_index = K_index(BSI,kps) nkp = len(kps) GammaPM = loadtxt(fileGpm) # format is (NQ, Norb**4) GammaMM = loadtxt(fileGmm) # format is (NQ, Norb**4) Chi0PP = loadtxt(fileC0) # format is (NQ, Norb**4) if shape(GammaPM)[0]!=nkp: print 'len('+fileGpm+') should be nkp, but is not compatible with '+fQlist if shape(GammaMM)[0]!=nkp: print 'len('+fileGmm+') should be nkp, but is not compatible with '+fQlist if shape(Chi0PP)[0]!=nkp: print 'len('+fileC0+') should be nkp, but is not compatible with '+fQlist n4 = shape(GammaPM)[1] norb = int(sqrt(sqrt(n4))) print 'norb=', norb GammaPM = GammaPM.reshape((nkp,norb,norb,norb,norb)) GammaMM = GammaMM.reshape((nkp,norb,norb,norb,norb)) Chi0PP = Chi0PP.reshape((nkp,norb,norb,norb,norb)) print 'shape(GammaPM)=', shape(GammaPM) print 'shape(GammaMM)=', shape(GammaMM) print 'shape(Chi0PP)=', shape(Chi0PP) BCS=zeros((nkp*norb*norb,nkp*norb*norb),dtype=float) chi0=zeros((norb*norb, norb*norb), dtype=float) Gamma=zeros((norb*norb, norb*norb), dtype=float) for ik1 in range(nkp): print 'ik=', ik1 for ik2 in range(nkp): k1 = kps[ik1][:3] k2 = kps[ik2][:3] ik2mk1 = k_index(k2-k1) ik1pk2 = k_index(k1+k2) imk2mk1 = k_index(-k1-k2) support_code=""" #line 78 "BCS.py" int findex3(int i1, int i2, int i3, int n1, int n2, int n3){ return (i1*n2+i2)*n3+i3; } int findex2(int i1, int i2, int n1, int n2){ return i1*n2+i2; } """ code=""" #line 162 "BCS.py" for (int i1=0; i1<norb; i1++){ for (int i2=0; i2<norb; i2++){ for (int i3=0; i3<norb; i3++){ for (int i4=0; i4<norb; i4++){ int i1i2 = findex2(i1,i2,norb,norb); int i3i4 = findex2(i3,i4,norb,norb); chi0(i1i2,i3i4) = Chi0PP(ik2,i1,i2,i3,i4); //Gamma(i1i2,i3i4) = 0.5*(GammaPM(ik1pk2,i3,i1,i2,i4)+GammaMM(ik2mk1,i4,i1,i2,i3)); Gamma(i1i2,i3i4) = 0.5*(GammaPM(ik2mk1,i3,i1,i2,i4)+GammaMM(imk2mk1,i4,i1,i2,i3)); } } } } """ weave.inline(code, ['chi0','Gamma','norb','GammaPM','GammaMM','ik2','ik2mk1','ik1pk2','imk2mk1','Chi0PP'],support_code=support_code,type_converters=weave.converters.blitz, compiler='gcc') GammaChi0 = dot(Gamma, chi0) code=""" #line 182 "BCS.py" for (int i1=0; i1<norb; i1++){ for (int i2=0; i2<norb; i2++){ for (int i3=0; i3<norb; i3++){ for (int i4=0; i4<norb; i4++){ int index1 = findex3(ik1,i1,i2, nkp,norb,norb); int index2 = findex3(ik2,i3,i4, nkp,norb,norb); int i1i2 = findex2(i1,i2,norb,norb); int i3i4 = findex2(i3,i4,norb,norb); BCS(index1,index2) = -GammaChi0(i1i2,i3i4)/(nkp); } } } } """ weave.inline(code, ['BCS','GammaChi0','norb','nkp','ik1','ik2'],support_code=support_code,type_converters=weave.converters.blitz, compiler='gcc') #print 'Diff=', sum(abs(transpose(BCS)-BCS),axis=None) print 'Now diagonalizing matrix of size ', shape(BCS) evalues,vector = linalg.eig(BCS) aevals = real(evalues.real) ind = range(len(aevals)) ind = sorted(ind, key=lambda i: aevals[i]) for i in range(len(ind)): print i, evalues[ind[i]], vector[:,ind[i]] for i in range(-1,-6,-1): gs=zeros((nkp,norb*norb),dtype=complex) for ik in range(nkp): for i1 in range(norb): for i2 in range(norb): gs[ik,findex2(i1,i2,norb,norb)]=vector[findex3(ik,i1,i2, nkp,norb,norb),ind[i]] savetxt('gs_symmetryr.'+str(abs(i)), real(gs)) savetxt('gs_symmetryi.'+str(abs(i)), imag(gs))
python
"""Hacking, by Al Sweigart [email protected] The hacking mini-game from "Fallout 3". Find out which seven-letter word is the password by using clues each guess gives you.""" __version__ = 1 import random, sys # Setup the constants: # The "filler" characters for the board. GARBAGE_CHARS = '~!@#$%^&*()_+-={}[]|;:,.<>?/\\' # Load the WORDS list from a text file that has 7-letter words. with open('sevenletterwords.txt') as dictionaryFile: WORDS = dictionaryFile.readlines() for i in range(len(WORDS)): WORDS[i] = WORDS[i].strip().upper() def main(): """Run a single game of Hacking.""" print('''HACKING MINIGAME By Al Sweigart [email protected] ''') gameWords = getWords() gameBoard = getBoard(gameWords) secretPassword = random.choice(gameWords) print('Find the password in the computer\'s memory:') print(gameBoard) for triesRemaining in range(4, 0, -1): playerMove = getPlayerMove(gameWords, triesRemaining) if playerMove == secretPassword: print('A C C E S S G R A N T E D') return else: numMatches = numMatchingLetters(secretPassword, playerMove) print('Access Denied ({}/7 correct)'.format(numMatches)) print('Out of tries. Secret password was {}.'.format(secretPassword)) def getBoard(words): """Return a string representing the "computer memory".""" # Pick which lines contain words: linesWithWords = random.sample(range(16 * 2), len(words)) # The starting memory address (this is also cosmetic). memoryAddress = 16 * random.randint(0, 4000) # board = [] nextWord = 0 for i in range(16): leftLine = '' rightLine = '' for j in range(16): leftLine += random.choice(GARBAGE_CHARS) rightLine += random.choice(GARBAGE_CHARS) if i in linesWithWords: insertionIndex = random.randint(0, 9) leftLine = leftLine[:insertionIndex] + words[nextWord] + leftLine[insertionIndex + 7:] nextWord += 1 if i + 16 in linesWithWords: insertionIndex = random.randint(0, 9) rightLine = rightLine[:insertionIndex] + words[nextWord] + rightLine[insertionIndex + 7:] nextWord += 1 board.append('0x' + hex(memoryAddress)[2:].zfill(4) + ' ' + leftLine + ' ' + '0x' + hex(memoryAddress + (16*16))[2:].zfill(4) + ' ' + rightLine) memoryAddress += 16 # Each string in `board` is joined into one large string to return: return '\n'.join(board) def getPlayerMove(words, tries): """Let the player enter a password guess.""" while True: print('Enter password: ({} tries remaining)'.format(tries)) move = input().upper() if move in words: return move print('That is not one of the possible passwords listed above.') def numMatchingLetters(word1, word2): """Returns the number of matching letters between these two words.""" matches = 0 for i in range(len(word1)): if word1[i] == word2[i]: matches += 1 return matches def getOneWordExcept(blocklist=None): """Returns a random word from WORDS that isn't in blocklist.""" if blocklist == None: blocklist = [] while True: randomWord = random.choice(WORDS) if randomWord not in blocklist: return randomWord def getWords(): """Return the words that could possibly be the password. To make the game fair, we want to only have at most 2 words that have 0 letters in common with the secret password.""" secretPassword = random.choice(WORDS) words = [secretPassword] # Find two words more that have zero matching letters. # `< 3` because the secret password is already in `words`. while len(words) < 3: randomWord = getOneWordExcept(words) if numMatchingLetters(secretPassword, randomWord) == 0: words.append(randomWord) # Find two words that have 3 matching letters (but give up at 500 # tries if not enough can be found). 
for i in range(500): if len(words) == 5: break randomWord = getOneWordExcept(words) if numMatchingLetters(secretPassword, randomWord) == 3: words.append(randomWord) # Find seven words that have at least one matching letter (but give # up at 500 tries if not enough can be found). for i in range(500): if len(words) == 12: break randomWord = getOneWordExcept(words) if numMatchingLetters(secretPassword, randomWord) != 0: words.append(randomWord) # Add any random words needed to get 12 words total. words.extend(random.sample(WORDS, 12 - len(words))) assert len(words) == 12 return words # If this program was run (instead of imported), run the game: if __name__ == '__main__': try: main() except KeyboardInterrupt: sys.exit() # When Ctrl-C is pressed, end the program.
python
class Settings:
    database_location = "./db/instapy.db"
    browser_location = "./assets/chromedriver"
python
""" Luxafor abstracted interface """ import time from .api import API from .constants import ( LED_FLAG_BOTTOM, LED_FLAG_MIDDLE, LED_FLAG_TOP, LED_POLE_BOTTOM, LED_POLE_MIDDLE, LED_POLE_TOP ) LEDS = [ ['LED_FLAG_TOP', 1, LED_FLAG_TOP], ['LED_FLAG_MIDDLE', 2, LED_FLAG_MIDDLE], ['LED_FLAG_BOTTOM', 3, LED_FLAG_BOTTOM], ['LED_POLE_TOP', 4, LED_POLE_TOP], ['LED_POLE_MIDDLE', 5, LED_POLE_MIDDLE], ['LED_POLE_BOTTOM', 6, LED_POLE_BOTTOM], ] class Luxafor(object): def __init__(self, api=API()): self.api = api self.led = {} self.reset() self.push() def reset(self): for row in LEDS: self.led[row[1]] = (0, 0, 0) def _set_by_part(self, rgb, part): for row in LEDS: if part in row[0]: self.led[row[1]] = rgb def set_flag(self, rgb): self._set_by_part(rgb, 'FLAG') def set_pole(self, rgb): self._set_by_part(rgb, 'POLE') def set_top(self, rgb): self._set_by_part(rgb, 'TOP') def set_middle(self, rgb): self._set_by_part(rgb, 'MIDDLE') def set_bottom(self, rgb): self._set_by_part(rgb, 'BOTTOM') def set_leds(self, rgb, leds=None): if not leds: leds = [] if not isinstance(leds, (list, tuple)): leds = [leds] for led in leds: self.led[led] = rgb def set_all(self, rgb): self._set_by_part(rgb, 'LED') def push(self, delta_only=False): if not delta_only: # Don't cut any corners, just push what it is. for index, rgb in self.led.items(): index -= 1 api_led_id = LEDS[index][2] self.api.mode_colour(rgb, api_led_id) return # No change # - Just return # All the same # - set led-id to all # Only flag changed to the same colour # - set led-id to flag # Only pole changed to the same colour # - set led-id to pole # Only set led-id's that have changed def play_sequence(self, sequence): for leds, rgb, wait_time in sequence: self.set_leds(rgb, leds) if wait_time > 0: self.push() time.sleep(wait_time) self.reset() self.push()
python
student_scores = {
    "Harry": 81,
    "Ron": 78,
    "Hermione": 99,
    "Draco": 74,
    "Neville": 62,
}

# TODO-1: Create an empty dictionary called student_grades.
student_grades = {}

# TODO-2: Write your code below to add the grades to student_grades.👇
for student_name in student_scores:
    score = student_scores[student_name]
    if 91 <= score <= 100:
        student_grades[student_name] = 'Outstanding'
    elif 81 <= score <= 90:
        student_grades[student_name] = 'Exceeds Expectations'
    elif 71 <= score <= 80:
        student_grades[student_name] = 'Acceptable'
    else:
        student_grades[student_name] = 'Fail'

print(student_grades)
python
import os
import time


def get_exec_out(sxcute_str):
    out_list = os.popen(sxcute_str).readlines()
    return out_list


if __name__ == '__main__':
    excute_str = 'nvidia-smi'
    out_list = get_exec_out(excute_str)
    # print(out_list)
    for oo in out_list:
        if oo.find('python') != -1:
            # split() splits on whitespace by default and drops empty strings,
            # leaving only the useful fields.
            proc_list = oo.split()
            pid = proc_list[2].strip()
            kill_str = 'kill -9 ' + pid
            print(kill_str)
            time.sleep(0.3)
            os.system(kill_str)
python
bind = ["0.0.0.0:8000"]
workers = 4
threads = 2
max_requests = 10000
max_requests_jitter = 100
accesslog = "-"
errorlog = "-"
limit_request_line = 0
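# Usage sketch (the application module is a placeholder, not from the original
# file): with this saved as gunicorn.conf.py, the server would typically be
# started with something like
#   gunicorn -c gunicorn.conf.py myproject.wsgi:application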
python
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions

import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore")

    # Register nbextension
    def _jupyter_nbextension_paths():
        return [{
            'section': 'notebook',
            'src': 'static',
            'dest': 'nglview-js-widgets',
            'require': 'nglview-js-widgets/extension'
        }]

    # TODO: do not use import *
    # interface
    from .config import BACKENDS
    from .widget import NGLWidget, write_html
    from .base_adaptor import *
    from .adaptor import *
    from .show import *
    from . import datafiles

    # utils
    from .utils import widget_utils, js_utils

    # for doc
    from . import widget_box, widget, adaptor, show

__all__ = ['NGLWidget', 'write_html'] + widget.__all__ + adaptor.__all__ + show.__all__
python
# zwei 12/16/2013
# accumulate generator
def group_iter(iterator, n):
    # print(iterator)
    accumulator = []
    for item in iterator:
        accumulator.append(item)
        if len(accumulator) == n:
            yield accumulator
            accumulator = []

    if len(accumulator) != 0:
        yield accumulator


ll = ["w", "c", "g", "h", "z"]
for i in group_iter(ll, 3):
    print(i)
python
from typing import List from cloudrail.knowledge.context.aws.ec2.security_group import SecurityGroup from cloudrail.knowledge.context.aws.networking_config.network_configuration import NetworkConfiguration from cloudrail.knowledge.context.aws.networking_config.network_entity import NetworkEntity from cloudrail.knowledge.context.aws.service_name import AwsServiceName class DirectoryService(NetworkEntity): """ Attributes: name: The name of the Directory Service. arn: The ARN of the service. vpc_id: The VPC the Directory Service is deployed in. directory_type: The directory's type. vpc_config: The network configuration of the Directory Service. security_group_controller: The Security Group used with this service, may be Cloudrail-generated in case only the rules are defined and no specific SG is configured. """ def __init__(self, account: str, region: str, name: str, directory_id: str, vpc_id: str, directory_type: str, vpc_config: NetworkConfiguration): super().__init__(name, account, region, AwsServiceName.AWS_DIRECTORY_SERVICE_DIRECTORY) self.name: str = name self.directory_id: str = directory_id self.vpc_id: str = vpc_id self.directory_type: str = directory_type self.vpc_config: NetworkConfiguration = vpc_config if self.account: self.arn: str = f'arn:aws:clouddirectory:{self.region}:{self.account}:directory/{self.directory_id}' else: self.arn = None self.security_group_controller: SecurityGroup = None def get_keys(self) -> List[str]: return [self.arn] def get_id(self) -> str: return self.directory_id def get_arn(self) -> str: return self.arn def get_name(self) -> str: return self.name def get_all_network_configurations(self) -> List[NetworkConfiguration]: return [NetworkConfiguration(self.vpc_config.assign_public_ip, self.vpc_config.security_groups_ids, self.vpc_config.subnet_list_ids)] def get_cloud_resource_url(self) -> str: return '{0}directoryservicev2/home?region={1}#!/directories/{2}'\ .format(self.AWS_CONSOLE_URL, self.region, self.directory_id) def get_type(self, is_plural: bool = False) -> str: if not is_plural: return 'Directory' else: return 'Directories' @property def is_tagable(self) -> bool: return True
python
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class XsdkExamples(CMakePackage):
    """XSDK Examples show usage of libraries in the XSDK package."""

    homepage = "http://xsdk.info"
    url = "http://github.com/xsdk-project/xsdk-examples/xsdk-examples-0.1.0.tar.gz"

    maintainers = ['balos1', 'luszczek']

    version('0.1.0', '8a2561d48bea92cdecf16e428f876f30')

    variant('cuda', default=False, description='Enable CUDA dependent packages')

    depends_on('xsdk@develop', when='@develop')
    depends_on('xsdk@0.1.0', when='@0.1.0')
    depends_on('mpi')

    def cmake_args(self):
        spec = self.spec
        args = [
            '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
            '-DMPI_DIR=%s' % spec['mpi'].prefix,
            '-DSUNDIALS_DIR=%s' % spec['sundials'].prefix,
            '-DPETSC_DIR=%s' % spec['petsc'].prefix,
            '-DPETSC_INCLUDE_DIR=%s' % spec['petsc'].prefix.include,
            '-DPETSC_LIBRARY_DIR=%s' % spec['petsc'].prefix.lib,
            '-DSUPERLUDIST_INCLUDE_DIR=%s' % spec['superlu-dist'].prefix.include,
            '-DSUPERLUDIST_LIBRARY_DIR=%s' % spec['superlu-dist'].prefix.lib,
        ]
        if 'trilinos' in spec:
            args.extend([
                '-DTRILINOS_DIR:PATH=%s' % spec['trilinos'].prefix,
            ])
        return args
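# Assumed usage: once this package file is registered in a Spack repository,
# the examples would be built with a command along the lines of
#   spack install xsdk-examples@0.1.0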
python
import pytest

from mold.parser import TemplateSyntaxError, parse
from mold.tokenizer import tokenize

from .common import load_fixture


def test_alltags():
    filename, contents = load_fixture("alltags")
    assert list(parse(tokenize(filename, contents)))


def test_unexpected_end():
    filename, contents = load_fixture("unexpected_end")
    with pytest.raises(TemplateSyntaxError):
        list(parse(tokenize(filename, contents)))


def test_missing_end():
    filename, contents = load_fixture("missing_end")
    with pytest.raises(TemplateSyntaxError):
        list(parse(tokenize(filename, contents)))
python
#!/usr/bin/env python """ Create factor graphs for LQR control Author: Gerry Chen, Yetong Zhang, and Frank Dellaert """ import gtsam import numpy as np import matplotlib.pyplot as plt from dynamics_lti import create_lti_fg, plot_trajectory, solve_lti_fg def add_lqr_costs_fg(graph, X, U, Q, R, x_goal=np.array([])): '''Adds LQR quadratic costs to states and controls in a factor graph Arguments: graph: a factor graph describing system dynamics X: a list of keys for the states U: a list of keys for the controls Q: nxn state cost matrix R: pxp control cost matrix x_goal: desired goal state (may be n-vector or Txn) Returns: graph: linear factor graph of the LQR problem X: keys for the states U: keys for the controls ''' T = len(X) n = np.size(Q, 0) # dimension of state space p = np.size(R, 0) # dimension of control space # condition x_goal if x_goal.size == 0: x_goal = np.zeros((len(X), n)) if (x_goal.size == n and np.issubdtype(x_goal[0], np.number)): x_goal = np.repeat(np.reshape(x_goal, (1, n)), T, axis=0) if x_goal.shape != (len(X), n): raise ValueError('Goal position array is not the right shape, must either be n-vector or'+ ' (num_time_steps, n)') # noises q_noise = gtsam.dynamic_cast_noiseModel_Diagonal_noiseModel_Gaussian( gtsam.noiseModel_Gaussian.Information(Q)) r_noise = gtsam.dynamic_cast_noiseModel_Diagonal_noiseModel_Gaussian( gtsam.noiseModel_Gaussian.Information(R)) # note: GTSAM 4.0.2 python wrapper doesn't have 'Information' # wrapper, use this instead if you are not on develop branch: # `gtsam.noiseModel_Gaussian.SqrtInformation(np.sqrt(Q)))` # set cost functions as unary factors for i, x in enumerate(X): graph.add(x, np.eye(n), x_goal[i, :], q_noise) for u in U: graph.add(u, np.eye(p), np.array([0.]), r_noise) return graph, X, U def create_lqr_fg(A, B, Q, R, X0=np.array([0., 0.]), num_time_steps=500, x_goal=np.array([0., 0.])): '''Creates a factor graph for solving a discrete, finite horizon LQR problem given system dynamics in state space representation. Arguments: A: nxn state transition matrix B: nxp control input matrix Q: nxn state cost matrix R: pxp control cost matrix X0: initial state (n-vector) num_time_steps: number of time steps x_goal: desired goal state (may be n-vector or Txn) Returns: graph: linear factor graph of the LQR problem X: keys for the states U: keys for the controls ''' graph, X, U = create_lti_fg(A, B, X0=X0, num_time_steps=num_time_steps) graph, X, U = add_lqr_costs_fg(graph, X, U, Q, R, x_goal=x_goal) return graph, X, U def solve_lqr_fg(graph, X, U): '''Solves an LQR problem given in factor graph form. Arguments: graph: a factor graph X: a list of keys for the states U: a list of keys for the controls toPlot: bool whether or not you want to visualize results Returns: x_sol: an array of states u_sol: an array of controls ''' return solve_lti_fg(graph, X, U) def solve_lqr(A, B, Q, R, X0=np.array([0., 0.]), num_time_steps=500, x_goal=np.array([0., 0.])): '''Solves a discrete, finite horizon LQR problem given system dynamics in state space representation. 
Arguments: A: nxn state transition matrix B: nxp control input matrix Q: nxn state cost matrix R: pxp control cost matrix X0: initial state (n-vector) num_time_steps: number of time steps x_goal: desired goal state (may be n-vector or Txn) Returns: x_sol: an array of states u_sol: an array of controls ''' graph, X, U = create_lqr_fg(A, B, Q, R, X0, num_time_steps, x_goal) return solve_lqr_fg(graph, X, U) def get_return_cost(graph, key): '''Returns the value function matrix at variable `key` given a graph which goes up and including `key`, but no further (i.e. all time steps after `key` have already been eliminated). Does so by aggregating all unary factors on `key`. If value function is x^TPx, then this returns P. "Return Cost" aka "Cost-to-go" aka "Value Function". Arguments: graph: factor graph in LTI form key: key in the factor graph for which we want to obtain the return cost Returns: return_cost: return cost, an nxn array where `n` is dimension of `key` ''' new_fg = gtsam.GaussianFactorGraph() for i in range(graph.size()): # loop through all factors f = graph.at(i) if (f.keys().size() == 1) and (f.keys().at(0) == key): # collect unary factors on `key` new_fg.push_back(f) sol_end = new_fg.eliminateSequential() return sol_end.back().information() def get_k_and_p(graph, X, U): '''Finds optimal control law given by $u=Kx$ and value function $Vx^2$ aka cost-to-go which corresponds to solutions to the algebraic, finite horizon Ricatti Equation. K is Extracted from the bayes net and V is extracted by incrementally eliminating the factor graph. Arguments: graph: factor graph containing factor graph in LQR form X: list of state Keys U: list of control Keys Returns: K: optimal control matrix, shape (T-1, 1) V: value function, shape (T, 1) TODO(gerry): support n-dimensional state space ''' T = len(X) # Find K and V by using bayes net solution marginalized_fg = graph K = np.zeros((T-1, 1)) P = np.zeros((T, 1)) P[-1] = get_return_cost(marginalized_fg, X[-1]) for i in range(len(U)-2, -1, -1): # traverse backwards in time ordering = gtsam.Ordering() ordering.push_back(X[i+1]) ordering.push_back(U[i]) bayes_net, marginalized_fg = marginalized_fg.eliminatePartialSequential(ordering) P[i] = get_return_cost(marginalized_fg, X[i]) K[i] = bayes_net.back().S() # note: R is 1 return K, P def main(): '''Solves open loop LQR problem using factor graph for a spring-mass system ''' # Simulation setup del_t = 0.005 tf = 5 num_time_steps = int(tf / del_t) t = np.arange(num_time_steps)*del_t # Problem setup K = 1 m = .5 X0 = np.array([1, 1], dtype=np.float) x_goal = np.array([0, 0], dtype=np.float) Q = np.eye(2) R = np.eye(1) # Matrices A = np.array([[1., del_t], [-K/m*del_t, 1.]]) B = np.array([[0.], [del_t/m]]) # solve x_sol, u_sol = solve_lqr(A, B, Q, R, X0, num_time_steps=num_time_steps, x_goal=x_goal) # plot plot_trajectory(t, x_sol, u_sol, state_labels=['position', 'velocity']) plt.suptitle('LQR control of a spring-mass system by GTSAM') plt.show() if __name__ == '__main__': main()
python
# coding: UTF-8 # Install XIMEA software package # Copy 'XIMEA\API\Python\v3\ximea' to 'PythonXX\Lib' from ximea import xiapi import cv2 import numpy as np # Connect to camera cam = xiapi.Camera() cam.open_device_by_SN('XXXXXXXX') # Enter serial number of your Ximea camera # Configuration cam.set_exposure(100000) cam.set_gain(1) cam.set_gammaY(1) # Allocate memory for ximea image img = xiapi.Image() # Start acquisition cam.start_acquisition() # Preview output from camera key = -1 while key == -1: cam.get_image(img) cvimg = img.get_image_data_numpy() cv2.imshow('camera', cvimg) key = cv2.waitKey(1) cv2.namedWindow('pattern', cv2.WINDOW_NORMAL) cv2.moveWindow('pattern', 1920, 0) cv2.setWindowProperty( 'pattern', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) key = -1 while key == -1: cam.get_image(img) cvimg = img.get_image_data_numpy() cv2.imshow('camera', cvimg) key = cv2.waitKey(1) # Preview output from camera for v in range(0, 256, 5): pat = v*np.ones((1080, 1920), np.uint8) cv2.imshow('pattern', pat) cv2.waitKey(400) cam.get_image(img) cvimg = img.get_image_data_numpy() cv2.imwrite('gamma_' + str(v) + '.png', cvimg) # Terminate cam.stop_acquisition() cam.close_device()
python
import pytest from sqlalchemy.orm import Session from connexion_sql_utils import BaseMixin, BaseMixinABC, get, event_func, \ to_json from .conftest import Foo import json def test_save(): foo = Foo(bar='some data') foo.save() assert Foo.query_by(bar='some data').first() is not None foo.id = 'bad id' with pytest.raises(Exception): foo.save() def test_save_with_session(): foo = Foo(bar='a bar') with Foo.session_scope() as session: foo.save(session=session) session.commit() assert Foo.query_by(bar='a bar').first() is not None def test_update(): foo = Foo(bar='data') assert foo.bar == 'data' foo.update(bar='different data') assert foo.bar == 'different data' def test_update_with_session(): foo = Foo.query_by().first() old_bar = foo.bar new_bar = '{}-new'.format(old_bar) with Foo.session_scope() as session: foo.update(bar=new_bar, session=session) assert foo.bar == new_bar session.commit() # check that changes persist loaded = Foo.query_by(bar=new_bar).first() assert loaded is not None def test_get_id(): foo = json.loads(next(iter(get(Foo, limit=1)))) assert foo['id'] is not None queried = Foo.get_id(foo['id']) assert queried.id == foo['id'] assert Foo.get_id(1000) is None def test_query_by(): query = Foo.query_by().all() for q in query: assert isinstance(q, Foo) def test_query_by_with_session(): with Foo.session_scope() as session: query = Foo.query_by(session=session).all() for q in query: assert isinstance(q, Foo) def test_event_func_fails_with_no_event_name(): with pytest.raises(TypeError): @event_func() def oops(): pass def test_quote_if_string(): foo = Foo(bar='something') assert "'something'" in repr(foo) foo = Foo(bar=1) assert str(1) in repr(foo) def test_to_json_funcs(): class JSON(BaseMixin): def __init__(self, data=None, other=None): self.data = data self.other = other @to_json('data', 'other') def hello_world(self, val): return 'hello world' j = JSON(data='data', other='other') assert j.data == 'data' assert j.other == 'other' jl = json.loads(j.dump()) assert jl['data'] == 'hello world' assert jl['other'] == 'hello world' def test_session_scope(): with Foo.session_scope() as s: assert isinstance(s, Session) foo = Foo(bar='custom data') s.add(foo) saved = Foo.query_by(bar='custom data') assert saved is not None with pytest.raises(Exception): with Foo.session_scope() as s: foo = s.query(Foo).first() foo.id = 'invalid' s.add(foo) s.commit() def test_dump(): foo = Foo(bar='data') # baz is added with the dump_method decorator. dumped = foo.dump() assert isinstance(dumped, str) assert json.loads(foo.dump()) == {"bar": "data", "baz": "bang"} assert json.loads(str(foo)) == {"bar": "data", "baz": "bang"} dumped = foo.dump(_dict=True) assert isinstance(dumped, dict) assert dumped == {"bar": "data", "baz": "bang"} def test_delete(): foo = Foo(bar='my data') foo.save() assert foo.id is not None id = foo.id foo.delete() with Foo.session_scope() as s: q = s.query(Foo).filter(Foo.id == id).first() assert q is None def test_delete_with_session(): foo = Foo(bar='delete-ses') foo.save() with Foo.session_scope() as session: foo.delete(session=session) session.commit() # check delete persists assert Foo.query_by(bar='delete-ses').first() is None def test_session_scope_fails_with_invalid_subclass(): class Invalid(BaseMixin): pass assert not issubclass(Invalid, BaseMixinABC) with pytest.raises(TypeError): with Invalid.session_scope(): pass def test_dump_method(): foo = Foo.query_by().first() loaded = json.loads(foo.dump()) assert 'baz' in loaded assert loaded['baz'] == 'bang'
python
from platform import node import torch import torch.nn as nn from torch.nn import functional as F from torch.nn.modules import padding from torch.nn.modules.normalization import LayerNorm from models.modules import BiMatchingNet from models.treeGNN import TreeGNN import pdb class BranT(nn.Module): def __init__(self, var_dim, node_dim, mip_dim, hidden_size=256, dropout_rate=0.1, nhead=1, num_encoder_layers=1, dim_feedforward=256, tree_gate=True): super(BranT, self).__init__() print('BranT cat') # define the dimensionality of the features and the hidden states self.var_dim = var_dim self.node_dim = node_dim self.mip_dim = mip_dim self.hidden_size = hidden_size self.tree_gate = tree_gate # define CandidateEmbeddingNet self.CandidateEmbeddingNet = [LayerNorm(var_dim), nn.Linear(var_dim, hidden_size)] self.CandidateEmbeddingNet = nn.Sequential(*self.CandidateEmbeddingNet) self.TreeEmbeddingNet = [LayerNorm(node_dim + mip_dim), nn.Linear(node_dim + mip_dim, hidden_size)] self.TreeEmbeddingNet = nn.Sequential(*self.TreeEmbeddingNet) self.globalEmbeddingNet = [nn.Linear(hidden_size * 2, hidden_size)] self.globalEmbeddingNet = nn.Sequential(*self.globalEmbeddingNet) encoder_layer = nn.TransformerEncoderLayer(d_model=hidden_size, nhead=nhead, dim_feedforward=dim_feedforward, activation='gelu') encoder_norm = LayerNorm(hidden_size) self.transformer = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) self.linear1 = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(dropout_rate) self.linear2 = nn.Linear(hidden_size, 1) self.classifier = nn.Linear(hidden_size, 1) # do the Xavier initialization for the linear layers for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(tensor=m.weight, gain=nn.init.calculate_gain('relu')) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, cands_state_mat, padding_mask, node_state=None, mip_state=None): ''' dim: cands_state_mat: N * S * E padding_mask: N * S node_state: N * E mip_state: N * E ''' # go through the CandidateEmbeddingNet cands_state_mat = self.CandidateEmbeddingNet(cands_state_mat) tree_state = self.TreeEmbeddingNet(torch.cat((node_state, mip_state), dim=1)) if self.tree_gate: repeat_count = cands_state_mat.size(1) cands_state_mat = torch.cat((cands_state_mat, tree_state.unsqueeze(1).repeat(1, repeat_count, 1)), dim=2) cands_state_mat = self.globalEmbeddingNet(cands_state_mat) cands_state_mat = cands_state_mat.transpose(0, 1) # S N E padding_mask = (padding_mask == 1) cands_embedding = self.transformer(cands_state_mat, src_key_padding_mask=padding_mask) cands_embedding = cands_embedding.transpose(0, 1) output = self.linear1(cands_embedding) output = self.dropout(output) output = self.linear2(output) output = torch.squeeze(output, dim=-1) output.masked_fill_( padding_mask, float('-inf') ) raw_output = self.classifier(cands_embedding).squeeze(dim=-1) raw_output.masked_fill_( padding_mask, float('-inf') ) return output, raw_output class BranchFormer(nn.Module): def __init__(self, var_dim, node_dim, mip_dim, hidden_size=256, dropout_rate=0.1, nhead=1, num_encoder_layers=1, dim_feedforward=256, tree_gate=True, graph=False): super(BranchFormer, self).__init__() print('Bidirection Matching G+l_ori') # define the dimensionality of the features and the hidden states self.var_dim = var_dim self.node_dim = node_dim self.mip_dim = mip_dim self.hidden_size = 
hidden_size self.tree_gate = tree_gate self.graph = graph # define CandidateEmbeddingNet self.CandidateEmbeddingNet = [LayerNorm(var_dim), nn.Linear(var_dim, hidden_size)] self.CandidateEmbeddingNet = nn.Sequential(*self.CandidateEmbeddingNet) self.TreeEmbeddingNet = [LayerNorm(node_dim + mip_dim), nn.Linear(node_dim + mip_dim, hidden_size)] self.TreeEmbeddingNet = nn.Sequential(*self.TreeEmbeddingNet) self.globalEmbeddingNet = [nn.Linear(hidden_size * 2, hidden_size)] self.globalEmbeddingNet = nn.Sequential(*self.globalEmbeddingNet) encoder_layer = nn.TransformerEncoderLayer(d_model=hidden_size, nhead=nhead, dim_feedforward=dim_feedforward, activation='gelu') encoder_norm = LayerNorm(hidden_size) self.transformer = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) self.BiMatchingNet = BiMatchingNet(hidden_size) self.linear1 = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(dropout_rate) self.linear2 = nn.Linear(hidden_size, 1) if graph: self.BABSTreeGNNNet = TreeGNN(node_dim + mip_dim, hidden_size) self.BiMatchingNet2 = BiMatchingNet(hidden_size) self.linear3 = nn.Linear(hidden_size * 2, hidden_size) self.classifier = nn.Linear(hidden_size, 1) # do the Xavier initialization for the linear layers for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(tensor=m.weight, gain=nn.init.calculate_gain('relu')) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, cands_state_mat, padding_mask, node_state=None, mip_state=None, tree_batch=None): ''' dim: cands_state_mat: N * S * E padding_mask: N * S node_state: N * E mip_state: N * E ''' # pdb.set_trace() # go through the CandidateEmbeddingNet cands_state_mat = self.CandidateEmbeddingNet(cands_state_mat) # pdb.set_trace() tree_state = self.TreeEmbeddingNet(torch.cat((node_state, mip_state), dim=1)) if self.tree_gate: repeat_count = cands_state_mat.size(1) cands_state_mat = torch.cat((cands_state_mat, tree_state.unsqueeze(1).repeat(1, repeat_count, 1)), dim=2) cands_state_mat = self.globalEmbeddingNet(cands_state_mat) cands_state_mat = cands_state_mat.transpose(0, 1) # S N E padding_mask = (padding_mask == 1) cands_embedding = self.transformer(cands_state_mat, src_key_padding_mask=padding_mask) cands_embedding = cands_embedding.transpose(0, 1) # pdb.set_trace() if self.graph: tree_state_avg, _ = self.BABSTreeGNNNet(tree_batch) output = self.BiMatchingNet(tree_state_avg, cands_embedding, padding_mask) output2 = self.BiMatchingNet2(tree_state, cands_embedding, padding_mask) output = self.linear3(torch.cat((output, output2), dim=-1)) else: output = self.BiMatchingNet(tree_state, cands_embedding, padding_mask) output = self.linear1(output) output = self.dropout(output) output = self.linear2(output) output = torch.squeeze(output, dim=-1) output.masked_fill_( padding_mask, float('-inf') ) raw_output = self.classifier(cands_embedding).squeeze(dim=-1) raw_output.masked_fill_( padding_mask, float('-inf') ) return output, raw_output
python
import logging

from lib.amazon_properties import get_properties_compilers_and_libraries, get_specific_library_version_details

logger = logging.getLogger(__name__)
logger.level = 9


# def test_should_contain_some_compilers_and_libraries():
#     [_compilers, _libraries] = get_properties_compilers_and_libraries('c++', logger)
#     assert len(_compilers) > 0
#     assert len(_libraries) > 0


def test_should_have_staticliblink():
    [_compilers, _libraries] = get_properties_compilers_and_libraries('c++', logger)
    assert 'googletest' in _libraries
    assert len(_libraries['googletest']['staticliblink']) > 0
    assert _libraries['googletest']['staticliblink'][0] == 'gtestd'
    assert _libraries['googletest']['staticliblink'][1] == 'gmockd'


def test_googletest_should_have_versions():
    [_compilers, _libraries] = get_properties_compilers_and_libraries('c++', logger)
    assert 'googletest' in _libraries
    assert len(_libraries['googletest']['versionprops']) > 0
    assert _libraries['googletest']['versionprops']['110']['lookupversion'] == 'release-1.10.0'
    assert _libraries['googletest']['versionprops']['110']['version'] == '1.10.0'

    details = get_specific_library_version_details(_libraries, 'googletest', '1.10.0')
    assert details != False

    details = get_specific_library_version_details(_libraries, 'googletest', 'release-1.10.0')
    assert details != False


# def test_should_not_contain_g412():
#     [_compilers, _libraries] = get_properties_compilers_and_libraries('c++', logger)
#     assert not 'g412' in _compilers


# def test_should_not_contain_msvc():
#     [_compilers, _libraries] = get_properties_compilers_and_libraries('c++', logger)
#     assert not 'cl19_2015_u3_64' in _compilers


# def test_should_contain_gcc101():
#     [_compilers, _libraries] = get_properties_compilers_and_libraries('c++', logger)
#     assert 'g101' in _compilers


# def test_should_contain_clang800():
#     [_compilers, _libraries] = get_properties_compilers_and_libraries('c++', logger)
#     assert 'clang800' in _compilers


# def test_should_contain_optionsforclang800():
#     [_compilers, _libraries] = get_properties_compilers_and_libraries('c++', logger)
#     assert '--gcc-toolchain=/opt/compiler-explorer/gcc-8.3.0' in _compilers['clang800']['options']
python
class Cache(object):
    def __init__(self, capacity=-1):
        self.capacity = capacity
        self.cache = {}
        self.index = {}

    @property
    def size(self):
        return len(self.cache)

    @property
    def has_capacity(self):
        return (self.capacity == -1) or (self.capacity > len(self.cache))

    def set(self, key, value):
        if self.has_capacity:
            self.cache[key] = value
            self.index[id(value)] = key

    def get(self, key):
        return self.cache.get(key)

    def clear(self):
        self.cache.clear()
        self.index.clear()
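# Small usage sketch for the Cache above (illustrative only): with a capacity
# of 2, set() silently drops writes once two entries are stored.
if __name__ == '__main__':
    cache = Cache(capacity=2)
    cache.set('a', 1)
    cache.set('b', 2)
    cache.set('c', 3)           # ignored: the cache is already full
    print(cache.size)           # 2
    print(cache.get('c'))       # None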
python
from serpent.environment import Environment from serpent.input_controller import KeyboardKey from serpent.utilities import SerpentError import time import collections import numpy as np class StartRegionsEnvironment(Environment): def __init__(self, game_api=None, input_controller=None, episodes_per_startregions_track=5): super().__init__("COD Environment", game_api=game_api, input_controller=input_controller) self.episodes_per_startregions_track = episodes_per_startregions_track self.reset() @property def new_episode_data(self): return {} @property def end_episode_data(self): return {} def new_episode(self, maximum_steps=None, reset=False): self.reset_startregions_state() time.sleep(1) super().new_episode(maximum_steps=maximum_steps, reset=reset) def end_episode(self): super().end_episode() def reset(self): self.reset_startregions_state() super().reset() def reset_startregions_state(self): self.startregions_state = { "ammo_levels": False, "health_levels": False } def update_startregions_state(self, image): self.startregions_state["ammo_levels"] = self.game_api.parse_ammo(image) self.startregions_state["health_levels"] = self.game_api.get_health(image) return True
python
import asyncio import aiohttp import pickle import csv from bs4 import BeautifulSoup import re import argparse import sys import getpass import time def parse_arguments(): parser = argparse.ArgumentParser( description=( 'Descarga las paginas [START, FINISH) del foro de la facultad.\n' 'El tamanno default del batch es 10, tener cuidado con este parametro ' 'porque hacerlo muy grande puede hacer que bloqueen la cuenta.\n' 'Leer el readme para una descripcion mas detrallada de uso y requisitos.' 'Los archivos de salida se generan automaticamente y se llaman root_START-FINISH.tsv' 'y child_START-FINISH.tsv' ) ) parser.add_argument("start", metavar="START", help="primera pagina que se quiere bajar", type=int) parser.add_argument("finish", metavar="FINISH", help="ultima pagina que se quiere bajar", type=int) parser.add_argument("-b", "--batch_size", default=10, help="cantidad de paginas que se bajan a la vez, default 10", type=int) parser.add_argument("-l", "--login_data", help="un pickle con los datos del usuario para realizar la conexion, si se omite el script pide login") args = parser.parse_args() return args def extract_data(raw_html): """ Esta wea devuelve un diccionario y una lista. El diccionario tiene las weas que vamos a guardar del OP y la lista contiene diccionarios con la info que vamos a guardar en cada comentario hijo de la publicacion """ soup = BeautifulSoup(re.sub(r'>\s+<', '><', raw_html), features='html5lib') # para el OP raices = soup.find_all('div', class_='raiz') roots = [] for raiz in raices: temp = {} temp['id'] = raiz.attrs['id'].split('_')[1] temp['titulo'] = raiz.h1.getText(strip=True) temp['autor'] = ( raiz.find('a', class_='usuario').getText(strip=True) if raiz.find('a', class_='usuario') is not None else "NO_AUTHOR" ) temp['fecha'] = raiz.find('li', class_='fecha').getText(strip=True) temp['tema'] = raiz.find('li', class_='tema').a.getText(strip=True) # para sacar el texto de un comentario hay que eliminar la lista # de botones que tiene al final, como responder, padre, etc. comentario = raiz.find('div', class_='texto') # cuidado que esto modifica la sopa, el ul se borra definitivamente comentario.ul.decompose() text = ' '.join(comentario.stripped_strings) temp['mensaje'] = text if len(text) > 0 else 'NO_TEXT' temp['current_time'] = time.time() roots.append(temp) hijos = soup.find_all('div', class_='hijo') childs = [] for hijo in hijos: temp = {} temp['id'] = hijo.attrs['id'].split('_')[1] temp['id_th'] = hijo.attrs['class'][1][1:] temp['id_p'] = hijo.parent.attrs['id'].split('_')[1] temp['autor'] = ( hijo.find('a', class_='usuario').getText(strip=True) if hijo.find('a', class_='usuario') is not None else "NO_AUTHOR" ) temp['fecha'] = hijo.find('em').getText(strip=True) # mismos comentarios que arriba comentario = hijo.find('div', class_='texto') comentario.ul.decompose() text = ' '.join(comentario.stripped_strings) temp['mensaje'] = text if len(text) > 0 else 'NO_TEXT' temp['current_time'] = time.time() childs.append(temp) return roots, childs # async def fetch(session, url): # async with session.get(url) as response: # return await response.text() async def download_page(session, url, root_writer, child_writer): """ Esta funcion recibe la sesion (que deberia estar logueada), la url y una wea pa escribir en un archivo, baja la pagina y la escribe en el archivo. PUM que sorpresa, no me lo esperaba. 
""" async with session.get(url) as response: # por ahora voy a probar solo con example.com y me se donde esta el texto # print(f'\t{url}') roots, childs = extract_data(await response.text()) for root in roots: root_writer.writerow(root) for child in childs: child_writer.writerow(child) async def download_batch(session, batch, root_writer, child_writer): tasks = [] for i, url in enumerate(batch): if i is 0: print(f'\tPrimera url del batch: {url}') task = asyncio.ensure_future( download_page(session, url, root_writer, child_writer) ) tasks.append(task) await asyncio.gather(*tasks) async def download_all(batches, root_writer, child_writer, login_data): async with aiohttp.ClientSession() as session: # conectar a cuenta de ucursos aqui, si no se pasa un archivo # el script pide login # tengo mis datos escondidos, porque obvio if login_data: with open('user_data.pic', 'rb') as f: payload = pickle.load(f) else: payload = {} payload['username'] = input('Nombre de usuario: ') payload['password'] = getpass.getpass('Contrasenna (tranqui no se muestra): ') # es importante agregarle esto a la wea que se envia pa poder loguearse payload['servicio'] = 'ucursos' # payload['debug'] = 0 # esta wea es a logearse con el usuario de cada uno y mantener la sesion # abierta pa poder seguir SURFEANDO ucursos post_url = 'https://www.u-cursos.cl/upasaporte/adi' async with session.post(post_url, data=payload) as resp: print(f"Hola, {payload['username'].split('.')[0].capitalize()} !") print('Respuesta login: ', resp.status) print() assert resp.status == 200, 'diablos, deberia ser 200' for i, batch in enumerate(batches): print(f'Descargando batch {i}') await download_batch(session, batch, root_writer, child_writer) if __name__ == '__main__': args = parse_arguments() # print(args) # sys.exit() # N es la cantidad de paginas que se quiere descargar (el ultimo offset) N = args.finish - args.start # M es la cantidad de requests que se quieren hacer de una # WARNING: CUIDADO CON HACER ESTO MUY GRANDE, PUEDE QUEDAR LA CAGADA M = args.batch_size print(f'Cantidad total de requests: {N}') print(f'Cantidad de requests a la vez: {M}') print(f'Numero de batches: {(N + M - 1) // M}') print(f'\nAfirmense cabros...\n') # url base, los parentesis son pa puro quede mas bonito el codigo base_url = ( 'https://www.u-cursos.cl/ingenieria/2/foro_institucion/' '?id_tema=&offset={}' ) # base_url = 'https://example.com/{}' # esta wea vuelve un generator pa todas las url que queremos descargar, # si fuera un lista normal pesaria como 100kb lo que no es mucho pero # igual es sacrilegio batches = ( ( base_url.format(args.start + j) for j in range( i * M, (i + 1) * M if (i + 1) * M < N else N ) ) for i in range((N + M - 1) // M) ) # ahora empieza el mambo con I/O with open(f'root_{args.start}-{args.finish}.tsv', 'w') as f_root,\ open(f'child_{args.start}-{args.finish}.tsv', 'w') as f_child: root_fields = ['id', 'titulo', 'autor', 'fecha', 'tema', 'mensaje', 'current_time'] root_writer = csv.DictWriter( f_root, fieldnames=root_fields, delimiter='\t' ) # mejor no escribir el header, para que sea mas facil unir # los archivos usando cat # root_writer.writeheader() child_fields = ['id', 'id_th', 'id_p', 'autor', 'fecha', 'mensaje', 'current_time'] child_writer = csv.DictWriter( f_child, fieldnames=child_fields, delimiter='\t' ) # mismo comentario de mas arriba # child_writer.writeheader() asyncio.get_event_loop().run_until_complete( download_all(batches, root_writer, child_writer, args.login_data) ) print() print("Creo que termine, igual revisa que la 
cantidad de comentarios descargados tenga sentido")
python
from numpy import array,dot from numpy.linalg import inv from getopt import getopt import sys def calc_displacements(initial,final): icoord=parse_poscar(initial)[1] fcoord=parse_poscar(final)[1] disp=fcoord-icoord return disp def parse_poscar(ifile): with open(ifile, 'r') as file: lines=file.readlines() sf=float(lines[1]) latticevectors=[float(lines[i].split()[j])*sf for i in range(2,5) for j in range(3)] latticevectors=array(latticevectors).reshape(3,3) atomtypes=lines[5].split() atomnums=[int(i) for i in lines[6].split()] if 'Direct' in lines[7] or 'Cartesian' in lines[7]: start=8 mode=lines[7].split()[0] else: mode=lines[8].split()[0] start=9 seldyn=[''.join(lines[i].split()[-3:]) for i in range(start,sum(atomnums)+start)] coord=array([[float(lines[i].split()[j]) for j in range(3)] for i in range(start,sum(atomnums)+start)]) if mode!='Cartesian': for i in range(sum(atomnums)): for j in range(3): while coord[i][j]>1.0 or coord[i][j]<0.0: if coord[i][j]>1.0: coord[i][j]-=1.0 elif coord[i][j]<0.0: coord[i][j]+=1.0 coord[i]=dot(coord[i],latticevectors) #latticevectors formatted as a 3x3 array #coord holds the atomic coordinates with shape () try: return latticevectors, coord, atomtypes, atomnums, seldyn except NameError: return latticevectors, coord, atomtypes, atomnums def write_poscar(ofile, lv, coord, atomtypes, atomnums, **args): with open(ofile,'w') as file: if 'title' in args: file.write(str(args['title'])) file.write('\n1.0\n') for i in range(3): for j in range(3): file.write(str('{:<018f}'.format(lv[i][j]))) if j<2: file.write(' ') file.write('\n') for i in atomtypes: file.write(' '+str(i)) file.write('\n') for i in atomnums: file.write(' '+str(i)) file.write('\n') if 'seldyn' in args: file.write('Selective Dynamics\n') file.write('Direct\n') for i in range(len(coord)): coord[i]=dot(coord[i],inv(lv)) for i in range(len(coord)): for j in range(3): file.write(str('{:<018f}'.format(coord[i][j]))) if j<2: file.write(' ') if 'seldyn' in args: for j in range(3): file.write(' ') file.write(args['seldyn'][i][j]) file.write('\n') if __name__ == '__main__': short_opts='h' long_opts=['help'] try: initial=sys.argv[1] final=float(sys.argv[2]) except IndexError: print('missing required arguments. exiting...') sys.exit() try: opts,args=getopt(sys.argv[3:],short_opts,long_opts) except IndexError: print('error specifying optional arguments') sys.exit() for i,j in opts: if i in ['-h','--help']: print(''' help options: -h, --help calculates displacement vectors for each atom between final and initial structures initial structure is the first argument; final structure is the second argument ''') sys.exit() try: calc_displacements(initial,final) except NameError: print('incorrect specification of files. exiting...') sys.exit()
python
import math import numpy from sympy import Rational, gamma, prod class NSphereScheme: def __init__(self, name, dim, weights, points, degree, citation): self.name = name self.dim = dim self.degree = degree self.citation = citation if weights.dtype == numpy.float64: self.weights = weights else: assert weights.dtype in [numpy.dtype("O"), numpy.int64] self.weights = weights.astype(numpy.float64) self.weights_symbolic = weights if points.dtype == numpy.float64: self.points = points else: assert points.dtype in [numpy.dtype("O"), numpy.int64] self.points = points.astype(numpy.float64) self.points_symbolic = points return def integrate(self, f, center, radius, dot=numpy.dot): center = numpy.array(center) rr = numpy.multiply.outer(radius, self.points) rr = numpy.swapaxes(rr, 0, -2) ff = numpy.array(f((rr + center).T)) return numpy.array(radius) ** (self.dim - 1) * dot(ff, self.weights) def integrate_monomial_over_unit_nsphere(alpha, symbolic=False): """ Gerald B. Folland, How to Integrate a Polynomial over a Sphere, The American Mathematical Monthly, Vol. 108, No. 5 (May, 2001), pp. 446-448, <https://doi.org/10.2307/2695802>. """ if any(a % 2 == 1 for a in alpha): return 0 if symbolic: return 2 * ( prod([gamma(Rational(a + 1, 2)) for a in alpha]) / gamma(sum([Rational(a + 1, 2) for a in alpha])) ) # Use lgamma since other with ordinary gamma, numerator and denominator # might overflow. return 2 * math.exp( math.fsum([math.lgamma(0.5 * (a + 1)) for a in alpha]) - math.lgamma(math.fsum([0.5 * (a + 1) for a in alpha])) )
python
def findDecision(obj): #obj[0]: Coupon, obj[1]: Education, obj[2]: Occupation # {"feature": "Education", "instances": 23, "metric_value": 0.9986, "depth": 1} if obj[1]<=2: # {"feature": "Coupon", "instances": 16, "metric_value": 0.896, "depth": 2} if obj[0]<=3: # {"feature": "Occupation", "instances": 11, "metric_value": 0.994, "depth": 3} if obj[2]>3: return 'True' elif obj[2]<=3: return 'False' else: return 'False' elif obj[0]>3: return 'False' else: return 'False' elif obj[1]>2: # {"feature": "Coupon", "instances": 7, "metric_value": 0.5917, "depth": 2} if obj[0]<=2: return 'True' elif obj[0]>2: # {"feature": "Occupation", "instances": 3, "metric_value": 0.9183, "depth": 3} if obj[2]>6: return 'True' elif obj[2]<=6: return 'False' else: return 'False' else: return 'True' else: return 'True'
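# Usage sketch, assuming findDecision above is in scope; the feature vectors are
# made-up [Coupon, Education, Occupation] examples.
print(findDecision([3, 2, 5]))   # Education<=2, Coupon<=3, Occupation>3 -> 'True'
print(findDecision([4, 1, 2]))   # Education<=2, Coupon>3 -> 'False'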
python
# -*- coding: utf-8 -*- """Test to verify that the scheduled actions are properly executed.""" import os import test from datetime import datetime import pytz from celery.contrib.testing.worker import start_worker from django.conf import settings from django.contrib.auth import get_user_model from django.core import mail from ontask import OnTaskSharedState, tasks from ontask.core.celery import app from ontask.models import Action, ScheduledAction class ScheduledActionTaskTestCase(test.OnTaskTestCase): """Test the functions to execute through celery.""" fixtures = ['schedule_actions'] filename = os.path.join( settings.BASE_DIR(), 'ontask', 'fixtures', 'schedule_actions.sql' ) @classmethod def setUpClass(cls): super().setUpClass() app.loader.import_module('celery.contrib.testing.tasks') cls.celery_worker = start_worker(app) cls.celery_worker.__enter__() @classmethod def tearDownClass(cls): super().tearDownClass() cls.celery_worker.__exit__(None, None, None) def test_scheduled_email_action(self): """Create a scheduled send email action and execute it.""" user = get_user_model().objects.get(email='[email protected]') # User must exist self.assertIsNotNone(user, 'User [email protected] not found') action = Action.objects.get(name='send email') scheduled_item = ScheduledAction( user=user, name='send email action', action=action, execute=datetime.now(pytz.timezone(settings.TIME_ZONE)), status=ScheduledAction.STATUS_PENDING, item_column=action.workflow.columns.get(name='email'), payload={ 'subject': 'Email subject', 'cc_email': '', 'bcc_email': '', 'send_confirmation': False, 'track_read': False}) scheduled_item.save() # Execute the scheduler tasks.execute_scheduled_actions_task(True) scheduled_item.refresh_from_db() assert scheduled_item.status == ScheduledAction.STATUS_DONE assert len(mail.outbox) == 2 assert 'Hi Student Two' in mail.outbox[0].body assert 'Hi Student Three' in mail.outbox[1].body def test_scheduled_json_action(self): """Create a scheduled send list action and execute it.""" token = 'fake token' OnTaskSharedState.json_outbox = [] settings.EXECUTE_ACTION_JSON_TRANSFER = False user = get_user_model().objects.get(email='[email protected]') # User must exist self.assertIsNotNone(user, 'User [email protected] not found') action = Action.objects.get(name='send json') scheduled_item = ScheduledAction( user=user, name='JSON scheduled action', action=action, execute=datetime.now(pytz.timezone(settings.TIME_ZONE)), status=ScheduledAction.STATUS_PENDING, item_column=action.workflow.columns.get(name='email'), payload={'token': token}) scheduled_item.save() # Execute the scheduler tasks.execute_scheduled_actions_task(True) scheduled_item.refresh_from_db() json_outbox = OnTaskSharedState.json_outbox assert scheduled_item.status == ScheduledAction.STATUS_DONE assert len(json_outbox) == 3 assert all(item['target'] == action.target_url for item in json_outbox) assert all(token in item['auth'] for item in json_outbox) def test_scheduled_send_list_action(self): """Create a scheduled send list action and execute it.""" user = get_user_model().objects.get(email='[email protected]') # User must exist self.assertIsNotNone(user, 'User [email protected] not found') action = Action.objects.get(name='send list') scheduled_item = ScheduledAction( user=user, name='send list scheduled action', action=action, execute=datetime.now(pytz.timezone(settings.TIME_ZONE)), status=ScheduledAction.STATUS_PENDING, payload={ 'email_to': '[email protected]', 'subject': 'Action subject', 'cc_email': '', 'bcc_email': ''}) 
scheduled_item.save() # Execute the scheduler tasks.execute_scheduled_actions_task(True) scheduled_item.refresh_from_db() assert scheduled_item.status == ScheduledAction.STATUS_DONE assert len(mail.outbox) == 1 assert ( '[email protected], [email protected]' in mail.outbox[0].body) def test_scheduled_json_list_action(self): """Create a scheduled send list action and execute it.""" token = 'false token' settings.EXECUTE_ACTION_JSON_TRANSFER = False OnTaskSharedState.json_outbox = [] user = get_user_model().objects.get(email='[email protected]') # User must exist self.assertIsNotNone(user, 'User [email protected] not found') action = Action.objects.get(name='send json list') scheduled_item = ScheduledAction( user=user, name='JSON List scheduled action', action=action, execute=datetime.now(pytz.timezone(settings.TIME_ZONE)), status=ScheduledAction.STATUS_PENDING, payload={'token': token}) scheduled_item.save() # Execute the scheduler tasks.execute_scheduled_actions_task(True) json_outbox = OnTaskSharedState.json_outbox scheduled_item.refresh_from_db() assert scheduled_item.status == ScheduledAction.STATUS_DONE assert len(json_outbox) == 1 assert all(token in item['auth'] for item in json_outbox)
python
# -*- coding:utf-8 -*- # @atime : 2021/1/24 12:58 下午 """ edit distance https://leetcode-cn.com/problems/edit-distance/ """ def solution1(word1: str, word2: str): """ 计算编辑距离 Args: word1 (str): 字符串1 word2 (str): 字符串2 Returns: (int) distance """ if not word1 or not word2: return max(len(word1), len(word2)) if word1 == word2: return 0 res = [] for i in range(len(word1) + 1): line_res = [] for j in range(len(word2) + 1): if i == 0: line_res.append(j) continue if j == 0: line_res.append(i) else: if word1[i - 1] == word2[j - 1]: line_res.append(res[i - 1][j - 1]) else: temp_dis = min(res[i - 1][j - 1], res[i - 1][j], line_res[j - 1]) + 1 line_res.append(temp_dis) res.append(line_res) return res[-1][-1] def solution2(word1, word2): l1, l2 = len(word1), len(word2) if not word1 or not word2: return max(l1, l2) if word1 == word2: return 0 dp = [[0] * (l2 + 1) for _ in range(l1 + 1)] for i in range(l1 + 1): for j in range(l2 + 1): if i == 0: dp[i][j] = j continue if j == 0: dp[i][j] = i else: if word1[i - 1] == word2[j - 1]: dp[i][j] = dp[i - 1][j - 1] else: dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1 return dp[-1][-1] def solution3(word1, word2): from heapq import heappop, heappush heap = [(0, word1, word2)] visited_set = set() while heap: d, w1, w2 = heappop(heap) if (w1, w2) in visited_set: continue visited_set.add((w1, w2)) if w1 == w2: return d if w1 and w2 and w1[0] == w2[0]: heappush(heap, (d, w1[1:], w2[1:])) else: if w1: heappush(heap, (d + 1, w1[1:], w2)) # delete if w2: heappush(heap, (d + 1, w1, w2[1:])) # add if w1 and w2: heappush(heap, (d + 1, w1[1:], w2[1:])) # replace if __name__ == '__main__': r = solution3('abc', 'agcag') print(r) pass
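# Small cross-check, assuming the three solutions above are defined in this module:
# all strategies should agree on the classic LeetCode examples.
cases = [('horse', 'ros', 3), ('intention', 'execution', 5), ('', 'abc', 3)]
for w1, w2, expected in cases:
    assert solution1(w1, w2) == solution2(w1, w2) == solution3(w1, w2) == expected
print('all edit-distance solutions agree')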
python
# # This file is part of the FFEA simulation package # # Copyright (c) by the Theory and Development FFEA teams, # as they appear in the README.md file. # # FFEA is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFEA is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with FFEA. If not, see <http://www.gnu.org/licenses/>. # # To help us fund FFEA development, we humbly ask that you cite # the research papers on the package. # import sys, os, subprocess import __builtin__ import argparse as _argparse # Set up argparse parser = _argparse.ArgumentParser(description="Convert an FFEA trajectory to a pseudo-pdb system for PCA analysis") parser.add_argument("i", help="Input PCZ file (.pcz)") parser.add_argument("t", help="Input PDB topology file (_frame0.pdb)") parser.add_argument("-n", action="store", nargs='?', default = '10', help="Number of Modes to Analyse") parser.add_argument("-s", action="store", nargs='?', default = '1e-10', help="FFEA scale value") parser.add_argument("-o", action="store", nargs='?', help="Output filename") def FFEA_get_PCA_animations(infile, topfile, outfile, num_modes, scale): scriptdir = os.path.dirname(os.path.abspath(sys.argv[0])) # Check for problems base, ext = os.path.splitext(infile) if outfile == None: outfile = base + "_PCAanim" else: outfile = os.path.splitext(outfile)[0] if os.path.exists(outfile + "_anim" + str(0) + ".pdb") or os.path.exists(outfile + "_anim" + str(0) + ".ftj"): print("Default output file ('" + outfile + "_anim" + str(0) + ".pdb" + "') or ('" + outfile + "_anim" + str(0) + ".ftj" + "') already exists.\n") raise IOError try: num_modes = int(num_modes) except(ValueError): raise # Do some PCZ analysis # Check version (for some reason, it's written to stderr :/) p = subprocess.Popen(["pyPczdump", "--version"], stderr=subprocess.PIPE) sys.stderr.flush() pyPczver = p.communicate()[1].strip() sys.stdout.write("Found pyPczdump version " + pyPczver + "\n\n") pyPczver = [int(bit) for bit in pyPczver.split(".")] # Print help to file and hack your way to num_evecs try: num_avail_modes = int(subprocess.check_output(["pyPczdump", "-i", infile, "-n"]).split("\n")[8][:-1].split()[-1]) except OSError as e: if e.errno == os.errno.ENOENT: raise OSError else: print("Unknown problem running 'pyPczdump. Perhaps conflicting versions (before and after 2.0)") raise IOError if num_modes > num_avail_modes: print("Too many modes requested. Defaulting to maximum (%d modes)" % (num_avail_modes)) num_modes = num_avail_modes print("Calculating Eigenvector Animations...") for i in range(num_modes): anim_outfname = outfile + "_anim" + str(i + 1) + ".pdb" anim_outfname_ffea = outfile + "_anim" + str(i + 1) + ".ftj" sys.stdout.write("\rEigenvector %d" % (i + 1)) if(pyPczver[0] >= 2): try: subprocess.call(["pyPczdump", "-i", infile, "-m", str(i + 1), "-o", anim_outfname]) except OSError as e: if e.errno == os.errno.ENOENT: raise OSError else: print("Unknown problem running 'pyPczdump. 
Perhaps conflicting versions (before and after 2.0)") raise IOError else: try: subprocess.call(["pyPczdump", "-i", infile, "--pdb", topfile, "-m", str(i), "-o", anim_outfname]) except OSError as e: if e.errno == os.errno.ENOENT: raise OSError else: print("Unknown problem running 'pyPczdump. Perhaps conflicting versions (before and after 2.0)") raise IOError subprocess.call(["python", scriptdir + "/../../FFEA_analysis/FFEA_traj_tools/PDB_convert_to_FFEA_trajectory.py", anim_outfname, anim_outfname_ffea, str(scale)]) print("\ndone!") if sys.stdin.isatty() and hasattr(__builtin__, 'FFEA_API_mode') == False: try: args = parser.parse_args() except: somehelp = parser.format_help().split("\n", 1)[1] print somehelp sys.exit() try: FFEA_get_PCA_animations(args.i, args.t, args.o, args.n, args.s) except IOError: parser.print_help() except ValueError: print("'-n' must be an integer") parser.print_help() except TypeError: parser.print_help() print("\nLikely missing argument. Please try again :)\n") except OSError: print("\n'pyPczdump' program not found. Please add to your $PATH") parser.print_help()
python
# 1. How functions work in Python
import inspect

frame = None


def bar():
    global frame
    frame = inspect.currentframe()


def foo():
    bar()

# python.exe executes foo through a C function called PyEval_EvalFrameEx,
# which first creates a stack frame (frame object)
"""
In Python everything is an object: frame objects and code objects included.
When foo calls the sub-function bar, another frame is created.
All frames are allocated on the heap, which is what lets a frame exist
independently of its caller.
(Python is dynamic: when a call finishes, its frame is not necessarily destroyed.)
(In static languages frames live on the call stack and are destroyed on return.)
"""
# import dis
# print(dis.dis(foo))
foo()
print(frame.f_code.co_name)
caller_frame = frame.f_back
print(caller_frame.f_code.co_name)


def gen_func():
    yield 1
    name = 'bobby'
    yield 2
    age = 30
    return 'imooc'


import dis

gen = gen_func()
print(dis.dis(gen))
print(gen.gi_frame.f_lasti)   # -1
print(gen.gi_frame.f_locals)  # {}
next(gen)
print(gen.gi_frame.f_lasti)   # 2   YIELD_VALUE
print(gen.gi_frame.f_locals)  # {}
next(gen)
print(gen.gi_frame.f_lasti)   # 12  YIELD_VALUE
print(gen.gi_frame.f_locals)  # {'name': 'bobby'}

from collections import UserList
from _collections_abc import Sequence
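# Follow-on sketch: once the generator finishes, its return value surfaces as
# StopIteration.value and the heap-allocated frame is released (gi_frame becomes None).
gen2 = gen_func()
next(gen2)
next(gen2)
try:
    next(gen2)
except StopIteration as e:
    print(e.value)        # 'imooc'
print(gen2.gi_frame)      # None - the frame is gone once the generator is exhausted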
python
# coding=utf-8
import Putil.base.logger as plog

logger = plog.PutilLogConfig('data_sampler_factory').logger()
logger.setLevel(plog.DEBUG)

from Putil.demo.deep_learning.base import data_sampler as standard
from util import data_sampler as project


def data_sampler_factory(args, data_sampler_source, data_sampler_name, property_type='', **kwargs):
    if args.framework == 'torch':
        pass
    else:
        raise NotImplementedError('data_loader of framework: {} is not implemented'.format(args.framework))
    data_sampler = '{}.{}'.format(data_sampler_source, data_sampler_name)
    return eval('{}(args, property_type, **kwargs)'.format(data_sampler))


def data_sampler_arg_factory(parser, source, name, property_type='', **kwargs):
    arg = '{}.{}Arg'.format(source, name)
    logger.info('data_sampler_arg: {}'.format(arg))
    return eval('{}(parser, property_type, **kwargs)'.format(arg))
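# Alternative sketch (not the project's API): the same dispatch can use getattr on
# the module aliases imported above instead of eval() on a formatted string;
# data_sampler_factory_safe and _SOURCES are hypothetical names.
_SOURCES = {'standard': standard, 'project': project}

def data_sampler_factory_safe(args, data_sampler_source, data_sampler_name, property_type='', **kwargs):
    if args.framework != 'torch':
        raise NotImplementedError('data_loader of framework: {} is not implemented'.format(args.framework))
    factory = getattr(_SOURCES[data_sampler_source], data_sampler_name)
    return factory(args, property_type, **kwargs)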
python
# file_loader.py """ Importe les bibliotheques "XML", "SQLite" et "Pygame" """ import xml.etree.ElementTree as ET import sqlite3 import pygame as pg vec = pg.math.Vector2 """ Classe SpriteSheet - But : decouper les sprites en fonction des donnees XML fournies. - Fonctionnement : decoupe l'image associee grace aux coordonnes et dimensions lues dans le fichier XML et renvoyer (return) le decoupage. - Utilisation : dans une boucle for, va decouper une image et sauvegarder le decoupage dans une liste. Ceci est fait au debut du jeu, pour eviter d'avoir a charger les images pendant le jeu. """ class SpriteSheet(): # load an atlas image and cut a specific piece out of it # can also pass an associated XML file def __init__(self, img_file, data_file=None): self.spritesheet = img_file if data_file: tree = ET.parse(data_file) self.map = {} for node in tree.iter(): if node.attrib.get('name'): name = node.attrib.get('name') self.map[name] = {} self.map[name]['x'] = int(node.attrib.get('x')) self.map[name]['y'] = int(node.attrib.get('y')) self.map[name]['width'] = int(node.attrib.get('width')) self.map[name]['height'] = int(node.attrib.get('height')) if node.attrib.get('num'): num = node.attrib.get('num') self.map[num] = {} self.map[num]['x'] = int(node.attrib.get('x')) self.map[num]['y'] = int(node.attrib.get('y')) self.map[num]['width'] = int(node.attrib.get('width')) self.map[num]['height'] = int(node.attrib.get('height')) """ Fonction get_image_rect - Fonctionnement : renvoie l'image en fonction des dimensions et des coordonees """ def get_image_rect(self, x, y, w, h): return self.spritesheet.subsurface(pg.Rect(x, y, w, h)) """ Fonction get_image_name - Fonctionnement : renvoie l'image en fonction de son nom dans le fichier XML associe a des coordonnees et des dimensions. """ def get_image_name(self, name): rect = pg.Rect(self.map[name]['x'], self.map[name]['y'], self.map[name]['width'], self.map[name]['height']) return self.spritesheet.subsurface(rect) """ Fonction get_image_num - Fonctionnement : renvoie l'image en fonction du numro dans la liste XML associee a des coordonnees et des dimensions. """ def get_image_num(self, num): rect = pg.Rect(self.map[num]['x'], self.map[num]['y'], self.map[num]['width'], self.map[num]['height']) return self.spritesheet.subsurface(rect) """ Fonction File_Loader - Fonctionnement : charge tout le contenu du jeu et le sauvegarde sous la classe Prgm(). 
""" def File_Loader(self): # cannon sounds self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/shoot/DryFire.ogg")) #0 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/shoot/WetFire.ogg")) #1 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/shoot/canon.ogg")) #2 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/missile/Missle_Launch.ogg")) #3 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/explosion/Cracking.ogg")) #4 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/explosion/doing.ogg")) #5 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/activation/pen_click.ogg")) #6 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/activation/drop_click.ogg")) #7 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/activation/Construction.ogg")) #8 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/activation/Construction_quick.ogg")) #9 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/activation/Swing.ogg")) #10 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/activation/Swing_lox.ogg")) #11 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/activation/Tzwing.ogg")) #12 self.sounds.append(pg.mixer.Sound("files/sound/audio_fx/activation/Ka_Tching.ogg")) #13 self.game_music = "files/sound/music/Game_music.ogg" self.menu_music = "files/sound/music/Menu_music.ogg" #self.game_music = "files\sound\music\Machinimasound.com_-_Gold_Coast.ogg" #self.menu_music = "files\sound\music\Bit_Coast.ogg" # Fill sprite library with ready to use sprites self.img_interface = pg.image.load("files/img/sprites/tile_maps/interface_design.png").convert_alpha() self.img_trans = pg.image.load("files/img/sprites/tile_maps/trans_display_tilemap.png").convert_alpha() self.img_all_obj = pg.image.load("files/img/sprites/tile_maps/all_obj.png").convert_alpha() self.img_all_windows = pg.image.load("files/img/sprites/tile_maps/all_windows.png").convert_alpha() self.img_exp1 = pg.image.load("files/img/sprites/visual fx/exp1_200x200px.png").convert_alpha() self.img_exp2 = pg.image.load("files/img/sprites/visual fx/exp2_200x200px.png").convert_alpha() self.img_poster = pg.image.load("files/img/sprites/poster.png").convert_alpha() self.img_title = pg.image.load("files/img/sprites/title.png").convert_alpha() self.img_rb = pg.image.load("files/img/sprites/shallow_wave/sprite_0.png").convert_alpha() self.sprite['interface'] = [] self.sprite['obj'] = [] self.sprite['windows'] = [] self.sprite['trans_doors'] = [] self.sprite['map'] = [] self.sprite['exp1'] = [] self.sprite['exp2'] = [] self.sprite['anim_energy_leak_exp'] = [] self.sprite['anim_vapour_trail'] = [] self.sprite['anim_bullet_flame'] = [] self.sprite['anim_yellow_exp'] = [] self.sprite['anim_fire'] = [] self.sprite['anim_spark'] = [] # When creating list add name for easier finding for i in range(0, 27 + 1, 1): self.sprite['interface'].append(SpriteSheet(self.img_interface, "files/img/sprites/tile_maps/xml/interface_tiles.xml").get_image_num("{}".format(i)).convert_alpha()) for i in range(0, 1 + 1, 1): self.sprite['trans_doors'].append(SpriteSheet(self.img_trans, "files/img/sprites/tile_maps/xml/trans_display_tilemap.xml").get_image_num("{}".format(i)).convert_alpha()) for i in range(0, 69 + 1, 1): self.sprite['obj'].append(SpriteSheet(self.img_all_obj, "files/img/sprites/tile_maps/xml/all_obj.xml").get_image_num("{}".format(i)).convert_alpha()) for i in range(0, 0 + 1, 1): self.sprite['windows'].append(SpriteSheet(self.img_all_windows, 
"files/img/sprites/tile_maps/xml/all_windows.xml").get_image_num("{}".format(i)).convert_alpha()) for i in range(0, 15 + 1, 1): self.sprite['map'].append(pg.image.load("files/img/sprites/maps/map{}.png".format(i)).convert_alpha()) for i in range(0, 21 + 1, 1): self.sprite['exp1'].append(SpriteSheet(self.img_exp1, "files/img/sprites/visual fx/anim_xml/exp1_200x200px.xml").get_image_num("{}".format(i)).convert_alpha()) for i in range(0, 16 + 1, 1): self.sprite['exp2'].append(SpriteSheet(self.img_exp2, "files/img/sprites/visual fx/anim_xml/exp2_200x200px.xml").get_image_num("{}".format(i)).convert_alpha()) for i in range(0, 15 + 1, 1): self.sprite['anim_energy_leak_exp'].append(pg.image.load("files/img/sprites/visual fx/anim_energy_leak_explosion/sprite_{}.png".format(i)).convert_alpha()) for i in range(0, 9 + 1, 1): self.sprite['anim_vapour_trail'].append(pg.image.load("files/img/sprites/visual fx/anim_vapour_trail/sprite_{}.png".format(i)).convert_alpha()) for i in range(0, 2 + 1, 1): self.sprite['anim_bullet_flame'].append(pg.image.load("files/img/sprites/visual fx/anim_bullet_flame/sprite_{}.png".format(i)).convert_alpha()) for i in range(0, 20 + 1, 1): self.sprite['anim_yellow_exp'].append(pg.image.load("files/img/sprites/visual fx/anim_yellow_exp/sprite_{}.png".format(i)).convert_alpha()) for i in range(0, 98 + 1, 1): self.sprite['anim_fire'].append(pg.image.load("files/img/sprites/visual fx/anim_fire/sprite_{}.png".format(i)).convert_alpha()) for i in range(0, 13 + 1, 1): self.sprite['anim_spark'].append(pg.image.load("files/img/sprites/visual fx/anim_spark/sprite_{}.png".format(i)).convert_alpha()) """ Classe DataBase - But : sauvegarder les donnees du joueur - Fonctionnement : lors de son initialisation, va creer une base de donnees avec des latices predefinies pour contenir les donnees du joueur. - Utilisation : la classe DataBase() est appelee dans la classe Prgm() quand le programme est lance pour charger toutes les donnes dans une bibliotheque. Lors de la fermeture du programme, DataBase().db_dict_update() va sauvegarder les donnees de la bibliotheque dans la base .db. """ class DataBase: """ Fonction __init__ - Fonctionnement : se connecte a la base de donnees session.db et sauvegarde les noms des latices dans self.db_name_list. 
""" def __init__(self): self.conn = sqlite3.connect('files/session/session.db') self.c = self.conn.cursor() self.db_name_list = [ "game_lvl", "credit", "credit_gain_lvl", "cannon_1_lvl", "cannon_2_lvl", "cannon_3_lvl", "cannon_4_lvl", "base_shielding_lvl", "energy_production_lvl", "energy_storage_lvl", "build_time_lvl", "equip_cost_lvl", "build_slots", "rover_lvl", "rocket_lvl", "panther_lvl", "flak_lvl", "tanker_lvl", "build_b1", "build_b2", "build_b3", "build_b4", "build_b5", "build_b6", "build_b7", "music_sound_lvl", "fx_sound_lvl", "screen_width", "screen_height", "full_screen" ] """ Fonction db_spawn - Fonctionnement : va creer la base de donnee et va inserer des donnees par defaut """ def db_spawn(self): # Create table self.c.execute("""CREATE TABLE session ( game_lvl integer, credit integer, credit_gain_lvl integer, cannon_1_lvl integer, cannon_2_lvl integer, cannon_3_lvl integer, cannon_4_lvl integer, base_shielding_lvl integer, energy_production_lvl integer, energy_storage_lvl integer, build_time_lvl integer, equip_cost_lvl integer, build_slots integer, rover_lvl integer, rocket_lvl integer, panther_lvl integer, flak_lvl integer, tanker_lvl integer, build_b1 TEXT, build_b2 TEXT, build_b3 TEXT, build_b4 TEXT, build_b5 TEXT, build_b6 TEXT, build_b7 TEXT, music_sound_lvl integer, fx_sound_lvl integer, screen_width integer, screen_height integer, full_screen integer )""") # Insert a row of data self.c.execute("""INSERT INTO session VALUES ( 0, --game_lvl 0, --credit 0, --credit_gain_lvl 0, --cannon_1_lvl 0, --cannon_2_lvl 0, --cannon_3_lvl 0, --cannon_4_lvl 0, --base_shielding_lvl 0, --energy_production_lvl 0, --energy_storage_lvl 0, --build_time_lvl 0, --equip_cost_lvl 1, --build_slots (1 at start) 1, --rover_lvl # 0, --rocket_lvl # 0, --panther_lvl # 0, --flak_lvl # 0, --tanker_lvl # 'Rover_1', --build_b1 TEXT 'None', --build_b2 TEXT 'None', --build_b3 TEXT 'None', --build_b4 TEXT 'None', --build_b5 TEXT 'None', --build_b6 TEXT 'None', --build_b7 TEXT 0.5, --music_sound_lvl 0.5, --fx_sound_lvl 1280,--screen_width 720,--screen_height 0 --full_screen )""") self.conn.commit() self.conn.close() """ Fonction db_update - Fonctionnement : sauvegarde une donnee precise designe par data_name dans la base .db. """ def db_update(self, data_name, data_input): # Change a specific value in db self.c.execute("UPDATE session SET {} = {}".format(data_name, data_input)) self.conn.commit() self.conn.close() """ Fonction db_dict_update - Fonctionnement : sauvegarde toutes les donnee contenues dans data_dict_input dans la base .db. """ def db_dict_update(self, data_dict_input): for i in range(0, len(self.db_name_list)): data_name = self.db_name_list[i] data_input = data_dict_input[self.db_name_list[i]] print(data_input, data_name) self.c.execute("UPDATE session SET {} = '{}'".format(data_name, data_input)) self.conn.commit() self.conn.close() """ Fonction db_get - Fonctionnement : va chercher une donnee precise et retourner sa valeur """ def db_get(self, data_name): # Return a specific value from db self.c.execute("SELECT {} FROM session".format(data_name)) val = self.c.fetchone()[0] self.conn.close() return val """ Fonction db_dict_get - Fonctionnement : va chercher toutes les donnees et les sauvegarde dans data_dict_input (bibliotheque). 
""" def db_dict_get(self, data_dict_input): data_dict_input.clear() for i in range(0, len(self.db_name_list)): data_name = self.db_name_list[i] self.c.execute("SELECT {} FROM session".format(data_name)) data_dict_input[self.db_name_list[i]] = self.c.fetchone()[0] self.conn.close() """ Fonction db_check - Fonctionnement : verifie si la base de donnees existe et renvoie un bool qui verifie cette condition. """ def db_check(self): # Check if db has some table if not return False if true return True self.c.execute("""SELECT name FROM sqlite_master WHERE type='table';""") result = self.c.fetchone() if result: self.conn.close() return True else: self.conn.close() return False """ Fonction db_remove - Fonctionnement : va suppr toutes les donnes de la base de donnnees """ def db_remove(self): # Clear the entire db self.c.execute("DROP TABLE session") self.conn.commit() self.conn.close()
python
from storage import read_region_snapshot, _round_15min
import datetime
from dateutil.parser import parse


def test_read_region_snapshot():
    read_region_snapshot('slc_ut', '2021-09-01T00:00:00Z')


def test__round_15min():
    ts = parse('2021-01-31T23:59:01Z')
    ret = _round_15min(ts)
    assert ret == parse('2021-02-01T00:00:00Z')

    ts = parse('2021-01-31T23:50:01Z')
    ret = _round_15min(ts)
    assert ret == parse('2021-01-31T23:45:00Z')

    print('test__round_15min: All tests passed')


def run_tests():
    # test__round_15min()
    # test_read_region_snapshot()
    print('All IO tests passed')


if __name__ == '__main__':
    run_tests()
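# The helper under test is imported from storage and not shown here; a minimal
# implementation consistent with the two assertions might look like this
# (illustrative only - the real _round_15min may differ).
from datetime import timedelta

def _round_15min_sketch(ts):
    """Round a timezone-aware datetime to the nearest 15-minute boundary."""
    discard = timedelta(minutes=ts.minute % 15, seconds=ts.second, microseconds=ts.microsecond)
    rounded = ts - discard
    if discard >= timedelta(minutes=7, seconds=30):
        rounded += timedelta(minutes=15)
    return rounded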
python
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

from .cumulative_return import cumulative_return_graph
from .score_ic import score_ic_graph
from .report import report_graph
from .rank_label import rank_label_graph
from .risk_analysis import risk_analysis_graph
python
from django.apps import AppConfig


class GameForumOtherConfig(AppConfig):
    name = 'tulius.gameforum.other'
    label = 'game_forum_other'
python
import pickle import time import os import random from time import sleep import communicate.dealer_pb2 as dealer_pb2 import communicate.dealer_pb2_grpc as rpc # V1.4 # 0 黑桃 1 红桃 2 方片 3 草花 # 牌的id: 0-51 ''' 牌面level编号 皇家同花顺:10 同花顺 :9 四条 :8 葫芦 :7 同花 :6 顺子 :5 三条 :4 两对 :3 一对 :2 高牌 :1 ''' ''' DealerRequest message Definition: type: 0 heartbeat 1 response from server for state update 2 request from server for decision 3 request from server for state control 4 response from server for client init 5 response from server for game over status: -1 uninitialized ''' MessageType_HeartBeat = 0 MessageType_StateUpdate = 1 MessageType_GameDecision = 2 MessageType_StateControl = 3 MessageType_ClientInit = 4 MessageType_GameOver = 5 MessageType_InvalidToken = 6 MessageType_GameStarted = 7 MessageType_IllegalDecision = 8 #ClientState_Uninitialized = -1 #ClientState_Connected = 1 #ClientState_Disconnected = 2 # InitStatus when ClientInit # user already in game, and connected, and rejected InitStatus_InGameRejected = -2 # user already in queue, and connected, and rejected InitStatus_InQueueRejected = -1 InitStatus_InQueue = 0 # user added in queue # user already in game, and disconnected, and continue game InitStatus_InGameContinue = 1 # user already in queue, and disconnected, and continue in queue InitStatus_InQueueReInit = 2 GameStatus_Reseted = 0 GameStatus_Started = 1 GameStatus_Running = 2 GameStatus_Finished = 3 SERVER_TIMEOUT_SECONDS = 15 # alter the card id into color def id2color(card): return card % 4 # alter the card id into number def id2num(card): return card // 4 COLOR = ['C', 'D', 'H', 'S'] NUM = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A'] def id2card(card_id): color = id2color(card_id) num = id2num(card_id) return (COLOR[color]+NUM[num]) def card2id(card): color = card[0] num = card[1] return NUM.index(num) * 4 + COLOR.index(color) ''' hand.level 牌面等级:高牌 1 一对 2 两对 3 三条 4 顺子 5 同花 6 葫芦 7 四条 8 同花顺 9 皇家同花顺:10 ''' def judge_exist(x): if x >= 1: return True return False # poker hand of 7 card class Hand(object): def __init__(self, cards): cards = cards[:] self.level = 0 self.cnt_num = [0] * 13 self.cnt_color = [0] * 4 self.cnt_num_eachcolor = [[0 for col in range(13)] for row in range(4)] self.maxnum = -1 self.single = [] self.pair = [] self.tripple = [] self.nums = [] for x in cards: self.cnt_num[id2num(x)] += 1 self.cnt_color[id2color(x)] += 1 self.cnt_num_eachcolor[id2color(x)][id2num(x)] += 1 self.nums.append(id2num(x)) self.judge_num_eachcolor = [[] for i in range(4)] for i in range(4): self.judge_num_eachcolor[i] = list( map(judge_exist, self.cnt_num_eachcolor[i])) self.nums.sort(reverse=True) for i in range(12, -1, -1): if self.cnt_num[i] == 1: self.single.append(i) elif self.cnt_num[i] == 2: self.pair.append(i) elif self.cnt_num[i] == 3: self.tripple.append(i) self.single.sort(reverse=True) self.pair.sort(reverse=True) self.tripple.sort(reverse=True) # calculate the level of the poker hand for i in range(4): if self.judge_num_eachcolor[i][8:13].count(True) == 5: self.level = 10 return for i in range(4): for j in range(7, -1, -1): if self.judge_num_eachcolor[i][j:j+5].count(True) == 5: self.level = 9 self.maxnum = j + 4 return if self.judge_num_eachcolor[i][12] and self.judge_num_eachcolor[i][:4].count(True) == 4: self.level = 9 self.maxnum = 3 return for i in range(12, -1, -1): if self.cnt_num[i] == 4: self.maxnum = i self.level = 8 for j in range(4): self.nums.remove(i) return tripple = self.cnt_num.count(3) if tripple > 1: self.level = 7 return elif tripple > 0: if 
self.cnt_num.count(2) > 0: self.level = 7 return for i in range(4): if self.cnt_color[i] >= 5: self.nums = [] for card in cards: if id2color(card) == i: self.nums.append(id2num(card)) self.nums.sort(reverse=True) self.nums = self.nums[:5] self.maxnum = self.nums[0] self.level = 6 return for i in range(8, -1, -1): flag = 1 for j in range(i, i + 5): if self.cnt_num[j] == 0: flag = 0 break if flag == 1: self.maxnum = i + 4 self.level = 5 return if self.cnt_num[12] and list(map(judge_exist, self.cnt_num[:4])).count(True) == 4: self.maxnum = 3 self.level = 5 return for i in range(12, -1, -1): if self.cnt_num[i] == 3: self.maxnum = i self.level = 4 self.nums.remove(i) self.nums.remove(i) self.nums.remove(i) self.nums = self.nums[:min(len(self.nums), 2)] return if self.cnt_num.count(2) > 1: self.level = 3 return for i in range(12, -1, -1): if self.cnt_num[i] == 2: self.maxnum = i self.level = 2 self.nums.remove(i) self.nums.remove(i) self.nums = self.nums[:min(len(self.nums), 3)] return if self.cnt_num.count(1) == 7: self.level = 1 self.nums = self.nums[:min(len(self.nums), 5)] return self.level = -1 def __str__(self): return 'level = %s' % self.level def cmp(x, y): # x < y return 1 if x > y: return -1 elif x == y: return 0 else: return 1 # find the bigger of two poker hand(7 cards), if cards0 < cards1 then return 1, cards0 > cards1 return -1, else return 0 def judge_two(cards0, cards1): hand0 = Hand(cards0) hand1 = Hand(cards1) if hand0.level > hand1.level: return -1 elif hand0.level < hand1.level: return 1 else: if hand0.level in [5, 9]: return cmp(hand0.maxnum, hand1.maxnum) elif hand0.level in [1, 2, 4]: t = cmp(hand0.maxnum, hand1.maxnum) if t == 1: return 1 elif t == -1: return -1 else: if hand0.nums < hand1.nums: return 1 elif hand0.nums == hand1.nums: return 0 else: return -1 elif hand0.level == 6: if hand0.nums < hand1.nums: return 1 elif hand0.nums > hand1.nums: return -1 else: return 0 elif hand0.level == 8: t = cmp(hand0.maxnum, hand1.maxnum) if t == 1: return 1 elif t == -1: return -1 else: return cmp(hand0.nums[0], hand1.nums[0]) elif hand0.level == 3: if cmp(hand0.pair[0], hand1.pair[0]) != 0: return cmp(hand0.pair[0], hand1.pair[0]) elif cmp(hand0.pair[1], hand1.pair[1]) != 0: return cmp(hand0.pair[1], hand1.pair[1]) else: hand0.pair = hand0.pair[2:] hand1.pair = hand1.pair[2:] tmp0 = hand0.pair + hand0.pair + hand0.single tmp0.sort(reverse=True) tmp1 = hand1.pair + hand1.pair + hand1.single tmp1.sort(reverse=True) if tmp0[0] < tmp1[0]: return 1 elif tmp0[0] == tmp1[0]: return 0 else: return -1 elif hand0.level == 7: if cmp(hand0.tripple[0], hand1.tripple[0]) != 0: return cmp(hand0.tripple[0], hand1.tripple[0]) else: tmp0 = hand0.pair tmp1 = hand1.pair if len(hand0.tripple) > 1: tmp0.append(hand0.tripple[1]) if len(hand1.tripple) > 1: tmp1.append(hand1.tripple[1]) tmp0.sort(reverse=True) tmp1.sort(reverse=True) if tmp0[0] < tmp1[0]: return 1 elif tmp0[0] == tmp1[0]: return 0 else: return -1 else: pass # assert 0 return 0 class Player(object): def __init__(self, _init_money, _username="unknown"): # user profile self.username = _username # username, 'unknown' is unknown self.init_money = _init_money # init money self.inited = False self.money = _init_money # money player remains # game states self.active = True # if the player is active(haven't giveups) self.bet = 0 # the bet in this round self.cards = [] # private cards self.totalbet = 0 # the bet in total(all round) self.allin = 0 # if the player has all in #self.state = # state # session data self.token = '' self.connected = 
False self.last_msg_time = None self.game_over_sent = False # raise the bet by amount def raisebet(self, amount): self.money -= amount self.bet += amount assert self.money > 0 # player allin def allinbet(self): self.bet += self.money self.allin = 1 self.money = 0 def getcards(self, sharedcards): return self.cards + sharedcards # return self.cards + self.state.sharedcards def __str__(self): return 'player: active = %s, money = %s, bet = %s, allin = %s' % (self.active, self.money, self.bet, self.allin) class State(object): def __init__(self, logger, totalPlayer, usernames, initMoney, bigBlind, button): ''' class to hold the game ''' self.totalPlayer = totalPlayer # total players in the game self.bigBlind = bigBlind # bigBlind, every bet should be multiple of smallBlind which is half of bigBlind. self.button = button # the button position self.currpos = 0 # current position self.playernum = 0 # active player number self.moneypot = 0 # money in the pot self.minbet = bigBlind # minimum bet to call in this round, total bet self.sharedcards = [] # shared careds in the game self.turnNum = 0 # 0, 1, 2, 3 for pre-flop round, flop round, turn round and river round self.last_raised = bigBlind # the amount of bet raise last time self.player = [] # All players. You can check them to help your decision. The 'cards' field of other player is not visiable for sure. self.decision_history = {0:[],1:[],2:[],3:[]} # all th history of this game for pos in range(totalPlayer): # initMoney # if (len(username_list) <= i): self.player.append(Player(initMoney)) self.player[pos].username = usernames.get(pos, 'unknown') self.logger = logger def set_user_money(self, initMoney): for i in range(self.totalPlayer): self.player[i].init_money = initMoney[i] self.player[i].money = initMoney[i] self.logger.info('[SET MONEY] Player at pos {} has {}'.format(i, self.player[i].money)) def __str__(self): return 'currpos = %s, playernum = %s, moneypot = %s, minbet = %s, last_raised = %s' \ % (self.currpos, self.playernum, self.moneypot, self.minbet, self.last_raised) def restore(self, turn, button, bigBlind): # restore the state before each round self.turnNum = turn self.currpos = button self.minbet = 0 self.last_raised = bigBlind def update(self, totalPlayer): # update the state after each round for i in range(totalPlayer): self.player[i].totalbet += self.player[i].bet self.player[i].bet = 0 # judge if the round is over def round_over(self): if self.playernum == 1: return 1 for i in range(self.totalPlayer): if (self.player[i].active is True) and (self.player[i].allin == 0): return 0 for i in range(self.totalPlayer): if self.player[i].active is True and (self.player[i].bet != self.minbet and self.player[i].allin == 0): return 0 if self.turnNum != 0 and self.minbet == 0: return 0 return 1 # calculate the next position def nextpos(self, pos): self.currpos = (pos + 1) % self.totalPlayer return self.currpos def dump(self, file): with open(file, 'wb') as handler: pickle.dump(self, handler) print('dump') def save_game_replay(self, folder=""): replay_id = random.randint(10000,99999) time_str = time.strftime("%Y_%m_%d_%H_%M_%S", time.gmtime()) replay_filename = time_str+ "_" + str(replay_id) + ".txt" replay_filename = os.path.join(folder, replay_filename) with open(replay_filename, 'w') as f: f.write("%d,%d,%d \n" % (self.totalPlayer, self.bigBlind, self.button )) f.write(','.join([p.username for p in self.player])+"\n") f.write(','.join([str(p.init_money) for p in self.player])+"\n") f.write(','.join([str(p.init_money) for p in 
self.player])+"\n") for term in self.decision_history: decion_for_this_term = self.decision_history[term] for decision in decion_for_this_term: _term = term _actionNum = int(decision.actionNum) _pos = int(decision.pos) _amount = int(decision.amount) _type = int(decision.type) action = "" if int(decision.raisebet) == 1: action = 'raisebet' elif int(decision.callbet) == 1: action = 'callbet' elif int(decision.check) == 1: action = 'check' elif int(decision.giveup) == 1: action = 'fold' elif int(decision.allin) == 1: action = 'allin' f.write("%d,%d,%d,%s,%d,%d" % (_term, _actionNum, _pos, action, _amount, _type) + "\n") for p in self.player: f.write(str(p)) for card in p.cards: f.write(" "+id2card(card)) f.write("\n") for card in self.sharedcards: f.write(id2card(card) + " ") f.write("\n") f.write(','.join([str(p.money) for p in self.player])+"\n") class Decision(object): giveup = 0 # 弃牌 allin = 0 # 全押 check = 0 # 过牌 callbet = 0 # 跟注 raisebet = 0 # 加注 amount = 0 # 本轮中加注到amount def clear(self): self.giveup = self.allin = self.check = self.callbet = self.raisebet = self.amount = 0 def update(self, a): self.giveup = a[0] self.allin = a[1] self.check = a[2] self.callbet = a[3] self.raisebet = a[4] self.amount = a[5] def isValid(self): if self.giveup + self.allin + self.check + self.callbet + self.raisebet == 1: if self.raisebet == 1 and self.amount == 0: return False return True return False def make_decision(self, action, amount=0): ''' we have to make sure that this is the only entrance to make decisions thus to ensure no bugs in decision making''' self.clear() if (action == "fold"): self.giveup = 1 assert (self.amount == 0) elif (action == "check"): self.check = 1 assert (self.amount == 0) elif (action == "call"): self.callbet = 1 assert (self.amount == 0) elif (action == "allin"): self.allin = 1 assert (self.amount == 0) elif (action == "raise"): if (amount == 0): self.raisebet = 1 self.amount = amount else: self.callbet = 1 else: raise Exception("Action not understood") def fix(self): amount = self.amount setname = '' for k, v in self.__dict__.items(): if v == 1 and k != 'amount': setname = k setattr(self, k, 0) if setname == '': setattr(self, 'giveup', 1) else: setattr(self, setname, 1) if setname == 'raisebet': if amount != 0: setattr(self, 'amount', amount) else: setattr(self, 'callbet', 1) setattr(self, 'raisebet', 0) def __str__(self): return 'giveup=%s, allin=%s, check=%s, callbet=%s, raisebet=%s, amount=%s' % (self.giveup, self.allin, self.check, self.callbet, self.raisebet, self.amount)
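# Usage sketch, assuming the classes above are in scope: compare two 7-card hands
# given as 'ColorNumber' strings (colors C/D/H/S, numbers 2-9, T, J, Q, K, A).
board = ['H2', 'H7', 'HK', 'C5', 'D9']
cards0 = [card2id(c) for c in ['HA', 'HQ']] + [card2id(c) for c in board]   # heart flush
cards1 = [card2id(c) for c in ['SK', 'DK']] + [card2id(c) for c in board]   # three kings
print(Hand(cards0))                # level = 6
print(Hand(cards1))                # level = 4
print(judge_two(cards0, cards1))   # -1: cards0 beats cards1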
python
# Buy first thing in the morning # Sell the moment we get 1% profit after commission # Buy again # Cut losses only when it is at 80%. # repeat # The idea # we should buy in 10% increments (tunable) throughout the day if the price is going up # every buy should be around 10 mins apart (tunable) # Thus we have 10 sales, by mid morning. # Sell each tranche when they generate 1% profit during the day # the moment the price crosses 2% profit of first sale, sell everything, dont buy again that day # If nothing gets sold by 1 hr before close, sell everything at whatever cost and take the loss # How the algo performsa # Horribly any which way you slice it import time from pytz import timezone import datetime import pytz import pandas as pd import numpy as np # Put any initialization logic here. The context object will be passed to # the other methods in your algorithm. def initialize(context): # context.stock = sid(3951) # add some specific securities stocks = [sid(21724), sid(22257), sid(18522), sid(351), sid(6295), sid(20914)] context.stocks = stocks context.no_of_stocks = 6 context.max = 30000 context.min = 0 context.profit = 0.01 set_commission(commission.PerShare(cost=0.005)) set_slippage(slippage.FixedSlippage(spread=0.00)) context.last_sold_date = 0 context.last_bought_date = 0 # This defines when we actually want to buy the stock context.buy_time_hour = 10 context.buy_time_minute = 10 context.sell_time_hour = 12 context.sell_time_minute = 10 context.increment_to_buy = 0.1 context.time_diff_between_buys = 10 # minutes context.buy = [0]*context.no_of_stocks context.buy_price = [0]*context.no_of_stocks # context.all_sids = [sid(21724), sid(3951), sid(6295), sid(23709), sid(12959)] # add some specific securities context.buy_and_hold_number = [0]*context.no_of_stocks context.run_once = 1 context.last_bought_date = [0]*context.no_of_stocks context.last_sold_date = [0]*context.no_of_stocks context.last_bought_price = [0]*context.no_of_stocks set_commission(commission.PerShare(cost=0.005)) set_slippage(slippage.FixedSlippage(spread=0.00)) ########### HANDLE_DATA() IS RUN ONCE PER MINUTE ####################### def handle_data(context, data): # If the stock has not yet started trading, exit it for stock in context.stocks : if stock not in data: log.info(stock) continue # Get the current exchange time, in local timezone: exchange_time = pd.Timestamp(get_datetime()).tz_convert('US/Eastern') today = exchange_time.day + exchange_time.month*30 + exchange_time.year*365 # This is to compare against the buy and hold strategy # So buy the first time when the algo runs, and then never sell if context.run_once == 1: i = 0 for stock in context.stocks : context.buy_and_hold_number[i] = (context.max/context.no_of_stocks)/data[stock].price log.info(stock) log.info(context.buy_and_hold_number[i]) context.run_once = 0 i = i + 1 i = 0 total_buy_and_hold = 0 for stock in context.stocks : # This is the graph of what would happen if we had just bought and kept total_buy_and_hold = total_buy_and_hold + context.buy_and_hold_number[i] * data[stock].price i = i + 1 # This is the graph of what would happen if we had just bought and kept record(BuyAndHold=total_buy_and_hold) # All the records i = 0 for stock in context.stocks : # This is the Price of the stock today record(PRICE=data[stock].price) # This is the value of the portfolio including current value of stock + cash we have record(PortfolioValue=context.portfolio.positions_value \ + int(context.portfolio.cash)) # this is the max of capital, to compare against the buy 
and hold value and portfolio values #record(InitialCapital=context.max) i = i + 1 if exchange_time.hour < context.buy_time_hour : return # First buy if exchange_time.hour == context.buy_time_hour and \ exchange_time.minute == context.buy_time_minute: i = -1 for stock in context.stocks : i = i + 1 # # do all the buying here # if (context.portfolio.positions[stock].amount == 0) : # amount_to_buy = min(context.portfolio.cash, (context.max/context.no_of_stocks)) # else : # amount_to_buy = min(context.portfolio.cash, \ # (context.max/context.no_of_stocks) - context.portfolio.positions[stock].amount*data[stock].price) # context.order_id = order_value(stock, 0.19*(amount_to_buy)) # # Check the order to make sure that it has bought. Right now the filled below returns zero # stock_order = get_order(context.order_id) # # The check below shows if the object exists. Only if it exists, should you # # refer to it. Otherwise you will get a runtime error # if stock_order: # message = ',buy,stock={stock},amount to buy={amount_to_buy},price={price},amount={amount}' # message = message.format(stock=stock,amount=stock_order.amount, price=data[stock].price,amount_to_buy=amount_to_buy) # log.info(message) # record(BUY=data[stock].price) context.last_bought_price[i] = data[stock].price # continue continue # Second buy i = -1 for stock in context.stocks : i = i + 1 if exchange_time.hour == context.buy_time_hour and \ exchange_time.minute == context.buy_time_minute + 10 and \ data[stock].price > context.last_bought_price[i] : # do all the buying here if (context.portfolio.positions[stock].amount == 0) : amount_to_buy = min(context.portfolio.cash, (context.max/context.no_of_stocks)) else : amount_to_buy = min(context.portfolio.cash, \ (context.max/context.no_of_stocks) - context.portfolio.positions[stock].amount*data[stock].price) context.order_id = order_value(stock, 0.39*(amount_to_buy)) # Check the order to make sure that it has bought. Right now the filled below returns zero stock_order = get_order(context.order_id) # The check below shows if the object exists. Only if it exists, should you # refer to it. Otherwise you will get a runtime error if stock_order: message = ',buy,stock={stock},amount to buy={amount_to_buy},price={price},amount={amount}' message = message.format(stock=stock,amount=stock_order.amount, price=data[stock].price,amount_to_buy=amount_to_buy) log.info(message) record(BUY=data[stock].price) context.last_bought_price[i] = data[stock].price continue continue # Third buy i = -1 for stock in context.stocks : i = i + 1 if exchange_time.hour == context.buy_time_hour and \ exchange_time.minute == context.buy_time_minute + 20 and \ data[stock].price > context.last_bought_price[i] : # do all the buying here if (context.portfolio.positions[stock].amount == 0) : amount_to_buy = min(context.portfolio.cash, (context.max/context.no_of_stocks)) else : amount_to_buy = min(context.portfolio.cash, \ (context.max/context.no_of_stocks) - context.portfolio.positions[stock].amount*data[stock].price) context.order_id = order_value(stock, 0.59*(amount_to_buy)) # Check the order to make sure that it has bought. Right now the filled below returns zero stock_order = get_order(context.order_id) # The check below shows if the object exists. Only if it exists, should you # refer to it. 
Otherwise you will get a runtime error if stock_order: message = ',buy,stock={stock},amount to buy={amount_to_buy},price={price},amount={amount}' message = message.format(stock=stock,amount=stock_order.amount, price=data[stock].price,amount_to_buy=amount_to_buy) log.info(message) record(BUY=data[stock].price) context.last_bought_price[i] = data[stock].price continue continue # Fourth buy i = -1 for stock in context.stocks : i = i + 1 if exchange_time.hour == context.buy_time_hour and \ exchange_time.minute == context.buy_time_minute + 30 and \ data[stock].price > context.last_bought_price[i] : # do all the buying here if (context.portfolio.positions[stock].amount == 0) : amount_to_buy = min(context.portfolio.cash, (context.max/context.no_of_stocks)) else : amount_to_buy = min(context.portfolio.cash, \ (context.max/context.no_of_stocks) - context.portfolio.positions[stock].amount*data[stock].price) context.order_id = order_value(stock, 0.79*(amount_to_buy)) # Check the order to make sure that it has bought. Right now the filled below returns zero stock_order = get_order(context.order_id) # The check below shows if the object exists. Only if it exists, should you # refer to it. Otherwise you will get a runtime error if stock_order: message = ',buy,stock={stock},amount to buy={amount_to_buy},price={price},amount={amount}' message = message.format(stock=stock,amount=stock_order.amount, price=data[stock].price,amount_to_buy=amount_to_buy) log.info(message) record(BUY=data[stock].price) context.last_bought_price[i] = data[stock].price continue continue # Fifth buy i = -1 for stock in context.stocks : i = i + 1 if exchange_time.hour == context.buy_time_hour and \ exchange_time.minute == context.buy_time_minute + 40 and \ data[stock].price > context.last_bought_price[i] : # do all the buying here if (context.portfolio.positions[stock].amount == 0) : amount_to_buy = min(context.portfolio.cash, (context.max/context.no_of_stocks)) else : amount_to_buy = min(context.portfolio.cash, \ (context.max/context.no_of_stocks) - context.portfolio.positions[stock].amount*data[stock].price) context.order_id = order_value(stock, 0.94*(amount_to_buy)) # Check the order to make sure that it has bought. Right now the filled below returns zero stock_order = get_order(context.order_id) # The check below shows if the object exists. Only if it exists, should you # refer to it. Otherwise you will get a runtime error if stock_order: message = ',buy,stock={stock},amount to buy={amount_to_buy},price={price},amount={amount}' message = message.format(stock=stock,amount=stock_order.amount, price=data[stock].price,amount_to_buy=amount_to_buy) log.info(message) record(BUY=data[stock].price) context.last_bought_price[i] = data[stock].price continue continue if exchange_time.hour == context.sell_time_hour and \ exchange_time.minute == context.sell_time_minute: i = 0 for stock in context.stocks : context.order_id = order(stock, -context.portfolio.positions[stock].amount) stock_order = get_order(context.order_id) # The check below shows if the object exists. Only if it exists, should you # refer to it. Otherwise you will get a runtime error if stock_order: # log the order amount and the amount that is filled message = ',sell,stock={stock},amount to sell={amount_to_sell},price={price},amount={amount}' message = message.format(stock=stock,amount=stock_order.amount, price=data[stock].price,amount_to_sell=stock_order.amount*data[stock].price) log.info(message) record(SELL=data[stock].price) i = i + 1
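# Refactoring sketch (illustrative only, same Quantopian-style API as above): the
# five nearly identical buy blocks differ only in their hard-coded fraction, so
# they can be folded into one helper. Note the original comments out the orders
# for the first tranche; this sketch places them for symmetry. buy_tranche is a
# hypothetical name.
BUY_FRACTIONS = [0.19, 0.39, 0.59, 0.79, 0.94]   # one entry per 10-minute tranche

def buy_tranche(context, data, tranche):
    for i, stock in enumerate(context.stocks):
        # after the first tranche, only add while the price is still rising
        if tranche > 0 and data[stock].price <= context.last_bought_price[i]:
            continue
        held = context.portfolio.positions[stock].amount * data[stock].price
        budget = min(context.portfolio.cash, context.max / context.no_of_stocks - held)
        order_value(stock, BUY_FRACTIONS[tranche] * budget)
        context.last_bought_price[i] = data[stock].price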
python
#!/usr/bin/env python3

# ----------------------------------------------------------------------------
# Copyright (c) 2020--, Qiyun Zhu.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------

__name__ = 'woltka'
__description__ = 'Web of Life ToolKit App'
__version__ = '0.1.2'
__license__ = 'BSD-3-Clause'
__author__ = 'Qiyun Zhu'
__email__ = '[email protected]'
__url__ = 'https://github.com/qiyunzhu/woltka'
python
from __future__ import absolute_import # need to get system mendeley library from mendeley.exception import MendeleyException import mendeley as mendeley_lib import os def get_mendeley_session(): mendeley_client = mendeley_lib.Mendeley( client_id=os.getenv("MENDELEY_OAUTH2_CLIENT_ID"), client_secret=os.getenv("MENDELEY_OAUTH2_SECRET")) auth = mendeley_client.start_client_credentials_flow() session = auth.authenticate() return session def query_mendeley(doi): resp = None doc = None try: mendeley_session = get_mendeley_session() try: doc = mendeley_session.catalog.by_identifier( doi=doi, view='stats') except (UnicodeEncodeError, IndexError): return None if not doc: return None resp = {} resp["reader_count"] = doc.reader_count resp["reader_count_by_academic_status"] = doc.reader_count_by_academic_status resp["reader_count_by_subdiscipline"] = doc.reader_count_by_subdiscipline resp["reader_count_by_country"] = doc.reader_count_by_country resp["mendeley_url"] = doc.link resp["abstract"] = doc.abstract except (KeyError, MendeleyException): pass return resp
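# Usage sketch: needs MENDELEY_OAUTH2_CLIENT_ID and MENDELEY_OAUTH2_SECRET in the
# environment; the DOI below is only a placeholder, not a real reference.
if __name__ == '__main__':
    metrics = query_mendeley('10.1234/example-doi')
    if metrics:
        print(metrics['reader_count'], metrics['mendeley_url'])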
python
# Copyright 2014 Alistair Muldal <[email protected]> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import numpy as np def apply_gaussian_blur(network_pos, fluor, A=0.15, lamb=0.0315): """ Simulate optical blurring of fluorescence signal as a Gaussian function of distance (as described in Stetter et al., 2012) Arguments: ------------ network_pos: (2, ncells) float array the x, y positions of each cell (nominally in mm) fluor: (ncells, ntimebins) float array the fluorescence traces for each cell A: float, optional* the amplitude of the Gaussian function lamb: float, optional* the length constant of the Gaussian function Returns: ------------ blurred: (ncells, ntimebins) the blurred fluorescence traces * The default values of A and lamb were obtained by fitting the normal1 competition dataset, using theano_unblur.fit_blur() """ # handle HDF5 nodes network_pos = network_pos[:] fluor = fluor[:] blurmat = get_blurring_matrix(network_pos, A, lamb) crosstalk = np.dot((np.eye(blurmat.shape[0]) + blurmat), fluor) blurred_fluor = fluor + crosstalk return blurred_fluor def fake_positions(ncells, x_lim=(0, 1), y_lim=(0, 1)): """ Generate fake x, y coordinates for each cell, drawn from a uniform distribution bounded on x_lim and y_lim """ x = np.random.uniform(low=x_lim[0], high=x_lim[1], size=ncells) y = np.random.uniform(low=y_lim[0], high=y_lim[1], size=ncells) return np.vstack((x, y)).T def gauss(A, lamb, d): # we set the diagonal terms to zero return A * (np.exp(- (d / lamb) ** 2) - np.eye(d.shape[0])) def all_distances(pos): x, y = pos.T dx = (x[:, None] - x[None, :]) dy = (y[:, None] - y[None, :]) dist = np.sqrt((dx * dx) + (dy * dy)) return dist def get_blurring_matrix(pos, A, lamb): dist = all_distances(pos) # the amplitude still isn't quite right... blurmat = gauss(A, lamb, dist) return blurmat
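# Usage sketch: blur a small synthetic network with the default parameters.
# fake_positions returns (ncells, 2) coordinates, which is the layout
# all_distances unpacks via pos.T.
import numpy as np

np.random.seed(0)
pos = fake_positions(ncells=5)          # 5 cells on the unit square (nominally mm)
fluor = np.random.rand(5, 100)          # 5 cells x 100 time bins
blurred = apply_gaussian_blur(pos, fluor)
print(blurred.shape)                    # (5, 100)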
python
from . import item, user
python
import layer import torch.nn as nn import torch from torch.autograd import Variable try: import ipdb except ImportError: pass class Translator(object): def __init__(self, opt, model=None, dataset=None): self.opt = opt if model is None: checkpoint = torch.load(opt.model) model_opt = checkpoint['opt'] self.src_dict = checkpoint['dicts']['src'] self.tgt_dict = checkpoint['dicts']['tgt'] self.enc_rnn_size = model_opt.enc_rnn_size self.dec_rnn_size = model_opt.dec_rnn_size encoder = layer.Models.Encoder(model_opt, self.src_dict) decoder = layer.Models.Decoder(model_opt, self.tgt_dict) decIniter = layer.Models.DecInit(model_opt) model = layer.Models.NMTModel(encoder, decoder, decIniter) generator = nn.Sequential( nn.Linear(model_opt.dec_rnn_size // model_opt.maxout_pool_size, self.tgt_dict.size()), nn.LogSoftmax()) model.load_state_dict(checkpoint['model']) generator.load_state_dict(checkpoint['generator']) if opt.cuda: model.cuda() generator.cuda() else: model.cpu() generator.cpu() model.generator = generator else: self.src_dict = dataset['dicts']['src'] self.tgt_dict = dataset['dicts']['tgt'] self.enc_rnn_size = opt.enc_rnn_size self.dec_rnn_size = opt.dec_rnn_size self.opt.cuda = True if len(opt.gpus) >= 1 else False self.opt.n_best = 1 self.opt.replace_unk = False self.tt = torch.cuda if opt.cuda else torch self.model = model self.model.eval() self.copyCount = 0 def buildData(self, srcBatch, goldBatch): srcData = [self.src_dict.convertToIdx(b, layer.Constants.UNK_WORD) for b in srcBatch] tgtData = None if goldBatch: tgtData = [self.tgt_dict.convertToIdx(b, layer.Constants.UNK_WORD, layer.Constants.BOS_WORD, layer.Constants.EOS_WORD) for b in goldBatch] return layer.Dataset(srcData, tgtData, self.opt.batch_size, self.opt.cuda) def buildTargetTokens(self, pred, src, attn): pred_word_ids = [x.item() for x in pred] tokens = self.tgt_dict.convertToLabels(pred_word_ids, layer.Constants.EOS) tokens = tokens[:-1] # EOS if self.opt.replace_unk: for i in range(len(tokens)): if tokens[i] == layer.Constants.UNK_WORD: _, maxIndex = attn[i].max(0) tokens[i] = src[maxIndex[0]] return tokens def translateBatch(self, srcBatch, tgtBatch): batchSize = srcBatch[0].size(1) beamSize = self.opt.beam_size # (1) run the encoder on the src encStates, context = self.model.encoder(srcBatch) srcBatch = srcBatch[0] # drop the lengths needed for encoder decStates = self.model.decIniter(encStates[1]) # batch, dec_hidden # (3) run the decoder to generate sentences, using beam search # Expand tensors for each beam. context = context.data.repeat(1, beamSize, 1) decStates = decStates.unsqueeze(0).data.repeat(1, beamSize, 1) att_vec = self.model.make_init_att(context) padMask = srcBatch.data.eq(layer.Constants.PAD).transpose(0, 1).unsqueeze(0).repeat(beamSize, 1, 1).float() beam = [layer.Beam(beamSize, self.opt.cuda) for k in range(batchSize)] batchIdx = list(range(batchSize)) remainingSents = batchSize for i in range(self.opt.max_sent_length): # Prepare decoder input. 
input = torch.stack([b.getCurrentState() for b in beam if not b.done]).transpose(0, 1).contiguous().view(1, -1) g_outputs, decStates, attn, att_vec = self.model.decoder(input, decStates, context, padMask.view(-1, padMask.size(2)), att_vec) # g_outputs: 1 x (beam*batch) x numWords g_outputs = g_outputs.squeeze(0) g_out_prob = self.model.generator.forward(g_outputs) # batch x beam x numWords wordLk = g_out_prob.view(beamSize, remainingSents, -1).transpose(0, 1).contiguous() attn = attn.view(beamSize, remainingSents, -1).transpose(0, 1).contiguous() active = [] father_idx = [] for b in range(batchSize): if beam[b].done: continue idx = batchIdx[b] if not beam[b].advance(wordLk.data[idx], attn.data[idx]): active += [b] father_idx.append(beam[b].prevKs[-1]) # this is very annoying if not active: break # to get the real father index real_father_idx = [] for kk, idx in enumerate(father_idx): real_father_idx.append(idx * len(father_idx) + kk) # in this section, the sentences that are still active are # compacted so that the decoder is not run on completed sentences activeIdx = self.tt.LongTensor([batchIdx[k] for k in active]) batchIdx = {beam: idx for idx, beam in enumerate(active)} def updateActive(t, rnnSize): # select only the remaining active sentences view = t.data.view(-1, remainingSents, rnnSize) newSize = list(t.size()) newSize[-2] = newSize[-2] * len(activeIdx) // remainingSents return view.index_select(1, activeIdx).view(*newSize) decStates = updateActive(decStates, self.dec_rnn_size) context = updateActive(context, self.enc_rnn_size) att_vec = updateActive(att_vec, self.enc_rnn_size) padMask = padMask.index_select(1, activeIdx) # set correct state for beam search previous_index = torch.stack(real_father_idx).transpose(0, 1).contiguous() decStates = decStates.view(-1, decStates.size(2)).index_select(0, previous_index.view(-1)).view( *decStates.size()) att_vec = att_vec.view(-1, att_vec.size(1)).index_select(0, previous_index.view(-1)).view(*att_vec.size()) remainingSents = len(active) # (4) package everything up allHyp, allScores, allAttn = [], [], [] n_best = self.opt.n_best for b in range(batchSize): scores, ks = beam[b].sortBest() allScores += [scores[:n_best]] valid_attn = srcBatch.data[:, b].ne(layer.Constants.PAD).nonzero().squeeze(1) hyps, attn = zip(*[beam[b].getHyp(k) for k in ks[:n_best]]) attn = [a.index_select(1, valid_attn) for a in attn] allHyp += [hyps] allAttn += [attn] return allHyp, allScores, allAttn, None def translate(self, srcBatch, goldBatch): # (1) convert words to indexes dataset = self.buildData(srcBatch, goldBatch) # (wrap(srcBatch), lengths), (wrap(tgtBatch), ), indices src, tgt, indices = dataset[0] # (2) translate pred, predScore, attn, _ = self.translateBatch(src, tgt) pred, predScore, attn = list(zip( *sorted(zip(pred, predScore, attn, indices), key=lambda x: x[-1])))[:-1] # (3) convert indexes to words predBatch = [] for b in range(src[0].size(1)): predBatch.append( [self.buildTargetTokens(pred[b][n], srcBatch[b], attn[b][n]) for n in range(self.opt.n_best)] ) return predBatch, predScore, None
python
from server.settings.base import * # noqa
python
import numpy as np
import scipy.stats as stats


class SimpleImputer:
    """ Simple mean/most frequent imputation. """
    def __init__(self, ncat, method='mean'):
        self.ncat = ncat
        assert method in ['mean', 'mode'], "%s is not supported as imputation method." % method
        self.method = method

    def fit(self, data):
        assert data.shape[1] == len(self.ncat), "Data does not match the predefined number of variables."
        self.data = data
        self.values = np.zeros(data.shape[1])
        for j in range(data.shape[1]):
            # Filter missing values first
            ref_data = self.data[~np.isnan(self.data[:, j]), j]
            if self.ncat[j] == 1:
                if self.method == 'mode':
                    self.values[j] = stats.mode(ref_data)[0]
                elif self.method == 'mean':
                    self.values[j] = np.mean(ref_data)
            else:
                self.values[j] = stats.mode(ref_data)[0]
        return self

    def transform(self, data):
        data = data.copy()
        if data.ndim == 1:
            data = np.expand_dims(data, axis=0)
        missing_coordinates = np.where(np.isnan(data))
        for j in range(data.shape[1]):
            ind = missing_coordinates[0][missing_coordinates[1] == j]
            data[ind, j] = self.values[j]
        return data
python
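A minimal usage sketch for the imputer above; the toy matrix and the ncat vector are invented for illustration (ncat[j] == 1 marks column j as continuous, anything else as categorical):

import numpy as np

X = np.array([[1.0, 0.0],
              [2.0, 1.0],
              [np.nan, 1.0],
              [4.0, np.nan]])
imp = SimpleImputer(ncat=[1, 3], method='mean').fit(X)
X_filled = imp.transform(X)  # NaNs replaced by the column mean / most frequent value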
from netmiko.cdot.cdot_cros_ssh import CdotCrosSSH

__all__ = ["CdotCrosSSH"]
python
from .helpers import deprecated_alias


@deprecated_alias('ioat_scan_copy_engine')
@deprecated_alias('scan_ioat_copy_engine')
def ioat_scan_accel_engine(client):
    """Enable IOAT accel engine.
    """
    return client.call('ioat_scan_accel_engine')
python
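The helpers module that provides deprecated_alias is not included here. The sketch below shows one plausible way such a decorator could work; the warning text and the alias-registration mechanics are assumptions for illustration, not the actual implementation:

import functools
import warnings


def deprecated_alias(old_name):
    """Hypothetical sketch: expose old_name as a warning-emitting alias of the decorated RPC."""
    def decorator(func):
        @functools.wraps(func)
        def old(*args, **kwargs):
            warnings.warn("%s is deprecated, use %s instead." % (old_name, func.__name__),
                          DeprecationWarning)
            return func(*args, **kwargs)
        # a CLI dispatcher could register both names; here we just remember the alias
        func.deprecated_aliases = getattr(func, 'deprecated_aliases', []) + [old_name]
        return func
    return decorator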
import os
import time

import paddle
import paddle.fluid as fluid

from network import word2vec_net
from conf import *

import logging

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)


def get_dataset_reader(inputs):
    dataset = fluid.DatasetFactory().create_dataset()
    dataset.set_use_var(inputs)
    pipe_command = "python dataset_generator.py"
    dataset.set_pipe_command(pipe_command)
    dataset.set_batch_size(batch_size)
    thread_num = cpu_num
    dataset.set_thread(thread_num)
    return dataset


def train():
    loss, inputs = word2vec_net(dict_size, embedding_size, neg_num)

    optimizer = fluid.optimizer.SGD(
        learning_rate=fluid.layers.exponential_decay(
            learning_rate=learning_rate,
            decay_steps=decay_steps,
            decay_rate=decay_rate,
            staircase=True))
    optimizer.minimize(loss)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    dataset = get_dataset_reader(inputs)
    file_list = [str(train_files_path) + "/%s" % x for x in os.listdir(train_files_path)]

    for epoch in range(num_epochs):
        dataset.set_filelist(file_list)
        start_time = time.time()

        class fetch_vars(fluid.executor.FetchHandler):
            def handler(self, fetch_target_vars):
                loss_value = fetch_target_vars[0]
                logger.info(
                    "epoch -> {}, loss -> {}, at: {}".format(epoch, loss_value, time.ctime()))

        exe.train_from_dataset(program=fluid.default_main_program(),
                               dataset=dataset,
                               fetch_handler=fetch_vars([loss.name], 5, True))
        end_time = time.time()

        # save under a separate name so the model_path setting from conf is not clobbered;
        # the original path also embedded a fleet worker index, but no role maker is set up
        # in this single-process script
        save_path = str(model_path) + '/epoch_' + str(epoch)
        fluid.io.save_persistables(executor=exe, dirname=save_path)

    logger.info("Train Success!")


if __name__ == '__main__':
    train()
python
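The training script above pulls its hyperparameters from conf via a star import. conf.py is not shown here; the values below are only an illustration of the names the script references, not the project's actual settings:

# hypothetical conf.py - names match what train() and get_dataset_reader() expect
batch_size = 100
cpu_num = 4
dict_size = 354051
embedding_size = 300
neg_num = 5
learning_rate = 1.0
decay_steps = 100000
decay_rate = 0.999
num_epochs = 10
train_files_path = "./train_data"
model_path = "./models"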
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from lndgrpc.compiled import signer_pb2 as lndgrpc_dot_compiled_dot_signer__pb2 class SignerStub(object): """Signer is a service that gives access to the signing functionality of the daemon's wallet. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.SignOutputRaw = channel.unary_unary( '/signrpc.Signer/SignOutputRaw', request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString, response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignResp.FromString, ) self.ComputeInputScript = channel.unary_unary( '/signrpc.Signer/ComputeInputScript', request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString, response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.FromString, ) self.SignMessage = channel.unary_unary( '/signrpc.Signer/SignMessage', request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq.SerializeToString, response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.FromString, ) self.VerifyMessage = channel.unary_unary( '/signrpc.Signer/VerifyMessage', request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageReq.SerializeToString, response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.FromString, ) self.DeriveSharedKey = channel.unary_unary( '/signrpc.Signer/DeriveSharedKey', request_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyRequest.SerializeToString, response_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.FromString, ) class SignerServicer(object): """Signer is a service that gives access to the signing functionality of the daemon's wallet. """ def SignOutputRaw(self, request, context): """ SignOutputRaw is a method that can be used to generated a signature for a set of inputs/outputs to a transaction. Each request specifies details concerning how the outputs should be signed, which keys they should be signed with, and also any optional tweaks. The return value is a fixed 64-byte signature (the same format as we use on the wire in Lightning). If we are unable to sign using the specified keys, then an error will be returned. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ComputeInputScript(self, request, context): """ ComputeInputScript generates a complete InputIndex for the passed transaction with the signature as defined within the passed SignDescriptor. This method should be capable of generating the proper input script for both regular p2wkh output and p2wkh outputs nested within a regular p2sh output. Note that when using this method to sign inputs belonging to the wallet, the only items of the SignDescriptor that need to be populated are pkScript in the TxOut field, the value in that same field, and finally the input index. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SignMessage(self, request, context): """ SignMessage signs a message with the key specified in the key locator. The returned signature is fixed-size LN wire format encoded. The main difference to SignMessage in the main RPC is that a specific key is used to sign the message instead of the node identity private key. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def VerifyMessage(self, request, context): """ VerifyMessage verifies a signature over a message using the public key provided. The signature must be fixed-size LN wire format encoded. The main difference to VerifyMessage in the main RPC is that the public key used to sign the message does not have to be a node known to the network. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeriveSharedKey(self, request, context): """ DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key derivation between the ephemeral public key in the request and the node's key specified in the key_desc parameter. Either a key locator or a raw public key is expected in the key_desc, if neither is supplied, defaults to the node's identity private key: P_shared = privKeyNode * ephemeralPubkey The resulting shared public key is serialized in the compressed format and hashed with sha256, resulting in the final key length of 256bit. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_SignerServicer_to_server(servicer, server): rpc_method_handlers = { 'SignOutputRaw': grpc.unary_unary_rpc_method_handler( servicer.SignOutputRaw, request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.FromString, response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignResp.SerializeToString, ), 'ComputeInputScript': grpc.unary_unary_rpc_method_handler( servicer.ComputeInputScript, request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignReq.FromString, response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.SerializeToString, ), 'SignMessage': grpc.unary_unary_rpc_method_handler( servicer.SignMessage, request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq.FromString, response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.SerializeToString, ), 'VerifyMessage': grpc.unary_unary_rpc_method_handler( servicer.VerifyMessage, request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageReq.FromString, response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.SerializeToString, ), 'DeriveSharedKey': grpc.unary_unary_rpc_method_handler( servicer.DeriveSharedKey, request_deserializer=lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyRequest.FromString, response_serializer=lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'signrpc.Signer', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Signer(object): """Signer is a service that gives access to the signing functionality of the daemon's wallet. 
""" @staticmethod def SignOutputRaw(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/SignOutputRaw', lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString, lndgrpc_dot_compiled_dot_signer__pb2.SignResp.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ComputeInputScript(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/ComputeInputScript', lndgrpc_dot_compiled_dot_signer__pb2.SignReq.SerializeToString, lndgrpc_dot_compiled_dot_signer__pb2.InputScriptResp.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def SignMessage(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/SignMessage', lndgrpc_dot_compiled_dot_signer__pb2.SignMessageReq.SerializeToString, lndgrpc_dot_compiled_dot_signer__pb2.SignMessageResp.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def VerifyMessage(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/VerifyMessage', lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageReq.SerializeToString, lndgrpc_dot_compiled_dot_signer__pb2.VerifyMessageResp.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeriveSharedKey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/signrpc.Signer/DeriveSharedKey', lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyRequest.SerializeToString, lndgrpc_dot_compiled_dot_signer__pb2.SharedKeyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
python
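The generated code above only defines the RPC surface. Below is a rough client-side sketch using the SignerStub defined in that module; the endpoint and the message field names (msg, key_loc, key_family, key_index, signature) are assumptions based on lnd's signer proto, and a real node would also require TLS credentials and a macaroon, both omitted here:

import grpc

from lndgrpc.compiled import signer_pb2

channel = grpc.insecure_channel("localhost:10009")  # placeholder endpoint, no TLS/macaroon
stub = SignerStub(channel)  # SignerStub as defined in the module above

req = signer_pb2.SignMessageReq(
    msg=b"hello",
    key_loc=signer_pb2.KeyLocator(key_family=6, key_index=0),  # assumed field names
)
resp = stub.SignMessage(req)
print(resp.signature)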
""" @name: PyHouse/Project/src/_test/test_testing_mixin.py @author: D. Brian Kimmel @contact: [email protected] @copyright: (c) 2014-2019 by D. Brian Kimmel @license: MIT License @note: Created on Oct 6, 2014 @Summary: Passed all 16 tests - DBK - 2019-06-23 """ from Modules.Core import PyHouseInformation from Modules.Computer.computer import ComputerInformation from Modules.House import HouseInformation __updated__ = '2020-02-04' # Import system type stuff from twisted.trial import unittest # Import PyMh files from _test.testing_mixin import SetupPyHouseObj from Modules.Core.Utilities.debug_tools import PrettyFormatAny class SetupMixin: def setUp(self): self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj() def setUpObj(self): self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj() class A0(unittest.TestCase): def test_00_Print(self): _x = PrettyFormatAny.form('_test', 'title', 190) # so it is defined when printing is cleaned up. print('Id: test_testing_mixin') class A1_Setup(SetupMixin, unittest.TestCase): """ This section tests the SetupMixin Class """ def setUp(self): pass def test_01_BuildObjs(self): """ Be sure that the PyHouse obj is built correctly """ l_obj = SetupPyHouseObj().BuildPyHouseObj() print(PrettyFormatAny.form(l_obj, 'A1-02-A - PyHouseObj', 90)) self.assertIsInstance(l_obj, PyHouseInformation) self.assertIsInstance(l_obj.Computer, ComputerInformation) self.assertIsInstance(l_obj.House, HouseInformation) def test_03_YAML(self): """ Be sure that the YAML contains the right stuff. """ l_obj = SetupPyHouseObj().BuildPyHouseObj() # print(PrettyFormatAny.form(l_obj, 'A1-03-A - PyHouseObj', 90)) print(PrettyFormatAny.form(l_obj._Config, 'A1-03-B - _Config', 90)) class B1_Empty(SetupMixin, unittest.TestCase): """ This section tests the SetupMixin Class """ def setUp(self): SetupMixin.setUpObj(self) pass def test_01_Obj(self): # print(PrettyFormatAny.form(self.m_pyhouse_obj, 'B1-01-A - PyHouse')) pass def test_02_Computer(self): # print(PrettyFormatAny.form(self.m_pyhouse_obj.Computer, 'B1-02-A - PyHouse.Computer')) pass def test_03_House(self): # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'B1-03-A - PyHouse.House')) pass def test_04_Location(self): # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'B1-04-A - PyHouse.House.Location')) pass class B2_Long(SetupMixin, unittest.TestCase): """ This section tests the SetupMixin Class """ def setUp(self): SetupMixin.setUpObj(self) pass def test_01_Obj(self): # print(PrettyFormatAny.form(self.m_pyhouse_obj, 'B2-01-A - PyHouse')) pass def test_02_Computer(self): # print(PrettyFormatAny.form(self.m_pyhouse_obj.Computer, 'B2-02-A - PyHouse.Computer')) pass def test_03_House(self): # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'B2-03-A - PyHouse.House')) pass def test_04_Location(self): # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'B2-04-A - PyHouse.House.Location')) pass class C1_Build(SetupMixin, unittest.TestCase): """ This section tests the reading and writing of XML used by inernet. """ def setUp(self): self.m_api = SetupPyHouseObj() def test_01_Computer(self): l_config = self.m_api._build_computer() # print(PrettyFormatAny.form(l_config, 'C1-01-A - Config')) # self.assertDictEqual(l_config.Email, {}) # self.assertDictEqual(l_config.InternetConnection, {}) # self.assertDictEqual(l_config.Nodes, {}) # self.assertDictEqual(l_config.Web, {}) pass # ## END DBK
python
import unittest

import gtirb

from helpers import SearchScope, parameterize_one


class ByteIntervalsOnTests(unittest.TestCase):
    @parameterize_one(
        "scope", (SearchScope.ir, SearchScope.module, SearchScope.section)
    )
    def test_byte_intervals_on(self, scope):
        ir = gtirb.IR()
        m = gtirb.Module(name="test", ir=ir)
        s = gtirb.Section(module=m)
        search_in = scope.select(ir, m, s, None)

        bi1 = gtirb.ByteInterval(address=0x1000, size=4, section=s)
        bi2 = gtirb.ByteInterval(address=0x1004, size=4, section=s)

        found = set(search_in.byte_intervals_on(0x1000))
        self.assertEqual(found, {bi1})

        found = set(search_in.byte_intervals_on(0x1001))
        self.assertEqual(found, {bi1})

        found = set(search_in.byte_intervals_on(range(0x1000, 0x1008)))
        self.assertEqual(found, {bi1, bi2})

        found = set(search_in.byte_intervals_on(range(0x1000, 0x1008, 16)))
        self.assertEqual(found, {bi1, bi2})

        # Change the address to verify we update the index
        bi2.address = 0x2000
        found = set(search_in.byte_intervals_on(0x1005))
        self.assertEqual(found, set())

        found = set(search_in.byte_intervals_on(0x2001))
        self.assertEqual(found, {bi2})

        # Discard the interval to verify we update the index
        bi2.section = None
        found = set(search_in.byte_intervals_on(0x2001))
        self.assertEqual(found, set())

        # Now add it back to verify we update the index
        s.byte_intervals.add(bi2)
        found = set(search_in.byte_intervals_on(0x2001))
        self.assertEqual(found, {bi2})

    @parameterize_one(
        "scope", [SearchScope.ir, SearchScope.module, SearchScope.section]
    )
    def test_byte_intervals_overlapping(self, scope):
        ir = gtirb.IR()
        m = gtirb.Module(name="test", ir=ir)
        s = gtirb.Section(module=m)
        search_in = scope.select(ir, m, s, None)

        bi1 = gtirb.ByteInterval(address=0x1000, size=8, section=s)
        bi2 = gtirb.ByteInterval(address=0x1004, size=4, section=s)

        found = set(search_in.byte_intervals_on(0x1005))
        self.assertEqual(found, {bi1, bi2})
python
from django.contrib import admin

from .models import Tags, Category, Blog

# Register your models here.
admin.site.register([Tags, Category, Blog])
python
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

from typing import Any, Dict, Optional, Tuple, Union

from packaging.utils import canonicalize_name as canonicalize_project_name

from pants.engine.target import InvalidFieldException
from pants.util.collections import ensure_str_list

OVERRIDES_TYPE = Optional[Dict[Union[str, Tuple[str, ...]], Dict[str, Any]]]


def flatten_overrides_to_dependency_field(
    overrides_value: OVERRIDES_TYPE, *, macro_name: str, build_file_dir: str
) -> dict[str, list[str]]:
    """Flatten `overrides` by ensuring that only `dependencies` is specified."""
    result: dict[str, list[str]] = {}
    for maybe_key_or_keys, override in (overrides_value or {}).items():
        keys = (maybe_key_or_keys,) if isinstance(maybe_key_or_keys, str) else maybe_key_or_keys
        for _raw_key in keys:
            key = canonicalize_project_name(_raw_key)
            for field, value in override.items():
                if field != "dependencies":
                    raise InvalidFieldException(
                        "Can only specify the `dependencies` field (for now) in the `overrides` "
                        f"field of the {macro_name} macro in the BUILD file in {build_file_dir} "
                        f"for the key `{key}`, but you specified `{field}`."
                    )
                if key in result:
                    raise InvalidFieldException(
                        f"Conflicting overrides in the `overrides` field of "
                        f"the {macro_name} macro in the BUILD file in {build_file_dir} for the key "
                        f"`{key}` for the field `{field}`. You cannot specify the same field name "
                        "multiple times for the same key.\n\n"
                        f"(One override sets the field to `{repr(result[key])}` "
                        f"but another sets to `{repr(value)}`.)"
                    )
                try:
                    normalized_value = ensure_str_list(value)
                except ValueError:
                    raise InvalidFieldException(
                        f"The 'overrides' field in the {macro_name} macro in the BUILD file in "
                        f"{build_file_dir} must be `dict[str | tuple[str, ...], dict[str, Any]]`, "
                        f"but was `{repr(value)}` with type `{type(value).__name__}`."
                    )
                result[key] = normalized_value
    return result
python
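A short illustration of what the flattening does; the requirement names and the macro/BUILD-file labels are made up for the example:

overrides = {
    "Django": {"dependencies": ["#setuptools"]},
    ("requests", "urllib3"): {"dependencies": ["//3rdparty:certs"]},
}
flattened = flatten_overrides_to_dependency_field(
    overrides, macro_name="python_requirements", build_file_dir="3rdparty/python"
)
# {'django': ['#setuptools'], 'requests': ['//3rdparty:certs'], 'urllib3': ['//3rdparty:certs']}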
import numpy as np

import mbuild as mb
from mbuild.lib.bulk_materials import AmorphousSilicaBulk
from mbuild.lib.recipes import SilicaInterface
from mbuild.tests.base_test import BaseTest


class TestSilicaInterface(BaseTest):
    def test_silica_interface(self):
        tile_x = 1
        tile_y = 1
        thickness = 0.6

        interface = SilicaInterface(
            bulk_silica=AmorphousSilicaBulk(),
            tile_x=tile_x,
            tile_y=tile_y,
            thickness=thickness,
        )

        thickness_tolerance = 0.05
        z = [atom.pos[2] for atom in interface.particles() if atom.name == "Si"]
        assert abs(max(z) - min(z) - thickness) < thickness_tolerance

        density_tolerance = 0.1
        area = interface.box.lengths[0] * interface.box.lengths[1]
        oh_count = len(list(interface.particles_by_name("O_surface")))
        assert abs((oh_count / area) - 5.0) < density_tolerance

    def test_seed(self):
        tile_x = 1
        tile_y = 1
        thickness = 0.6
        seed = 12345

        interface1 = SilicaInterface(
            bulk_silica=AmorphousSilicaBulk(),
            tile_x=tile_x,
            tile_y=tile_y,
            thickness=thickness,
            seed=seed,
        )
        atom_names1 = np.array([atom.name for atom in interface1.particles()])

        interface2 = mb.recipes.SilicaInterface(
            bulk_silica=AmorphousSilicaBulk(),
            tile_x=tile_x,
            tile_y=tile_y,
            thickness=thickness,
            seed=seed,
        )
        atom_names2 = np.array([atom.name for atom in interface2.particles()])

        assert np.array_equal(atom_names1, atom_names2)
        assert np.array_equal(interface1.xyz, interface2.xyz)
python
import unittest import importlib import asyncio import time,os from contextlib import contextmanager import hashlib from datetime import datetime @contextmanager def add_to_path(p): import sys old_path = sys.path sys.path = sys.path[:] sys.path.insert(0, p) try: yield finally: sys.path = old_path def path_import(name,absolute_path): '''implementation taken from https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly''' with add_to_path(os.path.dirname(absolute_path)): spec = importlib.util.spec_from_file_location(name, absolute_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module orm = path_import('orm','../www/orm.py') models = path_import('models','../www/models.py') def next_email(): t = str(time.time()) a = hashlib.sha256(t.encode('ascii')) return a.hexdigest()[-6:] #orm.setDatabase('../www/awesome.db') class TestOrm(unittest.TestCase): #tester for basic sql executions def test_insert_select(self): loop = asyncio.get_event_loop() #insert one entry for every table idd = models.next_id() insert_user = "insert into users (email, passwd, admin, name, image, created_at, id) values (?,?,?,?,?,?,?)" args = (next_email()+'@dummy.com','12345678',True,'fathergod','about:blank','19260817',idd) affected_insert = loop.run_until_complete(orm.execute(insert_user,args)) self.assertEqual(affected_insert,1) checked_insert = "select * from users where id=?" cond = (idd,) result = loop.run_until_complete(orm.select(checked_insert,cond)) self.assertEqual(len(result),1) #print(result) def test_class_method(self): now = datetime.now() signature = str(now.minute) orm.setDatabase('../www/awesome.db') loop = asyncio.get_event_loop() test_users = [ models.User(name=str(time.time()),passwd=signature,email=next_email()+'@dummy.com',image="about:blank",admin=False), models.User(name=str(time.time()),passwd=signature,email=next_email()+'@dummy.com',image="about:blank",admin=False), models.User(name=str(time.time()),passwd=signature,email=next_email()+'@dummy.com',image="about:blank",admin=False) ] for u in test_users: loop.run_until_complete(u.save()) inserted = loop.run_until_complete(models.User.findAll('passwd=?',[signature])) self.assertEqual(len(inserted),3) for u in test_users: u.passwd = signature + '1' loop.run_until_complete(u.update()) modified = loop.run_until_complete(models.User.findAll('passwd=?',[signature+'1'])) self.assertEqual(len(modified),3) #print(modified) for u in test_users: loop.run_until_complete(u.remove()) after = loop.run_until_complete(models.User.findAll('passwd=?',[signature+'1'])) self.assertEqual(len(after),0) def test_find(self): loop = asyncio.get_event_loop() num = loop.run_until_complete(models.User.findNumber('count(*)','email like ?',['%dummy%'])) #print(num) self.assertIsNot(num[0],0) res = loop.run_until_complete(models.User.find('0015615749997056198eaebaa0246339e1e1ac3e1883125000')) self.assertIsNot(res,None) if __name__=="__main__": unittest.main()
python