metadata (dict) | text (string, 60 to 3.49M characters)
---|---
{
"source": "JontyBurden/local-movies",
"score": 4
} |
#### File: local-movies/db/local-movies.py
```python
import sqlite3, csv
class csv_read(object):
def csv_file(self):
self.readFile('movies.csv')
def readFile(self, filename):
conn = sqlite3.connect('local-movies.db')
cur = conn.cursor()
cur.execute("""CREATE TABLE IF NOT EXISTS movies (movie_title varchar)""")
with open(filename) as f:
reader = csv.reader(f)
for field in reader:
cur.execute("""INSERT INTO movies VALUES (?);""", field)
conn.commit()
conn.close()
c = csv_read().csv_file()
``` |
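A quick way to check the import is to query the table back out of SQLite. The snippet below is a minimal sketch, assuming the loader above has already created `local-movies.db`; note that the `INSERT ... VALUES (?)` above only works when each CSV row has exactly one column.
```python
import sqlite3

# Assumes local-movies.db was created by the loader above.
conn = sqlite3.connect('local-movies.db')
cur = conn.cursor()
cur.execute("SELECT COUNT(*) FROM movies")
print("rows loaded:", cur.fetchone()[0])
for (title,) in cur.execute("SELECT movie_title FROM movies LIMIT 5"):
    print(title)
conn.close()
```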
{
"source": "jontymorris/StockWatch",
"score": 3
} |
#### File: StockWatch/stockwatch/market.py
```python
import numpy as np
from stockwatch import util
from datetime import datetime, timedelta
import config
class Market:
@staticmethod
def vwap(df):
''' Calculates Volume-weighted average price '''
q = df.Volume.values
p = (df.Close.values + df.High.values + df.Low.values) / 3
if not q.any():
return df.assign(vwap=p)
return df.assign(vwap=(p * q).cumsum() / q.cumsum())
@staticmethod
def should_buy(market_price, history, margin_percent):
''' Decides if the bot should buy or not '''
# ignore zero divide errors
np.seterr(divide='ignore', invalid='ignore')
try:
# calculate vwap
history = history.groupby(history.index.date, group_keys=False)
history = history.apply(Market.vwap)
# calculate direction
moves = np.gradient(history['vwap'])
median = np.median(moves)
average = np.average(moves)
# calculate margin price
margin_price = history['vwap'][-1]
margin_price -= (margin_price * (margin_percent/100))
# agree if going up and below margin
if median > 0 and average > 0 and market_price <= margin_price:
return True
except Exception as e:
util.log(f'Warning: {e}')
return False
@staticmethod
def should_sell(original_price, market_price):
''' Decides if the bot should sell or not '''
difference = market_price - original_price
percent_change = (difference / original_price) * 100
# have we reached our profit percentage?
return percent_change >= config.sell_profit_margin
@staticmethod
def minutes_till_trading():
''' Time till NZX is open for trading '''
now = util.get_nz_time()
open_dt = datetime.strptime(config.open_time, '%I:%M%p')
close_dt = datetime.strptime(config.close_time, '%I:%M%p')
# market is open today
if now.strftime('%A').lower() not in config.days_closed:
# open now
if now.time() >= open_dt.time() and now.time() <= close_dt.time():
return 0
# hasn't opened yet
if now.time() < open_dt.time():
open_time = datetime.combine(now, open_dt.time()) - now
return open_time.total_seconds() / 60
# market has closed
for i in range(7):
future = now + timedelta(days=(i+1))
if future.strftime('%A').lower() not in config.days_closed:
next_open = datetime.combine(future, open_dt.time()) - now
return next_open.total_seconds() / 60
util.log("market is never open according to config", error=True)
``` |
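For reference, here is a minimal standalone sketch of the VWAP formula used in `Market.vwap`, built on a hand-made DataFrame so it runs without the `stockwatch` or `config` modules; the column names mirror the ones the method expects.
```python
import numpy as np
import pandas as pd

# Tiny price/volume series with the columns Market.vwap expects.
df = pd.DataFrame({
    'Close':  [10.0, 10.5, 10.2],
    'High':   [10.2, 10.7, 10.4],
    'Low':    [ 9.8, 10.3, 10.0],
    'Volume': [ 100,  150,   50],
})

p = (df.Close.values + df.High.values + df.Low.values) / 3  # typical price per bar
q = df.Volume.values
df = df.assign(vwap=(p * q).cumsum() / q.cumsum())          # cumulative volume weighting
print(df)
```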
{
"source": "jontypage/trl",
"score": 2
} |
#### File: trl/trl/t5.py
```python
from transformers import T5Model, T5ForConditionalGeneration, T5PreTrainedModel
from transformers import top_k_top_p_filtering
from torch import nn
import torch.nn.functional as F
import torch
class T5ValueHead(nn.Module):
"""The T5ValueHead class implements a head for T5 that returns a scalar for each output token."""
def __init__(self, config):
super().__init__()
self.detach_head = False
if config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
self.activation = nn.Tanh()
self.first_dropout = nn.Dropout(config.dropout_rate)
self.last_dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states, cls_index=None):
if self.detach_head:
output = hidden_states.detach()
else:
output = hidden_states
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
class T5HeadWithValueModel(T5PreTrainedModel):
"""The T5HeadWithValueModel class implements a T5 language model with a secondary, scalar head."""
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = T5Model(config)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
self.v_head = T5ValueHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.transformer.get_encoder()
def detach_value_head(self):
self.v_head.detach_head = True
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None
):
transformer_outputs = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
hidden_states = transformer_outputs[0]
past_key_values = transformer_outputs[1]
lm_logits = self.lm_head(hidden_states)
value = self.v_head(hidden_states).squeeze(-1)
outputs = (lm_logits,) + (past_key_values,) + (value,)
return outputs
class T5ForConditionalGenerationHeadWithValueModel(T5PreTrainedModel):
"""The T5HeadWithValueModel class implements a T5 language model with a secondary, scalar head."""
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
config.output_hidden_states = True
self.transformer = T5ForConditionalGeneration(config)
self.v_head = T5ValueHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.transformer.get_output_embeddings()
def get_encoder(self):
return self.transformer.get_encoder()
def detach_value_head(self):
self.v_head.detach_head = True
def forward(
self,
input_ids=None,
attention_mask=None,
labels=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None
):
transformer_outputs = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
if labels is not None:
last_hidden_state = transformer_outputs[3][-1]
lm_logits = transformer_outputs[1]
else:
last_hidden_state = transformer_outputs[2][-1]
lm_logits = transformer_outputs[0]
value = self.v_head(last_hidden_state).squeeze(-1)
outputs = (lm_logits,) + (transformer_outputs,) + (value,)
return outputs
def respond_to_batch(model, input_ids, txt_len=15, top_k=50, top_p=0.95):
"""Sample text from language model."""
encoder = model.transformer.get_encoder()
attention_mask = torch.ones(input_ids.shape, dtype=torch.long)
attention_mask[input_ids == model.config.pad_token_id] = 0
encoder_outputs = encoder(input_ids, attention_mask=attention_mask, return_dict=True)
batch_size = input_ids.shape[0]
decoder_input_ids = torch.ones((batch_size, 1), dtype=torch.long) * model.config.decoder_start_token_id
for i in range(txt_len):
# Get Logits
outputs = model(decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs)
next_token_logits = outputs[0][:, -1, :]
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
# Sample
probs = F.softmax(next_token_logits, dim=-1)
_, next_token = torch.max(probs, dim=1) # Greedy Decoding
# next_token = torch.multinomial(probs, num_samples=1).squeeze(1) # Multinomial Sampling
next_token[torch.any(decoder_input_ids == model.config.eos_token_id, axis=1)] = model.config.pad_token_id
decoder_input_ids = torch.cat([decoder_input_ids, next_token.unsqueeze(-1)], dim=-1)
return decoder_input_ids[:, 1:]
``` |
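The decoding loop in `respond_to_batch` boils down to filtering the next-token logits and then picking a token. Below is a self-contained sketch of that single step on random logits, so it runs without T5 weights; the simple top-k mask is a stand-in for `transformers.top_k_top_p_filtering`, not the same function.
```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
batch_size, vocab_size, top_k = 2, 32, 5
next_token_logits = torch.randn(batch_size, vocab_size)  # stand-in for model output

# Keep only the top-k logits per row, set the rest to -inf
# (a simplified stand-in for transformers' top_k_top_p_filtering).
kth = torch.topk(next_token_logits, top_k, dim=-1).values[..., -1, None]
filtered = next_token_logits.masked_fill(next_token_logits < kth, float('-inf'))

probs = F.softmax(filtered, dim=-1)
greedy = torch.argmax(probs, dim=-1)                          # greedy decoding, as in the loop above
sampled = torch.multinomial(probs, num_samples=1).squeeze(1)  # multinomial alternative
print(greedy, sampled)
```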
{
"source": "jontyparab/my_django_apps",
"score": 2
} |
#### File: my_django_apps/accounts/views.py
```python
from django.contrib import messages
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.views import View
@login_required
def index(request):
return render(request, 'accounts/index.html')
class SignUpView(View):
def get(self, request):
context = {}
return render(request, 'registration/sign-up.html', context)
def post(self, request):
if request.POST['password1'] == '' or request.POST['username'] == '':
messages.error(request, 'Username or password cannot be empty.')
elif len(request.POST['password1']) < 8:
messages.error(request, 'Password is too short.')
elif request.POST['password1'] == request.POST['password2']:
try:
user = User.objects.create_user(request.POST['username'], password=request.POST['password1'])
user.save()
login(request, user)
messages.success(request, 'Signed Up and Logged In to your account.')
return redirect('accounts:home')
except IntegrityError:
messages.error(request, 'That username has already been taken. Please choose a new username.')
else:
messages.error(request, 'Passwords did not match.')
return render(request, 'registration/sign-up.html')
class LoginView(View):
def get(self, request):
context = {}
return render(request, 'registration/login.html', context)
def post(self, request):
context = {}
user = authenticate(request, username=request.POST['username'], password=request.POST['password'])
if user is None:
messages.error(request, "Invalid Credentials")
return render(request, 'registration/login.html')
else:
login(request, user)
messages.success(request, 'Logged In Successfully!!')
return redirect('accounts:home')
class LogoutView(LoginRequiredMixin, View):
def get(self, request):
return render(request, 'registration/logged-out.html')
def post(self, request):
logout(request)
messages.info(request, 'Logged out successfully.')
return redirect('accounts:login')
``` |
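The views above redirect to the `accounts:home` and `accounts:login` URL names, so they need a matching URLconf. A hypothetical `accounts/urls.py` might look like the sketch below; the paths themselves are illustrative.
```python
from django.urls import path

from . import views

app_name = 'accounts'  # matches the 'accounts:...' names used in the redirects above

urlpatterns = [
    path('', views.index, name='home'),
    path('sign-up/', views.SignUpView.as_view(), name='sign_up'),
    path('login/', views.LoginView.as_view(), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
]
```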
{
"source": "JontySR/htm",
"score": 4
} |
#### File: JontySR/htm/markov.py
```python
import markovify
from util import *
def generate(filename):
# Get raw text as string.
with open(filename) as f:
text = f.read()
# Build the model.
text_model = markovify.Text(text, state_size=2)
# Print five randomly-generated sentences
for i in range(5):
print(text_model.make_sentence())
def create_model(file):
text = read_file(file)
output = open(file+'.json', 'w')
output.write(markovify.Text(text, state_size=2).to_json())
output.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'path', help='Local Path for text model (.txt)')
args = parser.parse_args()
generate(args.path)
``` |
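`create_model` above is meant to persist the Markov chain as JSON. A minimal sketch of that round trip with markovify's `to_json`/`from_json` is shown below; `corpus.txt` is a placeholder path.
```python
import markovify

# Build a model from a plain-text corpus (placeholder path).
with open('corpus.txt') as f:
    text_model = markovify.Text(f.read(), state_size=2)

# Serialise the model to JSON and write it out.
with open('corpus.txt.json', 'w') as f:
    f.write(text_model.to_json())

# Later: restore the model and generate text without re-reading the corpus.
with open('corpus.txt.json') as f:
    restored = markovify.Text.from_json(f.read())
print(restored.make_sentence())
```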
{
"source": "Jonulo/web-scraping",
"score": 3
} |
#### File: Jonulo/web-scraping/newspaper_scraper.py
```python
import requests
# html provides the parser that turns raw HTML into a document we can query with XPath
import lxml.html as html
# os is used to create a folder named with today's date
import os
# datetime is used to get the current date
import datetime
HOME_URL = 'https://www.larepublica.co/'
XPATH_LINK_TO_ARTICLE = '//div[@class="V_Trends"]/h2/a/@href'
XPATH_TITLE = '//h2/a[contains(@class, "") and not(@href)]/text()'
XPATH_SUMMARY = '//div[@class="lead"]/p/text()'
XPATH_BODY = '//div[@class="html-content"]/p[not(@class)]/text()'
def parse_notice(link, today):
try:
print(link)
response = requests.get(link)
if response.status_code == 200:
# Same approach as in the main function
notice = response.content.decode('utf-8')
parsed = html.fromstring(notice)
try:
title = parsed.xpath(XPATH_TITLE)[0]
# Some titles come wrapped in quotes; since title is now a string,
# strip quotes and other characters that are not valid in a filename:
title = title.replace('\"', '')
title = title.replace('/', '')
title = title.replace(':', '')
title = title.replace('"', '')
print(title)
summary = parsed.xpath(XPATH_SUMMARY)[0]
body = parsed.xpath(XPATH_BODY)
# This error is handled because some articles have no summary; when that happens,
# skip the article and move on to the next one:
except IndexError:
print('error creating notice files')
return
# WITH is a context manager: if the script fails while the file is open, it keeps
# the file safe so it does not get corrupted.
# today is the folder created in the main function; the new article is saved inside it.
# The second argument opens the file in write mode, and the encoding handles special characters.
with open(f'data/{today}/{title}.txt', 'w', encoding='utf-8') as f:
f.write(title)
f.write('\n\n')
f.write(summary)
f.write('\n\n')
for p in body:
f.write(p)
f.write('\n')
else:
raise ValueError(f'Error: {response.status_code}')
except ValueError as ve:
# raised when the request returns a status code other than 200
print(ve)
def parse_home():
try:
response = requests.get(HOME_URL)
if response.status_code == 200:
# Get the HTML; decode() turns special characters (like ñ) into something Python handles without errors
home = response.content.decode()
# Parse the HTML content of the home page into a form we can query with XPath
parsed = html.fromstring(home)
# Get a list of results from applying the XPath expression ($x('...') in the browser console)
link_to_notices = parsed.xpath(XPATH_LINK_TO_ARTICLE)
# print(link_to_notices)
# date gives a date and today() gives today's; convert it to a string with this format
today = datetime.date.today().strftime('%d-%m-%Y')
# If a folder named with today's date does not exist, create it
local_path = f'./data/{today}'
if not os.path.isdir(local_path):
os.mkdir(local_path)
else:
print('folder already exists')
# For each link, parse_notice fetches the article's info and saves it under today's folder
for link in link_to_notices:
parse_notice(link, today)
else:
raise ValueError(f'Error: {response.status_code}')
except ValueError as ve:
print(ve)
def run():
parse_home()
if __name__ == '__main__':
run()
``` |
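The scraper relies on the lxml-plus-XPath pattern: parse the HTML, then extract text nodes with an XPath expression. The snippet below illustrates that pattern on an inline HTML fragment, so it runs without a network request; the fragment is illustrative, not taken from the target site.
```python
import lxml.html as html

snippet = '''
<div class="lead"><p>Short summary of the article</p></div>
<div class="html-content"><p>First paragraph.</p><p>Second paragraph.</p></div>
'''
parsed = html.fromstring(snippet)

print(parsed.xpath('//div[@class="lead"]/p/text()'))          # ['Short summary of the article']
print(parsed.xpath('//div[@class="html-content"]/p/text()'))  # ['First paragraph.', 'Second paragraph.']
```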
{
"source": "jonutz93/neuroevolutionTest",
"score": 3
} |
#### File: jonutz93/neuroevolutionTest/Bird.py
```python
import Brasin as BirdBrain
import pygame
import random
id = 0
class Bird(object):
def __init__(self):
global id
self.id = id
self.brain = BirdBrain.Brain(self.id)
id+=1
self.brain.randomWeights()
self.resetBird()
self.velocityY =-9
self.playerFlapped = False
self.PosY = 0
self.PosX = 0
self.height = 0
self.width = 0
#they all start above 0
self.fitness = 100
def resetBird(self):
self.fitness = 100
self.brain.randomWeights()
def reesetFitness(self):
self.fitness = 100
def updateFitness(self, fitness):
self.fitness = fitness
def setSprite(self,sprite):
self.sprite = sprite
self.width = sprite[0].get_width()
self.height = sprite[0].get_height()
def getBrain(self):
return self.brain
```
#### File: jonutz93/neuroevolutionTest/flappy.py
```python
from itertools import cycle
import random
import sys
import Brasin
import pygame
from pygame.locals import *
import win32com.client as comclt
import Bird
import time
import datetime
import Logger
import copy
#Constants
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512
AIScore = 1
PopulationSize = 20
howManyWePick = 4 # how many we pick based on their fitness
currentPopulation=1
BirdsIteration = 1
maxScore = 0
birds = []
savedBirds = []
mutateRate = 1
# amount by which base can maximum shift to left
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
# image, sound and hitmask dicts
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
# blue bird
(
# amount by which base can maximum shift to left
'assets/sprites/bluebird-upflap.png',
'assets/sprites/bluebird-midflap.png',
'assets/sprites/bluebird-downflap.png',
),
# yellow bird
(
'assets/sprites/yellowbird-upflap.png',
'assets/sprites/yellowbird-midflap.png',
'assets/sprites/yellowbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
'assets/sprites/pipe-green.png',
'assets/sprites/pipe-red.png',
)
try:
xrange
except NameError:
xrange = range
def main():
global SCREEN, FPSCLOCK,birds,PopulationSize,savedBirds
birds = [Bird.Bird() for x in range(PopulationSize)]
pygame.init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# numbers sprites for score display
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
# game over sprite
IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
# sounds
if 'win' in sys.platform:
soundExt = '.wav'
else:
soundExt = '.ogg'
SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)
while True:
# select random background sprites
randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
for bird in birds:
randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
sprite=(
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
)
bird.setSprite(sprite)
# select random pipe sprites
pipeindex = random.randint(0, len(PIPES_LIST) - 1)
IMAGES['pipe'] = (
pygame.transform.rotate(
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
)
# hitmasks for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
HITMASKS['player'] = (
getHitmask(birds[0].sprite[0]),
getHitmask(birds[0].sprite[1]),
getHitmask(birds[0].sprite[2]),
)
mainGame()
def crossOver(birdA,birdB):
#get a cross over cutting point
numberOfBiases = birdA.brain.n_hidden-1
limit = random.randint(0,numberOfBiases)
#swap 'bias' information between both parents from the hidden layer:
# 1. left side to the crossover point is copied from one parent
# 2. right side after the crossover point is copied from the second parent
networkA = birdA.brain.getWeights()
networkB = birdB.brain.getWeights()
for i in range(limit,numberOfBiases):
biasFrombirdA = networkA["bias1"][i]
networkA["bias1"][i] = networkB["bias1"][i]
networkB["bias1"][i] = biasFrombirdA;
whichBirdShouldIchoose = random.randint(0,1)
if whichBirdShouldIchoose == 1:
return networkA
else:
return networkB
def resetGame():
global BirdsIteration,PopulationSize,currentPopulation,maxScore,bestBrain,birdBrain,AIScore,howManyWePick,mutateRate,birds
#pick the best birds
currentPopulation=currentPopulation+1
if(mutateRate == 1 and savedBirds[PopulationSize-1].fitness<110):
#this is bad. None reached the first pipe. Instead of mutating and crossover we will recreate the population
#We set again random weights
for bird in savedBirds:
print("reset")
Logger.Logger.Log("reset")
bird.resetBird();
birds = savedBirds.copy()
else:
#the real mutate rate
mutateRate = 0.1
# the top 4 birds
Winners = []
newlist = sorted(savedBirds, key=lambda x: x.fitness, reverse=True)
for i in range(0,howManyWePick):
#the birds are sorted by fitness in descending order,
#so newlist[0] has the best fitness
birds.insert(len(birds),newlist[i])
Winners.insert(len(birds),newlist[i])
for i in range(howManyWePick,howManyWePick+1):
parentA = Winners[0]
parentB = Winners[1]
newWeights = crossOver(parentA,parentB)
newlist[i].brain.updateWeightsJson(newWeights)
birds.insert(len(birds),newlist[i])
for i in range(howManyWePick+1,howManyWePick+4):
#get 2 random parrents of the top 4
parentA = random.choice(Winners)
parentB = random.choice(Winners)
newWeights = crossOver(parentA,parentB)
newlist[i].brain.updateWeightsJson(newWeights)
birds.insert(len(birds),newlist[i])
for i in range(howManyWePick+4,PopulationSize):
randomWinner = random.choice(Winners)
newWeights = randomWinner.brain.getWeights()
newlist[i].brain.updateWeightsJson(newWeights)
birds.insert(len(birds),newlist[i])
#save the best score of all time
if maxScore < Winners[0].fitness:
#save the best score
maxScore = Winners[0].fitness
print(maxScore)
for i in range(0,PopulationSize):
if i>=howManyWePick:
newlist[i].brain.mutate(mutateRate)
newlist[i].reesetFitness()
Winners.clear()
#for i in range (PopulationSize,PopulationSize-howManyWePick):
BirdsIteration=0
AIScore = 0
BirdsIteration += 1
savedBirds.clear()
print("restart")
Logger.Logger.Log("restart")
mainGame()
def mainGame():
global currentPopulation,AIScore,birds
score = playerIndex = loopIter = 0
#playerIndexGen = movementInfo['playerIndexGen']
#playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
#playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery']
for bird in birds:
#uncomment this to test that it works
#random.uniform(0, 1)
bird.PosX = int(SCREENWIDTH * 0.2)
bird.PosY = int((SCREENHEIGHT - bird.sprite[0].get_height()) / 2)
basex = 0
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# list of upper pipes
upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
pipeVelX = -4
# player velocity, max velocity, downward acceleration, acceleration on flap
playerMaxVelY = 10 # max vel along Y, max descend speed
playerMinVelY = -8 # min vel along Y, max ascend speed
playerAccY = 1 # player's downward acceleration
playerRot = 45 # player's rotation
playerVelRot = 3 # angular speed
playerRotThr = 20 # rotation threshold
playerFlapAcc = -9 # players speed on flapping
oldtime =datetime.datetime.now()
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
for bird in birds:
jump(bird)
#SOUNDS['wing'].play()
# check for crash here
for bird in birds:
crashTest = checkCrash(bird,
upperPipes, lowerPipes)
if crashTest[0]:
#here we actually subtract the distance to the next pipe from the fitness.
#This is in order to punish them.
#Also if all birds' fitness is below 0 then we will recreate the population
#bird.fitness =bird.fitness + (bird.PosX - upperPipes[0]["x"])
birds.remove(bird)
savedBirds.insert(len(savedBirds),bird)
print("death")
Logger.Logger.Log("death")
if len(birds) == 0:
resetGame()
# check for score
#playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
onePassed = False
for pipe in upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
for bird in birds:
playerMidPos = bird.PosX + bird.sprite[0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
onePassed =True
#SOUNDS['point'].play()
if onePassed ==True :
score+=1
# playerIndex basex change
if (loopIter + 1) % 3 == 0:
playerIndex =(playerIndex+1)%3
loopIter = (loopIter + 1) % 30
basex = -((-basex + 100) % baseShift)
# rotate the player
if playerRot > -90:
playerRot -= playerVelRot
# player's movement
for bird in birds:
if bird.velocityY < playerMaxVelY and not bird.playerFlapped:
bird.velocityY += playerAccY
if bird.playerFlapped:
bird.playerFlapped = False
# more rotation to cover the threshold (calculated in visible rotation)
playerRot = 45
bird.PosY += min(bird.velocityY, BASEY - bird.PosY - bird.height)
# move pipes to left
for uPipe, lPipe in zip(upperPipes, lowerPipes):
uPipe['x'] += pipeVelX
lPipe['x'] += pipeVelX
# add new pipe when first pipe is about to touch left of screen
if 0 < upperPipes[0]['x'] < 5:
newPipe = getRandomPipe()
upperPipes.append(newPipe[0])
lowerPipes.append(newPipe[1])
# remove first pipe if its out of the screen
if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
upperPipes.pop(0)
lowerPipes.pop(0)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
showScore(score)
showPopulation(currentPopulation)
if len(birds) > 0 and birds[0].fitness>0:
showNumber(birds[0].fitness,100, SCREENHEIGHT * 0.9)
showNumber(maxScore,0, SCREENHEIGHT * 0.9)
# Player rotation has a threshold
visibleRot = playerRotThr
if playerRot <= playerRotThr:
visibleRot = playerRot
for bird in birds:
playerSurface = pygame.transform.rotate(bird.sprite[playerIndex], visibleRot)
SCREEN.blit(playerSurface, (bird.PosX, bird.PosY))
pygame.display.update()
FPSCLOCK.tick(FPS)
pipesX = upperPipes[0]["x"]
upperPipeY = upperPipes[0]["y"] + IMAGES['pipe'][0].get_height()
lowerPipeY = upperPipeY + PIPEGAPSIZE
#call the brain with location of bird and pipes
AIScore = AIScore+1
start = time.time()
#we should make this check only once per second
if (datetime.datetime.now() - oldtime).total_seconds() >= 0:
oldtime = datetime.datetime.now()
for bird in birds:
bird.fitness+=1
Logger.Logger.Log("bird id " + str(bird.brain.id) + "fitness " + str(bird.fitness))
for bird in birds:
if(bird.PosX>pipesX+IMAGES['pipe'][0].get_width()):
upperPipeY = upperPipes[1]["y"] + IMAGES['pipe'][0].get_height()
lowerPipeY = upperPipeY + PIPEGAPSIZE
response = bird.brain.Think(bird.PosY,pipesX,upperPipeY,lowerPipeY)
if(response > 0.5):
jump(bird)
def showGameOverScreen(crashInfo):
"""crashes the player down ans shows gameover image"""
score = crashInfo['score']
playerx = SCREENWIDTH * 0.2
playery = crashInfo['y']
playerHeight = IMAGES['player'][0].get_height()
playerVelY = crashInfo['playerVelY']
playerAccY = 2
playerRot = crashInfo['playerRot']
playerVelRot = 7
basex = crashInfo['basex']
upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes']
# play hit and die sounds
#SOUNDS['hit'].play()
#if not crashInfo['groundCrash']:
#SOUNDS['die'].play()
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playery + playerHeight >= BASEY - 1:
return
# player y shift
if playery + playerHeight < BASEY - 1:
playery += min(playerVelY, BASEY - playery - playerHeight)
# player velocity change
if playerVelY < 15:
playerVelY += playerAccY
# rotate only when it's a pipe crash
if not crashInfo['groundCrash']:
if playerRot > -90:
playerRot -= playerVelRot
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
showScore(score)
playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot)
SCREEN.blit(playerSurface, (playerx,playery))
FPSCLOCK.tick(FPS)
pygame.display.update()
def playerShm(playerShm):
"""oscillates the value of playerShm['val'] between 8 and -8"""
if abs(playerShm['val']) == 8:
playerShm['dir'] *= -1
if playerShm['dir'] == 1:
playerShm['val'] += 1
else:
playerShm['val'] -= 1
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = 110 #random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def showNumber(text,xPos,yPos):
scoreDigits = [int(x) for x in list(str(text))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = xPos
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, yPos))
Xoffset += IMAGES['numbers'][digit].get_width()
def showScore(score):
showNumber(score,(SCREENWIDTH) / 1.5, SCREENHEIGHT * 0.1)
def showPopulation(population):
showNumber(population,0, SCREENHEIGHT * 0.1)
def checkCrash(bird, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
pi = 0
birdX = bird.PosX
birdY = bird.PosY
birdW = bird.sprite[0].get_width()
birdH = bird.sprite[0].get_height()
# if player crashes into the ground
if birdY + birdH >= BASEY - 1:
return [True, True]
# if player flies above the top of the screen
if birdY + birdH <0:
return [True, True]
else:
playerRect = pygame.Rect(birdX, birdY,
birdW, birdH)
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in xrange(image.get_width()):
mask.append([])
for y in xrange(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
def jump(bird):
bird.velocityY = -9
bird.playerFlapped = True
if __name__ == '__main__':
main()
```
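The `crossOver` function above swaps the hidden-layer biases of two parents after a random cut point. The standalone sketch below shows that single-point swap on plain lists, so it runs without the `Brasin` or pygame dependencies.
```python
import random

random.seed(0)
bias_a = [0.1, 0.2, 0.3, 0.4, 0.5]       # parent A hidden-layer biases
bias_b = [-1.0, -2.0, -3.0, -4.0, -5.0]  # parent B hidden-layer biases

cut = random.randint(0, len(bias_a) - 1)
# Everything from the cut point onwards is exchanged between the two parents.
bias_a[cut:], bias_b[cut:] = bias_b[cut:], bias_a[cut:]
print(cut, bias_a, bias_b)
```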
#### File: jonutz93/neuroevolutionTest/Logger.py
```python
class Logger(object):
@staticmethod
def Log(message):
file = open("logs.txt", "a")
#file.write(message)
#file.write("\n")
file.close()
``` |
{
"source": "jonvadney/concordd",
"score": 2
} |
#### File: concord232/concord232/api.py
```python
import flask
import json
import logging
import time
LOG = logging.getLogger('api')
CONTROLLER = None
app = flask.Flask('concord232')
LOG.info("API Code Loaded")
def show_zone(zone):
return {
'partition': zone['partition_number'],
'area': zone['area_number'],
'group': zone['group_number'],
'number': zone['zone_number'],
'name': zone['zone_text'],
'state': zone['zone_state'],
'type': zone['zone_type'],
#'bypassed': zone.bypassed,
#'condition_flags': zone.condition_flags,
#'type_flags': zone.type_flags,
}
def show_partition(partition):
return {
'number': partition['partition_number'],
'area': partition['area_number'],
'arming_level': partition['arming_level'],
'arming_level_code': partition['arming_level_code'],
'partition_text': partition['partition_text'],
'zones': sum(z['partition_number'] == partition['partition_number'] for z in CONTROLLER.get_zones().values()),
}
@app.route('/panel')
def index_panel():
try:
result = json.dumps({
'panel': CONTROLLER.get_panel()
})
return flask.Response(result,
mimetype='application/json')
except Exception as e:
LOG.exception('Failed to index zones')
@app.route('/zones')
def index_zones():
try:
zones = CONTROLLER.get_zones()
result = json.dumps({
'zones': [show_zone(zone) for zone in zones.values()]})
return flask.Response(result,
mimetype='application/json')
except Exception as e:
LOG.exception('Failed to index zones')
@app.route('/partitions')
def index_partitions():
try:
partitions = CONTROLLER.get_partitions()
result = json.dumps({
'partitions': [show_partition(partition)
for partition in partitions.values()]})
return flask.Response(result,
mimetype='application/json')
except Exception as e:
LOG.exception('Failed to index partitions')
@app.route('/command')
def command():
args = flask.request.args
if args.get('cmd') == 'arm':
option = args.get('option')
if args.get('level') == 'stay':
CONTROLLER.arm_stay(option)
elif args.get('level') == 'away':
CONTROLLER.arm_away(option)
elif args.get('cmd') == 'disarm':
CONTROLLER.disarm(args.get('master_pin'))
elif args.get('cmd') == 'keys':
CONTROLLER.send_keys(args.get('keys'),args.get('group'))
return flask.Response()
@app.route('/version')
def get_version():
return flask.Response(json.dumps({'version': '1.1'}),
mimetype='application/json')
@app.route('/equipment')
def get_equipment():
CONTROLLER.refresh()
return flask.Response()
@app.route('/all_data')
def get_all_data():
CONTROLLER.refresh()
return flask.Response()
```
#### File: concord232/concord232/client.py
```python
import json
import requests
import time
class Client(object):
def __init__(self, url):
self._url = url
self._session = requests.Session()
self._last_event_index = 0
def list_zones(self):
r = self._session.get(self._url + '/zones')
try:
return r.json['zones']
except TypeError:
return r.json()['zones']
def list_partitions(self):
r = self._session.get(self._url + '/partitions')
try:
return r.json['partitions']
except TypeError:
return r.json()['partitions']
def arm(self, level, option = None):
r = self._session.get(
self._url + '/command',
params={'cmd': 'arm',
'level': level,
'option': option})
return r.status_code == 200
def disarm(self, master_pin):
r = self._session.get(
self._url + '/command',
params={'cmd': 'disarm',
'master_pin': master_pin})
return r.status_code == 200
def send_keys(self, keys, group=False):
r = self._session.get(
self._url + '/command',
params={'cmd': 'keys',
'keys': keys,
'group': group})
return r.status_code == 200
def get_version(self):
r = self._session.get(self._url + '/version')
if r.status_code == 404:
return '1.0'
else:
return r.json()['version']
``` |
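A hypothetical driver for the `Client` class above, assuming that class has been imported; the URL is a placeholder for wherever the concord232 API server is listening.
```python
# Placeholder URL; point this at the running concord232 API server.
client = Client('http://localhost:5007')

print('server version:', client.get_version())
for zone in client.list_zones():
    print(zone['number'], zone['name'], zone['state'])
for partition in client.list_partitions():
    print(partition['number'], partition['arming_level'])
```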
{
"source": "jonvadney/FlightCircle",
"score": 2
} |
#### File: jonvadney/FlightCircle/flight_circle.py
```python
from bs4 import BeautifulSoup
import datetime
import time
import json
import requests
class FlightCircle:
"""Class for interacting with the FlightCircle application"""
__base_url__ = "https://www.flightcircle.com/v1/" # Trailing is required for login
__fbos_url__ = "%sapi/associations" % __base_url__
__users_url__ = "%sapi/customers" % __base_url__
__default_headers__ = {'User-Agent': 'Indin River Flying Club Client (<EMAIL>)',
'accept': 'application/json',
'referer': __base_url__,
}
def __init__(self, username, password):
"""Initialize the object"""
self.username = username
self.password = password
self.csrf_token = None
self.user_data = None
self.session = None
self.__init_session__()
def get_fbos(self):
"""Get a list of active FBO's"""
# Get available associations
payload = {"csrfToken": self.csrf_token,
"fields": "UserID",
"q": self.user_data['ID']
}
print ("Connecting to %s" % FlightCircle.__fbos_url__)
r = self.session.get(FlightCircle.__fbos_url__,
headers=FlightCircle.__default_headers__,
params = payload)
if (r.status_code != 200):
print (r)
print (r.text)
raise Exception("Unable to get FBO List")
associations = json.loads(r.text)
return associations
def get_fbo_id_by_name(self, fbo_name):
"""Gets the FBO by name"""
fbos = self.get_fbos()
for fbo in fbos:
if (fbo["name"] == fbo_name):
return fbo["FboID"]
return None
def get_users(self, fbo_id):
""""Gets a list of users for an FBO"""
payload = {"csrfToken": self.csrf_token,
"fields": "FboID",
"include_deleted": "0",
"include_photo": "0",
"q": fbo_id
}
print ("Connecting to %s" % FlightCircle.__users_url__)
r = self.session.get(FlightCircle.__users_url__,
headers=FlightCircle.__default_headers__,
params = payload)
if (r.status_code != 200):
print (r)
print (r.text)
raise Exception("Unable to get User List")
customers = json.loads(r.text)
return customers
def get_users_with_checkout(self, fbo_id, make_and_model):
"""Gets a list of customers checked out in a given make & model"""
selected_users = []
users = self.get_users(fbo_id)
for user in users:
if ('aircraft_checkouts' in user and user["aircraft_checkouts"] != None):
checkouts = json.loads(user["aircraft_checkouts"])
for checkout in checkouts:
if (checkout["Make_Model"] == make_and_model):
selected_users.append(user)
return selected_users
def update_checkout_dates(self, fbo_id, make_and_model, get_new_checkout_date_and_expiration_func):
"""
Function calls get_new_checkout_date_and_expiration_func for every user with a valid checkout
recorded for make & model.
Args:
fbo_id:
make_and_model:
get_new_checkout_date_and_expiration_func: A reference to a function to be called to
get a new checkout date and expiration days
def func(last_name, first_name, email, groups, checkout) returns ((timestamp)checkoutdate, (int) days_valid)
"""
users = self.get_users(fbo_id)
for user in users:
if ('aircraft_checkouts' in user and user["aircraft_checkouts"] != None):
checkouts = json.loads(user["aircraft_checkouts"])
for checkout in checkouts:
if (checkout["Make_Model"] == make_and_model):
groups = None
if ('groups' in user):
groups = user['groups']
(checkout_date, days_valid) = get_new_checkout_date_and_expiration_func(user['last_name'],
user['first_name'],
user['email'],
groups,
checkout)
if (checkout_date != None and days_valid != None):
#print ("Update checkout date")
url = "%s/%s" % (FlightCircle.__users_url__, user["CustomerID"])
params = {"csrfToken": self.csrf_token,
"FboID": fbo_id
}
checkout_date = checkout_date.replace(hour=0, minute=0, second=0, microsecond=0)
expires_date = checkout_date + datetime.timedelta(days=days_valid)
checkout["checkout_expires_specfic"] = str(days_valid)
checkout["checkout_date"] = int(checkout_date.timestamp())
checkout["checkout_expires_date"] = expires_date.strftime("%Y-%m-%d 00:00:00")
checkout["checkout_expires"] = "Specify"
formated_checkout = str(checkouts)
formated_checkout = formated_checkout.replace("'", '\\"').replace(': ', ':').replace(', ', ',')
payload = "{\"aircraft_checkouts\":\"%s\"}" % (formated_checkout)
#print (url)
#print (params)
#print (payload)
r = self.session.put(url,
headers=FlightCircle.__default_headers__,
params = params,
data = str(payload))
json_result = json.loads(r.text)
if ('status' in json_result and json_result['status'] != 200):
raise Exception("Unable to update checkout for %s %s" % (user['last_name'],user['first_name']))
if (len(json_result) != 1):
raise Exception("Unable to update checkout for %s %s" % (user['last_name'],user['first_name']))
def __init_session__(self):
"""Init FlightCircle Session"""
# Init Requests Session
self.session = requests.Session()
# Get the csrf_token
r = self.session.get(FlightCircle.__base_url__,
headers=FlightCircle.__default_headers__)
soup = BeautifulSoup(r.text, features="html.parser")
self.csrf_token = soup.find(id="csrf_token").get("value")
if (self.csrf_token is None or self.csrf_token == ""):
raise Exception("Unable to obtain initial csrf_token")
# Login
payload = {'email': self.username,
'password': self.password,
'csrf_token': self.csrf_token,
}
r = self.session.post(FlightCircle.__base_url__,
headers=FlightCircle.__default_headers__,
data = payload)
soup = BeautifulSoup(r.text, features="html.parser")
script_data = soup.find("script")
if (script_data != None and script_data != ""):
for line in str(script_data).splitlines():
if ("RB.User" in line):
self.user_data = json.loads(line.replace("RB.User = RB.User || ", '')[:-1])
else:
raise Exception("Login Failed")
if (self.user_data is None):
raise Exception("Login Failed")
``` |
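`update_checkout_dates` expects a callback with the signature documented in its docstring, returning a new checkout date and a validity period in days. Below is a hypothetical driver sketch, assuming the `FlightCircle` class above has been imported; the credentials, FBO name and make/model are placeholders.
```python
import datetime

def renew_checkout(last_name, first_name, email, groups, checkout):
    """Callback matching the contract documented above: renew from today for 365 days."""
    return datetime.datetime.now(), 365

# Placeholders: real credentials and names would come from configuration.
fc = FlightCircle('user@example.com', 'not-a-real-password')
fbo_id = fc.get_fbo_id_by_name('Example Flying Club')
fc.update_checkout_dates(fbo_id, 'Cessna 172', renew_checkout)
```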
{
"source": "jonvtruong/GameWithFriends",
"score": 3
} |
#### File: jonvtruong/GameWithFriends/main.py
```python
import socket, sys, socketserver, threading
HOST = '' # Symbolic name, meaning all available interfaces
PORT = 8880 # Arbitrary non-privileged port
START_ACCOUNT = 1500
bank = []
playerConn = []
playerNames = []
BUFFER_SIZE = 128
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class ManageClient(socketserver.BaseRequestHandler):
'''
Request handler class for server
Instantiated for each client connection
'''
def setup(self):
print("connected to " + str(self.client_address))
self.newPlayer()
def handle(self):
while True:
#now keep talking with the client
#wait to accept a connection - blocking call
try:
data = self.request.recv(BUFFER_SIZE)
if(len(data) > 0):
decode = data.decode()
if (decode == 'quit'):
print("quit")
break
else:
self.gameProtocol(decode)
else:
break
except ConnectionResetError:
print("connection broken")
break
def finish(self):
print("closing connection with " + str(self.client_address))
playerConn[playerConn.index(self.request)] = False
self.request.close()
if(not any(playerConn)): #if no more players connected then close server
print("No more players, closing server")
self.server.shutdown()
def gameProtocol(self, message):
'''
messages will be in the format: t 1 2 200 = transfer player 1 sends player 2, $200
returns a tuple of strings
'''
parse = message.split(' ') #splits message into array of strings
command = parse.pop(0)
print("command received: " + message)
if(command == 't'): #request transfer money t 1 2 200 = transfer player 1 sends player 2, $200
parse = [int(i) for i in parse] #convert list of strings to int
# get the to player num from message
if(parse[1] == -1): #if player is trying to pay/withdraw money to bank
if(parse[2] >= 0): #if paying the bank
self.updateAccount(parse[0], -parse[2]) #update from player account
else:
self.askConfirm(0, message)
else: #player to player transactions
toPlayer = parse[1]
self.askConfirm(toPlayer, message)
elif(command == 'y'): #confirmed transfer money y 1 2 200 = transfer player 1 sends player 2, $200
parse = [int(i) for i in parse] #convert list of strings to int
self.updateAccount(parse[0], -parse[2]) #update from player account
self.updateAccount(parse[1], parse[2]) #update to player account
elif(command == 'b'): #confirmed bank withdrawal b 2 -1 -200 = player 2 withdraws $200
parse = [int(i) for i in parse] #convert list of strings to int
self.updateAccount(parse[0], -parse[2]) #update from player account
elif(command == 'n'): #new player created (message = n Name)
playerNames.append(parse[0])
print("name received: " + parse[0])
self.sendNames()
def sendNames(self): #sends list of names to all players
nameList = ' '.join(name for name in playerNames)
# nameList = 'p '.join(nameList)
print("sending list: " + nameList)
message = 'p ' + nameList
for conn in playerConn: #send the latest name list to all players
conn.sendall(message.encode())
def updateAccount(self, player, amount):
print("amount: " + str(amount))
bank[player] += amount
message = 'a ' + str(bank[player])
print("sending player message: " + message)
playerConn[player].sendall(message.encode())
def askConfirm(self, to, m):
print("sending ToPlayer message: " + m)
playerConn[to].sendall(m.encode())
def newPlayer(self):
playerConn.append(self.request)
playerNum = playerConn.index(self.request)
bank.append(START_ACCOUNT)
#sends the player their player number and starting balance
message = 'n ' + str(playerNum) + " " + str(bank[playerNum])
self.request.sendall(message.encode())
print("new player added, sending: " + message)
if(__name__ == '__main__'):
server = ThreadedTCPServer((HOST, PORT), ManageClient)
print('Socket listening')
server.serve_forever()
```
#### File: GameWithFriends/test client/main.py
```python
import socket, sys
HOST = socket.gethostname()
PORT = 8888
player = 0
account = 0
def gameProtocol(message):
'''
messages will be in the format: a 200 = total account value 200
'''
global player, account
parse = message.split(' ')
command = parse.pop(0)
if(command == 'n'): #if creating new player, update player number and account starting balance
player, account = parse
print("Player number: " + player + " account balance: " + account)
elif(command == 'a'):
print('parse: ' + str(parse))
account = int(*parse)
print('account updated: ' + str(account))
elif(command == 'p'): #receive player name list
for name in parse:
print(name)
if(__name__ == '__main__'):
# create a socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connection to hostname on the port.
s.connect((HOST, PORT))
decode = s.recv(64).decode()
print("connected " + decode)
gameProtocol(decode)
message = input("Input: ")
while True:
s.sendall(message.encode())
if(message == 'quit'):
break
data = s.recv(128)
if(len(data) > 0):
decode = data.decode()
print("Client received: " + decode)
gameProtocol(decode)
message = input("Input: ")
s.close()
sys.exit()
``` |
{
"source": "jonvw28/GoL_Sensehat",
"score": 3
} |
#### File: jonvw28/GoL_Sensehat/GoL.py
```python
import numpy as np
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
def GoL_step(cells, size=(8,8)):
'''
Take a single Game of Life Step
'''
new_cells = np.zeros(size, dtype = cells.dtype)
for i in range(size[0]):
for j in range(size[1]):
tot_nbrs = cells[(i-1)%size[0],(j-1)%size[1]]
tot_nbrs += cells[i%size[0],(j-1)%size[1]]
tot_nbrs += cells[(i+1)%size[0],(j-1)%size[1]]
tot_nbrs += cells[(i-1)%size[0],(j)%size[1]]
tot_nbrs += cells[(i+1)%size[0],(j)%size[1]]
tot_nbrs += cells[(i-1)%size[0],(j+1)%size[1]]
tot_nbrs += cells[(i)%size[0],(j+1)%size[1]]
tot_nbrs += cells[(i+1)%size[0],(j+1)%size[1]]
if cells[i,j] == 0 and tot_nbrs == 3:
new_cells[i,j] = 1
elif cells[i,j] == 1 and (tot_nbrs == 2 or tot_nbrs == 3):
new_cells[i,j] = 1
else:
new_cells[i,j]=0
return new_cells
def GoL_show(cells):
'''
Display Alive cells as green and dead cells as red
'''
for i in range(8):
for j in range(8):
if cells[i,j] == 1:
sense.set_pixel(i,j,0,255,0)
else:
sense.set_pixel(i,j,255,0,0)
def GoL_show_ages(cells,old_cells):
'''
Display deaths as dull red, births as green and surviving cells as blue
'''
for i in range(8):
for j in range(8):
if cells[i,j] == 0 and old_cells[i,j] == 0:
sense.set_pixel(i,j,0,0,0)
elif cells[i,j] == 0 and old_cells[i,j] == 1:
sense.set_pixel(i,j,127,0,0)
elif old_cells[i,j] == 0:
sense.set_pixel(i,j,0,255,0)
else:
sense.set_pixel(i,j,0,0,255)
def GoL_init():
'''
Initialise a random grid of live cells
'''
cells = np.random.choice((0,1),size=(8,8))
return( cells )
def GoL_blinker():
'''
Initialise a Blinker
'''
cells = np.zeros(shape = (8,8), dtype = 'int32')
cells[2,2] = 1
cells[2,3] = 1
cells[2,4] = 1
return cells
def GoL_glider():
'''
Initialise a Glider
'''
cells = np.zeros(shape = (8,8), dtype = 'int32')
cells[2,2] = 1
cells[2,3] = 1
cells[2,4] = 1
cells[3,4] = 1
cells[4,3] = 1
return cells
def GoL_pentomino():
'''
Initialise a cool shape - more fun on bigger boards
'''
cells = np.zeros(shape = (8,8), dtype = 'int32')
cells[2,3] = 1
cells[2,4] = 1
cells[3,2] = 1
cells[3,3] = 1
cells[4,3] = 1
return cells
``` |
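`GoL_step` counts the eight toroidal neighbours cell by cell. An equivalent vectorised neighbour count can be written with `np.roll`, shown below as a standalone sketch that advances the same blinker one step without needing the Sense HAT.
```python
import numpy as np

cells = np.zeros((8, 8), dtype=int)
cells[2, 2:5] = 1  # the same blinker as GoL_blinker()

# Sum the eight shifted copies of the grid; np.roll wraps around,
# matching the modular indexing used in GoL_step.
nbrs = sum(np.roll(np.roll(cells, dx, axis=0), dy, axis=1)
           for dx in (-1, 0, 1) for dy in (-1, 0, 1) if (dx, dy) != (0, 0))

# Birth on exactly 3 neighbours, survival on 2 or 3.
new_cells = ((nbrs == 3) | ((cells == 1) & (nbrs == 2))).astype(int)
print(new_cells)
```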
{
"source": "jonvw28/SLICUAV",
"score": 3
} |
#### File: 2019_09_03_1_generate_grid_superpixels/trees/clusterfeatures.py
```python
import numpy as np
import scipy as sp
import skimage.color as skcolor
import mahotas.features as mht
# methods
from skimage.util import img_as_float
from skimage.feature.texture import local_binary_pattern
from skimage.exposure import rescale_intensity
from scipy.ndimage import convolve
# Create class definition
class ClusterFeatures:
"""Object for holding clusterwise data
More to follow"""
def __init__(self,tag_id,tag,rgb_img,rgb_mask,ms_img,ms_mask,dsm_img,dsm_mask):
# store inputs
self.tag_id = tag_id
self.tag = tag
self.rgb_img = rgb_img
self.ms_img = ms_img
self.dsm_img = dsm_img
self.rgb_mask = rgb_mask
self.ms_mask = ms_mask
self.dsm_mask = dsm_mask
# mask out the pixel values of interest
rgb_pixels = rgb_img[rgb_mask]
self.rgb_pixels = rgb_pixels[:,:3:]
ms_pixels = ms_img[ms_mask]
self.ms_pixels = ms_pixels[:,:4:]
dsm_pixels = dsm_img[dsm_mask]
self.dsm_pixels = dsm_pixels
# clip the raw image to a buffer of 20 pixels around the region of interest
rgb_bounds = [np.maximum(rgb_mask[0].min()-20,0),np.minimum(rgb_mask[0].max()+20,rgb_img.shape[0]),np.maximum(rgb_mask[1].min()-20,0),np.minimum(rgb_mask[1].max()+20,rgb_img.shape[1])]
ms_bounds = [np.maximum(ms_mask[0].min()-20,0),np.minimum(ms_mask[0].max()+20,ms_img.shape[0]),np.maximum(ms_mask[1].min()-20,0),np.minimum(ms_mask[1].max()+20,ms_img.shape[1])]
dsm_bounds = [np.maximum(dsm_mask[0].min()-20,0),np.minimum(dsm_mask[0].max()+20,dsm_img.shape[0]),np.maximum(dsm_mask[1].min()-20,0),np.minimum(dsm_mask[1].max()+20,dsm_img.shape[1])]
self.rgb_img_clip = rgb_img[rgb_bounds[0]:rgb_bounds[1]+1:,rgb_bounds[2]:rgb_bounds[3]+1:,:]
self.rgb_mask_clip = (rgb_mask[0]-rgb_bounds[0],rgb_mask[1]-rgb_bounds[2])
self.ms_img_clip = ms_img[ms_bounds[0]:ms_bounds[1]+1:,ms_bounds[2]:ms_bounds[3]+1:,:]
self.ms_mask_clip = (ms_mask[0]-ms_bounds[0],ms_mask[1]-ms_bounds[2])
self.dsm_img_clip = dsm_img[dsm_bounds[0]:dsm_bounds[1]+1:,dsm_bounds[2]:dsm_bounds[3]+1:]
self.dsm_mask_clip = (dsm_mask[0]-dsm_bounds[0],dsm_mask[1]-dsm_bounds[2])
def createRGBBandFeats(self,mode=False):
# Compute stats on each of the RGB bands once masked
# only compute mode value if mode = True
self.rgb_band_max = self.rgb_pixels.max(axis=0) # max value
self.rgb_band_min = self.rgb_pixels.min(axis=0) # min value
self.rgb_band_mean = self.rgb_pixels.mean(axis=0) # mean value
self.rgb_band_std = self.rgb_pixels.std(axis=0) # standard deviation
self.rgb_band_median = np.median(self.rgb_pixels,axis=0) # median value
self.rgb_band_cov = np.divide(self.rgb_band_std,self.rgb_band_mean) # coefficient of variation
self.rgb_band_skew = sp.stats.skew(self.rgb_pixels,axis=0) # skewness
self.rgb_band_kurt = sp.stats.kurtosis(self.rgb_pixels,axis=0) # kurtosis
self.rgb_band_sum = self.rgb_pixels.sum(axis=0) # sum of all values
self.rgb_band_rng = self.rgb_band_max-self.rgb_band_min # range of values
self.rgb_band_rngsig = np.divide(self.rgb_band_rng,self.rgb_band_std) # range in number of sds
self.rgb_band_rngmean = np.divide(self.rgb_band_rng,self.rgb_band_mean) # range expressed in means
if(mode):
self.rgb_band_mode = sp.stats.mode(self.rgb_pixels,axis=0)[0][0] # modal value
self.rgb_band_deciles = np.percentile(
self.rgb_pixels,np.linspace(10,90,9),axis=0) # deciles of band
self.rgb_band_quartiles = np.percentile(self.rgb_pixels,[25,75],axis=0) # quartiles of band
self.rgb_band_iqr = self.rgb_band_quartiles[1,:]-self.rgb_band_quartiles[0,:] # iqr
self.rgb_band_iqrsig = np.divide(self.rgb_band_iqr,self.rgb_band_std) # iqr expressed in sds
self.rgb_band_iqrmean = np.divide(self.rgb_band_iqr,self.rgb_band_mean) # iqr expressed in means
self.rgb_band_ratio = self.rgb_band_mean[:3:]/np.sum(self.rgb_band_mean[:3:])# ratio of band to sum of bands
def createRGBThreshFeats(self,thresh=0.5,mode=False):
# compute bandwaise stats only on top thresh proportion of pixels based on L in cielab space
lab_img = skcolor.rgb2lab(self.rgb_img[:,:,:3:])
self.lab_pixels = lab_img[self.rgb_mask]
lab_thresh = np.percentile(self.lab_pixels,100*thresh,axis=0)
top_pixels = self.rgb_pixels[self.lab_pixels[:,0]>=lab_thresh[0],:]
self.top_rgb_max = top_pixels.max(axis=0) # max value
self.top_rgb_min = top_pixels.min(axis=0) # min value
self.top_rgb_mean = top_pixels.mean(axis=0) # mean value
self.top_rgb_std = top_pixels.std(axis=0) # standard deviation
self.top_rgb_median = np.median(top_pixels,axis=0) # median value
self.top_rgb_cov = np.divide(self.top_rgb_std,self.top_rgb_mean) # coefficient of variation
self.top_rgb_skew = sp.stats.skew(top_pixels,axis=0) # skewness
self.top_rgb_kurt = sp.stats.kurtosis(top_pixels,axis=0) # kurtosis
self.top_rgb_sum = top_pixels.sum(axis=0) # sum of all values
self.top_rgb_rng = self.top_rgb_max-self.top_rgb_min # range
self.top_rgb_rngsig = np.divide(self.top_rgb_rng,self.top_rgb_std) # range in sds
self.top_rgb_rngmean = np.divide(self.top_rgb_rng,self.top_rgb_mean) # range in means
if(mode):
self.top_rgb_mode = sp.stats.mode(top_pixels,axis=0)[0][0] # modal value
self.top_rgb_deciles = np.percentile(
top_pixels,np.linspace(10,90,9),axis=0) # deciles
self.top_rgb_quartiles = np.percentile(top_pixels,[25,75],axis=0) # quartile
self.top_rgb_iqr = self.top_rgb_quartiles[1,:]-self.top_rgb_quartiles[0,:] #iqr
self.top_rgb_iqrsig = np.divide(self.top_rgb_iqr,self.top_rgb_std) #iqr in sds
self.top_rgb_iqrmean = np.divide(self.top_rgb_iqr,self.top_rgb_mean) # iqr in means
self.top_rgb_ratio = self.top_rgb_mean[:3:]/np.sum(self.top_rgb_mean[:3:]) # ratio compared to all bands
def __createGLCMimgs(self): # maxes are used to ensure where there are many 0s (when rounded) that this method works
glcm_rgb = np.zeros((self.rgb_img.shape[0],self.rgb_img.shape[1],3))
glcm_rgb[self.rgb_mask[0],self.rgb_mask[1],:]=np.maximum(self.rgb_img[self.rgb_mask[0],self.rgb_mask[1],:3:],0.004*np.ones((self.rgb_mask[0].__len__(),3)))
glcm_rgb = skcolor.rgb2gray(glcm_rgb)
glcm_rgb = 255*glcm_rgb
# remove zeros
glcm_rgb = glcm_rgb[~np.all(glcm_rgb==0,axis=1),:]
glcm_rgb = glcm_rgb[:,~np.all(glcm_rgb==0,axis=0)]
self.glcm_rgb_img = glcm_rgb.astype('uint8')
# MS
glcm_ms= np.zeros((self.ms_img.shape[0],self.ms_img.shape[1],4))
glcm_ms[self.ms_mask[0],self.ms_mask[1],:]=np.maximum(self.ms_img[self.ms_mask[0],self.ms_mask[1],:4:],0.004*np.ones((self.ms_mask[0].__len__(),4)))
glcm_ms = 255*glcm_ms
# remove zeros
glcm_ms = glcm_ms[~np.all(np.all(glcm_ms==0,axis=1),axis=1),:,:]
glcm_ms = glcm_ms[:,~np.all(np.all(glcm_ms==0,axis=0),axis=1),:]
self.glcm_ms_img = glcm_ms.astype('uint8')
def createRGBGLCMfeats(self,distance=1):
if not hasattr(self,'glcm_rgb_img'):
self.__createGLCMimgs()
glcm_rgb_vals = mht.haralick(self.glcm_rgb_img,ignore_zeros=True,
return_mean_ptp=True,distance=distance)
if not hasattr(self,'glcm_rgb_vals'):
self.glcm_rgb_vals = glcm_rgb_vals
else:
self.glcm_rgb_vals = np.concatenate((self.glcm_rgb_vals,
glcm_rgb_vals))
if not hasattr(self,'glcm_rgb_dist'):
self.glcm_rgb_dist = [distance]
else:
self.glcm_rgb_dist.append(distance)
def __imgAutocorrelate(self,img,dx,dy):
if dy >=0:
im1 = img_as_float(img[:img.shape[0]-dx:,:img.shape[1]-dy:])
im2 = img_as_float(img[dx:img.shape[0]:,dy:img.shape[1]:])
else:
mody = -dy
im1 = img_as_float(img[:img.shape[0]-dx:,mody:img.shape[1]:])
im2 = img_as_float(img[dx:img.shape[0]:,:img.shape[1]-mody:])
# set to mean zero
im1_mean = im1[np.nonzero(im1)].mean()
im1[np.nonzero(im1)] -= im1_mean
im2_mean = im2[np.nonzero(im2)].mean()
im2[np.nonzero(im2)] -= im2_mean
nom = np.multiply(im1,im2).sum()
# average both sub-images
denom = (np.multiply(im1,im1).sum() + np.multiply(im2,im2).sum())/2
return nom/denom
def createRGBautoCorFeats(self,distance=1):
if not hasattr(self,'glcm_rgb_img'):
self.__createGLCMimgs()
N = self.__imgAutocorrelate(self.glcm_rgb_img,0,distance)
NE = self.__imgAutocorrelate(self.glcm_rgb_img,distance,distance)
E = self.__imgAutocorrelate(self.glcm_rgb_img,distance,0)
SE = self.__imgAutocorrelate(self.glcm_rgb_img,distance,-distance)
acors = np.array([N,NE,E,SE])
acfeats = np.array([acors.mean(),acors.max()-acors.min()])
if not hasattr(self,'acor_rgb_vals'):
self.acor_rgb_vals = acfeats
else:
self.acor_rgb_vals = np.concatenate((self.acor_rgb_vals,
acfeats))
if not hasattr(self,'acor_rgb_dist'):
self.acor_rgb_dist = [distance]
else:
self.acor_rgb_dist.append(distance)
def createRGBLBPFeats(self,distance=1):
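# Histogram of uniform local binary patterns (8*distance neighbours at radius
# `distance`) over the masked grayscale RGB pixels, normalised to sum to one.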
if distance not in [1,2,3]:
raise ValueError('distance can only be 1,2 or 3')
grayimg = skcolor.rgb2gray(self.rgb_img_clip)
lbp_img = local_binary_pattern(grayimg,8*distance,distance,method='uniform')
lbp_pix = lbp_img[self.rgb_mask_clip]
unique, counts = np.unique(lbp_pix, return_counts = True)
count_table = np.zeros([2+distance*8])
count_table[unique.astype('int')]=counts
count_table = count_table/count_table.sum()
if not hasattr(self,'lbp_rgb_vals'):
self.lbp_rgb_vals = count_table
else:
self.lbp_rgb_vals = np.concatenate((self.lbp_rgb_vals,count_table))
if not hasattr(self,'lbp_rgb_dist'):
self.lbp_rgb_dist = [distance]
else:
self.lbp_rgb_dist.append(distance)
def createRGBLawsFeats(self):
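# Laws texture energy features: the grayscale image is locally mean-centred
# with a 15x15 window, filtered with the 25 outer products of the L5/E5/S5/R5/W5
# kernels, symmetric pairs are averaged into 14 energy maps, and the mean and
# standard deviation of each map inside the mask are stored.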
grayimg = skcolor.rgb2gray(self.rgb_img_clip)
mean_15 = convolve(grayimg,np.ones([15,15])/225,mode='reflect')
norm_gray = grayimg-mean_15
del mean_15, grayimg
# Construct filter bank
L5 = np.array([1,4,6,4,1])
E5 = np.array([-1,-2,0,2,1])
S5 = np.array([-1,0,2,0,-1])
R5 = np.array([1,-4,6,-4,1])
W5 = np.array([-1,2,0,-2,1])
filtbank = [L5,E5,S5,R5,W5]
del L5, E5, S5, R5, W5
filtgrid = np.zeros([5,5,5,5])
for i in range(5):
for j in range(5):
filtgrid[i,j,:,:]=(np.outer(filtbank[i],filtbank[j]))
del filtbank
# compute features
lawsFeat = np.zeros([14,2])
count_i = 0
for i in range(5):
for j in range(5):
if j < i or (i==0 and j ==0):
continue
if j==i:
convimg = convolve(norm_gray,filtgrid[i,j],mode='reflect')
lawsimg = convolve(np.absolute(convimg),np.ones([15,15]),mode='reflect')
lawsFeat[count_i,0] = lawsimg[self.rgb_mask_clip].mean()
lawsFeat[count_i,1] = lawsimg[self.rgb_mask_clip].std()
count_i += 1
else:
convimg1 = np.absolute(convolve(norm_gray,filtgrid[i,j],mode='reflect'))
convimg2 = np.absolute(convolve(norm_gray,filtgrid[j,i],mode='reflect'))
lawsimg = convolve(convimg1+convimg2,np.ones([15,15])/2,mode='reflect')
lawsFeat[count_i,0] = lawsimg[self.rgb_mask_clip].mean()
lawsFeat[count_i,1] = lawsimg[self.rgb_mask_clip].std()
count_i += 1
self.laws_rgb_feats = lawsFeat
def createHSVFeats(self,mode=False):
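# Per-channel summary statistics (max, min, mean, std, median, coefficient of
# variation, skewness, kurtosis, sum, range and quantile-based measures) of
# the masked pixels after converting the RGB image to HSV.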
hsv_img = skcolor.rgb2hsv(self.rgb_img[:,:,:3:])
self.hsv_pixels = hsv_img[self.rgb_mask]
self.hsv_max = self.hsv_pixels.max(axis=0)
self.hsv_min = self.hsv_pixels.min(axis=0)
self.hsv_mean = self.hsv_pixels.mean(axis=0)
self.hsv_std = self.hsv_pixels.std(axis=0)
self.hsv_median = np.median(self.hsv_pixels,axis=0)
self.hsv_cov = np.divide(self.hsv_std,self.hsv_mean)
self.hsv_skew = sp.stats.skew(self.hsv_pixels,axis=0)
self.hsv_kurt = sp.stats.kurtosis(self.hsv_pixels,axis=0)
self.hsv_sum = self.hsv_pixels.sum(axis=0)
self.hsv_rng = self.hsv_max-self.hsv_min
self.hsv_rngsig = np.divide(self.hsv_rng,self.hsv_std)
self.hsv_rngmean = np.divide(self.hsv_rng,self.hsv_mean)
if(mode):
self.hsv_mode = sp.stats.mode(self.hsv_pixels,axis=0)[0][0]
self.hsv_deciles = np.percentile(
self.hsv_pixels,np.linspace(10,90,9),axis=0)
self.hsv_quartiles = np.percentile(self.hsv_pixels,[25,75],axis=0)
self.hsv_iqr = self.hsv_quartiles[1,:]-self.hsv_quartiles[0,:]
self.hsv_iqrsig = np.divide(self.hsv_iqr,self.hsv_std)
self.hsv_iqrmean = np.divide(self.hsv_iqr,self.hsv_mean)
def createMSBandFeats(self,mode=False):
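# Per-band summary statistics of the masked multispectral pixels, mirroring
# the RGB band statistics; band order follows the G, R, RE, NIR labelling
# used in stackFeats.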
self.ms_band_max = self.ms_pixels.max(axis=0)
self.ms_band_min = self.ms_pixels.min(axis=0)
self.ms_band_mean = self.ms_pixels.mean(axis=0)
self.ms_band_std = self.ms_pixels.std(axis=0)
self.ms_band_median = np.median(self.ms_pixels,axis=0)
self.ms_band_cov = np.divide(self.ms_band_std,self.ms_band_mean)
self.ms_band_skew = sp.stats.skew(self.ms_pixels,axis=0)
self.ms_band_kurt = sp.stats.kurtosis(self.ms_pixels,axis=0)
self.ms_band_sum = self.ms_pixels.sum(axis=0)
self.ms_band_rng = self.ms_band_max-self.ms_band_min
self.ms_band_rngsig = np.divide(self.ms_band_rng,self.ms_band_std)
self.ms_band_rngmean = np.divide(self.ms_band_rng,self.ms_band_mean)
if(mode):
self.ms_band_mode = sp.stats.mode(self.ms_pixels,axis=0)[0][0]
self.ms_band_deciles = np.percentile(
self.ms_pixels,np.linspace(10,90,9),axis=0)
self.ms_band_quartiles = np.percentile(self.ms_pixels,[25,75],axis=0)
self.ms_band_iqr = self.ms_band_quartiles[1,:]-self.ms_band_quartiles[0,:]
self.ms_band_iqrsig = np.divide(self.ms_band_iqr,self.ms_band_std)
self.ms_band_iqrmean = np.divide(self.ms_band_iqr,self.ms_band_mean)
self.ms_band_ratio = self.ms_band_mean[:4:]/np.sum(self.ms_band_mean[:4:])
def createMSGLCMfeats(self,distance=1):
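# Haralick GLCM features computed separately for each of the four MS bands at
# the given distance, stacked with the across-band mean and flattened.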
if not hasattr(self,'glcm_rgb_img'):
self.__createGLCMimgs()
glcm_ms_vals = np.vstack((
mht.haralick(self.glcm_ms_img[:,:,0],ignore_zeros=True,
return_mean_ptp=True,distance=distance),
mht.haralick(self.glcm_ms_img[:,:,1],ignore_zeros=True,
return_mean_ptp=True,distance=distance),
mht.haralick(self.glcm_ms_img[:,:,2],ignore_zeros=True,
return_mean_ptp=True,distance=distance),
mht.haralick(self.glcm_ms_img[:,:,3],ignore_zeros=True,
return_mean_ptp=True,distance=distance)
))
glcm_ms_vals = np.vstack((glcm_ms_vals,glcm_ms_vals.mean(axis=0))).flatten('C')
if not hasattr(self,'glcm_ms_vals'):
self.glcm_ms_vals = glcm_ms_vals
else:
self.glcm_ms_vals = np.concatenate((self.glcm_ms_vals,
glcm_ms_vals))
if not hasattr(self,'glcm_ms_dist'):
self.glcm_ms_dist = [distance]
else:
self.glcm_ms_dist.append(distance)
def createMSautoCorFeats(self,distance=1):
if not hasattr(self,'glcm_rgb_img'):
self.__createGLCMimgs()
acfeats = np.empty([4,2])
for acor_i in range(4):
N = self.__imgAutocorrelate(self.glcm_ms_img[:,:,acor_i],0,distance)
NE = self.__imgAutocorrelate(self.glcm_ms_img[:,:,acor_i],distance,distance)
E = self.__imgAutocorrelate(self.glcm_ms_img[:,:,acor_i],distance,0)
SE = self.__imgAutocorrelate(self.glcm_ms_img[:,:,acor_i],distance,-distance)
acors = np.array([N,NE,E,SE])
acfeats[acor_i,:] = np.array([acors.mean(),acors.max()-acors.min()])
acfeats = np.vstack((acfeats,acfeats.mean(axis=0))).flatten('C')
if not hasattr(self,'acor_ms_vals'):
self.acor_ms_vals = acfeats
else:
self.acor_ms_vals = np.concatenate((self.acor_ms_vals,
acfeats))
if not hasattr(self,'acor_ms_dist'):
self.acor_ms_dist = [distance]
else:
self.acor_ms_dist.append(distance)
def createMSLBPFeats(self,distance=1):
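# Uniform LBP histograms computed per MS band, normalised per band, stacked
# with the across-band mean histogram and flattened.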
if distance not in [1,2,3]:
raise ValueError('distance can only be 1,2 or 3')
count_table = np.zeros([4,2+distance*8])
for lbp_i in range(4):
lbp_img = local_binary_pattern(self.ms_img_clip[:,:,lbp_i],8*distance,distance,method='uniform')
lbp_pix = lbp_img[self.ms_mask_clip]
unique, counts = np.unique(lbp_pix, return_counts = True)
table = np.zeros([2+distance*8])
table[unique.astype('int')]=counts
count_table[lbp_i,:] = table/table.sum()
count_table = np.vstack((count_table,count_table.mean(axis=0))).flatten('C')
if not hasattr(self,'lbp_ms_vals'):
self.lbp_ms_vals = count_table
else:
self.lbp_ms_vals = np.concatenate((self.lbp_ms_vals,count_table))
if not hasattr(self,'lbp_ms_dist'):
self.lbp_ms_dist = [distance]
else:
self.lbp_ms_dist.append(distance)
def createMSLawsFeats(self):
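# Laws texture energy features (14 means and 14 standard deviations) computed
# per MS band as in createRGBLawsFeats, plus the across-band mean row.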
# Construct filter bank
L5 = np.array([1,4,6,4,1])
E5 = np.array([-1,-2,0,2,1])
S5 = np.array([-1,0,2,0,-1])
R5 = np.array([1,-4,6,-4,1])
W5 = np.array([-1,2,0,-2,1])
filtbank = [L5,E5,S5,R5,W5]
del L5, E5, S5, R5, W5
filtgrid = np.zeros([5,5,5,5])
for i in range(5):
for j in range(5):
filtgrid[i,j,:,:]=(np.outer(filtbank[i],filtbank[j]))
del filtbank
# compute features
lawsFeat = np.zeros([4,28])
for band in range(4):
mean_15 = convolve(self.ms_img_clip[:,:,band],np.ones([15,15])/225,mode='reflect')
norm_gray = self.ms_img_clip[:,:,band]-mean_15
del mean_15
count_i = 0
for i in range(5):
for j in range(5):
if j < i or (i==0 and j ==0):
continue
if j==i:
convimg = convolve(norm_gray,filtgrid[i,j],mode='reflect')
lawsimg = convolve(np.absolute(convimg),np.ones([15,15]),mode='reflect')
lawsFeat[band,count_i] = lawsimg[self.ms_mask_clip].mean()
lawsFeat[band,count_i+14] = lawsimg[self.ms_mask_clip].std()
count_i += 1
else:
convimg1 = np.absolute(convolve(norm_gray,filtgrid[i,j],mode='reflect'))
convimg2 = np.absolute(convolve(norm_gray,filtgrid[j,i],mode='reflect'))
lawsimg = convolve(convimg1+convimg2,np.ones([15,15])/2,mode='reflect')
lawsFeat[band,count_i] = lawsimg[self.ms_mask_clip].mean()
lawsFeat[band,count_i+14] = lawsimg[self.ms_mask_clip].std()
count_i += 1
self.laws_ms_feats = np.vstack((lawsFeat,lawsFeat.mean(axis=0)))
def createSpecIndices(self):
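# Per-pixel spectral vegetation indices: 16 RGB-based indices (GRVI, VARI,
# GLI variants, ExG/ExR/ExB variants, TGI, mGRVI, RGBVI, IKAW) and 8 MS-based
# indices (NDVI, green and red-edge NDVI, CIG, CVI, GRVI, mGRVI, NegExR);
# 1e-15 is added to each denominator to guard against division by zero.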
GRVI_pixels = np.divide(self.rgb_pixels[:,1]-self.rgb_pixels[:,0],
self.rgb_pixels[:,1]+self.rgb_pixels[:,0]+1e-15)
VARI_pixels = np.divide(self.rgb_pixels[:,1]-self.rgb_pixels[:,0],
self.rgb_pixels[:,1]+self.rgb_pixels[:,0]\
-self.rgb_pixels[:,2]+1e-15)
GLIr_pixels = np.divide(2*self.rgb_pixels[:,0] - self.rgb_pixels[:,1]\
-self.rgb_pixels[:,2],
2*self.rgb_pixels[:,0]+self.rgb_pixels[:,1]\
+self.rgb_pixels[:,2]+1e-15)
GLIg_pixels = np.divide(2*self.rgb_pixels[:,1] - self.rgb_pixels[:,0]\
-self.rgb_pixels[:,2],
2*self.rgb_pixels[:,1]+self.rgb_pixels[:,0]\
+self.rgb_pixels[:,2]+1e-15)
GLIb_pixels = np.divide(2*self.rgb_pixels[:,2] - self.rgb_pixels[:,1]\
-self.rgb_pixels[:,0],
2*self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
+self.rgb_pixels[:,0]+1e-15)
ExG_pixels = np.divide(2*self.rgb_pixels[:,1] - self.rgb_pixels[:,0]\
-self.rgb_pixels[:,2],
self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
+self.rgb_pixels[:,0]+1e-15)
ExR_pixels = np.divide(2*self.rgb_pixels[:,0] - self.rgb_pixels[:,1]\
-self.rgb_pixels[:,2],
self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
+self.rgb_pixels[:,0]+1e-15)
ExB_pixels = np.divide(2*self.rgb_pixels[:,2] - self.rgb_pixels[:,0]\
-self.rgb_pixels[:,1],
self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
+self.rgb_pixels[:,0]+1e-15)
ExGveg_pixels = 2*self.rgb_pixels[:,1]- self.rgb_pixels[:,0]\
-self.rgb_pixels[:,2]+50
NegExR_pixels = self.rgb_pixels[:,1]- 1.4*self.rgb_pixels[:,0]
ExRveg_pixels = np.divide(1.4*self.rgb_pixels[:,1] -\
self.rgb_pixels[:,0],
self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
+self.rgb_pixels[:,0]+1e-15)
ExBveg_pixels = np.divide(1.4*self.rgb_pixels[:,2] -\
self.rgb_pixels[:,0],
self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
+self.rgb_pixels[:,0]+1e-15)
TGI_pixels = self.rgb_pixels[:,1] -0.39*self.rgb_pixels[:,0]\
-0.61*self.rgb_pixels[:,2]
mGRVI_pixels = np.divide(self.rgb_pixels[:,1]*self.rgb_pixels[:,1] -\
self.rgb_pixels[:,0]*self.rgb_pixels[:,0],
self.rgb_pixels[:,1]*self.rgb_pixels[:,1] +\
self.rgb_pixels[:,0]*self.rgb_pixels[:,0]+\
1e-15)
RGBVI_pixels = np.divide(self.rgb_pixels[:,1]*self.rgb_pixels[:,1] -\
self.rgb_pixels[:,0]*self.rgb_pixels[:,2],
self.rgb_pixels[:,1]*self.rgb_pixels[:,1] +\
self.rgb_pixels[:,0]*self.rgb_pixels[:,2]+\
1e-15)
IKAW_pixels = np.divide(self.rgb_pixels[:,0]-self.rgb_pixels[:,2],
self.rgb_pixels[:,0]+self.rgb_pixels[:,2]+1e-15)
NDVI_pixels = np.divide(self.ms_pixels[:,3]-self.ms_pixels[:,1],
self.ms_pixels[:,3]+self.ms_pixels[:,1]+1e-15)
NDVIg_pixels = np.divide(self.ms_pixels[:,3]-self.ms_pixels[:,0],
self.ms_pixels[:,3]+self.ms_pixels[:,0]+1e-15)
NDVIre_pixels = np.divide(self.ms_pixels[:,3]-self.ms_pixels[:,2],
self.ms_pixels[:,3]+self.ms_pixels[:,2]+1e-15)
CIG_pixels = np.divide(self.ms_pixels[:,3],self.ms_pixels[:,0]+1e-15)-1
CVI_pixels = np.divide(
np.multiply(
np.multiply(self.ms_pixels[:,3],self.ms_pixels[:,1]),
self.ms_pixels[:,1]
),
self.ms_pixels[:,0]+1e-15
)
GRVIms_pixels = np.divide(self.ms_pixels[:,0]-self.ms_pixels[:,1],
self.ms_pixels[:,0]+self.ms_pixels[:,1]+1e-15)
mGRVIms_pixels = np.divide(self.ms_pixels[:,0]*self.ms_pixels[:,0]-\
self.ms_pixels[:,1]*self.ms_pixels[:,1],
self.ms_pixels[:,0]*self.ms_pixels[:,0]+\
self.ms_pixels[:,1]*self.ms_pixels[:,1]
+1e-15)
NegExRms_pixels = self.ms_pixels[:,0] - 1.4* self.ms_pixels[:,1]
self.rgbindex_list = ('GRVI','VARI','GLIr','GLIg','GLIb','ExG','ExR',
'ExB','ExGveg','NegExR','ExRveg','ExBveg','TGI',
'mGRVI','RGBVI','IKAW')
self.msindex_list = ('NDVI','NDVIg','NDVIre','CIG','CVI','GRVI',
'mGRVI','NegExR')
self.rgb_indices = np.stack((
GRVI_pixels, VARI_pixels, GLIr_pixels, GLIg_pixels,
GLIb_pixels, ExG_pixels, ExR_pixels, ExB_pixels,
ExGveg_pixels, NegExR_pixels, ExRveg_pixels, ExBveg_pixels,
TGI_pixels, mGRVI_pixels, RGBVI_pixels, IKAW_pixels
),axis=1)
self.ms_indices = np.stack((
NDVI_pixels, NDVIg_pixels, NDVIre_pixels, CIG_pixels,
CVI_pixels, GRVIms_pixels, mGRVIms_pixels, NegExRms_pixels
),axis=1)
def createRGBIndFeats(self,mode=False):
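# Summary statistics of the per-pixel RGB spectral indices (computed on demand
# via createSpecIndices), analogous to the band statistics above.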
if not hasattr(self,'rgb_indices'):
self.createSpecIndices()
self.rgb_ind_max = self.rgb_indices.max(axis=0)
self.rgb_ind_min = self.rgb_indices.min(axis=0)
self.rgb_ind_mean = self.rgb_indices.mean(axis=0)
self.rgb_ind_std = self.rgb_indices.std(axis=0)
self.rgb_ind_median = np.median(self.rgb_indices,axis=0)
self.rgb_ind_cov = np.divide(self.rgb_ind_std,self.rgb_ind_mean)
self.rgb_ind_skew = sp.stats.skew(self.rgb_indices,axis=0)
self.rgb_ind_kurt = sp.stats.kurtosis(self.rgb_indices,axis=0)
self.rgb_ind_sum = self.rgb_indices.sum(axis=0)
self.rgb_ind_rng = self.rgb_ind_max-self.rgb_ind_min
self.rgb_ind_rngsig = np.divide(self.rgb_ind_rng,self.rgb_ind_std)
self.rgb_ind_rngmean = np.divide(self.rgb_ind_rng,self.rgb_ind_mean)
if(mode):
self.rgb_ind_mode = sp.stats.mode(self.rgb_indices,axis=0)[0][0]
self.rgb_ind_deciles = np.percentile(self.rgb_indices,
np.linspace(10,90,9),axis=0)
self.rgb_ind_quartiles = np.percentile(self.rgb_indices,[25,75],axis=0)
self.rgb_ind_iqr = self.rgb_ind_quartiles[1,:]-self.rgb_ind_quartiles[0,:]
self.rgb_ind_iqrsig = np.divide(self.rgb_ind_iqr,self.rgb_ind_std)
self.rgb_ind_iqrmean = np.divide(self.rgb_ind_iqr,self.rgb_ind_mean)
def createMSIndFeats(self,mode=False):
if not hasattr(self,'ms_indices'):
self.createSpecIndices()
self.ms_ind_max = self.ms_indices.max(axis=0)
self.ms_ind_min = self.ms_indices.min(axis=0)
self.ms_ind_mean = self.ms_indices.mean(axis=0)
self.ms_ind_std = self.ms_indices.std(axis=0)
self.ms_ind_median = np.median(self.ms_indices,axis=0)
self.ms_ind_cov = np.divide(self.ms_ind_std,self.ms_ind_mean)
self.ms_ind_skew = sp.stats.skew(self.ms_indices,axis=0)
self.ms_ind_kurt = sp.stats.kurtosis(self.ms_indices,axis=0)
self.ms_ind_sum = self.ms_indices.sum(axis=0)
self.ms_ind_rng = self.ms_ind_max-self.ms_ind_min
self.ms_ind_rngsig = np.divide(self.ms_ind_rng,self.ms_ind_std)
self.ms_ind_rngmean = np.divide(self.ms_ind_rng,self.ms_ind_mean)
if(mode):
self.ms_ind_mode = sp.stats.mode(self.ms_indices,axis=0)[0][0]
self.ms_ind_deciles = np.percentile(self.ms_indices,
np.linspace(10,90,9),axis=0)
self.ms_ind_quartiles = np.percentile(self.ms_indices,[25,75],axis=0)
self.ms_ind_iqr = self.ms_ind_quartiles[1,:]-self.ms_ind_quartiles[0,:]
self.ms_ind_iqrsig = np.divide(self.ms_ind_iqr,self.ms_ind_std)
self.ms_ind_iqrmean = np.divide(self.ms_ind_iqr,self.ms_ind_mean)
def createDSMRawFeats(self,mode=False):
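# Summary statistics of the raw DSM (height) pixels, including median-centred
# variants (max, min, sum, deciles and quartiles minus the median) so that
# several features describe relative rather than absolute height.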
self.dsm_raw_max = np.array([self.dsm_pixels.max(axis=0)])
self.dsm_raw_min = np.array([self.dsm_pixels.min(axis=0)])
self.dsm_raw_mean = np.array([self.dsm_pixels.mean(axis=0)])
self.dsm_raw_std = np.array([self.dsm_pixels.std(axis=0)])
self.dsm_raw_median = np.array([np.median(self.dsm_pixels,axis=0)])
self.dsm_raw_cov = np.divide(self.dsm_raw_std,self.dsm_raw_mean)
self.dsm_raw_skew = np.array([sp.stats.skew(self.dsm_pixels,axis=0)])
self.dsm_raw_kurt = np.array([sp.stats.kurtosis(self.dsm_pixels,axis=0)])
self.dsm_raw_sum = np.array([self.dsm_pixels.sum(axis=0)])
self.dsm_raw_rng = self.dsm_raw_max-self.dsm_raw_min
self.dsm_raw_rngsig = np.divide(self.dsm_raw_rng,self.dsm_raw_std)
self.dsm_raw_rngmean = np.divide(self.dsm_raw_rng,self.dsm_raw_mean)
if(mode):
self.dsm_raw_mode = sp.stats.mode(self.dsm_pixels,axis=0)[0][0]
self.dsm_raw_deciles = np.percentile(
self.dsm_pixels,np.linspace(10,90,9),axis=0)
self.dsm_raw_quartiles = np.percentile(self.dsm_pixels,[25,75],axis=0)
self.dsm_raw_iqr = np.array([self.dsm_raw_quartiles[1]-self.dsm_raw_quartiles[0]])
self.dsm_raw_iqrsig = np.divide(self.dsm_raw_iqr,self.dsm_raw_std)
self.dsm_raw_iqrmean = np.divide(self.dsm_raw_iqr,self.dsm_raw_mean)
self.dsm_raw_mad = np.array([np.median(np.absolute(self.dsm_pixels - np.median(self.dsm_pixels)))])
self.dsm_raw_maxmed = self.dsm_raw_max - self.dsm_raw_median
self.dsm_raw_minmed = self.dsm_raw_min - self.dsm_raw_median
self.dsm_raw_summed = np.array([(self.dsm_pixels-self.dsm_raw_median).sum(axis=0)])
self.dsm_raw_decilesmed = self.dsm_raw_deciles - self.dsm_raw_median
self.dsm_raw_quartilesmed = self.dsm_raw_quartiles - self.dsm_raw_median
def __createDSMGLCMImg(self,levels=32):
# clamp levels number of height bands, spread uniformly so that
# 0 means below 5% percentile of H, levels-1 means above top 95%-ile
# and all else are spread out linearly in this range
# clamp minimum of 1 in region of interest to avoid issue of mostly zeroes
if(levels>255):
raise ValueError('max number of levels is 255')
lims = np.percentile(self.dsm_pixels,[5,95],axis=0)
scaleimg = rescale_intensity(self.dsm_img_clip,in_range = (lims[0],lims[1]))
self.dsm_glcm_img = (scaleimg*(levels-1)).astype('uint8')
local_img=np.zeros(self.dsm_glcm_img.shape,dtype='uint8')
local_img[self.dsm_mask_clip[0],self.dsm_mask_clip[1]]=np.maximum(self.dsm_glcm_img[self.dsm_mask_clip[0],self.dsm_mask_clip[1]],np.ones(len(self.dsm_mask_clip[0])))
local_img = local_img[~np.all(local_img==0,axis=1),:]
local_img = local_img[:,~np.all(local_img==0,axis=0)]
self.dsm_glcm_img_masked = local_img
def createDSMGLCMfeats(self,distance=1):
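# Haralick GLCM features of the DSM after its heights have been rescaled to a
# small number of discrete grey levels (see __createDSMGLCMImg); mean and
# range over directions, appended per distance as for the RGB version.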
if not hasattr(self,'dsm_glcm_img_masked'):
self.__createDSMGLCMImg()
glcm_dsm_vals = mht.haralick(self.dsm_glcm_img_masked,ignore_zeros=True,
return_mean_ptp=True,distance=distance)
if not hasattr(self,'glcm_dsm_vals'):
self.glcm_dsm_vals = glcm_dsm_vals
else:
self.glcm_dsm_vals = np.concatenate((self.glcm_dsm_vals,
glcm_dsm_vals))
if not hasattr(self,'glcm_dsm_dist'):
self.glcm_dsm_dist = [distance]
else:
self.glcm_dsm_dist.append(distance)
def createDSMautoCorFeats(self,distance=1):
local_img = np.zeros(self.dsm_img_clip.shape)
local_img[self.dsm_mask_clip[0],self.dsm_mask_clip[1]]=self.dsm_img_clip[self.dsm_mask_clip[0],self.dsm_mask_clip[1]]
N = self.__imgAutocorrelate(local_img,0,distance)
NE = self.__imgAutocorrelate(local_img,distance,distance)
E = self.__imgAutocorrelate(local_img,distance,0)
SE = self.__imgAutocorrelate(local_img,distance,-distance)
acors = np.array([N,NE,E,SE])
acfeats = np.array([acors.mean(),acors.max()-acors.min()])
if not hasattr(self,'acor_dsm_vals'):
self.acor_dsm_vals = acfeats
else:
self.acor_dsm_vals = np.concatenate((self.acor_dsm_vals,
acfeats))
if not hasattr(self,'acor_dsm_dist'):
self.acor_dsm_dist = [distance]
else:
self.acor_dsm_dist.append(distance)
def createDSMLBPFeats(self,distance=1):
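# Uniform LBP histogram of the quantised DSM image over the masked pixels,
# normalised to sum to one.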
if distance not in [1,2,3]:
raise ValueError('distance can only be 1,2 or 3')
if not hasattr(self,'dsm_glcm_img'):
self.__createDSMGLCMImg()
lbp_img = local_binary_pattern(self.dsm_glcm_img,8*distance,distance,method='uniform')
lbp_pix = lbp_img[self.dsm_mask_clip]
unique, counts = np.unique(lbp_pix, return_counts = True)
count_table = np.zeros([2+distance*8])
count_table[unique.astype('int')]=counts
count_table = count_table/count_table.sum()
if not hasattr(self,'lbp_dsm_vals'):
self.lbp_dsm_vals = count_table
else:
self.lbp_dsm_vals = np.concatenate((self.lbp_dsm_vals,count_table))
if not hasattr(self,'lbp_dsm_dist'):
self.lbp_dsm_dist = [distance]
else:
self.lbp_dsm_dist.append(distance)
def createDSMLawsFeats(self):
mean_15 = convolve(self.dsm_img_clip,np.ones([15,15])/225,mode='reflect')
norm_gray = self.dsm_img_clip-mean_15
del mean_15
# Construct filter bank
L5 = np.array([1,4,6,4,1])
E5 = np.array([-1,-2,0,2,1])
S5 = np.array([-1,0,2,0,-1])
R5 = np.array([1,-4,6,-4,1])
W5 = np.array([-1,2,0,-2,1])
filtbank = [L5,E5,S5,R5,W5]
del L5, E5, S5, R5, W5
filtgrid = np.zeros([5,5,5,5])
for i in range(5):
for j in range(5):
filtgrid[i,j,:,:]=(np.outer(filtbank[i],filtbank[j]))
del filtbank
# compute features
lawsFeat = np.zeros([14,2])
count_i = 0
for i in range(5):
for j in range(5):
if j < i or (i==0 and j ==0):
continue
if j==i:
convimg = convolve(norm_gray,filtgrid[i,j],mode='reflect')
lawsimg = convolve(np.absolute(convimg),np.ones([15,15]),mode='reflect')
lawsFeat[count_i,0] = lawsimg[self.dsm_mask_clip].mean()
lawsFeat[count_i,1] = lawsimg[self.dsm_mask_clip].std()
count_i += 1
else:
convimg1 = np.absolute(convolve(norm_gray,filtgrid[i,j],mode='reflect'))
convimg2 = np.absolute(convolve(norm_gray,filtgrid[j,i],mode='reflect'))
lawsimg = convolve(convimg1+convimg2,np.ones([15,15])/2,mode='reflect')
lawsFeat[count_i,0] = lawsimg[self.dsm_mask_clip].mean()
lawsFeat[count_i,1] = lawsimg[self.dsm_mask_clip].std()
count_i += 1
self.laws_dsm_feats = lawsFeat
def stackFeats(self):
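# Concatenate every feature group that has been computed into a single 1-D
# feature vector, together with parallel lists holding each feature's name,
# feature class, size- and height-invariance flags, and texture scale.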
featStack = np.array([])
featList = []
featClass = []
featSizeInvar = []
featHeightInvar=[]
featScale = []
if hasattr(self,'rgb_band_max'):
featList.extend(['rgb_band_max_R','rgb_band_max_G','rgb_band_max_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_max
else:
featStack = np.concatenate((featStack,self.rgb_band_max))
if hasattr(self,'rgb_band_min'):
featList.extend(['rgb_band_min_R','rgb_band_min_G','rgb_band_min_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_min
else:
featStack = np.concatenate((featStack,self.rgb_band_min))
if hasattr(self,'rgb_band_mean'):
featList.extend(['rgb_band_mean_R','rgb_band_mean_G','rgb_band_mean_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_mean
else:
featStack = np.concatenate((featStack,self.rgb_band_mean))
if hasattr(self,'rgb_band_std'):
featList.extend(['rgb_band_std_R','rgb_band_std_G','rgb_band_std_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_std
else:
featStack = np.concatenate((featStack,self.rgb_band_std))
if hasattr(self,'rgb_band_median'):
featList.extend(['rgb_band_median_R','rgb_band_median_G','rgb_band_median_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_median
else:
featStack = np.concatenate((featStack,self.rgb_band_median))
if hasattr(self,'rgb_band_cov'):
featList.extend(['rgb_band_cov_R','rgb_band_cov_G','rgb_band_cov_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_cov
else:
featStack = np.concatenate((featStack,self.rgb_band_cov))
if hasattr(self,'rgb_band_skew'):
featList.extend(['rgb_band_skew_R','rgb_band_skew_G','rgb_band_skew_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_skew
else:
featStack = np.concatenate((featStack,self.rgb_band_skew))
if hasattr(self,'rgb_band_kurt'):
featList.extend(['rgb_band_kurt_R','rgb_band_kurt_G','rgb_band_kurt_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_kurt
else:
featStack = np.concatenate((featStack,self.rgb_band_kurt))
if hasattr(self,'rgb_band_sum'):
featList.extend(['rgb_band_sum_R','rgb_band_sum_G','rgb_band_sum_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([False]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_sum
else:
featStack = np.concatenate((featStack,self.rgb_band_sum))
if hasattr(self,'rgb_band_rng'):
featList.extend(['rgb_band_rng_R','rgb_band_rng_G','rgb_band_rng_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_rng
else:
featStack = np.concatenate((featStack,self.rgb_band_rng))
if hasattr(self,'rgb_band_rngsig'):
featList.extend(['rgb_band_rngsig_R','rgb_band_rngsig_G','rgb_band_rngsig_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_rngsig
else:
featStack = np.concatenate((featStack,self.rgb_band_rngsig))
if hasattr(self,'rgb_band_rngmean'):
featList.extend(['rgb_band_rngmean_R','rgb_band_rngmean_G','rgb_band_rngmean_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_rngmean
else:
featStack = np.concatenate((featStack,self.rgb_band_rngmean))
if hasattr(self,'rgb_band_mode'):
featList.extend(['rgb_band_mode_R','rgb_band_mode_G','rgb_band_mode_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_mode
else:
featStack = np.concatenate((featStack,self.rgb_band_mode))
if hasattr(self,'rgb_band_deciles'):
featList.extend(['rgb_band_decile_R_1','rgb_band_decile_R_2',
'rgb_band_decile_R_3','rgb_band_decile_R_4',
'rgb_band_decile_R_5','rgb_band_decile_R_6',
'rgb_band_decile_R_7','rgb_band_decile_R_8',
'rgb_band_decile_R_9','rgb_band_decile_G_1',
'rgb_band_decile_G_2','rgb_band_decile_G_3',
'rgb_band_decile_G_4','rgb_band_decile_G_5',
'rgb_band_decile_G_6','rgb_band_decile_G_7',
'rgb_band_decile_G_8','rgb_band_decile_G_9',
'rgb_band_decile_B_1','rgb_band_decile_B_2',
'rgb_band_decile_B_3','rgb_band_decile_B_4',
'rgb_band_decile_B_5','rgb_band_decile_B_6',
'rgb_band_decile_B_7','rgb_band_decile_B_8',
'rgb_band_decile_B_9'])
featClass.extend(['rgb_band']*27)
featSizeInvar.extend([True]*27)
featHeightInvar.extend([True]*27)
featScale.extend([0]*27)
if featStack.size==0:
featStack = self.rgb_band_deciles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.rgb_band_deciles.flatten('F')))
if hasattr(self,'rgb_band_quartiles'):
featList.extend(['rgb_band_quartile_R_1','rgb_band_quartile_R_3',
'rgb_band_quartile_G_1','rgb_band_quartile_G_3',
'rgb_band_quartile_B_1','rgb_band_quartile_B_3'])
featClass.extend(['rgb_band']*6)
featSizeInvar.extend([True]*6)
featHeightInvar.extend([True]*6)
featScale.extend([0]*6)
if featStack.size==0:
featStack = self.rgb_band_quartiles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.rgb_band_quartiles.flatten('F')))
if hasattr(self,'rgb_band_iqr'):
featList.extend(['rgb_band_iqr_R','rgb_band_iqr_G','rgb_band_iqr_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_iqr
else:
featStack = np.concatenate((featStack,self.rgb_band_iqr))
if hasattr(self,'rgb_band_iqrsig'):
featList.extend(['rgb_band_iqrsig_R','rgb_band_iqrsig_G','rgb_band_iqrsig_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_iqrsig
else:
featStack = np.concatenate((featStack,self.rgb_band_iqrsig))
if hasattr(self,'rgb_band_iqrmean'):
featList.extend(['rgb_band_iqrmean_R','rgb_band_iqrmean_G','rgb_band_iqrmean_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_iqrmean
else:
featStack = np.concatenate((featStack,self.rgb_band_iqrmean))
if hasattr(self,'rgb_band_ratio'):
featList.extend(['rgb_band_ratio_R','rgb_band_ratio_G','rgb_band_ratio_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_ratio
else:
featStack = np.concatenate((featStack,self.rgb_band_ratio))
if hasattr(self,'top_rgb_max'):
featList.extend(['top_rgb_max_R','top_rgb_max_G','top_rgb_max_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_max
else:
featStack = np.concatenate((featStack,self.top_rgb_max))
if hasattr(self,'top_rgb_min'):
featList.extend(['top_rgb_min_R','top_rgb_min_G','top_rgb_min_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_min
else:
featStack = np.concatenate((featStack,self.top_rgb_min))
if hasattr(self,'top_rgb_mean'):
featList.extend(['top_rgb_mean_R','top_rgb_mean_G','top_rgb_mean_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_mean
else:
featStack = np.concatenate((featStack,self.top_rgb_mean))
if hasattr(self,'top_rgb_std'):
featList.extend(['top_rgb_std_R','top_rgb_std_G','top_rgb_std_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_std
else:
featStack = np.concatenate((featStack,self.top_rgb_std))
if hasattr(self,'top_rgb_median'):
featList.extend(['top_rgb_median_R','top_rgb_median_G','top_rgb_median_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_median
else:
featStack = np.concatenate((featStack,self.top_rgb_median))
if hasattr(self,'top_rgb_cov'):
featList.extend(['top_rgb_cov_R','top_rgb_cov_G','top_rgb_cov_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_cov
else:
featStack = np.concatenate((featStack,self.top_rgb_cov))
if hasattr(self,'top_rgb_skew'):
featList.extend(['top_rgb_skew_R','top_rgb_skew_G','top_rgb_skew_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_skew
else:
featStack = np.concatenate((featStack,self.top_rgb_skew))
if hasattr(self,'top_rgb_kurt'):
featList.extend(['top_rgb_kurt_R','top_rgb_kurt_G','top_rgb_kurt_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_kurt
else:
featStack = np.concatenate((featStack,self.top_rgb_kurt))
if hasattr(self,'top_rgb_sum'):
featList.extend(['top_rgb_sum_R','top_rgb_sum_G','top_rgb_sum_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([False]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_sum
else:
featStack = np.concatenate((featStack,self.top_rgb_sum))
if hasattr(self,'top_rgb_rng'):
featList.extend(['top_rgb_rng_R','top_rgb_rng_G','top_rgb_rng_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_rng
else:
featStack = np.concatenate((featStack,self.top_rgb_rng))
if hasattr(self,'top_rgb_rngsig'):
featList.extend(['top_rgb_rngsig_R','top_rgb_rngsig_G','top_rgb_rngsig_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_rngsig
else:
featStack = np.concatenate((featStack,self.top_rgb_rngsig))
if hasattr(self,'top_rgb_rngmean'):
featList.extend(['top_rgb_rngmean_R','top_rgb_rngmean_G','top_rgb_rngmean_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_rngmean
else:
featStack = np.concatenate((featStack,self.top_rgb_rngmean))
if hasattr(self,'top_rgb_mode'):
featList.extend(['top_rgb_mode_R','top_rgb_mode_G','top_rgb_mode_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_mode
else:
featStack = np.concatenate((featStack,self.top_rgb_mode))
if hasattr(self,'top_rgb_deciles'):
featList.extend(['top_rgb_decile_R_1','top_rgb_decile_R_2',
'top_rgb_decile_R_3','top_rgb_decile_R_4',
'top_rgb_decile_R_5','top_rgb_decile_R_6',
'top_rgb_decile_R_7','top_rgb_decile_R_8',
'top_rgb_decile_R_9','top_rgb_decile_G_1',
'top_rgb_decile_G_2','top_rgb_decile_G_3',
'top_rgb_decile_G_4','top_rgb_decile_G_5',
'top_rgb_decile_G_6','top_rgb_decile_G_7',
'top_rgb_decile_G_8','top_rgb_decile_G_9',
'top_rgb_decile_B_1','top_rgb_decile_B_2',
'top_rgb_decile_B_3','top_rgb_decile_B_4',
'top_rgb_decile_B_5','top_rgb_decile_B_6',
'top_rgb_decile_B_7','top_rgb_decile_B_8',
'top_rgb_decile_B_9'])
featClass.extend(['rgb_top']*27)
featSizeInvar.extend([True]*27)
featHeightInvar.extend([True]*27)
featScale.extend([0]*27)
if featStack.size==0:
featStack = self.top_rgb_deciles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.top_rgb_deciles.flatten('F')))
if hasattr(self,'top_rgb_quartiles'):
featList.extend(['top_rgb_quartile_R_1','top_rgb_quartile_R_3',
'top_rgb_quartile_G_1','top_rgb_quartile_G_3',
'top_rgb_quartile_B_1','top_rgb_quartile_B_3'])
featClass.extend(['rgb_top']*6)
featSizeInvar.extend([True]*6)
featHeightInvar.extend([True]*6)
featScale.extend([0]*6)
if featStack.size==0:
featStack = self.top_rgb_quartiles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.top_rgb_quartiles.flatten('F')))
if hasattr(self,'top_rgb_iqr'):
featList.extend(['top_rgb_iqr_R','top_rgb_iqr_G','top_rgb_iqr_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_iqr
else:
featStack = np.concatenate((featStack,self.top_rgb_iqr))
if hasattr(self,'top_rgb_iqrsig'):
featList.extend(['top_rgb_iqrsig_R','top_rgb_iqrsig_G','top_rgb_iqrsig_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_iqrsig
else:
featStack = np.concatenate((featStack,self.top_rgb_iqrsig))
if hasattr(self,'top_rgb_iqrmean'):
featList.extend(['top_rgb_iqrmean_R','top_rgb_iqrmean_G','top_rgb_iqrmean_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_iqrmean
else:
featStack = np.concatenate((featStack,self.top_rgb_iqrmean))
if hasattr(self,'top_rgb_ratio'):
featList.extend(['top_rgb_ratio_R','top_rgb_ratio_G','top_rgb_ratio_B'])
featClass.extend(['rgb_top']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.top_rgb_ratio
else:
featStack = np.concatenate((featStack,self.top_rgb_ratio))
if hasattr(self,'rgb_ind_max'):
featList.extend(['rgb_ind_max_GRVI','rgb_ind_max_VARI',
'rgb_ind_max_GLIr','rgb_ind_max_GLIg',
'rgb_ind_max_GLIb','rgb_ind_max_ExG',
'rgb_ind_max_ExR','rgb_ind_max_ExB',
'rgb_ind_max_ExGveg','rgb_ind_max_NegExR',
'rgb_ind_max_ExRveg','rgb_ind_max_ExBveg',
'rgb_ind_max_TGI','rgb_ind_max_mGRVI',
'rgb_ind_max_RGBVI','rgb_ind_max_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_max
else:
featStack = np.concatenate((featStack,self.rgb_ind_max))
if hasattr(self,'rgb_ind_min'):
featList.extend(['rgb_ind_min_GRVI','rgb_ind_min_VARI',
'rgb_ind_min_GLIr','rgb_ind_min_GLIg',
'rgb_ind_min_GLIb','rgb_ind_min_ExG',
'rgb_ind_min_ExR','rgb_ind_min_ExB',
'rgb_ind_min_ExGveg','rgb_ind_min_NegExR',
'rgb_ind_min_ExRveg','rgb_ind_min_ExBveg',
'rgb_ind_min_TGI','rgb_ind_min_mGRVI',
'rgb_ind_min_RGBVI','rgb_ind_min_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_min
else:
featStack = np.concatenate((featStack,self.rgb_ind_min))
if hasattr(self,'rgb_ind_mean'):
featList.extend(['rgb_ind_mean_GRVI','rgb_ind_mean_VARI',
'rgb_ind_mean_GLIr','rgb_ind_mean_GLIg',
'rgb_ind_mean_GLIb','rgb_ind_mean_ExG',
'rgb_ind_mean_ExR','rgb_ind_mean_ExB',
'rgb_ind_mean_ExGveg','rgb_ind_mean_NegExR',
'rgb_ind_mean_ExRveg','rgb_ind_mean_ExBveg',
'rgb_ind_mean_TGI','rgb_ind_mean_mGRVI',
'rgb_ind_mean_RGBVI','rgb_ind_mean_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_mean
else:
featStack = np.concatenate((featStack,self.rgb_ind_mean))
if hasattr(self,'rgb_ind_std'):
featList.extend(['rgb_ind_std_GRVI','rgb_ind_std_VARI',
'rgb_ind_std_GLIr','rgb_ind_std_GLIg',
'rgb_ind_std_GLIb','rgb_ind_std_ExG',
'rgb_ind_std_ExR','rgb_ind_std_ExB',
'rgb_ind_std_ExGveg','rgb_ind_std_NegExR',
'rgb_ind_std_ExRveg','rgb_ind_std_ExBveg',
'rgb_ind_std_TGI','rgb_ind_std_mGRVI',
'rgb_ind_std_RGBVI','rgb_ind_std_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_std
else:
featStack = np.concatenate((featStack,self.rgb_ind_std))
if hasattr(self,'rgb_ind_median'):
featList.extend(['rgb_ind_median_GRVI','rgb_ind_median_VARI',
'rgb_ind_median_GLIr','rgb_ind_median_GLIg',
'rgb_ind_median_GLIb','rgb_ind_median_ExG',
'rgb_ind_median_ExR','rgb_ind_median_ExB',
'rgb_ind_median_ExGveg','rgb_ind_median_NegExR',
'rgb_ind_median_ExRveg','rgb_ind_median_ExBveg',
'rgb_ind_median_TGI','rgb_ind_median_mGRVI',
'rgb_ind_median_RGBVI','rgb_ind_median_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_median
else:
featStack = np.concatenate((featStack,self.rgb_ind_median))
if hasattr(self,'rgb_ind_cov'):
featList.extend(['rgb_ind_cov_GRVI','rgb_ind_cov_VARI',
'rgb_ind_cov_GLIr','rgb_ind_cov_GLIg',
'rgb_ind_cov_GLIb','rgb_ind_cov_ExG',
'rgb_ind_cov_ExR','rgb_ind_cov_ExB',
'rgb_ind_cov_ExGveg','rgb_ind_cov_NegExR',
'rgb_ind_cov_ExRveg','rgb_ind_cov_ExBveg',
'rgb_ind_cov_TGI','rgb_ind_cov_mGRVI',
'rgb_ind_cov_RGBVI','rgb_ind_cov_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_cov
else:
featStack = np.concatenate((featStack,self.rgb_ind_cov))
if hasattr(self,'rgb_ind_skew'):
featList.extend(['rgb_ind_skew_GRVI','rgb_ind_skew_VARI',
'rgb_ind_skew_GLIr','rgb_ind_skew_GLIg',
'rgb_ind_skew_GLIb','rgb_ind_skew_ExG',
'rgb_ind_skew_ExR','rgb_ind_skew_ExB',
'rgb_ind_skew_ExGveg','rgb_ind_skew_NegExR',
'rgb_ind_skew_ExRveg','rgb_ind_skew_ExBveg',
'rgb_ind_skew_TGI','rgb_ind_skew_mGRVI',
'rgb_ind_skew_RGBVI','rgb_ind_skew_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_skew
else:
featStack = np.concatenate((featStack,self.rgb_ind_skew))
if hasattr(self,'rgb_ind_kurt'):
featList.extend(['rgb_ind_kurt_GRVI','rgb_ind_kurt_VARI',
'rgb_ind_kurt_GLIr','rgb_ind_kurt_GLIg',
'rgb_ind_kurt_GLIb','rgb_ind_kurt_ExG',
'rgb_ind_kurt_ExR','rgb_ind_kurt_ExB',
'rgb_ind_kurt_ExGveg','rgb_ind_kurt_NegExR',
'rgb_ind_kurt_ExRveg','rgb_ind_kurt_ExBveg',
'rgb_ind_kurt_TGI','rgb_ind_kurt_mGRVI',
'rgb_ind_kurt_RGBVI','rgb_ind_kurt_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_kurt
else:
featStack = np.concatenate((featStack,self.rgb_ind_kurt))
if hasattr(self,'rgb_ind_sum'):
featList.extend(['rgb_ind_sum_GRVI','rgb_ind_sum_VARI',
'rgb_ind_sum_GLIr','rgb_ind_sum_GLIg',
'rgb_ind_sum_GLIb','rgb_ind_sum_ExG',
'rgb_ind_sum_ExR','rgb_ind_sum_ExB',
'rgb_ind_sum_ExGveg','rgb_ind_sum_NegExR',
'rgb_ind_sum_ExRveg','rgb_ind_sum_ExBveg',
'rgb_ind_sum_TGI','rgb_ind_sum_mGRVI',
'rgb_ind_sum_RGBVI','rgb_ind_sum_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([False]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_sum
else:
featStack = np.concatenate((featStack,self.rgb_ind_sum))
if hasattr(self,'rgb_ind_rng'):
featList.extend(['rgb_ind_rng_GRVI','rgb_ind_rng_VARI',
'rgb_ind_rng_GLIr','rgb_ind_rng_GLIg',
'rgb_ind_rng_GLIb','rgb_ind_rng_ExG',
'rgb_ind_rng_ExR','rgb_ind_rng_ExB',
'rgb_ind_rng_ExGveg','rgb_ind_rng_NegExR',
'rgb_ind_rng_ExRveg','rgb_ind_rng_ExBveg',
'rgb_ind_rng_TGI','rgb_ind_rng_mGRVI',
'rgb_ind_rng_RGBVI','rgb_ind_rng_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_rng
else:
featStack = np.concatenate((featStack,self.rgb_ind_rng))
if hasattr(self,'rgb_ind_rngsig'):
featList.extend(['rgb_ind_rngsig_GRVI','rgb_ind_rngsig_VARI',
'rgb_ind_rngsig_GLIr','rgb_ind_rngsig_GLIg',
'rgb_ind_rngsig_GLIb','rgb_ind_rngsig_ExG',
'rgb_ind_rngsig_ExR','rgb_ind_rngsig_ExB',
'rgb_ind_rngsig_ExGveg','rgb_ind_rngsig_NegExR',
'rgb_ind_rngsig_ExRveg','rgb_ind_rngsig_ExBveg',
'rgb_ind_rngsig_TGI','rgb_ind_rngsig_mGRVI',
'rgb_ind_rngsig_RGBVI','rgb_ind_rngsig_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_rngsig
else:
featStack = np.concatenate((featStack,self.rgb_ind_rngsig))
if hasattr(self,'rgb_ind_rngmean'):
featList.extend(['rgb_ind_rngmean_GRVI','rgb_ind_rngmean_VARI',
'rgb_ind_rngmean_GLIr','rgb_ind_rngmean_GLIg',
'rgb_ind_rngmean_GLIb','rgb_ind_rngmean_ExG',
'rgb_ind_rngmean_ExR','rgb_ind_rngmean_ExB',
'rgb_ind_rngmean_ExGveg','rgb_ind_rngmean_NegExR',
'rgb_ind_rngmean_ExRveg','rgb_ind_rngmean_ExBveg',
'rgb_ind_rngmean_TGI','rgb_ind_rngmean_mGRVI',
'rgb_ind_rngmean_RGBVI','rgb_ind_rngmean_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_rngmean
else:
featStack = np.concatenate((featStack,self.rgb_ind_rngmean))
if hasattr(self,'rgb_ind_mode'):
featList.extend(['rgb_ind_mode_GRVI','rgb_ind_mode_VARI',
'rgb_ind_mode_GLIr','rgb_ind_mode_GLIg',
'rgb_ind_mode_GLIb','rgb_ind_mode_ExG',
'rgb_ind_mode_ExR','rgb_ind_mode_ExB',
'rgb_ind_mode_ExGveg','rgb_ind_mode_NegExR',
'rgb_ind_mode_ExRveg','rgb_ind_mode_ExBveg',
'rgb_ind_mode_TGI','rgb_ind_mode_mGRVI',
'rgb_ind_mode_RGBVI','rgb_ind_mode_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_mode
else:
featStack = np.concatenate((featStack,self.rgb_ind_mode))
if hasattr(self,'rgb_ind_deciles'):
featList.extend(['rgb_ind_decile_GRVI_1','rgb_ind_decile_GRVI_2',
'rgb_ind_decile_GRVI_3','rgb_ind_decile_GRVI_4',
'rgb_ind_decile_GRVI_5','rgb_ind_decile_GRVI_6',
'rgb_ind_decile_GRVI_7','rgb_ind_decile_GRVI_8',
'rgb_ind_decile_GRVI_9','rgb_ind_decile_VARI_1',
'rgb_ind_decile_VARI_2','rgb_ind_decile_VARI_3',
'rgb_ind_decile_VARI_4','rgb_ind_decile_VARI_5',
'rgb_ind_decile_VARI_6','rgb_ind_decile_VARI_7',
'rgb_ind_decile_VARI_8','rgb_ind_decile_VARI_9',
'rgb_ind_decile_GLIr_1','rgb_ind_decile_GLIr_2',
'rgb_ind_decile_GLIr_3','rgb_ind_decile_GLIr_4',
'rgb_ind_decile_GLIr_5','rgb_ind_decile_GLIr_6',
'rgb_ind_decile_GLIr_7','rgb_ind_decile_GLIr_8',
'rgb_ind_decile_GLIr_9','rgb_ind_decile_GLIg_1',
'rgb_ind_decile_GLIg_2','rgb_ind_decile_GLIg_3',
'rgb_ind_decile_GLIg_4','rgb_ind_decile_GLIg_5',
'rgb_ind_decile_GLIg_6','rgb_ind_decile_GLIg_7',
'rgb_ind_decile_GLIg_8','rgb_ind_decile_GLIg_9',
'rgb_ind_decile_GLIb_1','rgb_ind_decile_GLIb_2',
'rgb_ind_decile_GLIb_3','rgb_ind_decile_GLIb_4',
'rgb_ind_decile_GLIb_5','rgb_ind_decile_GLIb_6',
'rgb_ind_decile_GLIb_7','rgb_ind_decile_GLIb_8',
'rgb_ind_decile_GLIb_9','rgb_ind_decile_ExG_1',
'rgb_ind_decile_ExG_2','rgb_ind_decile_ExG_3',
'rgb_ind_decile_ExG_4','rgb_ind_decile_ExG_5',
'rgb_ind_decile_ExG_6','rgb_ind_decile_ExG_7',
'rgb_ind_decile_ExG_8','rgb_ind_decile_ExG_9',
'rgb_ind_decile_ExR_1','rgb_ind_decile_ExR_2',
'rgb_ind_decile_ExR_3','rgb_ind_decile_ExR_4',
'rgb_ind_decile_ExR_5','rgb_ind_decile_ExR_6',
'rgb_ind_decile_ExR_7','rgb_ind_decile_ExR_8',
'rgb_ind_decile_ExR_9','rgb_ind_decile_ExB_1',
'rgb_ind_decile_ExB_2','rgb_ind_decile_ExB_3',
'rgb_ind_decile_ExB_4','rgb_ind_decile_ExB_5',
'rgb_ind_decile_ExB_6','rgb_ind_decile_ExB_7',
'rgb_ind_decile_ExB_8','rgb_ind_decile_ExB_9',
'rgb_ind_decile_ExGveg_1','rgb_ind_decile_ExGveg_2',
'rgb_ind_decile_ExGveg_3','rgb_ind_decile_ExGveg_4',
'rgb_ind_decile_ExGveg_5','rgb_ind_decile_ExGveg_6',
'rgb_ind_decile_ExGveg_7','rgb_ind_decile_ExGveg_8',
'rgb_ind_decile_ExGveg_9','rgb_ind_decile_NegExR_1',
'rgb_ind_decile_NegExR_2','rgb_ind_decile_NegExR_3',
'rgb_ind_decile_NegExR_4','rgb_ind_decile_NegExR_5',
'rgb_ind_decile_NegExR_6','rgb_ind_decile_NegExR_7',
'rgb_ind_decile_NegExR_8','rgb_ind_decile_NegExR_9',
'rgb_ind_decile_ExRveg_1','rgb_ind_decile_ExRveg_2',
'rgb_ind_decile_ExRveg_3','rgb_ind_decile_ExRveg_4',
'rgb_ind_decile_ExRveg_5','rgb_ind_decile_ExRveg_6',
'rgb_ind_decile_ExRveg_7','rgb_ind_decile_ExRveg_8',
'rgb_ind_decile_ExRveg_9','rgb_ind_decile_ExBveg_1',
'rgb_ind_decile_ExBveg_2','rgb_ind_decile_ExBveg_3',
'rgb_ind_decile_ExBveg_4','rgb_ind_decile_ExBveg_5',
'rgb_ind_decile_ExBveg_6','rgb_ind_decile_ExBveg_7',
'rgb_ind_decile_ExBveg_8','rgb_ind_decile_ExBveg_9',
'rgb_ind_decile_TGI_1','rgb_ind_decile_TGI_2',
'rgb_ind_decile_TGI_3','rgb_ind_decile_TGI_4',
'rgb_ind_decile_TGI_5','rgb_ind_decile_TGI_6',
'rgb_ind_decile_TGI_7','rgb_ind_decile_TGI_8',
'rgb_ind_decile_TGI_9','rgb_ind_decile_mGRVI_1',
'rgb_ind_decile_mGRVI_2','rgb_ind_decile_mGRVI_3',
'rgb_ind_decile_mGRVI_4','rgb_ind_decile_mGRVI_5',
'rgb_ind_decile_mGRVI_6','rgb_ind_decile_mGRVI_7',
'rgb_ind_decile_mGRVI_8','rgb_ind_decile_mGRVI_9',
'rgb_ind_decile_RGBVI_1','rgb_ind_decile_RGBVI_2',
'rgb_ind_decile_RGBVI_3','rgb_ind_decile_RGBVI_4',
'rgb_ind_decile_RGBVI_5','rgb_ind_decile_RGBVI_6',
'rgb_ind_decile_RGBVI_7','rgb_ind_decile_RGBVI_8',
'rgb_ind_decile_RGBVI_9','rgb_ind_decile_IKAW_1',
'rgb_ind_decile_IKAW_2','rgb_ind_decile_IKAW_3',
'rgb_ind_decile_IKAW_4','rgb_ind_decile_IKAW_5',
'rgb_ind_decile_IKAW_6','rgb_ind_decile_IKAW_7',
'rgb_ind_decile_IKAW_8','rgb_ind_decile_IKAW_9'])
featClass.extend(['rgb_ind']*144)
featSizeInvar.extend([True]*144)
featHeightInvar.extend([True]*144)
featScale.extend([0]*144)
if featStack.size==0:
featStack = self.rgb_ind_deciles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.rgb_ind_deciles.flatten('F')))
if hasattr(self,'rgb_ind_quartiles'):
featList.extend(['rgb_ind_quartile_GRVI_1','rgb_ind_quartile_GRVI_3',
'rgb_ind_quartile_VARI_1','rgb_ind_quartile_VARI_3',
'rgb_ind_quartile_GLIr_1','rgb_ind_quartile_GLIr_3',
'rgb_ind_quartile_GLIg_1','rgb_ind_quartile_GLIg_3',
'rgb_ind_quartile_GLIb_1','rgb_ind_quartile_GLIb_3',
'rgb_ind_quartile_ExG_1','rgb_ind_quartile_ExG_3',
'rgb_ind_quartile_ExR_1','rgb_ind_quartile_ExR_3',
'rgb_ind_quartile_ExB_1','rgb_ind_quartile_ExB_3',
'rgb_ind_quartile_ExGveg_1','rgb_ind_quartile_ExGveg_3',
'rgb_ind_quartile_NegExR_1','rgb_ind_quartile_NegExR_3',
'rgb_ind_quartile_ExRveg_1','rgb_ind_quartile_ExRveg_3',
'rgb_ind_quartile_ExBveg_1','rgb_ind_quartile_ExBveg_3',
'rgb_ind_quartile_TGI_1','rgb_ind_quartile_TGI_3',
'rgb_ind_quartile_mGRVI_1','rgb_ind_quartile_mGRVI_3',
'rgb_ind_quartile_RGBVI_1','rgb_ind_quartile_RGBVI_3',
'rgb_ind_quartile_IKAW_1','rgb_ind_quartile_IKAW_3'])
featClass.extend(['rgb_ind']*32)
featSizeInvar.extend([True]*32)
featHeightInvar.extend([True]*32)
featScale.extend([0]*32)
if featStack.size==0:
featStack = self.rgb_ind_quartiles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.rgb_ind_quartiles.flatten('F')))
if hasattr(self,'rgb_ind_iqr'):
featList.extend(['rgb_ind_iqr_GRVI','rgb_ind_iqr_VARI',
'rgb_ind_iqr_GLIr','rgb_ind_iqr_GLIg',
'rgb_ind_iqr_GLIb','rgb_ind_iqr_ExG',
'rgb_ind_iqr_ExR','rgb_ind_iqr_ExB',
'rgb_ind_iqr_ExGveg','rgb_ind_iqr_NegExR',
'rgb_ind_iqr_ExRveg','rgb_ind_iqr_ExBveg',
'rgb_ind_iqr_TGI','rgb_ind_iqr_mGRVI',
'rgb_ind_iqr_RGBVI','rgb_ind_iqr_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_iqr
else:
featStack = np.concatenate((featStack,self.rgb_ind_iqr))
if hasattr(self,'rgb_ind_iqrsig'):
featList.extend(['rgb_ind_iqrsig_GRVI','rgb_ind_iqrsig_VARI',
'rgb_ind_iqrsig_GLIr','rgb_ind_iqrsig_GLIg',
'rgb_ind_iqrsig_GLIb','rgb_ind_iqrsig_ExG',
'rgb_ind_iqrsig_ExR','rgb_ind_iqrsig_ExB',
'rgb_ind_iqrsig_ExGveg','rgb_ind_iqrsig_NegExR',
'rgb_ind_iqrsig_ExRveg','rgb_ind_iqrsig_ExBveg',
'rgb_ind_iqrsig_TGI','rgb_ind_iqrsig_mGRVI',
'rgb_ind_iqrsig_RGBVI','rgb_ind_iqrsig_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_iqrsig
else:
featStack = np.concatenate((featStack,self.rgb_ind_iqrsig))
if hasattr(self,'rgb_ind_iqrmean'):
featList.extend(['rgb_ind_iqrmean_GRVI','rgb_ind_iqrmean_VARI',
'rgb_ind_iqrmean_GLIr','rgb_ind_iqrmean_GLIg',
'rgb_ind_iqrmean_GLIb','rgb_ind_iqrmean_ExG',
'rgb_ind_iqrmean_ExR','rgb_ind_iqrmean_ExB',
'rgb_ind_iqrmean_ExGveg','rgb_ind_iqrmean_NegExR',
'rgb_ind_iqrmean_ExRveg','rgb_ind_iqrmean_ExBveg',
'rgb_ind_iqrmean_TGI','rgb_ind_iqrmean_mGRVI',
'rgb_ind_iqrmean_RGBVI','rgb_ind_iqrmean_IKAW'])
featClass.extend(['rgb_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.rgb_ind_iqrmean
else:
featStack = np.concatenate((featStack,self.rgb_ind_iqrmean))
if hasattr(self,'glcm_rgb_vals'):
for rgb_glcm_d in self.glcm_rgb_dist:
glcm_list = ['glcm_rgb_asm_' + str(rgb_glcm_d),
'glcm_rgb_con_' + str(rgb_glcm_d),
'glcm_rgb_cor_' + str(rgb_glcm_d),
'glcm_rgb_var_' + str(rgb_glcm_d),
'glcm_rgb_idm_' + str(rgb_glcm_d),
'glcm_rgb_sumav_' + str(rgb_glcm_d),
'glcm_rgb_sumvar_' + str(rgb_glcm_d),
'glcm_rgb_sument_' + str(rgb_glcm_d),
'glcm_rgb_ent_' + str(rgb_glcm_d),
'glcm_rgb_difvar_' + str(rgb_glcm_d),
'glcm_rgb_difent_' + str(rgb_glcm_d),
'glcm_rgb_infcor1_' + str(rgb_glcm_d),
'glcm_rgb_infcor2_' + str(rgb_glcm_d),
'glcm_rgb_asm_rng_' + str(rgb_glcm_d),
'glcm_rgb_con_rng_' + str(rgb_glcm_d),
'glcm_rgb_cor_rng_' + str(rgb_glcm_d),
'glcm_rgb_var_rng_' + str(rgb_glcm_d),
'glcm_rgb_idm_rng_' + str(rgb_glcm_d),
'glcm_rgb_sumav_rng_' + str(rgb_glcm_d),
'glcm_rgb_sumvar_rng_' + str(rgb_glcm_d),
'glcm_rgb_sument_rng_' + str(rgb_glcm_d),
'glcm_rgb_ent_rng_' + str(rgb_glcm_d),
'glcm_rgb_difvar_rng_' + str(rgb_glcm_d),
'glcm_rgb_difent_rng_' + str(rgb_glcm_d),
'glcm_rgb_infcor1_rng_' + str(rgb_glcm_d),
'glcm_rgb_infcor2_rng_' + str(rgb_glcm_d)]
featList.extend(glcm_list)
featClass.extend(['rgb_glcm']*26)
featSizeInvar.extend([True]*26)
featHeightInvar.extend([True]*26)
featScale.extend([rgb_glcm_d]*26)
if featStack.size==0:
featStack = self.glcm_rgb_vals
else:
featStack = np.concatenate((featStack,self.glcm_rgb_vals))
if hasattr(self,'acor_rgb_vals'):
for rgb_acor_d in self.acor_rgb_dist:
acor_list = ['acor_rgb_mean_' + str(rgb_acor_d),
'acor_rgb_rng_' + str(rgb_acor_d)]
featList.extend(acor_list)
featClass.extend(['rgb_acor']*2)
featSizeInvar.extend([True]*2)
featHeightInvar.extend([True]*2)
featScale.extend([rgb_acor_d]*2)
if featStack.size==0:
featStack = self.acor_rgb_vals
else:
featStack = np.concatenate((featStack,self.acor_rgb_vals))
if hasattr(self,'lbp_rgb_vals'):
for rgb_lbp_d in self.lbp_rgb_dist:
for ft_i in range(2+8*rgb_lbp_d):
featList.extend(
['lbp_rgb_d_' + str(rgb_lbp_d) + '_feat_' + str(ft_i)]
)
featClass.extend(['rgb_lbp'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([rgb_lbp_d])
if featStack.size==0:
featStack = self.lbp_rgb_vals
else:
featStack = np.concatenate((featStack,self.lbp_rgb_vals))
if hasattr(self,'laws_rgb_feats'):
laws_list = []
filtbank = ['L5','E5','S5','R5','W5']
for stat in ['mean','std']:
for i in range(5):
for j in range(5):
if j < i or (i==0 and j ==0):
continue
else:
featList.append('laws_' + filtbank[i] + filtbank[j] +'_RGB_' + stat)
featClass.extend(['rgb_laws'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.laws_rgb_feats.flatten('F')
else:
featStack = np.concatenate((featStack,self.laws_rgb_feats.flatten('F')))
if hasattr(self,'ms_band_max'):
featList.extend(['ms_band_max_G','ms_band_max_R','ms_band_max_RE','ms_band_max_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_max
else:
featStack = np.concatenate((featStack,self.ms_band_max))
if hasattr(self,'ms_band_min'):
featList.extend(['ms_band_min_G','ms_band_min_R','ms_band_min_RE','ms_band_min_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_min
else:
featStack = np.concatenate((featStack,self.ms_band_min))
if hasattr(self,'ms_band_mean'):
featList.extend(['ms_band_mean_G','ms_band_mean_R','ms_band_mean_RE','ms_band_mean_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_mean
else:
featStack = np.concatenate((featStack,self.ms_band_mean))
if hasattr(self,'ms_band_std'):
featList.extend(['ms_band_std_G','ms_band_std_R','ms_band_std_RE','ms_band_std_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_std
else:
featStack = np.concatenate((featStack,self.ms_band_std))
if hasattr(self,'ms_band_median'):
featList.extend(['ms_band_median_G','ms_band_median_R','ms_band_median_RE','ms_band_median_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_median
else:
featStack = np.concatenate((featStack,self.ms_band_median))
if hasattr(self,'ms_band_cov'):
featList.extend(['ms_band_cov_G','ms_band_cov_R','ms_band_cov_RE','ms_band_cov_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_cov
else:
featStack = np.concatenate((featStack,self.ms_band_cov))
if hasattr(self,'ms_band_skew'):
featList.extend(['ms_band_skew_G','ms_band_skew_R','ms_band_skew_RE','ms_band_skew_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_skew
else:
featStack = np.concatenate((featStack,self.ms_band_skew))
if hasattr(self,'ms_band_kurt'):
featList.extend(['ms_band_kurt_G','ms_band_kurt_R','ms_band_kurt_RE','ms_band_kurt_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_kurt
else:
featStack = np.concatenate((featStack,self.ms_band_kurt))
if hasattr(self,'ms_band_sum'):
featList.extend(['ms_band_sum_G','ms_band_sum_R','ms_band_sum_RE','ms_band_sum_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([False]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_sum
else:
featStack = np.concatenate((featStack,self.ms_band_sum))
if hasattr(self,'ms_band_rng'):
featList.extend(['ms_band_rng_G','ms_band_rng_R','ms_band_rng_RE','ms_band_rng_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_rng
else:
featStack = np.concatenate((featStack,self.ms_band_rng))
if hasattr(self,'ms_band_rngsig'):
featList.extend(['ms_band_rngsig_G','ms_band_rngsig_R','ms_band_rngsig_RE','ms_band_rngsig_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_rngsig
else:
featStack = np.concatenate((featStack,self.ms_band_rngsig))
if hasattr(self,'ms_band_rngmean'):
featList.extend(['ms_band_rngmean_G','ms_band_rngmean_R','ms_band_rngmean_RE','ms_band_rngmean_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_rngmean
else:
featStack = np.concatenate((featStack,self.ms_band_rngmean))
if hasattr(self,'ms_band_mode'):
featList.extend(['ms_band_mode_G','ms_band_mode_R','ms_band_mode_RE','ms_band_mode_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_mode
else:
featStack = np.concatenate((featStack,self.ms_band_mode))
if hasattr(self,'ms_band_deciles'):
featList.extend(['ms_band_decile_G_1','ms_band_decile_G_2',
'ms_band_decile_G_3','ms_band_decile_G_4',
'ms_band_decile_G_5','ms_band_decile_G_6',
'ms_band_decile_G_7','ms_band_decile_G_8',
'ms_band_decile_G_9','ms_band_decile_R_1',
'ms_band_decile_R_2','ms_band_decile_R_3',
'ms_band_decile_R_4','ms_band_decile_R_5',
'ms_band_decile_R_6','ms_band_decile_R_7',
'ms_band_decile_R_8','ms_band_decile_R_9',
'ms_band_decile_RE_1','ms_band_decile_RE_2',
'ms_band_decile_RE_3','ms_band_decile_RE_4',
'ms_band_decile_RE_5','ms_band_decile_RE_6',
'ms_band_decile_RE_7','ms_band_decile_RE_8',
'ms_band_decile_RE_9','ms_band_decile_NIR_1',
'ms_band_decile_NIR_2','ms_band_decile_NIR_3',
'ms_band_decile_NIR_4','ms_band_decile_NIR_5',
'ms_band_decile_NIR_6','ms_band_decile_NIR_7',
'ms_band_decile_NIR_8','ms_band_decile_NIR_9'])
featClass.extend(['ms_band']*36)
featSizeInvar.extend([True]*36)
featHeightInvar.extend([True]*36)
featScale.extend([0]*36)
if featStack.size==0:
featStack = self.ms_band_deciles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.ms_band_deciles.flatten('F')))
if hasattr(self,'ms_band_quartiles'):
featList.extend(['ms_band_quartile_G_1','ms_band_quartile_G_3',
'ms_band_quartile_R_1','ms_band_quartile_R_3',
'ms_band_quartile_RE_1','ms_band_quartile_RE_3',
'ms_band_quartile_NIR_1','ms_band_quartile_NIR_3'])
featClass.extend(['ms_band']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_band_quartiles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.ms_band_quartiles.flatten('F')))
if hasattr(self,'ms_band_iqr'):
featList.extend(['ms_band_iqr_G','ms_band_iqr_R','ms_band_iqr_RE','ms_band_iqr_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_iqr
else:
featStack = np.concatenate((featStack,self.ms_band_iqr))
if hasattr(self,'ms_band_iqrsig'):
featList.extend(['ms_band_iqrsig_G','ms_band_iqrsig_R','ms_band_iqrsig_RE','ms_band_iqrsig_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_iqrsig
else:
featStack = np.concatenate((featStack,self.ms_band_iqrsig))
if hasattr(self,'ms_band_iqrmean'):
featList.extend(['ms_band_iqrmean_G','ms_band_iqrmean_R','ms_band_iqrmean_RE','ms_band_iqrmean_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_iqrmean
else:
featStack = np.concatenate((featStack,self.ms_band_iqrmean))
if hasattr(self,'ms_band_ratio'):
featList.extend(['ms_band_ratio_G','ms_band_ratio_R','ms_band_ratio_RE','ms_band_ratio_NIR'])
featClass.extend(['ms_band']*4)
featSizeInvar.extend([True]*4)
featHeightInvar.extend([True]*4)
featScale.extend([0]*4)
if featStack.size==0:
featStack = self.ms_band_ratio
else:
featStack = np.concatenate((featStack,self.ms_band_ratio))
if hasattr(self,'ms_ind_max'):
featList.extend(['ms_ind_max_NDVI','ms_ind_max_NDVIg',
'ms_ind_max_NDVIre','ms_ind_max_CIG',
'ms_ind_max_CVI','ms_ind_max_GRVI',
'ms_ind_max_mGRVI','ms_ind_max_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_max
else:
featStack = np.concatenate((featStack,self.ms_ind_max))
if hasattr(self,'ms_ind_min'):
featList.extend(['ms_ind_min_NDVI','ms_ind_min_NDVIg',
'ms_ind_min_NDVIre','ms_ind_min_CIG',
'ms_ind_min_CVI','ms_ind_min_GRVI',
'ms_ind_min_mGRVI','ms_ind_min_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_min
else:
featStack = np.concatenate((featStack,self.ms_ind_min))
if hasattr(self,'ms_ind_mean'):
featList.extend(['ms_ind_mean_NDVI','ms_ind_mean_NDVIg',
'ms_ind_mean_NDVIre','ms_ind_mean_CIG',
'ms_ind_mean_CVI','ms_ind_mean_GRVI',
'ms_ind_mean_mGRVI','ms_ind_mean_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_mean
else:
featStack = np.concatenate((featStack,self.ms_ind_mean))
if hasattr(self,'ms_ind_std'):
featList.extend(['ms_ind_std_NDVI','ms_ind_std_NDVIg',
'ms_ind_std_NDVIre','ms_ind_std_CIG',
'ms_ind_std_CVI','ms_ind_std_GRVI',
'ms_ind_std_mGRVI','ms_ind_std_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_std
else:
featStack = np.concatenate((featStack,self.ms_ind_std))
if hasattr(self,'ms_ind_median'):
featList.extend(['ms_ind_median_NDVI','ms_ind_median_NDVIg',
'ms_ind_median_NDVIre','ms_ind_median_CIG',
'ms_ind_median_CVI','ms_ind_median_GRVI',
'ms_ind_median_mGRVI','ms_ind_median_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_median
else:
featStack = np.concatenate((featStack,self.ms_ind_median))
if hasattr(self,'ms_ind_cov'):
featList.extend(['ms_ind_cov_NDVI','ms_ind_cov_NDVIg',
'ms_ind_cov_NDVIre','ms_ind_cov_CIG',
'ms_ind_cov_CVI','ms_ind_cov_GRVI',
'ms_ind_cov_mGRVI','ms_ind_cov_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_cov
else:
featStack = np.concatenate((featStack,self.ms_ind_cov))
if hasattr(self,'ms_ind_skew'):
featList.extend(['ms_ind_skew_NDVI','ms_ind_skew_NDVIg',
'ms_ind_skew_NDVIre','ms_ind_skew_CIG',
'ms_ind_skew_CVI','ms_ind_skew_GRVI',
'ms_ind_skew_mGRVI','ms_ind_skew_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_skew
else:
featStack = np.concatenate((featStack,self.ms_ind_skew))
if hasattr(self,'ms_ind_kurt'):
featList.extend(['ms_ind_kurt_NDVI','ms_ind_kurt_NDVIg',
'ms_ind_kurt_NDVIre','ms_ind_kurt_CIG',
'ms_ind_kurt_CVI','ms_ind_kurt_GRVI',
'ms_ind_kurt_mGRVI','ms_ind_kurt_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_kurt
else:
featStack = np.concatenate((featStack,self.ms_ind_kurt))
if hasattr(self,'ms_ind_sum'):
featList.extend(['ms_ind_sum_NDVI','ms_ind_sum_NDVIg',
'ms_ind_sum_NDVIre','ms_ind_sum_CIG',
'ms_ind_sum_CVI','ms_ind_sum_GRVI',
'ms_ind_sum_mGRVI','ms_ind_sum_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([False]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_sum
else:
featStack = np.concatenate((featStack,self.ms_ind_sum))
if hasattr(self,'ms_ind_rng'):
featList.extend(['ms_ind_rng_NDVI','ms_ind_rng_NDVIg',
'ms_ind_rng_NDVIre','ms_ind_rng_CIG',
'ms_ind_rng_CVI','ms_ind_rng_GRVI',
'ms_ind_rng_mGRVI','ms_ind_rng_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_rng
else:
featStack = np.concatenate((featStack,self.ms_ind_rng))
if hasattr(self,'ms_ind_rngsig'):
featList.extend(['ms_ind_rngsig_NDVI','ms_ind_rngsig_NDVIg',
'ms_ind_rngsig_NDVIre','ms_ind_rngsig_CIG',
'ms_ind_rngsig_CVI','ms_ind_rngsig_GRVI',
'ms_ind_rngsig_mGRVI','ms_ind_rngsig_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_rngsig
else:
featStack = np.concatenate((featStack,self.ms_ind_rngsig))
if hasattr(self,'ms_ind_rngmean'):
            featList.extend(['ms_ind_rngmean_NDVI','ms_ind_rngmean_NDVIg',
                             'ms_ind_rngmean_NDVIre','ms_ind_rngmean_CIG',
                             'ms_ind_rngmean_CVI','ms_ind_rngmean_GRVI',
                             'ms_ind_rngmean_mGRVI','ms_ind_rngmean_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_rngmean
else:
featStack = np.concatenate((featStack,self.ms_ind_rngmean))
if hasattr(self,'ms_ind_mode'):
featList.extend(['ms_ind_mode_NDVI','ms_ind_mode_NDVIg',
'ms_ind_mode_NDVIre','ms_ind_mode_CIG',
'ms_ind_mode_CVI','ms_ind_mode_GRVI',
'ms_ind_mode_mGRVI','ms_ind_mode_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_mode
else:
featStack = np.concatenate((featStack,self.ms_ind_mode))
if hasattr(self,'ms_ind_deciles'):
featList.extend(['ms_ind_decile_NDVI_1','ms_ind_decile_NDVI_2',
'ms_ind_decile_NDVI_3','ms_ind_decile_NDVI_4',
'ms_ind_decile_NDVI_5','ms_ind_decile_NDVI_6',
'ms_ind_decile_NDVI_7','ms_ind_decile_NDVI_8',
'ms_ind_decile_NDVI_9','ms_ind_decile_NDVIg_1',
'ms_ind_decile_NDVIg_2','ms_ind_decile_NDVIg_3',
'ms_ind_decile_NDVIg_4','ms_ind_decile_NDVIg_5',
'ms_ind_decile_NDVIg_6','ms_ind_decile_NDVIg_7',
'ms_ind_decile_NDVIg_8','ms_ind_decile_NDVIg_9',
'ms_ind_decile_NDVIre_1','ms_ind_decile_NDVIre_2',
'ms_ind_decile_NDVIre_3','ms_ind_decile_NDVIre_4',
'ms_ind_decile_NDVIre_5','ms_ind_decile_NDVIre_6',
'ms_ind_decile_NDVIre_7','ms_ind_decile_NDVIre_8',
'ms_ind_decile_NDVIre_9','ms_ind_decile_CIG_1',
'ms_ind_decile_CIG_2','ms_ind_decile_CIG_3',
'ms_ind_decile_CIG_4','ms_ind_decile_CIG_5',
'ms_ind_decile_CIG_6','ms_ind_decile_CIG_7',
'ms_ind_decile_CIG_8','ms_ind_decile_CIG_9',
'ms_ind_decile_CVI_1','ms_ind_decile_CVI_2',
'ms_ind_decile_CVI_3','ms_ind_decile_CVI_4',
'ms_ind_decile_CVI_5','ms_ind_decile_CVI_6',
'ms_ind_decile_CVI_7','ms_ind_decile_CVI_8',
'ms_ind_decile_CVI_9','ms_ind_decile_GRVI_1',
'ms_ind_decile_GRVI_2','ms_ind_decile_GRVI_3',
'ms_ind_decile_GRVI_4','ms_ind_decile_GRVI_5',
'ms_ind_decile_GRVI_6','ms_ind_decile_GRVI_7',
'ms_ind_decile_GRVI_8','ms_ind_decile_GRVI_9',
'ms_ind_decile_mGRVI_1','ms_ind_decile_mGRVI_2',
'ms_ind_decile_mGRVI_3','ms_ind_decile_mGRVI_4',
'ms_ind_decile_mGRVI_5','ms_ind_decile_mGRVI_6',
'ms_ind_decile_mGRVI_7','ms_ind_decile_mGRVI_8',
'ms_ind_decile_mGRVI_9','ms_ind_decile_NegExR_1',
'ms_ind_decile_NegExR_2','ms_ind_decile_NegExR_3',
'ms_ind_decile_NegExR_4','ms_ind_decile_NegExR_5',
'ms_ind_decile_NegExR_6','ms_ind_decile_NegExR_7',
'ms_ind_decile_NegExR_8','ms_ind_decile_NegExR_9'])
featClass.extend(['ms_ind']*72)
featSizeInvar.extend([True]*72)
featHeightInvar.extend([True]*72)
featScale.extend([0]*72)
if featStack.size==0:
featStack = self.ms_ind_deciles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.ms_ind_deciles.flatten('F')))
if hasattr(self,'ms_ind_quartiles'):
featList.extend(['ms_ind_quartile_NDVI_1','ms_ind_quartile_NDVI_3',
'ms_ind_quartile_NDVIg_1','ms_ind_quartile_NDVIg_3',
'ms_ind_quartile_NDVIre_1','ms_ind_quartile_NDVIre_3',
'ms_ind_quartile_CIG_1','ms_ind_quartile_CIG_3',
'ms_ind_quartile_CVI_1','ms_ind_quartile_CVI_3',
'ms_ind_quartile_GRVI_1','ms_ind_quartile_GRVI_3',
'ms_ind_quartile_mGRVI_1','ms_ind_quartile_mGRVI_3',
'ms_ind_quartile_NegExR_1','ms_ind_quartile_NegExR_3'])
featClass.extend(['ms_ind']*16)
featSizeInvar.extend([True]*16)
featHeightInvar.extend([True]*16)
featScale.extend([0]*16)
if featStack.size==0:
featStack = self.ms_ind_quartiles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.ms_ind_quartiles.flatten('F')))
if hasattr(self,'ms_ind_iqr'):
featList.extend(['ms_ind_iqr_NDVI','ms_ind_iqr_NDVIg',
'ms_ind_iqr_NDVIre','ms_ind_iqr_CIG',
'ms_ind_iqr_CVI','ms_ind_iqr_GRVI',
'ms_ind_iqr_mGRVI','ms_ind_iqr_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_iqr
else:
featStack = np.concatenate((featStack,self.ms_ind_iqr))
if hasattr(self,'ms_ind_iqrsig'):
featList.extend(['ms_ind_iqrsig_NDVI','ms_ind_iqrsig_NDVIg',
'ms_ind_iqrsig_NDVIre','ms_ind_iqrsig_CIG',
'ms_ind_iqrsig_CVI','ms_ind_iqrsig_GRVI',
'ms_ind_iqrsig_mGRVI','ms_ind_iqrsig_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_iqrsig
else:
featStack = np.concatenate((featStack,self.ms_ind_iqrsig))
if hasattr(self,'ms_ind_iqrmean'):
featList.extend(['ms_ind_iqrmean_NDVI','ms_ind_iqrmean_NDVIg',
'ms_ind_iqrmean_NDVIre','ms_ind_iqrmean_CIG',
'ms_ind_iqrmean_CVI','ms_ind_iqrmean_GRVI',
'ms_ind_iqrmean_mGRVI','ms_ind_iqrmean_NegExR'])
featClass.extend(['ms_ind']*8)
featSizeInvar.extend([True]*8)
featHeightInvar.extend([True]*8)
featScale.extend([0]*8)
if featStack.size==0:
featStack = self.ms_ind_iqrmean
else:
featStack = np.concatenate((featStack,self.ms_ind_iqrmean))
if hasattr(self,'glcm_ms_vals'):
for ms_glcm_d in self.glcm_ms_dist:
for band in ['G','R','RE','NIR','mean']:
glcm_list = ['glcm_ms_asm_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_con_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_cor_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_var_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_idm_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_sumav_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_sumvar_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_sument_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_ent_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_difvar_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_difent_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_infcor1_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_infcor2_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_asm_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_con_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_cor_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_var_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_idm_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_sumav_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_sumvar_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_sument_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_ent_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_difvar_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_difent_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_infcor1_rng_' + str(ms_glcm_d) + '_' + band,
'glcm_ms_infcor2_rng_' + str(ms_glcm_d) + '_' + band]
featList.extend(glcm_list)
featClass.extend(['ms_glcm']*26)
featSizeInvar.extend([True]*26)
featHeightInvar.extend([True]*26)
featScale.extend([ms_glcm_d]*26)
if featStack.size==0:
featStack = self.glcm_ms_vals
else:
featStack = np.concatenate((featStack,self.glcm_ms_vals))
if hasattr(self,'acor_ms_vals'):
for acor_ms_d in self.acor_ms_dist:
for band in ['G','R','RE','NIR','mean']:
acor_list = ['acor_ms_mean_' + str(acor_ms_d) + '_' + band,
'acor_ms_rng_' + str(acor_ms_d) + '_' + band]
featList.extend(acor_list)
featClass.extend(['ms_acor']*2)
featSizeInvar.extend([True]*2)
featHeightInvar.extend([True]*2)
featScale.extend([acor_ms_d]*2)
if featStack.size==0:
featStack = self.acor_ms_vals
else:
featStack = np.concatenate((featStack,self.acor_ms_vals))
if hasattr(self,'lbp_ms_vals'):
for ms_lbp_d in self.lbp_ms_dist:
for band in ['G','R','RE','NIR','mean']:
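                    # 2 + 8*d feature names per band here match the bin count of a
                    # uniform LBP histogram computed with P = 8*d sampling points
                    # (an inference from the count, not stated in the original source)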
for ft_i in range(2+8*ms_lbp_d):
featList.extend(
['lbp_ms_d_' + str(ms_lbp_d) + '_' + band + '_feat_' + str(ft_i)]
)
featClass.extend(['ms_lbp'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([ms_lbp_d])
if featStack.size==0:
featStack = self.lbp_ms_vals
else:
featStack = np.concatenate((featStack,self.lbp_ms_vals))
if hasattr(self,'laws_ms_feats'):
laws_list = []
filtbank = ['L5','E5','S5','R5','W5']
for band in ['G','R','RE','NIR','mean']:
for stat in ['mean','std']:
for i in range(5):
for j in range(5):
if j < i or (i==0 and j ==0):
continue
else:
featList.append('laws_' + filtbank[i] + filtbank[j] +'_'+band+'_' + stat)
featClass.extend(['ms_laws'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.laws_ms_feats.flatten('F')
else:
featStack = np.concatenate((featStack,self.laws_ms_feats.flatten('F')))
if hasattr(self,'hsv_max'):
featList.extend(['hsv_max_H','hsv_max_S','hsv_max_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_max
else:
featStack = np.concatenate((featStack,self.hsv_max))
if hasattr(self,'hsv_min'):
featList.extend(['hsv_min_H','hsv_min_S','hsv_min_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_min
else:
featStack = np.concatenate((featStack,self.hsv_min))
if hasattr(self,'hsv_mean'):
featList.extend(['hsv_mean_H','hsv_mean_S','hsv_mean_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_mean
else:
featStack = np.concatenate((featStack,self.hsv_mean))
if hasattr(self,'hsv_std'):
featList.extend(['hsv_std_H','hsv_std_S','hsv_std_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_std
else:
featStack = np.concatenate((featStack,self.hsv_std))
if hasattr(self,'hsv_median'):
featList.extend(['hsv_median_H','hsv_median_S','hsv_median_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_median
else:
featStack = np.concatenate((featStack,self.hsv_median))
if hasattr(self,'hsv_cov'):
featList.extend(['hsv_cov_H','hsv_cov_S','hsv_cov_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_cov
else:
featStack = np.concatenate((featStack,self.hsv_cov))
if hasattr(self,'hsv_skew'):
featList.extend(['hsv_skew_H','hsv_skew_S','hsv_skew_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_skew
else:
featStack = np.concatenate((featStack,self.hsv_skew))
if hasattr(self,'hsv_kurt'):
featList.extend(['hsv_kurt_H','hsv_kurt_S','hsv_kurt_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_kurt
else:
featStack = np.concatenate((featStack,self.hsv_kurt))
if hasattr(self,'hsv_sum'):
featList.extend(['hsv_sum_H','hsv_sum_S','hsv_sum_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([False]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_sum
else:
featStack = np.concatenate((featStack,self.hsv_sum))
if hasattr(self,'hsv_rng'):
featList.extend(['hsv_rng_H','hsv_rng_S','hsv_rng_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
                featStack = self.hsv_rng
else:
featStack = np.concatenate((featStack,self.hsv_rng))
if hasattr(self,'hsv_rngsig'):
featList.extend(['hsv_rngsig_H','hsv_rngsig_S','hsv_rngsig_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_rngsig
else:
featStack = np.concatenate((featStack,self.hsv_rngsig))
if hasattr(self,'hsv_rngmean'):
featList.extend(['hsv_rngmean_H','hsv_rngmean_S','hsv_rngmean_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_rngmean
else:
featStack = np.concatenate((featStack,self.hsv_rngmean))
if hasattr(self,'hsv_mode'):
featList.extend(['hsv_mode_H','hsv_mode_S','hsv_mode_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_mode
else:
featStack = np.concatenate((featStack,self.hsv_mode))
if hasattr(self,'hsv_deciles'):
featList.extend(['hsv_decile_H_1','hsv_decile_H_2',
'hsv_decile_H_3','hsv_decile_H_4',
'hsv_decile_H_5','hsv_decile_H_6',
'hsv_decile_H_7','hsv_decile_H_8',
'hsv_decile_H_9','hsv_decile_S_1',
'hsv_decile_S_2','hsv_decile_S_3',
'hsv_decile_S_4','hsv_decile_S_5',
'hsv_decile_S_6','hsv_decile_S_7',
'hsv_decile_S_8','hsv_decile_S_9',
'hsv_decile_V_1','hsv_decile_V_2',
'hsv_decile_V_3','hsv_decile_V_4',
'hsv_decile_V_5','hsv_decile_V_6',
'hsv_decile_V_7','hsv_decile_V_8',
'hsv_decile_V_9'])
featClass.extend(['rgb_hsv']*27)
featSizeInvar.extend([True]*27)
featHeightInvar.extend([True]*27)
featScale.extend([0]*27)
if featStack.size==0:
                featStack = self.hsv_deciles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.hsv_deciles.flatten('F')))
if hasattr(self,'hsv_quartiles'):
featList.extend(['hsv_quartile_H_1','hsv_quartile_H_3',
'hsv_quartile_S_1','hsv_quartile_S_3',
'hsv_quartile_V_1','hsv_quartile_V_3'])
featClass.extend(['rgb_hsv']*6)
featSizeInvar.extend([True]*6)
featHeightInvar.extend([True]*6)
featScale.extend([0]*6)
if featStack.size==0:
                featStack = self.hsv_quartiles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.hsv_quartiles.flatten('F')))
if hasattr(self,'hsv_iqr'):
featList.extend(['hsv_iqr_H','hsv_iqr_S','hsv_iqr_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_iqr
else:
featStack = np.concatenate((featStack,self.hsv_iqr))
if hasattr(self,'hsv_iqrsig'):
featList.extend(['hsv_iqrsig_H','hsv_iqrsig_S','hsv_iqrsig_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_iqrsig
else:
featStack = np.concatenate((featStack,self.hsv_iqrsig))
if hasattr(self,'hsv_iqrmean'):
featList.extend(['hsv_iqrmean_H','hsv_iqrmean_S','hsv_iqrmean_V'])
featClass.extend(['rgb_hsv']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.hsv_iqrmean
else:
featStack = np.concatenate((featStack,self.hsv_iqrmean))
if hasattr(self,'dsm_raw_max'):
featList.extend(['dsm_raw_max_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([False])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_max
else:
featStack = np.concatenate((featStack,self.dsm_raw_max))
if hasattr(self,'dsm_raw_min'):
featList.extend(['dsm_raw_min_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([False])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_min
else:
featStack = np.concatenate((featStack,self.dsm_raw_min))
if hasattr(self,'dsm_raw_mean'):
featList.extend(['dsm_raw_mean_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([False])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_mean
else:
featStack = np.concatenate((featStack,self.dsm_raw_mean))
if hasattr(self,'dsm_raw_std'):
featList.extend(['dsm_raw_std_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_std
else:
featStack = np.concatenate((featStack,self.dsm_raw_std))
if hasattr(self,'dsm_raw_median'):
featList.extend(['dsm_raw_median_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([False])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_median
else:
featStack = np.concatenate((featStack,self.dsm_raw_median))
if hasattr(self,'dsm_raw_cov'):
featList.extend(['dsm_raw_cov_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([False])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_cov
else:
featStack = np.concatenate((featStack,self.dsm_raw_cov))
if hasattr(self,'dsm_raw_skew'):
featList.extend(['dsm_raw_skew_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_skew
else:
featStack = np.concatenate((featStack,self.dsm_raw_skew))
if hasattr(self,'dsm_raw_kurt'):
featList.extend(['dsm_raw_kurt_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_kurt
else:
featStack = np.concatenate((featStack,self.dsm_raw_kurt))
if hasattr(self,'dsm_raw_sum'):
featList.extend(['dsm_raw_sum_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([False])
featHeightInvar.extend([False])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_sum
else:
featStack = np.concatenate((featStack,self.dsm_raw_sum))
if hasattr(self,'dsm_raw_rng'):
featList.extend(['dsm_raw_rng_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_rng
else:
featStack = np.concatenate((featStack,self.dsm_raw_rng))
if hasattr(self,'dsm_raw_rngsig'):
featList.extend(['dsm_raw_rngsig_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_rngsig
else:
featStack = np.concatenate((featStack,self.dsm_raw_rngsig))
if hasattr(self,'dsm_raw_rngmean'):
featList.extend(['dsm_raw_rngmean_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([False])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_rngmean
else:
featStack = np.concatenate((featStack,self.dsm_raw_rngmean))
if hasattr(self,'dsm_raw_mode'):
featList.extend(['dsm_raw_mode_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([False])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_mode
else:
featStack = np.concatenate((featStack,self.dsm_raw_mode))
if hasattr(self,'dsm_raw_deciles'):
featList.extend(['dsm_raw_decile_H_1','dsm_raw_decile_H_2',
'dsm_raw_decile_H_3','dsm_raw_decile_H_4',
'dsm_raw_decile_H_5','dsm_raw_decile_H_6',
'dsm_raw_decile_H_7','dsm_raw_decile_H_8',
'dsm_raw_decile_H_9'])
featClass.extend(['dsm_raw']*9)
featSizeInvar.extend([True]*9)
featHeightInvar.extend([False]*9)
featScale.extend([0]*9)
if featStack.size==0:
featStack = self.dsm_raw_deciles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.dsm_raw_deciles.flatten('F')))
if hasattr(self,'dsm_raw_quartiles'):
featList.extend(['dsm_raw_quartile_H_1','dsm_raw_quartile_H_3'])
featClass.extend(['dsm_raw']*2)
featSizeInvar.extend([True]*2)
featHeightInvar.extend([False]*2)
featScale.extend([0]*2)
if featStack.size==0:
featStack = self.dsm_raw_quartiles.flatten('F')
else:
featStack = np.concatenate((featStack,
self.dsm_raw_quartiles.flatten('F')))
if hasattr(self,'dsm_raw_iqr'):
featList.extend(['dsm_raw_iqr_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_iqr
else:
featStack = np.concatenate((featStack,self.dsm_raw_iqr))
if hasattr(self,'dsm_raw_iqrsig'):
featList.extend(['dsm_raw_iqrsig_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_iqrsig
else:
featStack = np.concatenate((featStack,self.dsm_raw_iqrsig))
if hasattr(self,'dsm_raw_iqrmean'):
featList.extend(['dsm_raw_iqrmean_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_iqrmean
else:
featStack = np.concatenate((featStack,self.dsm_raw_iqrmean))
if hasattr(self,'dsm_raw_mad'):
featList.extend(['dsm_raw_mad_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_mad
else:
featStack = np.concatenate((featStack,self.dsm_raw_mad))
if hasattr(self,'dsm_raw_maxmed'):
featList.extend(['dsm_raw_maxmed_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_maxmed
else:
featStack = np.concatenate((featStack,self.dsm_raw_maxmed))
if hasattr(self,'dsm_raw_minmed'):
featList.extend(['dsm_raw_minmed_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_minmed
else:
featStack = np.concatenate((featStack,self.dsm_raw_minmed))
if hasattr(self,'dsm_raw_summed'):
featList.extend(['dsm_raw_summed_H'])
featClass.extend(['dsm_raw'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.dsm_raw_summed
else:
featStack = np.concatenate((featStack,self.dsm_raw_summed))
if hasattr(self,'dsm_raw_decilesmed'):
featList.extend(['dsm_raw_decilemed_H_1','dsm_raw_decilemed_H_2',
'dsm_raw_decilemed_H_3','dsm_raw_decilemed_H_4',
'dsm_raw_decilemed_H_5','dsm_raw_decilemed_H_6',
'dsm_raw_decilemed_H_7','dsm_raw_decilemed_H_8',
'dsm_raw_decilemed_H_9'])
featClass.extend(['dsm_raw']*9)
featSizeInvar.extend([True]*9)
featHeightInvar.extend([True]*9)
featScale.extend([0]*9)
if featStack.size==0:
featStack = self.dsm_raw_decilesmed.flatten('F')
else:
featStack = np.concatenate((featStack,
self.dsm_raw_decilesmed.flatten('F')))
if hasattr(self,'dsm_raw_quartilesmed'):
featList.extend(['dsm_raw_quartilemed_H_1','dsm_raw_quartilemed_H_3'])
featClass.extend(['dsm_raw']*2)
featSizeInvar.extend([True]*2)
featHeightInvar.extend([True]*2)
featScale.extend([0]*2)
if featStack.size==0:
featStack = self.dsm_raw_quartilesmed.flatten('F')
else:
featStack = np.concatenate((featStack,
self.dsm_raw_quartilesmed.flatten('F')))
if hasattr(self,'glcm_dsm_vals'):
for dsm_glcm_d in self.glcm_dsm_dist:
glcm_list = ['glcm_dsm_asm_' + str(dsm_glcm_d),
'glcm_dsm_con_' + str(dsm_glcm_d),
'glcm_dsm_cor_' + str(dsm_glcm_d),
'glcm_dsm_var_' + str(dsm_glcm_d),
'glcm_dsm_idm_' + str(dsm_glcm_d),
'glcm_dsm_sumav_' + str(dsm_glcm_d),
'glcm_dsm_sumvar_' + str(dsm_glcm_d),
'glcm_dsm_sument_' + str(dsm_glcm_d),
'glcm_dsm_ent_' + str(dsm_glcm_d),
'glcm_dsm_difvar_' + str(dsm_glcm_d),
'glcm_dsm_difent_' + str(dsm_glcm_d),
'glcm_dsm_infcor1_' + str(dsm_glcm_d),
'glcm_dsm_infcor2_' + str(dsm_glcm_d),
'glcm_dsm_asm_rng_' + str(dsm_glcm_d),
'glcm_dsm_con_rng_' + str(dsm_glcm_d),
'glcm_dsm_cor_rng_' + str(dsm_glcm_d),
'glcm_dsm_var_rng_' + str(dsm_glcm_d),
'glcm_dsm_idm_rng_' + str(dsm_glcm_d),
'glcm_dsm_sumav_rng_' + str(dsm_glcm_d),
'glcm_dsm_sumvar_rng_' + str(dsm_glcm_d),
'glcm_dsm_sument_rng_' + str(dsm_glcm_d),
'glcm_dsm_ent_rng_' + str(dsm_glcm_d),
'glcm_dsm_difvar_rng_' + str(dsm_glcm_d),
'glcm_dsm_difent_rng_' + str(dsm_glcm_d),
'glcm_dsm_infcor1_rng_' + str(dsm_glcm_d),
'glcm_dsm_infcor2_rng_' + str(dsm_glcm_d)]
featList.extend(glcm_list)
featClass.extend(['dsm_glcm']*26)
featSizeInvar.extend([True]*26)
featHeightInvar.extend([True]*26)
featScale.extend([dsm_glcm_d]*26)
if featStack.size==0:
featStack = self.glcm_dsm_vals
else:
featStack = np.concatenate((featStack,self.glcm_dsm_vals))
if hasattr(self,'acor_dsm_vals'):
for dsm_acor_d in self.acor_dsm_dist:
acor_list = ['acor_dsm_mean_' + str(dsm_acor_d),
'acor_dsm_rng_' + str(dsm_acor_d)]
featList.extend(acor_list)
featClass.extend(['dsm_acor']*2)
featSizeInvar.extend([True]*2)
featHeightInvar.extend([True]*2)
featScale.extend([dsm_acor_d]*2)
if featStack.size==0:
featStack = self.acor_dsm_vals
else:
featStack = np.concatenate((featStack,self.acor_dsm_vals))
if hasattr(self,'lbp_dsm_vals'):
for dsm_lbp_d in self.lbp_dsm_dist:
for ft_i in range(2+8*dsm_lbp_d):
featList.extend(
['lbp_dsm_d_' + str(dsm_lbp_d) + '_feat_' + str(ft_i)]
)
featClass.extend(['dsm_lbp'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([dsm_lbp_d])
if featStack.size==0:
featStack = self.lbp_dsm_vals
else:
featStack = np.concatenate((featStack,self.lbp_dsm_vals))
if hasattr(self,'laws_dsm_feats'):
laws_list = []
filtbank = ['L5','E5','S5','R5','W5']
for stat in ['mean','std']:
for i in range(5):
for j in range(5):
if j < i or (i==0 and j ==0):
continue
else:
featList.append('laws_' + filtbank[i] + filtbank[j] +'_DSM_' + stat)
featClass.extend(['dsm_laws'])
featSizeInvar.extend([True])
featHeightInvar.extend([True])
featScale.extend([0])
if featStack.size==0:
featStack = self.laws_dsm_feats.flatten('F')
else:
featStack = np.concatenate((featStack,self.laws_dsm_feats.flatten('F')))
self.featList = featList
self.featStack = featStack
self.featClass = featClass
self.featSizeInvar = featSizeInvar
self.featHeightInvar = featHeightInvar
self.featScale = featScale
def runFeaturePipeline(self,thresh=0.5,glcm_steps=5,acor_steps=5,mode=False,HSV=False):
self.createRGBBandFeats(mode)
self.createDSMRawFeats(mode)
        self.createRGBThreshFeats(thresh,mode)
self.createMSBandFeats(mode)
for i in range(glcm_steps):
self.createRGBGLCMfeats(i+1)
self.createMSGLCMfeats(i+1)
self.createDSMGLCMfeats(i+1)
for i in range(acor_steps):
self.createRGBautoCorFeats(i+1)
self.createMSautoCorFeats(i+1)
self.createDSMautoCorFeats(i+1)
for i in range(3):
self.createRGBLBPFeats(i+1)
self.createMSLBPFeats(i+1)
self.createDSMLBPFeats(i+1)
self.createRGBLawsFeats()
self.createMSLawsFeats()
self.createDSMLawsFeats()
self.createSpecIndices()
self.createRGBIndFeats(mode)
self.createMSIndFeats(mode)
if HSV:
self.createHSVFeats(mode)
self.stackFeats()
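    # --- Hedged usage sketch, not part of the original source ---
    # The class name and constructor arguments below are assumptions for
    # illustration; only the method and attribute names come from the code above.
    #
    #   seg = SegmentFeatures(...)                       # hypothetical constructor
    #   seg.runFeaturePipeline(thresh=0.5, glcm_steps=5, acor_steps=5,
    #                          mode=False, HSV=True)
    #   named = dict(zip(seg.featList, seg.featStack))   # feature name -> value
    #   size_invariant = [n for n, keep in zip(seg.featList, seg.featSizeInvar) if keep]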
``` |
{
"source": "jonwangio/momepy",
"score": 3
} |
#### File: momepy/momepy/utils.py
```python
import math
import geopandas as gpd
import libpysal
import networkx as nx
import numpy as np
from shapely.geometry import Point
__all__ = [
"unique_id",
"gdf_to_nx",
"nx_to_gdf",
"limit_range",
]
def unique_id(objects):
"""
Add an attribute with unique ID to each row of GeoDataFrame.
Parameters
----------
objects : GeoDataFrame
GeoDataFrame containing objects to analyse
Returns
-------
Series
Series containing resulting values.
"""
series = range(len(objects))
return series
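# Minimal usage sketch for unique_id (the variable name is an assumption, not part
# of the original source): the returned sequence is typically assigned back to the
# GeoDataFrame, e.g. buildings["uID"] = unique_id(buildings)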
def _angle(a, b, c):
"""
Measure angle between a-b, b-c. In radians.
Helper for gdf_to_nx.
"""
ba = [aa - bb for aa, bb in zip(a, b)]
bc = [cc - bb for cc, bb in zip(c, b)]
nba = math.sqrt(sum((x ** 2.0 for x in ba)))
ba = [x / nba for x in ba]
nbc = math.sqrt(sum((x ** 2.0 for x in bc)))
bc = [x / nbc for x in bc]
scal = sum((aa * bb for aa, bb in zip(ba, bc)))
angle = math.acos(round(scal, 10))
return angle
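# Worked example (illustration only): _angle((1, 0), (0, 0), (0, 1)) returns pi/2,
# the right angle between the two segments meeting at b = (0, 0).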
def _generate_primal(G, gdf_network, fields, multigraph):
"""
Generate primal graph.
Helper for gdf_to_nx.
"""
G.graph["approach"] = "primal"
key = 0
for row in gdf_network.itertuples():
first = row.geometry.coords[0]
last = row.geometry.coords[-1]
data = [r for r in row][1:]
attributes = dict(zip(fields, data))
if multigraph:
G.add_edge(first, last, key=key, **attributes)
key += 1
else:
G.add_edge(first, last, **attributes)
def _generate_dual(G, gdf_network, fields, angles, multigraph, angle):
"""
Generate dual graph
Helper for gdf_to_nx.
"""
G.graph["approach"] = "dual"
key = 0
sw = libpysal.weights.Queen.from_dataframe(gdf_network, silence_warnings=True)
cent = gdf_network.geometry.centroid
gdf_network["temp_x_coords"] = cent.x
gdf_network["temp_y_coords"] = cent.y
for i, row in enumerate(gdf_network.itertuples()):
centroid = (row.temp_x_coords, row.temp_y_coords)
data = [f for f in row][1:-2]
attributes = dict(zip(fields, data))
G.add_node(centroid, **attributes)
if sw.cardinalities[i] > 0:
for n in sw.neighbors[i]:
start = centroid
end = (
gdf_network["temp_x_coords"].iloc[n],
gdf_network["temp_y_coords"].iloc[n],
)
p0 = row.geometry.coords[0]
p1 = row.geometry.coords[-1]
geom = gdf_network.geometry.iloc[n]
p2 = geom.coords[0]
p3 = geom.coords[-1]
points = [p0, p1, p2, p3]
shared = [x for x in points if points.count(x) > 1]
if shared: # fix for non-planar graph
remaining = [e for e in points if e not in [shared[0]]]
if len(remaining) == 2:
if angles:
angle_value = _angle(remaining[0], shared[0], remaining[1])
if multigraph:
G.add_edge(start, end, key=0, **{angle: angle_value})
key += 1
else:
G.add_edge(start, end, **{angle: angle_value})
else:
if multigraph:
G.add_edge(start, end, key=0)
key += 1
else:
G.add_edge(start, end)
def gdf_to_nx(
gdf_network,
approach="primal",
length="mm_len",
multigraph=True,
directed=False,
angles=True,
angle="angle",
):
"""
Convert LineString GeoDataFrame to networkx.MultiGraph or other Graph as per
specification.
Preserves columns as edge or node attributes (depending on the ``approach``).
Index is not preserved.
See the User Guide page :doc:`../../user_guide/graph/convert` for details.
Parameters
----------
gdf_network : GeoDataFrame
GeoDataFrame containing objects to convert
approach : str, default 'primal'
Allowed options are ``'primal'`` or ``'dual'``. Primal graph represents
endpoints as nodes and LineStrings as edges, dual graph represents
LineStrings as nodes and their topological relation as edges. In such a
case, it can encode an angle between LineStrings as an edge attribute.
length : str, default 'mm_len'
name of attribute of segment length (geographical) which will be saved to graph
multigraph : bool, default True
        create ``MultiGraph`` or ``Graph`` (potentially directed). ``MultiGraph``
allows multiple
edges between any pair of nodes, which is a common case in street networks.
directed : bool, default False
create directed graph (``DiGraph`` or ``MultiDiGraph``). Directionality follows
the order of LineString coordinates.
angles : bool, default True
capture angles between LineStrings as an attribute of a dual graph. Ignored if
``approach="primal"``.
    angle : str, default 'angle'
name of attribute of angle between LineStrings which will be saved to graph.
Ignored if ``approach="primal"``.
Returns
-------
networkx.Graph,
networkx.MultiGraph,
networkx.DiGraph,
networkx.MultiDiGraph
Graph as per specification
See also
--------
nx_to_gdf
Examples
--------
>>> import geopandas as gpd
>>> df = gpd.read_file(momepy.datasets.get_path('bubenec'), layer='streets')
>>> df.head(5)
geometry
0 LINESTRING (1603585.640 6464428.774, 1603413.2...
1 LINESTRING (1603268.502 6464060.781, 1603296.8...
2 LINESTRING (1603607.303 6464181.853, 1603592.8...
3 LINESTRING (1603678.970 6464477.215, 1603675.6...
4 LINESTRING (1603537.194 6464558.112, 1603557.6...
Primal graph:
>>> G = momepy.gdf_to_nx(df)
>>> G
<networkx.classes.multigraph.MultiGraph object at 0x7f8cf90fad50>
>>> G_directed = momepy.gdf_to_nx(df, directed=True)
>>> G_directed
<networkx.classes.multidigraph.MultiDiGraph object at 0x7f8cf90f56d0>
>>> G_digraph = momepy.gdf_to_nx(df, multigraph=False, directed=True)
>>> G_digraph
<networkx.classes.digraph.DiGraph object at 0x7f8cf9150c10>
>>> G_graph = momepy.gdf_to_nx(df, multigraph=False, directed=False)
>>> G_graph
<networkx.classes.graph.Graph object at 0x7f8cf90facd0>
Dual graph:
>>> G_dual = momepy.gdf_to_nx(df, approach="dual")
>>> G_dual
<networkx.classes.multigraph.MultiGraph object at 0x7f8cf9150fd0>
"""
gdf_network = gdf_network.copy()
if "key" in gdf_network.columns:
gdf_network.rename(columns={"key": "__key"}, inplace=True)
if multigraph and directed:
net = nx.MultiDiGraph()
elif multigraph and not directed:
net = nx.MultiGraph()
elif not multigraph and directed:
net = nx.DiGraph()
else:
net = nx.Graph()
net.graph["crs"] = gdf_network.crs
gdf_network[length] = gdf_network.geometry.length
fields = list(gdf_network.columns)
if approach == "primal":
_generate_primal(net, gdf_network, fields, multigraph)
elif approach == "dual":
if directed:
raise ValueError("Directed graphs are not supported in dual approach.")
_generate_dual(
net, gdf_network, fields, angles=angles, multigraph=multigraph, angle=angle
)
else:
raise ValueError(
f"Approach {approach} is not supported. Use 'primal' or 'dual'."
)
return net
def _points_to_gdf(net):
"""
Generate point gdf from nodes.
Helper for nx_to_gdf.
"""
node_xy, node_data = zip(*net.nodes(data=True))
if isinstance(node_xy[0], int) and "x" in node_data[0].keys():
geometry = [Point(data["x"], data["y"]) for data in node_data] # osmnx graph
else:
geometry = [Point(*p) for p in node_xy]
gdf_nodes = gpd.GeoDataFrame(list(node_data), geometry=geometry)
if "crs" in net.graph.keys():
gdf_nodes.crs = net.graph["crs"]
return gdf_nodes
def _lines_to_gdf(net, points, nodeID):
"""
Generate linestring gdf from edges.
Helper for nx_to_gdf.
"""
starts, ends, edge_data = zip(*net.edges(data=True))
gdf_edges = gpd.GeoDataFrame(list(edge_data))
if points is True:
node_start = []
node_end = []
for s in starts:
node_start.append(net.nodes[s][nodeID])
for e in ends:
node_end.append(net.nodes[e][nodeID])
gdf_edges["node_start"] = node_start
gdf_edges["node_end"] = node_end
if "crs" in net.graph.keys():
gdf_edges.crs = net.graph["crs"]
return gdf_edges
def _primal_to_gdf(net, points, lines, spatial_weights, nodeID):
"""
Generate gdf(s) from primal network.
Helper for nx_to_gdf.
"""
if points is True:
gdf_nodes = _points_to_gdf(net)
if spatial_weights is True:
W = libpysal.weights.W.from_networkx(net)
W.transform = "b"
if lines is True:
gdf_edges = _lines_to_gdf(net, points, nodeID)
if points is True and lines is True:
if spatial_weights is True:
return gdf_nodes, gdf_edges, W
return gdf_nodes, gdf_edges
if points is True and lines is False:
if spatial_weights is True:
return gdf_nodes, W
return gdf_nodes
return gdf_edges
def _dual_to_gdf(net):
"""
Generate linestring gdf from dual network.
Helper for nx_to_gdf.
"""
starts, edge_data = zip(*net.nodes(data=True))
gdf_edges = gpd.GeoDataFrame(list(edge_data))
gdf_edges.crs = net.graph["crs"]
return gdf_edges
def nx_to_gdf(net, points=True, lines=True, spatial_weights=False, nodeID="nodeID"):
"""
Convert ``networkx.Graph`` to LineString GeoDataFrame and Point GeoDataFrame.
    Automatically detects the ``approach`` of the graph and assigns edges and nodes to
    the relevant geometry type.
See the User Guide page :doc:`../../user_guide/graph/convert` for details.
Parameters
----------
net : networkx.Graph
``networkx.Graph``
points : bool
export point-based gdf representing intersections
lines : bool
export line-based gdf representing streets
spatial_weights : bool
export libpysal spatial weights for nodes (only for primal graphs)
nodeID : str
name of node ID column to be generated
Returns
-------
GeoDataFrame
Selected gdf or tuple of both gdfs or tuple of gdfs and weights
See also
--------
gdf_to_nx
Examples
--------
>>> import geopandas as gpd
>>> df = gpd.read_file(momepy.datasets.get_path('bubenec'), layer='streets')
>>> df.head(2)
geometry
0 LINESTRING (1603585.640 6464428.774, 1603413.2...
1 LINESTRING (1603268.502 6464060.781, 1603296.8...
>>> G = momepy.gdf_to_nx(df)
Converting primal Graph to points as intersections and lines as street segments:
>>> points, lines = momepy.nx_to_gdf(G)
>>> points.head(2)
nodeID geometry
0 1 POINT (1603585.640 6464428.774)
1 2 POINT (1603413.206 6464228.730)
>>> lines.head(2)
geometry mm_len node_start node_end
0 LINESTRING (1603585.640... 264.103950 1 2
1 LINESTRING (1603561.740... 70.020202 1 9
Storing relationship between points/nodes as libpysal W object:
>>> points, lines, W = momepy.nx_to_gdf(G, spatial_weights=True)
>>> W
<libpysal.weights.weights.W object at 0x7f8d01837210>
Converting dual Graph to lines. Dual Graph does not export edges to GDF:
>>> G = momepy.gdf_to_nx(df, approach="dual")
>>> lines = momepy.nx_to_gdf(G)
>>> lines.head(2)
geometry mm_len
0 LINESTRING (1603585.640 6464428.774, 1603413.2... 264.103950
1 LINESTRING (1603607.303 6464181.853, 1603592.8... 199.746503
"""
# generate nodes and edges geodataframes from graph
primal = None
if "approach" in net.graph.keys():
if net.graph["approach"] == "primal":
primal = True
elif net.graph["approach"] == "dual":
return _dual_to_gdf(net)
else:
raise ValueError(
f"Approach {net.graph['approach']} is not supported. "
"Use 'primal' or 'dual'."
)
if not primal:
import warnings
warnings.warn("Approach is not set. Defaulting to 'primal'.")
nid = 1
for n in net:
net.nodes[n][nodeID] = nid
nid += 1
return _primal_to_gdf(
net, points=points, lines=lines, spatial_weights=spatial_weights, nodeID=nodeID
)
def limit_range(vals, rng):
"""
Extract values within selected range
Parameters
----------
vals : array
rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The order of the elements is not important.
Returns
-------
array
limited array
"""
vals = np.asarray(vals)
if len(vals) > 2:
rng = sorted(rng)
if np.isnan(vals).any():
lower = np.nanpercentile(vals, rng[0], interpolation="nearest")
higher = np.nanpercentile(vals, rng[1], interpolation="nearest")
else:
lower = np.percentile(vals, rng[0], interpolation="nearest")
higher = np.percentile(vals, rng[1], interpolation="nearest")
return vals[(lower <= vals) & (vals <= higher)]
return vals
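# Brief illustration of limit_range (example values are assumptions, not from the
# original source): with vals = [0, 1, 2, 3, 4, 100] and rng = (10, 90), the extreme
# value 100 falls outside the nearest-interpolation percentile band and is dropped,
# while the remaining values are returned unchanged.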
def _azimuth(point1, point2):
"""azimuth between 2 shapely points (interval 0 - 180)"""
angle = np.arctan2(point2[0] - point1[0], point2[1] - point1[1])
return np.degrees(angle) if angle > 0 else np.degrees(angle) + 180
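# Worked example (illustration only): _azimuth((0, 0), (1, 1)) gives 45.0, while a
# point directly north, _azimuth((0, 0), (0, 1)), maps to 180.0 because results are
# folded into the half-open interval (0, 180].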
``` |
{
"source": "Jon-Webb-79/Core-Utilities",
"score": 2
} |
#### File: Core-Utilities/core_utilities/plotting.py
```python
from typing import List
import warnings
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.dates as mdates
from matplotlib import rc, pyplot as plt
# ============================================================================
# ============================================================================
# Date: December 18, 2020
# Purpose: This file contains classes and functions necessary for
# plotting.
# Source Code Metadata
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Jon Webb Inc."
__version__ = "1.0"
# ============================================================================
# ============================================================================
def text_date_plot(dates: List[List[str]], y_data: List[List[float]],
line_colors: List[str], line_style: List[str],
                   line_weight: List[float], x_label: str, y_label: str,
dat_labels: List[str], label_pos: str, y_scale: str = 'LIN',
plot_name: str = 'NULL', save: bool = False,
label_font_size: int = 18, tick_font_size: int = 18,
style_name: str = 'default', title: str = 'NULL',
title_font_size: int = 24) -> None:
"""
    :param dates: A list of lists, where each inner list contains dates as text
                  strings in the format YYYY-MM-DD or YYYY/MM/DD
:param y_data: A list of lists containing y-axis data corresponding to the
list of lists in `dates`
    :param line_colors: A list of line colors, one for each curve.
                        Acceptable line color indicators can be found in documentation
                        for
                        `matplotlib colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`_.
:param line_style: A list of line styles, one for each curve. Acceptable line
styles can be found in documentation for
`matplotlib style <https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html>`_.
:param line_weight: A list of line weights, one for each curve.
:param x_label: The x-axis label
:param y_label: The y-axis label
:param dat_labels: A list of labels, one for each curve
:param label_pos: The position of the label in the plot, examples might be
``upper left``, ``lower right``.
:param y_scale: 'LOG' or 'LIN' for logarithmic or linear scale
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param save: True or False, defaulted to False
:param label_font_size: The font size for plot labels, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return None:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
    to produce single plots of one or multiple data sets as a function of date. This function assumes that each
    date is supplied as a text string and not as a Timestamp or datetime. This function also autonomously
determines the appropriate date display format. If you desire plots as a
function of time you should use the ``text_time_plot`` function. The function can be used in the
following manner;
.. code-block:: python
> # Use stock data for example
> tickers = ['AAPL', 'WMT']
> data = yf.download(tickers, '2015-1-1')['Adj Close']
> # transform Timestamps to string
> dates = list(data.index.strftime('%Y-%m-%d'))
> date_list = [dates, dates]
> y_list = [list(data[tickers[0]]), list(data[tickers[1]])]
> colors = ['red', 'green']
> line_style = ['-', '-']
> weight = [1.0, 1.0]
> text_date_plot(date_list, y_list, colors, line_style, weight, 'Date',
'$', tickers, 'upper left')
.. image:: date.eps
:align: center
"""
# Adjust format for YYYY/MM/DD to YYYY-MM-DD
outer_list = []
for i in range(len(dates)):
inner_list = []
for j in range(len(dates[i])):
year = dates[i][j][0:4]
month = dates[i][j][5:7]
day = dates[i][j][8:10]
date_string = year + '-' + month + '-' + day
inner_list.append(datetime.strptime(date_string, '%Y-%m-%d'))
outer_list.append(inner_list)
# Determine time difference between min and max point
days = 0
for i in outer_list:
delta = (max(i) - min(i)).days
if delta > days:
days = delta
# Start plot
fig, td_plot = plt.subplots()
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
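    # Choose x-axis date formatting from the span of the data: day numbers for spans
    # of up to two weeks, month-year labels for spans of up to six months, and at
    # most four month-year ticks for anything longer.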
if days <= 15:
myfmt = mdates.DateFormatter('%d')
td_plot.xaxis.set_major_locator(mdates.DayLocator())
elif days <= 180:
myfmt = mdates.DateFormatter('%b-%y')
td_plot.xaxis.set_major_locator(mdates.MonthLocator())
else:
myfmt = mdates.DateFormatter('%b-%y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(4))
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
td_plot.xaxis.set_major_formatter(myfmt)
for i in range(len(outer_list)):
td_plot.plot(outer_list[i], y_data[i], color=line_colors[i],
label=dat_labels[i], linewidth=line_weight[i],
linestyle=line_style[i])
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
# ----------------------------------------------------------------------------
def two_d_line_matplot(x_data: List[List[float]], y_data: List[List[float]],
line_colors: List[str], line_style: List[str],
                       line_weight: List[float], x_label: str, y_label: str,
dat_labels: List[str], label_pos: str, x_scale: str = 'LIN',
y_scale: str = 'LIN', plot_name: str = 'NULL',
save: bool = False, label_font_size: int = 18,
tick_font_size: int = 18, style_name: str = 'default',
title: str = 'NULL', title_font_size: int = 24) -> None:
"""
:param x_data: A list of lists, where the inner lists contain data points
for the x-axis
:param y_data: A list of lists, where the inner lists contain data points
for the y-axis
    :param line_colors: A list of line colors, one for each curve.
                        Acceptable line color indicators can be found in documentation
                        for
                        `matplotlib colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`_.
:param line_style: A list of line styles, one for each curve. Acceptable line
styles can be found in documentation for
`matplotlib style <https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html>`_.
:param line_weight: A list of line weights, one for each curve.
:param x_label: The label for the x-axis
:param y_label: The label for the y-axis
:param dat_labels: A list of labels, one for each curve
:param label_pos: The position of the label in the plot, examples might be
``upper left``, ``lower right``.
:param x_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param y_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param save: True or False, defaulted to False
:param label_font_size: The font size for plot labels, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return None:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
to produce single plots of one or multiple data sets. This function will only produce line plots and
not scatter plots or a combination of both. The function can be used in the following manner;
.. code-block:: python
> x_dat = np.linspace(0, 10, 15)
> y1_dat = x_dat
> y2_dat = x_dat ** 2.0
> y3_dat = x_dat ** 3.0
> x_list = [x_dat, x_dat, x_dat]
> y_list = [y1_dat, y2_dat, y3_dat]
> colors = ['red', 'blue', 'black']
> line_style = ['-', '-', '--']
> labels = ['linear', 'squared', 'cubed']
> weight = [1, 2, 3]
> two_d_line_matplot(x_list, y_list, colors, line_style, weight, 'x-data',
'y-data', labels, 'upper left')
.. image:: line_plot.eps
:scale: 90%
:align: center
"""
# Error checking and warnings
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if len(x_data) != len(y_data):
warnings.warn('length of x list of lists is not the same as y list of lists, plot not printed')
return
if len(line_colors) != len(x_data):
warnings.warn('line colors list not the same length as data lists, plot not printed')
return
if len(line_style) != len(x_data):
warnings.warn('line_style list not the same length as data lists, plot not printed')
return
if len(line_weight) != len(x_data):
warnings.warn('line_weight list not the same length as data lists, plot not printed')
return
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
        warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(line_colors)):
td_plot.plot(x_data[i], y_data[i], color=line_colors[i],
label=dat_labels[i], linewidth=line_weight[i],
linestyle=line_style[i])
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
# ----------------------------------------------------------------------------
def two_d_scatter_matplot(x_data: List[List[float]], y_data: List[List[float]],
marker_colors: List[str], marker_style: List[str],
x_label: str, y_label: str, dat_labels: List[str],
label_pos: str, x_scale: str = 'LIN',
y_scale: str = 'LIN', plot_name: str = 'NULL',
save: bool = False, label_font_size: int = 18,
tick_font_size: int = 18, style_name: str = 'default',
title: str = 'NULL', title_font_size: int = 24) -> None:
"""
:param x_data: A list of lists, where the inner lists contain data points
for the x-axis
:param y_data: A list of lists, where the inner lists contain data points
for the y-axis
    :param marker_colors: A list of marker colors, one for each curve.
                          Acceptable color indicators can be found in documentation
                          for `matplotlib colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`_.
    :param marker_style: A list of marker styles, one for each curve. Acceptable marker
                         styles can be found in documentation for
                         `matplotlib markers <https://matplotlib.org/stable/api/markers_api.html>`_.
:param x_label: The label for the x-axis
:param y_label: The label for the y-axis
:param dat_labels: A list of labels, one for each curve
:param label_pos: The position of the label in the plot, examples might be
``upper left``, ``lower right``
:param x_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param y_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param save: True or False, defaulted to False
:param label_font_size: The font size for plot labels, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return None:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
    to produce single plots of one or multiple data sets. This function will only produce scatter plots and
    not line plots or a combination of both. The function can be used in the following manner;
.. code-block:: python
> x_dat = np.linspace(0, 10, 15)
> y1_dat = x_dat
> y2_dat = x_dat ** 2.0
> y3_dat = x_dat ** 3.0
> x_list = [x_dat, x_dat, x_dat]
> y_list = [y1_dat, y2_dat, y3_dat]
        > colors = ['red', 'blue', 'black']
        > marker_style = ['o', '^', 'd']
        > labels = ['linear', 'squared', 'cubed']
        > two_d_scatter_matplot(x_list, y_list, colors, marker_style, 'x-data',
                                'y-data', labels, 'upper left')
.. image:: scatter_plot.eps
:align: center
"""
# Error checking and warnings
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if len(x_data) != len(y_data):
warnings.warn('length of x list of lists is not the same as y list of lists, plot not printed')
return
if len(marker_colors) != len(x_data):
        warnings.warn('marker colors list not the same length as data lists, plot not printed')
return
if len(marker_style) != len(x_data):
        warnings.warn('marker_style list not the same length as data lists, plot not printed')
return
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
        warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(marker_colors)):
td_plot.plot(x_data[i], y_data[i], color=marker_colors[i],
label=dat_labels[i], marker=marker_style[i],
linestyle=' ')
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
# ----------------------------------------------------------------------------
def two_d_scatter_line_matplot(x_data: List[List[float]], y_data: List[List[float]],
marker_colors: List[str], marker_style: List[str],
line_style: List[str], line_weight: List[str],
x_label: str, y_label: str, dat_labels: List[str],
label_pos: str, x_scale: str = 'LIN',
y_scale: str = 'LIN', plot_name: str = 'NULL',
save: bool = False, label_font_size: int = 18,
tick_font_size: int = 18, style_name: str = 'default',
title: str = 'NULL', title_font_size: int = 24) -> None:
"""
:param x_data: A list of lists, where the inner lists contain data points
for the x-axis
:param y_data: A list of lists, where the inner lists contain data points
for the y-axis
    :param marker_colors: A list of marker colors, one for each curve.
                          Acceptable color indicators can be found in documentation
                          for `matplotlib colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`_.
    :param marker_style: A list of marker styles, one for each curve. Acceptable marker
                         styles can be found in documentation for
                         `matplotlib markers <https://matplotlib.org/stable/api/markers_api.html>`_.
:param line_style: A list of line styles, one for each curve. Acceptable line
styles can be found in documentation for
`matplotlib style <https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html>`_.
:param line_weight: A list of line weights, one for each curve.
:param x_label: The label for the x-axis
:param y_label: The label for the y-axis
:param dat_labels: A list of labels, one for each curve
:param label_pos: The position of the label in the plot, examples might be
``upper left``, ``lower right``
:param x_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param y_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param save: True or False, defaulted to False
:param label_font_size: The font size for plot labels, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return None:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
    to produce scatter plots of one or multiple data sets overlaid with connecting lines.
    The function can be used in the following manner;
.. code-block:: python
> x_dat = np.linspace(0, 10, 15)
> y1_dat = x_dat
> y2_dat = x_dat ** 2.0
> y3_dat = x_dat ** 3.0
> x_list = [x_dat, x_dat, x_dat]
> y_list = [y1_dat, y2_dat, y3_dat]
> colors = ['red', 'blue', 'black']
> line_style = ['-', '-', '--']
> labels = ['linear', 'squared', 'cubed']
> weight = [1, 2, 3]
> marker_style = ['^', 'o', 'd']
> two_d_scatter_line_matplot(x_list, y_list, colors, marker_style,
line_style, weight, 'x-axis', 'y-axis',
labels, 'upper left', save=True, plot_name=plt_name)
.. image:: line_mark.eps
:align: center
"""
# Error checking and warnings
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if len(x_data) != len(y_data):
warnings.warn('length of x list of lists is not the same as y list of lists, plot not printed')
return
if len(marker_colors) != len(x_data):
        warnings.warn('marker colors list not the same length as data lists, plot not printed')
return
if len(marker_style) != len(x_data):
        warnings.warn('marker_style list not the same length as data lists, plot not printed')
return
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
        warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(marker_colors)):
td_plot.plot(x_data[i], y_data[i], color=marker_colors[i],
label=dat_labels[i], marker=marker_style[i],
linestyle=line_style[i], linewidth=line_weight[i])
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
# ----------------------------------------------------------------------------
def one_d_histogram_plot(data: List[List[float]], labels: List[List[str]],
x_label: str, y_label: str, colors: List[str],
edge_colors: List[str], shading: List[float],
label_pos: str, num_bins: int = 50, tick_font_size: int = 18,
                         label_font_size: int = 18, style_name: str = 'default',
save: bool = False, plot_name: str = 'NULL',
hist_type: str = 'bar', dens: bool = False,
title: str = 'NULL', title_font_size: int = 24) -> None:
"""
:param data: A list of lists containing data for one or multiple
distributions
:param labels: A list of labels, one for each distribution
:param x_label: The label for the x-axis
:param y_label: The label for the y-axis
:param colors: The fill colors for each ``bar`` plot. If a ``step`` plot
is selected, this input is irrelevant, but data must still be
passed to the function.
:param edge_colors: The colors for the edge of each bar or step plot
    :param shading: The level of transparency for bar plot fill. A value of
                    0 is invisible, 1 is the maximum color density
:param label_pos: Where in the plot, the labels for each curve are to be
placed. ``upper left`` or ``lower right`` are examples.
:param num_bins: The number of bins to be plotted, defaulted to 50
:param tick_font_size: The size for each tick, defaulted to 18
:param label_font_size: The size for printed font, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param save: True or False, defaulted to False
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param hist_type: {``bar``, ``barstacked``, ``step``, ``stepfilled``}
See
`histogram <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.hist.html>`_
for more information.
:param dens: If True, the first element of the return tuple will be the counts
normalized to form a probability density, i.e., the area (or integral)
under the histogram will sum to 1
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
    to produce single histogram plots or multiple overlaid plots. The function can be used in the following manner;
.. code-block:: python
> np.random.seed(19680801)
> x = np.random.normal(15.0, 3.0, 1000)
> y = np.random.normal(20.0, 3.0, 1000)
> data = [x, y]
> labels = ['one', 'two']
> colors = ['blue', 'green']
> edge_colors = ['black', 'black']
> alpha = [0.9, 0.2]
> x_label = 'x-axis'
> y_label = 'y-axis'
> one_d_histogram_plot(data, labels, x_label, y_label, colors, edge_colors,
alpha, 'upper left', num_bins=50, hist_type='step',
dens=True)
.. image:: hist1.eps
:align: center
The plot parameters can be changed to produce a normalized plot, only
showing the histogram outline with the following code.
.. code-block:: python
> np.random.seed(19680801)
> x = np.random.normal(15.0, 3.0, 1000)
> y = np.random.normal(20.0, 3.0, 1000)
> data = [x, y]
> labels = ['one', 'two']
> colors = ['black', 'red']
> edge_colors = ['black', 'red']
> alpha = [1.0, 1.0]
> x_label = 'x-axis'
> y_label = 'y-axis'
> one_d_histogram_plot(data, labels, x_label, y_label, colors, edge_colors,
alpha, 'upper left', num_bins=50)
.. image:: hist2.eps
:align: center
"""
if len(labels) != len(data):
warnings.warn("data list should be the same length as the labels list")
if len(labels) != len(colors):
warnings.warn("data list should be the same length as the colors list")
if len(labels) != len(edge_colors):
warnings.warn("labels list should be the same length as the edge_colors list")
if len(labels) != len(shading):
warnings.warn("labels list should be the same length as the shading list")
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.15)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
plt.xlabel(x_label, fontsize=label_font_size)
plt.ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
plt.title(title, fontsize=title_font_size)
for i in range(len(labels)):
plt.hist(data[i], bins=num_bins, color=colors[i], edgecolor=edge_colors[i],
alpha=shading[i], label=labels[i], histtype=hist_type, density=dens)
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# ================================================================================
# ================================================================================
class MatPlotDataFrame:
"""
:param df: Dataframe containing columnar data to be plotted
This class will plot user specified data from a pandas dataframe
"""
def __init__(self, df: pd.DataFrame):
self.df = df
self.colors = ['lightgrey', 'deepskyblue', 'sandybrown',
'teal', 'limegreen', 'coral',
'hotpink', 'magenta', 'red',
'white', 'gold', 'darkgreen',
                       'turquoise', 'olive', 'orange',
'mediumvioletred', 'purple' , 'darkred']
self.styles = ['o' for i in range(len(self.colors))]
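        # Default marker styles: one circle marker per default color; plotting
        # methods fall back to these lists when the caller passes ['None']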
# --------------------------------------------------------------------------------
def scatter_plot_parse_column(self, x_header: str, y_header: str, parsing_header: str,
column_values: List[str], style_name: str='default',
marker_colors: List[str]=['None'], marker_style: List[str]=['None'],
fill_alpha: np.float32=0.7, edge_color: str='black', x_label: str='',
y_label: str='', title: str='', label_pos: str='upper right',
x_scale: str='LIN', y_scale: str='LIN', plot_name: str='NULL',
save: bool=False, label_font_size: int=18,
tick_font_size: int=18, title_font_size: int=24,
marker_size: int=35, marker_edge_width: np.float32=0.8,
grid: bool=False, grid_style='-', grid_color='grey') -> None:
"""
:param x_header: The title of the dataframe column containing the x-axis
data sets
:param y_header: The title of the dataframe column containing the y-axis
data sets
:param parsing_header: The title of the dataframe column containing the
values which will be used to parse the dataframe into
one or multiple data sets
:param column_values: The values contained in the parsing_header column
that will be used to parse the data set into
multiple data sets
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param marker_colors: A list of marker colors, where each marker color
corresponds to each data set. This parameter has a
                              default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param marker_style: A list of marker styles, where each marker style corresponds
to a data set. This parameter has a default list of 18 circle
marker styles that the user can override. Marker styles
can be found at :href `marker style<https://matplotlib.org/stable/api/markers_api.html>`
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param edge_color: The color of the line surrounding the marker
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will parse a dataframe column based on a user specified
        value or list of values, and plot the user specified x and y columns
        for each filtered subset. As an example, consider
        a dataframe with the following columnar data structure.
.. code-block:: python
> length = 20
> x = np.linspace(0, length, num=length)
> linear = x
> squared = x ** 2.0
> lin = np.repeat('linear', length)
> sq = np.repeat('squared', length)
> # Combine arrays into one
> x = np.hstack((x, x))
> y = np.hstack((linear, squared))
> power = np.hstack((lin, sq))
> # Create dataframe
> dictionary = {'x': x, 'y': y, 'power': power}
> df = pd.DataFrame(dictionary)
> # Plot data
> obj = MatPlotDataFrame(df)
> parsing_header = 'power'
> column_values = ['linear', 'squared']
           obj.scatter_plot_parse_column('x', 'y', parsing_header,
column_values,
marker_colors=['red', 'green'],
marker_style=['o', '^'],
label_pos='upper left')
.. image:: mat_scatter_test1.eps
:align: center
"""
df_list = [self.df[self.df[parsing_header] == col_val] for
col_val in column_values]
# Error checking
if marker_colors[0] == 'None':
marker_colors = self.colors
if len(marker_colors) < len(column_values):
msg1 = 'FATAL ERROR: The length of the marker color list must be as '
msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if marker_style[0] == 'None':
marker_style = self.styles
if len(marker_style) < len(column_values):
            msg1 = 'FATAL ERROR: The length of the marker style list must be as '
            msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(df_list)):
td_plot.scatter(df_list[i][x_header], df_list[i][y_header],
label=column_values[i], marker=marker_style[i],
color=marker_colors[i], alpha=fill_alpha,
edgecolors=edge_color, s=marker_size,
linewidth=marker_edge_width)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def scatter_plot_columns(self, x_headers: List[str], y_headers: List[str],
labels: List[str], style_name: str='default',
marker_colors: List[str]=['None'],
marker_style: List[str]=['None'], fill_alpha: np.float32=0.7,
edge_color: str='black', x_label: str='', y_label: str='',
title: str='', label_pos: str='upper right', x_scale: str='LIN',
y_scale: str='LIN', plot_name: str='NULL', save: bool=False,
label_font_size: int=18, tick_font_size: int=18,
title_font_size: int=24, marker_size: int=35,
marker_edge_width: np.float32=0.8, grid: bool=False,
grid_style='-', grid_color='grey'):
"""
:param x_headers: The title of the dataframe columns containing the x-axis
data sets
:param y_headers: The title of the dataframe columns containing the y-axis
data sets
:param labels: A list of the label names for each data set
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param marker_colors: A list of marker colors, where each marker color
corresponds to each data set. This parameter has a
                              default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param marker_style: A list of marker styles, where each marker style corresponds
to a data set. This parameter has a default list of 18 circle
marker styles that the user can override. Marker styles
can be found at :href `marker style<https://matplotlib.org/stable/api/markers_api.html>`
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param edge_color: The color of the line surrounding the marker
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will plot user defined dataframe columns for the x and
        y axis of a 2-d plot as a scatter plot.
.. code-block:: python
> length = 20
> x = np.linspace(0, 20, num=20)
> linear = x
> squared = x ** 2.0
> # create dataframe
> dictionary = {'x': x, 'linear': linear, 'squared': squared}
> df = pd.DataFrame(dictionary)
> # plot data
> obj = MatPlotDataFrame(df)
> x_headers = ['x', 'x']
> y_headers = ['linear', 'squared']
> obj.scatter_plot_columns(x_headers, y_headers, y_headers,
x_label='x-axis', y_label='y-axis', title='Test',
style_name='default',marker_colors=['red', 'green'],
fill_alpha=0.7, marker_style=['o', '^'],
label_pos='upper left', grid=False, save=True,
plot_name=plt_name)
.. image:: mat_scatter_test2.eps
:align: center
"""
# Error checking
if marker_colors[0] == 'None':
marker_colors = self.colors
if len(x_headers) != len(y_headers):
sys.exit('FATAL ERROR: x and y arrays must be the same size')
if marker_style[0] == 'None':
marker_style = self.styles
if len(marker_style) < len(x_headers):
            msg1 = 'FATAL ERROR: The length of the marker style list must be as '
            msg2 = 'large or larger than the number of data sets'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(x_headers)):
td_plot.scatter(self.df[x_headers[i]], self.df[y_headers[i]],
label=labels[i], marker=marker_style[i],
color=marker_colors[i], alpha=fill_alpha,
edgecolors=edge_color, s=marker_size,
linewidth=marker_edge_width)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def line_plot_parse_column(self, x_header: str, y_header: str, parsing_header: str,
column_values: List[str], style_name: str='default',
line_colors: List[str]=['None'], line_weight: np.float32=2.0,
fill_alpha: np.float32=0.7, line_style: str='-', x_label: str='',
y_label: str='', title: str='', label_pos: str='upper right',
x_scale: str='LIN', y_scale: str='LIN', plot_name: str='NULL',
save: bool=False, label_font_size: int=18,
tick_font_size: int=18, title_font_size: int=24,
marker_size: int=35, marker_edge_width: np.float32=0.8,
grid: bool=False, grid_style='-', grid_color='grey') -> None:
"""
:param x_header: The title of the dataframe column containing the x-axis
data sets
:param y_header: The title of the dataframe column containing the y-axis
data sets
:param parsing_header: The title of the dataframe column containing the
values which will be used to parse the dataframe into
one or multiple data sets
:param column_values: The values contained in the parsing_header column
that will be used to parse the data set into
multiple data sets
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param line_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param line_weight: The weight corresponding to the line thickness, defaulted to 2.0
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will parse a dataframe column based on a user specified
        value or list of values, and plot the user specified x and y columns
        for each filtered subset. As an example, consider
        a dataframe with the following columnar data structure.
.. code-block:: python
> length = 20
> x = np.linspace(0, length, num=length)
> linear = x
> squared = x ** 2.0
> lin = np.repeat('linear', length)
> sq = np.repeat('squared', length)
> # Combine arrays into one
> x = np.hstack((x, x))
> y = np.hstack((linear, squared))
> power = np.hstack((lin, sq))
> # Create dataframe
> dictionary = {'x': x, 'y': y, 'power': power}
> df = pd.DataFrame(dictionary)
> # Plot data
> obj = MatPlotDataFrame(df)
> parsing_header = 'power'
> column_values = ['linear', 'squared']
           obj.line_plot_parse_column('x', 'y', parsing_header,
                                      column_values,
                                      line_colors=['red', 'green'],
                                      label_pos='upper left')
.. image:: line_scatter_test1.eps
:align: center
"""
df_list = [self.df[self.df[parsing_header] == col_val] for
col_val in column_values]
# Error checking
if line_colors[0] == 'None':
line_colors = self.colors
if len(line_colors) < len(column_values):
            msg1 = 'FATAL ERROR: The length of the line color list must be as '
            msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(df_list)):
td_plot.plot(df_list[i][x_header], df_list[i][y_header],
label=column_values[i], linestyle=line_style,
color=line_colors[i], linewidth=line_weight)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def line_plot_columns(self, x_headers: str, y_headers: str, labels: List[str],
style_name: str='default', line_colors: List[str]=['None'],
line_weight: np.float32=2.0, fill_alpha: np.float32=0.7,
line_style: str='-', x_label: str='', y_label: str='',
title: str='', label_pos: str='upper right', x_scale: str='LIN',
y_scale: str='LIN', plot_name: str='NULL', save: bool=False,
label_font_size: int=18, tick_font_size: int=18,
title_font_size: int=24, marker_size: int=35,
marker_edge_width: np.float32=0.8, grid: bool=False,
grid_style='-', grid_color='grey') -> None:
"""
:param x_headers: The title of the dataframe columns containing the x-axis
data sets
:param y_headers: The title of the dataframe columns containing the y-axis
data sets
:param labels: A list containing the name of each label
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param line_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param line_weight: The weight corresponding to the line thickness, defaulted to 2.0
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will plot user defined dataframe columns for the x and
        y axis of a 2-d plot as a line plot.
.. code-block:: python
> length = 20
> x = np.linspace(0, 20, num=20)
> linear = x
> squared = x ** 2.0
> # create dataframe
> dictionary = {'x': x, 'linear': linear, 'squared': squared}
> df = pd.DataFrame(dictionary)
> # plot data
> obj = MatPlotDataFrame(df)
> x_headers = ['x', 'x']
> y_headers = ['linear', 'squared']
> obj.line_plot_columns(x_headers, y_headers, y_headers,
x_label='x-axis', y_label='y-axis', title='Test',
                         style_name='default', line_colors=['red', 'green'],
                         fill_alpha=0.7,
label_pos='upper left', grid=False, save=True,
plot_name=plt_name)
.. image:: line_scatter_test2.eps
:align: center
"""
# Error checking
if line_colors[0] == 'None':
line_colors = self.colors
if len(line_colors) < len(labels):
            msg1 = 'FATAL ERROR: The length of the line color list must be as '
            msg2 = 'large or larger than the number of labels'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(x_headers)):
td_plot.plot(self.df[x_headers[i]], self.df[y_headers[i]],
label=labels[i], linestyle=line_style,
color=line_colors[i], linewidth=line_weight)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def timedate_plot_parse_column(self, x_header: str, y_header: str, parsing_header: str,
column_values: List[str], style_name: str='default',
line_colors: List[str]=['None'], line_weight: np.float32=2.0,
fill_alpha: np.float32=0.7, line_style: str='-', x_label: str='',
y_label: str='', title: str='', label_pos: str='upper right',
x_scale: str='LIN', y_scale: str='LIN', plot_name: str='NULL',
save: bool=False, label_font_size: int=18,
tick_font_size: int=18, title_font_size: int=24,
marker_size: int=35, marker_edge_width: np.float32=0.8,
grid: bool=False, grid_style='-', grid_color='grey'):
"""
:param x_header: The title of the dataframe column containing the x-axis
                         data sets. It is assumed that the x-axis is the datetime
                         axis for this plot.
:param y_header: The title of the dataframe column containing the y-axis
data sets
:param parsing_header: The title of the dataframe column containing the
values which will be used to parse the dataframe into
one or multiple data sets
:param column_values: The values contained in the parsing_header column
that will be used to parse the data set into
multiple data sets
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param line_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param line_weight: The weight corresponding to the line thickness, defaulted to 2.0
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will parse a dataframe column based on a user specified
        value or list of values, and plot the user specified datetime x column
        and y column for each filtered subset. As an example, consider
        a dataframe with the following columnar data structure.
.. code-block:: python
           > length = 20
           > dates = pd.date_range(start='2021-01-01', periods=length, freq='D')
           > linear = np.linspace(0, length, num=length)
           > squared = linear ** 2.0
           > lin = np.repeat('linear', length)
           > sq = np.repeat('squared', length)
           > # Combine arrays into one
           > dates = np.hstack((dates, dates))
           > y = np.hstack((linear, squared))
           > power = np.hstack((lin, sq))
           > # Create dataframe
           > dictionary = {'dates': dates, 'y': y, 'power': power}
           > df = pd.DataFrame(dictionary)
           > # Plot data
           > obj = MatPlotDataFrame(df)
           > parsing_header = 'power'
           > column_values = ['linear', 'squared']
           > obj.timedate_plot_parse_column('dates', 'y', parsing_header,
                                            column_values,
                                            line_colors=['red', 'green'],
                                            label_pos='upper left')
.. image:: line_scatter_test1.eps
:align: center
"""
max_date = self.df[x_header].max()
min_date = self.df[x_header].min()
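        # Dividing the timedelta by np.timedelta64(1, 'D') expresses the span of
        # the time axis in days; this value drives the tick-format choice below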
diff = (max_date - min_date) / np.timedelta64(1, 'D')
df_list = [self.df[self.df[parsing_header] == col_val] for
col_val in column_values]
df_list = [df.set_index(x_header) for df in df_list]
# Error checking
if line_colors[0] == 'None':
line_colors = self.colors
if len(line_colors) < len(column_values):
            msg1 = 'FATAL ERROR: The length of the line color list must be as '
            msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
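        # Pick a date tick format from the span of the data: hours for spans of
        # up to two days, month-day up to ~two weeks, month-year up to ~six
        # months, and years beyond that (2191 days is roughly six years)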
if diff <= 2:
myfmt = mdates.DateFormatter('%H')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(6))
elif diff <= 15:
myfmt = mdates.DateFormatter('%b-%d')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(6))
elif diff <= 180:
myfmt = mdates.DateFormatter('%b-%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
elif diff <= 2191:
myfmt = mdates.DateFormatter('%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
else:
myfmt = mdates.DateFormatter('%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
td_plot.xaxis.set_major_formatter(myfmt)
for i in range(len(df_list)):
td_plot.plot(df_list[i].index, df_list[i][y_header],
label=column_values[i], linestyle=line_style,
color=line_colors[i], linewidth=line_weight)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def timedate_plot_columns(self, x_headers: str, y_headers: str, labels: List[str],
style_name: str='default',
line_colors: List[str]=['None'], line_weight: np.float32=2.0,
fill_alpha: np.float32=0.7, line_style: str='-', x_label: str='',
y_label: str='', title: str='', label_pos: str='upper right',
x_scale: str='LIN', y_scale: str='LIN', plot_name: str='NULL',
save: bool=False, label_font_size: int=18,
tick_font_size: int=18, title_font_size: int=24,
marker_size: int=35, marker_edge_width: np.float32=0.8,
grid: bool=False, grid_style='-', grid_color='grey'):
"""
        :param x_headers: The titles of the dataframe columns containing the x-axis
                          data sets. It is assumed that the x-axis is the datetime
                          axis for this plot.
        :param y_headers: The titles of the dataframe columns containing the y-axis
                          data sets
:param labels: A list of the labels to use for each curve in the legend
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param line_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param line_weight: The weight corresponding to the line thickness, defaulted to 2.0
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will plot user specified dataframe columns for the x and
        y axes of a 2-d line plot, where the x columns contain datetime
        data. As an example, consider
        a dataframe with the following columnar data structure.
.. code-block:: python
           > length = 20
           > dates = pd.date_range(start='2021-01-01', periods=length, freq='D')
           > linear = np.linspace(0, length, num=length)
           > squared = linear ** 2.0
           > # Create dataframe
           > dictionary = {'dates': dates, 'linear': linear, 'squared': squared}
           > df = pd.DataFrame(dictionary)
           > # Plot data
           > obj = MatPlotDataFrame(df)
           > x_headers = ['dates', 'dates']
           > y_headers = ['linear', 'squared']
           > obj.timedate_plot_columns(x_headers, y_headers, y_headers,
                                       line_colors=['red', 'green'],
                                       label_pos='upper left')
.. image:: line_scatter_test1.eps
:align: center
"""
diff = 0
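        # Track the widest date span (in days) across all x columns; the tick
        # format below is chosen from this single value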
for i in range(len(x_headers)):
max_date = self.df[x_headers[i]].max()
min_date = self.df[x_headers[i]].min()
delta = (max_date - min_date) / np.timedelta64(1, 'D')
if delta > diff:
diff = delta
# Error checking
if line_colors[0] == 'None':
line_colors = self.colors
if len(line_colors) < len(x_headers):
            msg1 = 'FATAL ERROR: The length of the line color list must be as '
            msg2 = 'large or larger than the number of data sets'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
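        # Same span-based tick-format selection as in timedate_plot_parse_column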
if diff <= 2:
myfmt = mdates.DateFormatter('%H')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(6))
elif diff <= 15:
myfmt = mdates.DateFormatter('%b-%d')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(6))
elif diff <= 180:
myfmt = mdates.DateFormatter('%b-%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
elif diff <= 2191:
myfmt = mdates.DateFormatter('%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
else:
myfmt = mdates.DateFormatter('%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
td_plot.xaxis.set_major_formatter(myfmt)
for i in range(len(x_headers)):
td_plot.plot(self.df[x_headers[i]], self.df[y_headers[i]],
label=labels[i], linestyle=line_style,
color=line_colors[i], linewidth=line_weight)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def histogram_plot_parse_column(self, header: str, parsing_header: str,
column_values: List[str], x_label: str='',
y_label: str='', colors: List[str]=['None'],
edge_colors: List[str]=['None'],
shading: List[float]=['None'], label_pos: str='upper right',
num_bins: int = 50,
                                    tick_font_size: int = 18, label_font_size: int = 18,
style_name: str = 'default', save: bool = False,
plot_name: str = 'NULL', hist_type: str = 'bar',
dens: bool = False, title: str = 'NULL',
title_font_size: int = 24) -> None:
"""
        :param header: A string representing the dataframe column that contains the
data to be parsed and plotted
:param parsing_header: A string representing the dataframe header that contains
key phrases that will be used to filter the dataframe
for specific data
:param column_values: The key phrases in the dataframe column described by the
`parsing_header` variable
:param x_label: The title for the x axis. Defaulted to ''
:param y_label: The title for the y axis. Defaulted to ''
:param colors: A list containing the colors that will be used to represent
each plot.
:param edge_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href
`colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`_
:param shading: The density of the fill for each plot, defaulted to 0.7
        :param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param num_bins: The number of bins used to represent the histogram. Defaulted to 50
:param tick_font_size: The font size of the plot ticks. Defaulted to 18
:param label_font_size: The font size of plot labels. Defaulted to 18
:param style_name: The plot style, defaulted to 'default'. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
:param save: True if the plot is to be saved, False if the plot is only to be
shown
:param plot_name: The name of the plot, if it is to be saved
:param hist_type: {``bar``, ``barstacked``, ``step``, ``stepfilled``}
See
`histogram <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.hist.html>`_
for more information.
:param dens: If True, the first element of the return tuple will be the counts
normalized to form a probability density, i.e., the area (or integral)
under the histogram will sum to 1
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
        :param title_font_size: The font size for the title, defaulted to 24
.. code-block:: python
> np.random.seed(19680801)
> x = np.random.normal(15.0, 3.0, 1000)
> y = np.random.normal(20.0, 3.0, 1000)
> data = [x, y]
> labels = ['one', 'two']
> one = np.repeat('one', len(x))
> two = np.repeat('two', len(x))
> x = np.hstack((x, y))
> y = np.hstack((one, two))
> dictionary = {'data': x, 'type': y}
> df = pd.DataFrame(dictionary)
> obj = MatPlotDataFrame(df)
           > obj.histogram_plot_parse_column('data', 'type', labels, x_label='x-axis',
                                             y_label='y-axis', shading=[0.9, 0.4], save=True,
                                             plot_name=plt_name)
.. image:: hist2.eps
:align: center
"""
if colors[0] == "None":
colors = self.colors
if edge_colors[0] == 'None':
edge_colors = np.repeat('black', len(column_values))
if shading[0] == "None":
shading = np.repeat(0.7, len(column_values))
df_list = [self.df[self.df[parsing_header] == col_val] for
col_val in column_values]
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.15)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
plt.xlabel(x_label, fontsize=label_font_size)
plt.ylabel(y_label, fontsize=label_font_size)
        if title != 'NULL':
            plt.title(title, fontsize=title_font_size)
for i in range(len(column_values)):
plt.hist(df_list[i][header], bins=num_bins, color=colors[i], edgecolor=edge_colors[i],
alpha=shading[i], label=column_values[i], histtype=hist_type, density=dens)
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def histogram_plot_columns(self, x_headers: List[str], labels: List[str],
x_label: str='',
y_label: str='', colors: List[str]=['None'],
edge_colors: List[str]=['None'],
shading: List[float]=['None'], label_pos: str='upper right',
num_bins: int = 50,
                               tick_font_size: int = 18, label_font_size: int = 18,
style_name: str = 'default', save: bool = False,
plot_name: str = 'NULL', hist_type: str = 'bar',
dens: bool = False, title: str = 'NULL',
title_font_size: int = 24) -> None:
"""
:param x_headers: A list of strings representing the dataframe columns to be
used for the x axis of a plot
:param labels: A list of labels, each label corresponding to each
histogram
:param x_label: The title for the x axis. Defaulted to ''
:param y_label: The title for the y axis. Defaulted to ''
:param colors: A list containing the colors that will be used to represent
each plot.
:param edge_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href
`colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`_
:param shading: The density of the fill for each plot, defaulted to 0.7
        :param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param num_bins: The number of bins used to represent the histogram. Defaulted to 50
:param tick_font_size: The font size of the plot ticks. Defaulted to 18
:param label_font_size: The font size of plot labels. Defaulted to 18
:param style_name: The plot style, defaulted to 'default'. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
:param save: True if the plot is to be saved, False if the plot is only to be
shown
:param plot_name: The name of the plot, if it is to be saved
:param hist_type: {``bar``, ``barstacked``, ``step``, ``stepfilled``}
See
`histogram <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.hist.html>`_
for more information.
:param dens: If True, the first element of the return tuple will be the counts
normalized to form a probability density, i.e., the area (or integral)
under the histogram will sum to 1
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
        :param title_font_size: The font size for the title, defaulted to 24
.. code-block:: python
           > np.random.seed(19680801)
           > x = np.random.normal(15.0, 3.0, 1000)
           > y = np.random.normal(20.0, 3.0, 1000)
           > labels = ['one', 'two']
           > dictionary = {'one': x, 'two': y}
           > df = pd.DataFrame(dictionary)
           > obj = MatPlotDataFrame(df)
           > obj.histogram_plot_columns(['one', 'two'], labels, x_label='x-axis',
                                        y_label='y-axis', shading=[0.9, 0.4], save=True,
                                        plot_name=plt_name)
.. image:: hist2.eps
:align: center
"""
if colors[0] == "None":
colors = self.colors
if edge_colors[0] == 'None':
edge_colors = np.repeat('black', len(labels))
if shading[0] == "None":
shading = np.repeat(0.7, len(labels))
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.15)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
plt.xlabel(x_label, fontsize=label_font_size)
plt.ylabel(y_label, fontsize=label_font_size)
        if title != 'NULL':
            plt.title(title, fontsize=title_font_size)
for i in range(len(x_headers)):
            plt.hist(self.df[x_headers[i]], bins=num_bins, color=colors[i],
                     edgecolor=edge_colors[i], alpha=shading[i], label=labels[i],
                     histtype=hist_type, density=dens)
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# ================================================================================
# ================================================================================
# eof
# TODO Create histogram version of plots
# TODO Repeat for Bokeh plots
```
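For reference, a minimal end-to-end sketch of the dataframe-driven plotting interface defined above. It assumes the `MatPlotDataFrame` class has been imported from this module; the column names, values, and colors are purely illustrative:

```python
import numpy as np
import pandas as pd

# Build a long-format dataframe with a 'power' column used for parsing,
# mirroring the docstring examples above
x = np.linspace(0.0, 10.0, num=20)
df = pd.DataFrame({'x': np.hstack((x, x)),
                   'y': np.hstack((x, x ** 2.0)),
                   'power': np.hstack((np.repeat('linear', 20),
                                       np.repeat('squared', 20)))})

# One curve per parsed value of the 'power' column
obj = MatPlotDataFrame(df)
obj.line_plot_parse_column('x', 'y', 'power', ['linear', 'squared'],
                           line_colors=['red', 'green'], label_pos='upper left')
```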
#### File: Core-Utilities/test/test_file_readers.py
```python
import sys
import os
import pytest
import numpy as np
from math import isclose
import platform
sys.path.insert(1, os.path.abspath('core_utilities'))
from core_utilities.read_files import ReadTextFileKeywords, read_csv_columns_by_headers
from core_utilities.read_files import read_csv_columns_by_index, read_text_columns_by_headers
from core_utilities.read_files import read_text_columns_by_index, read_excel_columns_by_headers
from core_utilities.read_files import read_excel_columns_by_index, ManageSQLiteDB
from core_utilities.read_files import simple_sqlite_query
# ==============================================================================
# ==============================================================================
# Date: December 11, 2020
# Purpose: This code contains functions that test the functions and classes
#          in the read_files.py file
# Source Code Metadata
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME> Inc."
__version__ = "1.0"
# ==============================================================================
# ==============================================================================
# Test ReadTextFileKeywords
def test_file_not_found():
"""
This function ensures that the ReadTextFileKeywords class fails
correctly when the file cannot be found
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/not_file_found.txt'
else:
file = r'..\data\test\not_file_found.txt'
with pytest.raises(SystemExit):
ReadTextFileKeywords(file)
# ------------------------------------------------------------------------------
def test_read_double():
"""
    This function tests the ReadTextFileKeywords.read_double function to
    determine if it correctly reads in a variable as a numpy.float64
variable.
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
value = key.read_double('double:')
assert isclose(value, 3.141596235941, rel_tol=1.0e-3)
assert isinstance(value, np.float64)
# ------------------------------------------------------------------------------
def test_read_double_list():
"""
This function tests the ReadTextFileKeywords.read_double_list
function to determine if it can properly read a variable
as a list of double precision values
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
double_value = key.read_double_list('double list:')
expected = [1.12321, 344.3454453, 21.434553]
for i in range(len(double_value)):
assert isclose(double_value[i], expected[i], rel_tol=1.0e-3)
assert isinstance(double_value[i], np.float64)
# ------------------------------------------------------------------------------
def test_read_float():
"""
This function tests the ReadTextFileKeywords.read_float function to
determine if it correctly reads in a variable as a numpy.float32
variable.
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
value = key.read_float('float:')
assert isclose(value, 3.1415, rel_tol=1.0e-3)
assert isinstance(value, np.float32)
# ------------------------------------------------------------------------------
def test_float_list():
"""
This function tests the ReadTextFileKeywords.read_float_list
function to determine if it can properly read a variable
as a list of float values
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
float_value = key.read_float_list('float list:')
expected = [1.2, 3.4, 4.5, 5.6, 6.7]
for i in range(len(float_value)):
assert isclose(float_value[i], expected[i], rel_tol=1.0e-3)
assert isinstance(float_value[i], np.float32)
# ------------------------------------------------------------------------------
def test_read_integer():
"""
    This function tests the ReadTextFileKeywords.read_integer function to
determine if it correctly reads in a variable as a numpy.int32
variable.
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
value = key.read_integer('Integer Value:')
assert value == 3
assert isinstance(value, np.int32)
# ------------------------------------------------------------------------------
def test_read_integer_list():
"""
    This function tests the ReadTextFileKeywords.read_integer_list
    function to determine if it can properly read a variable
    as a list of integer values
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
int_value = key.read_integer_list('integer list:')
expected = [1, 2, 3, 4, 5, 6, 7]
for i in range(len(int_value)):
assert isclose(int_value[i], expected[i], rel_tol=1.0e-3)
assert isinstance(int_value[i], np.int32)
# ------------------------------------------------------------------------------
def test_read_sentence():
"""
This function tests the ReadTextFileKeywords.read_sentence
function to determine if it can properly read a sentence as
a string
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
sentence = key.read_sentence('sentence:')
assert sentence == "This is a short sentence!"
assert isinstance(sentence, str)
# ------------------------------------------------------------------------------
def test_read_string():
"""
This function tests the ReadTextFileKeywords.read_string
function to determine if it can properly read a variable
as a single string
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
sentence = key.read_string('String:')
assert sentence == "test"
assert isinstance(sentence, str)
# ------------------------------------------------------------------------------
def test_read_string_list():
"""
This function tests the ReadTextFileKeywords.read_string_list
function to determine if it can properly read a variable
as a list of string values
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
sentence = key.read_string_list('sentence:')
assert sentence == ['This', 'is', 'a', 'short', 'sentence!']
for i in sentence:
assert isinstance(i, str)
# ==============================================================================
# ==============================================================================
# Test read column functions
def test_read_csv_by_headers():
"""
This function tests the read_csv_columns_by_headers function to ensure
it properly reads in a csv file with the headers placed at the top
of the file
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/test1.csv'
else:
file_name = r'..\data\test\test1.csv'
headers = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_csv_columns_by_headers(file_name, headers, dat)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_csv_by_headers_below_start():
"""
This function tests the read_csv_columns_by_headers function to ensure
it properly reads in a csv file with the headers placed below the top
of the file
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/test2.csv'
else:
file_name = r'..\data\test\test2.csv'
headers = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_csv_columns_by_headers(file_name, headers, dat, skip=2)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_csv_by_index():
"""
This function tests the read_csv_columns_by_index function to ensure
it properly reads in a csv file that has no headers and gives each
header a name
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/test3.csv'
else:
file_name = r'..\data\test\test3.csv'
headers = [0, 1, 2, 3]
names = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_csv_columns_by_index(file_name, headers, dat, names)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_csv_by_index_below_start():
"""
This function tests the read_csv_columns_by_index function to ensure
it properly reads in a csv file that has no headers and gives each
header a name. This test uses a .csv file that has metadata on
the first two lines before the beginning of the columnar data
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/test4.csv'
else:
file_name = r'..\data\test\test4.csv'
headers = [0, 1, 2, 3]
names = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_csv_columns_by_index(file_name, headers, dat,
names, skip=2)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_text_by_header():
"""
This function tests the read_text_columns_by_headers function to
ensure it properly reads in a space delimited text file with
a header in the top row
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/textcol1.txt'
else:
file_name = r'..\data\test\textcol1.txt'
headers = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_text_columns_by_headers(file_name, headers, dat)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_text_by_header_below_start():
"""
This function tests the read_text_columns_by_headers function to
ensure it properly reads in a space delimited text file with
a header not in the top row
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/textcol2.txt'
else:
file_name = r'..\data\test\textcol2.txt'
headers = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_text_columns_by_headers(file_name, headers, dat, skip=2)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_text_by_index():
"""
This function tests the read_text_columns_by_index function to
    ensure it properly reads in a space delimited text file that has
    no headers and gives each column a name
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/textcol3.txt'
else:
file_name = r'..\data\test\textcol3.txt'
headers = [0, 1, 2, 3]
names = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_text_columns_by_index(file_name, headers, dat, names)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_text_by_index_below_start():
"""
This function tests the read_text_columns_by_index function to
    ensure it properly reads in a space delimited text file that has
    no headers and contains metadata lines before the columnar data
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/textcol4.txt'
else:
file_name = r'..\data\test\textcol4.txt'
headers = [0, 1, 2, 3]
names = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_text_columns_by_index(file_name, headers, dat,
names, skip=2)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_excel_by_header():
"""
This function tests the read_excel_columns_by_headers function to
    ensure it properly reads in an Excel file with the headers placed
    in the top row of each tab
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/excel_test1.xls'
else:
file_name = r'../data/test/excel_test1.xls'
headers = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
# Test read first tab
df = read_excel_columns_by_headers(file_name, 'primary', headers, dat)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# Test read second tab
df1 = read_excel_columns_by_headers(file_name, 'secondary', headers, dat)
new_id = np.array([5, 6, 7], dtype=int)
inventory = np.array(['shelves', 'computers', 'mugs'], dtype=str)
weight = np.array([15.4, 3.4, 0.6], dtype=float)
number = np.array([4, 10, 20], dtype=int)
for i in range(len(df1)):
assert new_id[i] == df1['ID'][i]
assert isinstance(df1['ID'][i], np.int64)
assert inventory[i] == df1['Inventory'][i]
assert isinstance(df1['Inventory'][i], str)
assert weight[i] == df1['Weight_per'][i]
assert isinstance(df1['Weight_per'][i], np.float64)
assert number[i] == df1['Number'][i]
assert isinstance(df1['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_excel_by_header_below_start():
"""
This function tests the read_excel_columns_by_headers function to
    ensure it properly reads in an Excel file with the headers placed
    below the top row
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/excel_test2.xls'
else:
file_name = r'../data/test/excel_test2.xls'
headers = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
# Test read first tab
df = read_excel_columns_by_headers(file_name, 'primary', headers, dat, skip=2)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_excel_by_index():
"""
This function tests the read_excel_columns_by_index function to
    ensure it properly reads in an Excel file that has no header row
    and gives each column a name
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/excel_test3.xls'
else:
file_name = r'../data/test/excel_test3.xls'
col_index = [0, 1, 2, 3]
names = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
# Test read first tab
df = read_excel_columns_by_index(file_name, 'primary', col_index,
names, dat)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# Test read second tab
df1 = read_excel_columns_by_index(file_name, 'secondary', col_index,
names, dat)
new_id = np.array([5, 6, 7], dtype=int)
inventory = np.array(['shelves', 'computers', 'mugs'], dtype=str)
weight = np.array([15.4, 3.4, 0.6], dtype=float)
number = np.array([4, 10, 20], dtype=int)
for i in range(len(df1)):
assert new_id[i] == df1['ID'][i]
assert isinstance(df1['ID'][i], np.int64)
assert inventory[i] == df1['Inventory'][i]
assert isinstance(df1['Inventory'][i], str)
assert weight[i] == df1['Weight_per'][i]
assert isinstance(df1['Weight_per'][i], np.float64)
assert number[i] == df1['Number'][i]
assert isinstance(df1['Number'][i], np.int64)
# ------------------------------------------------------------------------------
def test_read_excel_by_index_below_start():
"""
This function tests the read_excel_columns_by_index function to
    ensure it properly reads in an Excel file that has no header row and
    contains metadata lines before the columnar data
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/excel_test4.xls'
else:
file_name = r'../data/test/excel_test4.xls'
headers = [0, 1, 2, 3]
names = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
# Test read first tab
df = read_excel_columns_by_index(file_name, 'primary', headers,
names, dat, skip=2)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ==============================================================================
# ==============================================================================
# Test ManageDB and related functions
def test_no_db_exists():
"""
    This function tests to ensure that ManageSQLiteDB correctly determines
if a database does not exist
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/not_db.db'
else:
file = r'..\data\test\not_db.db'
with pytest.raises(SystemExit):
ManageSQLiteDB(file)
# ------------------------------------------------------------------------------
def test_read_database_from_class():
"""
    This function tests to ensure that ManageSQLiteDB correctly reads a
database table
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/Maintenance.db'
else:
file = r'..\data\test\Maintenance.db'
db = ManageSQLiteDB(file)
query = "Select Date, Cost, Gallons FROM gas;"
df = db.query_db(query)
db.close_database_connection()
assert df['Date'][0] == '2020-02-04'
assert isclose(df['Cost'][0], 27.88, rel_tol=1.0e-3)
# ------------------------------------------------------------------------------
def test_simple_sqlite_query():
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/Maintenance.db'
else:
file = r'..\data\test\Maintenance.db'
query = "Select Date, Cost, Gallons FROM gas;"
df = simple_sqlite_query(file, query)
assert df['Date'][0] == '2020-02-04'
assert isclose(df['Cost'][0], 27.88, rel_tol=1.0e-3)
# ==============================================================================
# ==============================================================================
# eof
``` |
{
"source": "Jon-Webb-79/core_utilities",
"score": 2
} |
#### File: core_utilities/test/test_operating_system.py
```python
import os
import sys
import platform
import shutil
from math import isclose
sys.path.insert(0, os.path.abspath('../core_utilities'))
import core_utilities.operating_system as util
# ================================================================================
# ================================================================================
# Date: Month Day, Year
# Purpose: This file tests the functions in the operating_system.py file
# Instruction: This code can be run in the following ways
#              - pytest # runs all functions beginning with the word test in the
# directory
# - pytest file_name.py # Runs all functions in file_name beginning
# with the word test
# - pytest file_name.py::test_func_name # Runs only the function
# titled test_func_name in
# the file_name.py file
# - pytest -s # Runs tests and displays when a specific file
# has completed testing, and what functions failed.
#              Also displays print statements
# - pytest -v # Displays test results on a function by function basis
# - pytest -p no:warnings # Runs tests and does not display warning
# messages
# - pytest -s -v -p no:warnings # Displays relevant information and
# supports debugging
# - pytest -s -p no:warnings # Run for record
# Source Code Metadata
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Jon Webb Inc."
__version__ = "1.0"
# ================================================================================
# ================================================================================
# Insert Code here
plat = platform.system()
lin_plat = ['Darwin', 'Linux']
def test_change_directory():
"""
This function tests the ability of change_directory to
properly change a directory
"""
current = os.getcwd()
    new = current[:-5]
util.change_directory("../")
assert new == os.getcwd()
util.change_directory("test")
# --------------------------------------------------------------------------------
def test_copy_directory():
"""
This function tests the ability of the copy_directory function to
copy a directory
"""
if plat in lin_plat:
directory1 = '../data/test/test_directory2'
directory2 = '../data/test/test_directory3'
file = '../data/test/test_directory3/test.txt'
else:
directory1 = r'..\data\test\test_directory2'
directory2 = r'..\data\test\test_directory3'
file = r'..\data\test\test_directory3\test.txt'
util.copy_directory(directory1, directory2)
assert os.path.isdir(directory2)
assert os.path.isfile(file)
if os.path.isdir(directory2):
shutil.rmtree(directory2)
# --------------------------------------------------------------------------------
def test_copy_file():
"""
This function tests the ability of the copy_file function to correctly
copy a file
"""
if plat in lin_plat:
file1 = '../data/test/test_file2.txt'
file2 = '../data/test/copy_test.txt'
else:
file1 = r'..\data\test\test_file2.txt'
file2 = r'..\data\test\copy_test.txt'
util.copy_file(file1, file2)
assert os.path.isfile(file2)
if os.path.isfile(file2):
os.remove(file2)
# ------------------------------------------------------------------------------
def test_count_word_occurrence():
"""
    This function tests the count_occurrence_of_word_in_file function
to ensure it correctly determines the number of times a word occurs in
a file
"""
if plat in lin_plat:
file = '../data/test/text_file.txt'
else:
file = r'..\data\test\text_file.txt'
num_words = util.count_occurrence_of_word_in_file(file, 'file')
assert num_words == 4
# ------------------------------------------------------------------------------
def test_create_directory():
"""
This function tests the create_directory command to ensure it correctly
creates a directory
"""
if plat in lin_plat:
directory = '../data/test/test_directory3'
else:
directory = r'..\data\test\test_directory3'
util.create_directory(directory)
assert os.path.isdir(directory)
if os.path.isdir(directory):
os.rmdir(directory)
# ------------------------------------------------------------------------------
def test_create_file():
"""
This function tests the create_file function to ensure that it
correctly creates an ASCII based text file
"""
if plat in lin_plat:
file = '../data/test/create_file_test.txt'
else:
file = r'..\data\test\create_file_test.txt'
util.create_file(file)
assert os.path.isfile(file)
if os.path.isfile(file):
os.remove(file)
# --------------------------------------------------------------------------------
def test_current_working_directory():
"""
This function tests the current_working_directory function to
ensure it correctly determines the working directory
"""
cwd = os.getcwd()
cwd2 = util.current_working_directory()
assert cwd == cwd2
# ------------------------------------------------------------------------------
def test_delete_directory():
"""
    This function tests the delete_directory function to ensure that it
    correctly deletes a directory
"""
if plat in lin_plat:
dire = '../data/test/test_directory'
else:
dire = r'..\data\test\test_directory'
util.delete_directory(dire)
assert not os.path.isdir(dire)
if not os.path.isdir(dire):
os.mkdir(dire)
# ------------------------------------------------------------------------------
def test_delete_file():
"""
This function tests the delete_file function to ensure that it correctly
deletes a file
"""
if plat in lin_plat:
file = '../data/test/delete_test.txt'
else:
file = r'..\data\test\delete_test.txt'
util.delete_file(file)
assert not os.path.isfile(file)
if not os.path.isfile(file):
util.create_file(file)
# ------------------------------------------------------------------------------
def test_remove_populated_directory():
"""
    This function tests the delete_populated_directory function to determine
if it correctly removes a populated directory
"""
if plat in lin_plat:
directory = '../data/test/populated_dir1'
file = '../data/test/populated_dir1/test.txt'
else:
directory = r'../data/test/populated_dir1'
file = r'../data/test/populated_dir1/test.txt'
util.delete_populated_directory(directory)
assert not os.path.isdir(directory)
if not os.path.isdir(directory):
os.mkdir(directory)
util.create_file(file)
# ------------------------------------------------------------------------------
def test_determine_file_size():
"""
This function tests the determine_file_size function to determine
if it can correctly determine the size of a file
"""
if plat in lin_plat:
file = '../data/test/size_test.jpg'
else:
file = r'..\data\test\size_test.jpg'
file_size = util.determine_file_size(file)
assert isclose(file_size, 26674.009, rel_tol=1.0e-3)
# ------------------------------------------------------------------------------
def test_determine_file_line_count():
"""
This function tests the file_line_count function to ensure
it can correctly determine how many lines are in a file
"""
if plat in lin_plat:
file = '../data/test/text_file.txt'
else:
file = r'../data/test/text_file.txt'
lines = util.file_line_count(file)
assert lines == 4
# ------------------------------------------------------------------------------
def test_file_word_count():
"""
This function tests the file_word_count function to determine
if it can correctly determine the number of words in a file
"""
if plat in lin_plat:
file = '../data/test/text_file.txt'
else:
file = r'../data/test/text_file.txt'
words = util.file_word_count(file)
assert words == 21
# --------------------------------------------------------------------------------
def test_move_directory():
"""
    This function tests the move_directory function to ensure that it
successfully moves directories between different locations
"""
if plat in lin_plat:
file1 = '../data/test/populated_dir2'
file2 = '../data/test/move_directory2/populated_dir2'
file3 = '../data/test/move_directory2/populated_dir2/test.txt'
else:
file1 = r'..\data\test\populated_dir2'
file2 = r'..\data\test\move_directory2\populated_dir2'
file3 = r'..\data\test\move_directory2\populated_dir2\test.txt'
util.move_directory(file1, file2)
assert os.path.isdir(file2)
assert os.path.isfile(file3)
if os.path.isdir(file2):
util.move_directory(file2, file1)
# ------------------------------------------------------------------------------
def test_move_file():
"""
    This function tests the move_file function to ensure that it
successfully moves files between different locations
"""
if plat in lin_plat:
file1 = '../data/test/move_test.txt'
file2 = '../data/test/move_directory1/move_test2.txt'
else:
file1 = r'..\data\test\move_test.txt'
file2 = r'..\data\test\move_directory1\move_test2.txt'
util.move_file(file1, file2)
assert os.path.isfile(file2)
if os.path.isfile(file2):
util.move_file(file2, file1)
# ------------------------------------------------------------------------------
def test_list_contents():
"""
This function tests the list_contents function to ensure it returns the
correct files and directories
"""
if plat in lin_plat:
directory = '../data/test/list_dir'
else:
directory = r'..\data\test\list_dir'
contents = util.list_contents(directory=directory, extension='.py')
assert 'test.py' in contents
contents = util.list_contents(directory=directory, extension='.txt')
expected_result = ['test1.txt', 'test2.txt', 'test3.txt']
for i in contents:
assert i in expected_result
contents = util.list_contents(directory=directory)
expected_result = ['test1.txt', 'test2.txt', 'test3.txt', 'test', 'test.py']
for i in contents:
assert i in expected_result
# ------------------------------------------------------------------------------
def test_copy_files_files():
"""
This function tests the copy_files function to ensure that it
correctly copies text file contents of a directory to a new directory
"""
if plat in lin_plat:
source = '../data/test/move_directory3'
destination = '../data/test/move_directory2'
file1 = '../data/test/move_directory2/test1.txt'
file2 = '../data/test/move_directory2/test2.txt'
else:
source = r'..\data\test\move_directory3'
destination = r'..\data\test\move_directory2'
file1 = r'..\data\test\move_directory2\test1.txt'
file2 = r'..\data\test\move_directory2\test2.txt'
util.copy_files(destination, source, '.txt')
assert os.path.isfile(file1)
assert os.path.isfile(file2)
os.remove(file1)
os.remove(file2)
# ------------------------------------------------------------------------------
def test_copy_files_dirs():
"""
This function tests the copy_files function to ensure that it
correctly copies all directories of a directory to a new directory
"""
if plat in lin_plat:
source = '../data/test/move_directory3'
destination = '../data/test/move_directory2'
direct = '../data/test/move_directory2/test'
else:
source = r'..\data\test\move_directory3'
destination = r'..\data\test\move_directory2'
        direct = r'..\data\test\move_directory2\test'
util.copy_files(destination, source, dirs=True)
assert os.path.isdir(direct)
shutil.rmtree(direct)
# ------------------------------------------------------------------------------
def test_move_files_everything():
"""
This function tests the move_files function to ensure that it
correctly moves all contents of a directory to a new directory
"""
if plat in lin_plat:
source = '../data/test/move_directory3'
destination = '../data/test/move_directory2'
direct = '../data/test/move_directory2/test'
file1 = '../data/test/move_directory2/test1.txt'
file2 = '../data/test/move_directory2/test2.txt'
else:
source = r'..\data\test\move_directory3'
destination = r'..\data\test\move_directory2'
direct = r'..\data\test\move_directory2\test'
file1 = r'..\data\test\move_directory2\test1.txt'
file2 = r'..\data\test\move_directory2\test2.txt'
util.move_files(destination, source)
assert os.path.isfile(file1)
assert os.path.isfile(file2)
assert os.path.isdir(direct)
util.move_files(source, destination)
# ------------------------------------------------------------------------------
def test_move_files_dirs():
"""
    This function tests the move_files function to ensure that it
correctly moves all directories of a directory to a new directory
"""
if plat in lin_plat:
source = '../data/test/move_directory3'
destination = '../data/test/move_directory2'
direct = '../data/test/move_directory2/test'
else:
source = r'..\data\test\move_directory3'
destination = r'..\data\test\move_directory2'
direct = r'..\data\test\move_directory2\test'
util.move_files(destination, source, dirs=True)
assert os.path.isdir(direct)
util.move_files(source, destination, dirs=True)
# ------------------------------------------------------------------------------
def test_verify_directory_existence():
"""
This function tests the verify_directory_existence function to
    ensure it can correctly identify that a directory does exist
"""
if plat in lin_plat:
file = '../data/test/test_directory'
else:
file = r'..\data\test\test_directory'
status = util.verify_directory_existence(file)
assert status
# ------------------------------------------------------------------------------
def test_directory_existence_not_verified():
"""
This function tests the verify_directory_existence function to
    ensure it can correctly identify when a directory does not exist
"""
if plat in lin_plat:
file = '../data/test/no_directory'
else:
file = r'..\data\test\no_directory'
status = util.verify_directory_existence(file)
assert not status
# ------------------------------------------------------------------------------
def test_verify_file_existence():
"""
This function tests the verify_file_existence function to
ensure it can correctly identify that a file does exist
"""
if plat in lin_plat:
file = '../data/test/text_file.txt'
else:
file = r'..\data\test\text_file.txt'
status = util.verify_file_existence(file)
assert status
# ------------------------------------------------------------------------------
def test_file_existence_not_verified():
"""
This function tests the verify_file_existence function to
ensure that it can correctly identify when a file does not exist
"""
if plat in lin_plat:
file = '../data/test/no_text_file.txt'
else:
file = r'..\data\test\no_text_file.txt'
status = util.verify_file_existence(file)
assert not status
# ================================================================================
# ================================================================================
# eof
```
#### File: core_utilities/test/test_plotting.py
```python
import os
import sys
import platform
import numpy as np
import pandas as pd
sys.path.insert(0, os.path.abspath('../core_utilities'))
from core_utilities.plotting import MatPlotDataFrame
# ================================================================================
# ================================================================================
# Date: Month Day, Year
# Purpose: This file tests the plotting classes in the plotting.py file
# Instruction: This code can be run in the following ways
#              - pytest # runs all functions beginning with the word test in the
# directory
# - pytest file_name.py # Runs all functions in file_name beginning
# with the word test
# - pytest file_name.py::test_func_name # Runs only the function
# titled test_func_name in
# the file_name.py file
# - pytest -s # Runs tests and displays when a specific file
# has completed testing, and what functions failed.
#              Also displays print statements
# - pytest -v # Displays test results on a function by function basis
# - pytest -p no:warnings # Runs tests and does not display warning
# messages
# - pytest -s -v -p no:warnings # Displays relevant information and
# supports debugging
# - pytest -s -p no:warnings # Run for record
# Source Code Metadata
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Jon Webb Inc."
__version__ = "1.0"
# ================================================================================
# ================================================================================
# Insert Code here
plat = platform.system()
lin_plat = ['Darwin', 'Linux']
def test_scatter_plot_parse_columns():
"""
    This function tests the ability of scatter_plot_parse_column
within the MatPlotDataFrame class to process a plot without
failing
"""
length = 20
x = np.linspace(0, 20, num=20)
linear = x
squared = x ** 2.0
lin = np.repeat('linear', 20)
sq = np.repeat('squared', 20)
# Combine arrays into one
x = np.hstack((x, x))
y = np.hstack((linear, squared))
power = np.hstack((lin, sq))
# Create dataframe
dictionary = {'x': x, 'y': y, 'power': power}
df = pd.DataFrame(dictionary)
parsing_header = 'power'
column_values = ['linear', 'squared']
# Plot data
obj = MatPlotDataFrame(nrows=1, ncols=1)
obj.scatter_plot_parse_column(df, 'x', 'y', parsing_header, column_values,
x_label='x-axis', y_label='y-axis', title='Test',
style_name='default', marker_style=['o', '^'],
label_pos='upper left', grid=True, labels=True)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_scatter_plot_column():
"""
This function tests the ability of scatter_plot_column
within the MatPlotDataFrame class to process a plot without
failint
"""
length = 20
x = np.linspace(0, 20, num=20)
linear = x
squared = x ** 2.0
# create dataframe
dictionary = {'x': x, 'linear': linear, 'squared': squared}
df = pd.DataFrame(dictionary)
# plot data
obj = MatPlotDataFrame(1, 1)
x_headers = ['x', 'x']
y_headers = ['linear', 'squared']
obj.scatter_plot_columns(df, x_headers, y_headers, labels=False,
x_label='x-axis', y_label='y-axis', title='Test',
style_name='default',marker_colors=['red', 'green'],
fill_alpha=0.7, marker_style=['o', '^'],
label_pos='upper left', grid=False)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_line_plot_parse_column():
length = 20
x = np.linspace(0, 20, num=20)
linear = x
squared = x ** 2.0
lin = np.repeat('linear', 20)
sq = np.repeat('squared', 20)
# Combine arrays into one
x = np.hstack((x, x))
y = np.hstack((linear, squared))
power = np.hstack((lin, sq))
# Create dataframe
dictionary = {'x': x, 'y': y, 'power': power}
df = pd.DataFrame(dictionary)
parsing_header = 'power'
column_values = ['linear', 'squared']
# Plot data
obj = MatPlotDataFrame(nrows=1, ncols=1)
obj.line_plot_parse_column(df, 'x', 'y', parsing_header,
column_values,
line_colors=['red', 'green'],
label_pos='upper left')
obj.close_plot()
# --------------------------------------------------------------------------------
def test_line_plot_column():
length = 20
x = np.linspace(0, 20, num=20)
linear = x
squared = x ** 2.0
# create dataframe
dictionary = {'x': x, 'linear': linear, 'squared': squared}
df = pd.DataFrame(dictionary)
# plot data
obj = MatPlotDataFrame(1, 1)
x_headers = ['x', 'x']
y_headers = ['linear', 'squared']
obj.line_plot_columns(df, x_headers, y_headers, labels=False,
x_label='x-axis', y_label='y-axis', title='Test',
style_name='default',line_colors=['red', 'green'],
label_pos='upper left', grid=False)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_datetime_plot_parse_column():
length = 6
dates = pd.date_range(start=pd.to_datetime('2016-09-24'),
periods = length, freq='w')
x = np.linspace(0, length, num=length)
linear = x
squared = x ** 2.0
lin = np.repeat('linear', length)
sq = np.repeat('squared', length)
# Combine arrays into one
x = np.hstack((dates, dates))
y = np.hstack((linear, squared))
power = np.hstack((lin, sq))
# Create dataframe
dictionary = {'dates': x, 'y': y, 'power': power}
df = pd.DataFrame(dictionary)
# Plot data
obj = MatPlotDataFrame()
parsing_header = 'power'
column_values = ['linear', 'squared']
obj.timedate_plot_parse_column(df, 'dates', 'y', parsing_header, column_values,
x_label='x-axis', y_label='y-axis', title='Test',
style_name='default', line_colors=['red', 'green'],
label_pos='upper left', grid=True)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_datetime_plot_column():
length = 6
dates = pd.date_range(start=pd.to_datetime('2016-09-24'),
periods = length, freq='y')
x = np.linspace(0, length, num=length)
linear = x
squared = x ** 2.0
dictionary = {'dates': dates, 'squared': squared,
'linear': linear}
df = pd.DataFrame(dictionary)
# Plot data
obj = MatPlotDataFrame()
time_axis = ['dates', 'dates']
y_axis = ['linear', 'squared']
obj.timedate_plot_columns(df, time_axis, y_axis, y_axis,
x_label='x-axis', y_label='y-axis', title='Test',
style_name='default', line_colors=['red', 'green'],
label_pos='upper left', grid=True)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_fill_between_parse_columns():
length = 20
x = np.linspace(0, 20, num=20)
linear = x
squared = x ** 2.0
lin = np.repeat('linear', 20)
sq = np.repeat('squared', 20)
# Combine arrays into one
x = np.hstack((x, x))
y = np.hstack((linear, squared))
power = np.hstack((lin, sq))
# Create dataframe
dictionary = {'x': x, 'y': y, 'power': power}
df = pd.DataFrame(dictionary)
parsing_header = 'power'
column_values = ['linear', 'squared']
# Plot data
obj = MatPlotDataFrame(nrows=1, ncols=1)
obj.fill_between_lines_parse_column(df, 'x', 'y', parsing_header,
column_values)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_fill_between_columns():
length = 20
x = np.linspace(0, 20, num=20)
linear = x
squared = x ** 2.0
# create dataframe
dictionary = {'x': x, 'linear': linear, 'squared': squared}
df = pd.DataFrame(dictionary)
# plot data
obj = MatPlotDataFrame(1, 1)
x_headers = ['x', 'x']
y_headers = ['linear', 'squared']
obj.fill_between_lines_columns(df, x_headers, y_headers)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_fill_between_datetime_parse():
length = 6
dates = pd.date_range(start=pd.to_datetime('2016-09-24'),
periods = length, freq='w')
x = np.linspace(0, length, num=length)
linear = x
squared = x ** 2.0
lin = np.repeat('linear', length)
sq = np.repeat('squared', length)
# Combine arrays into one
x = np.hstack((dates, dates))
y = np.hstack((linear, squared))
power = np.hstack((lin, sq))
# Create dataframe
dictionary = {'dates': x, 'y': y, 'power': power}
df = pd.DataFrame(dictionary)
# Plot data
obj = MatPlotDataFrame()
parsing_header = 'power'
column_values = ['linear', 'squared']
obj.fill_between_dt_parse_column(df, 'dates', 'y', parsing_header,
column_values)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_fill_between_datetime_column():
length = 6
dates = pd.date_range(start=pd.to_datetime('2016-09-24'),
periods = length, freq='y')
x = np.linspace(0, length, num=length)
linear = x
squared = x ** 2.0
dictionary = {'dates': dates, 'squared': squared,
'linear': linear}
df = pd.DataFrame(dictionary)
# Plot data
obj = MatPlotDataFrame()
time_axis = ['dates', 'dates']
y_axis = ['linear', 'squared']
obj.fill_between_dt_column(df, time_axis, y_axis)
obj.close_plot()
# --------------------------------------------------------------------------------
def test_hist_plot_parse_column():
np.random.seed(19680801)
x = np.random.normal(15.0, 3.0, 1000)
y = np.random.normal(20.0, 3.0, 1000)
data = [x, y]
labels = ['one', 'two']
one = np.repeat('one', len(x))
two = np.repeat('two', len(x))
x = np.hstack((x, y))
y = np.hstack((one, two))
dictionary = {'data': x, 'type': y}
df = pd.DataFrame(dictionary)
obj = MatPlotDataFrame()
obj.histogram_plot_parse_column(df, 'data', 'type', labels, x_label='x-axis',
y_label='y-axis', shading=[0.9, 0.4])
obj.close_plot()
# ================================================================================
# ================================================================================
# eof
``` |
{
"source": "Jon-Webb-79/PyFinances",
"score": 3
} |
#### File: PyFinances/PyFinances/read_files.py
```python
import os
import sys
from calendar import monthrange
import numpy as np
from typing import List, Dict, Tuple
import pandas as pd
# ================================================================================
# ================================================================================
# Date: January 24, 2021
# Purpose: This file contains classes and functions that assist in reading
# ASCII based text files and databases
# Source Code Metadata
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Jon Webb Inc."
__version__ = "0.1.0"
# ================================================================================
# ================================================================================
# Insert Code here
class ReadTextFileKeywords:
"""
    A class to find keywords in a text file and read the variable(s)
    to the right of the key word.
:param file_name: The name of the file being read to include the
path-link
For the purposes of demonstrating the use of this class, assume
a text file titled ``test_file.txt`` with the following contents.
.. code-block:: text
sentence: This is a short sentence!
float: 3.1415 # this is a float comment
double: 3.141596235941 # this is a double comment
String: test # this is a string comment
Integer Value: 3 # This is an integer comment
float list: 1.2 3.4 4.5 5.6 6.7
double list: 1.12321 344.3454453 21.434553
integer list: 1 2 3 4 5 6 7
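    Given such a file, the class is instantiated by passing the file name,
    and the individual read methods documented below extract values from it.
    A minimal sketch (the path is illustrative):

    .. code-block:: python

        > dat = ReadTextFileKeywords('test_file.txt')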
"""
def __init__(self, file_name: str):
self.file_name = file_name
if not os.path.isfile(file_name):
sys.exit('{}{}{}'.format('FATAL ERROR: ', file_name,
' does not exist'))
# ----------------------------------------------------------------------------
def read_double(self, key_words: str) -> np.float64:
"""
:param key_words: The key word that proceeds the data to be
read
:return data: The float value following the **key_word** on the
text file. This variable is returned as a
np.float64 data type
This function reads a text file and searches for a key word which
can be a single word or a string of words. This function will read
the first data point following the key word(s) on the text file as a
float value. The text file can also contain a comment line
following the variable being read. For example we could use this
class to read the double value 3.141596235941 in the following manner.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
> double_data = dat.read_double('double:')
> print(double_data)
3.141596235941
"""
values = self.read_sentence(key_words)
values = values.split()
return np.float64(values[0])
# ----------------------------------------------------------------------------
def read_double_list(self, key_words: str) -> List[np.float64]:
"""
:param key_words: The key word that proceeds the data to be
read
        :return data: The double precision values following the **key_word**
                      on the text file. This variable is returned as a List
                      of numpy.float64 values
        This function reads a text file and searches for a key word which
        can be a single word or a string of words. This function will read
        the data points following the key word(s) on the text file as
        numpy.float64 values. The text file can also contain a comment
        line following the variable being read.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
> str_data = dat.read_double_list('double list:')
> print(str_data)
[1.12321, 344.3454453, 21.434553]
"""
values = self.read_sentence(key_words)
values = values.split()
values = [np.float64(value) for value in values]
return values
# ----------------------------------------------------------------------------
def read_float(self, key_words: str) -> np.float32:
"""
:param key_words: The key word that proceeds the data to be
read
:return data: The float value following the **key_word** on the
text file. This variable is returned as a
np.float32 data type
This function reads a text file and searches for a key word which
can be a single word or a string of words. This function will read
the first data point following the key word(s) on the text file as
a float value. The text file can also contain a comment line
following the variable being read. For example we could use this
class to read the float value 3.1415 in the following manner.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
            > float_data = dat.read_float('float:')
> print(float_data)
3.1415
"""
values = self.read_sentence(key_words)
values = values.split()
return np.float32(values[0])
# ----------------------------------------------------------------------------
def read_float_list(self, key_words: str) -> List[np.float32]:
"""
:param key_words: The key word that proceeds the data to be
read
        :return data: The float values following the **key_word** on the
text file. This variable is returned as a List of
numpy.float32 values
This function reads a text file and searches for a key word which
can be a single word or a string of words. This function will read
        the data points following the key word(s) on the text file as a
float value. The text file can also contain a comment line following
the variable being read.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
            > float_data = dat.read_float_list('float list:')
> print(float_data)
[1.2, 3.4, 4.5, 5.6, 6.7]
"""
values = self.read_sentence(key_words)
values = values.split()
values = [np.float32(value) for value in values]
return values
# ----------------------------------------------------------------------------
def read_integer(self, key_words: str) -> np.int32:
"""
:param key_words: The key word that proceeds the data to be
read
:return data: The integer value following the **key_word** on the
text file. This variable is returned as a np.int32
data type
This function reads a text file and searches for a key word which
can be a single word or a string of words. This function will read
        the first data point following the key word(s) on the text file as
a integer value. The text file can also contain a comment line
following the variable being read.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
            > int_data = dat.read_integer('Integer Value:')
> print(int_data)
3
"""
values = self.read_sentence(key_words)
values = values.split()
return np.int32(values[0])
# ----------------------------------------------------------------------------
def read_integer_list(self, key_words: str) -> List[np.int32]:
"""
:param key_words: The key word that proceeds the data to be
read
        :return data: The integer values following the **key_word** on the
                      text file. This variable is returned as a List of
                      numpy.int32 values
This function reads a text file and searches for a key word which
can be a single word or a string of words. This function will read
        the data points following the key word(s) on the text file as an
integer value. The text file can also contain a comment line following
the variable being read.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
            > int_data = dat.read_integer_list('integer list:')
            > print(int_data)
[1, 2, 3, 4, 5, 6, 7]
"""
values = self.read_sentence(key_words)
values = values.split()
values = [np.int32(value) for value in values]
return values
# ----------------------------------------------------------------------------
def read_sentence(self, key_words: str) -> str:
"""
:param key_words: The key word that proceeds the data to be
read
:return data: The data following the **key_word** on the text file.
The data is returned as a continuous string value
This function reads a text file and searches for a key word which
can be a single word or a string of words. This function will read
the data following the key word(s) on the text file as a continuous
string. The text file can also contain a comment line following the
variable being read. For example we could use this class to read
        the sentence `This is a short sentence!` in the following manner.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
            > str_data = dat.read_sentence('sentence:')
> print(str_data)
'This is a short sentence!'
"""
input_words = key_words.split()
with open(self.file_name) as Input_File:
lines = Input_File.readlines()
for line in lines:
variable = line.split()
counter = 0
for i in range(len(input_words)):
if input_words[i] != variable[i]:
break
else:
counter += 1
if counter == len(input_words):
start = len(input_words)
end = len(variable)
word = ''
for i in range(start, end):
word = word + ' ' + variable[i]
return word.lstrip()
sys.exit('{}{}{}'.format(key_words, " Keywords not found in ",
self.file_name))
# ----------------------------------------------------------------------------
def read_string(self, key_words: str) -> str:
"""
:param key_words: The key word that proceeds the data to be
read
:return data: The string value following the **key_word** on the
text file. This variable is returned as a str
data type
This function reads a text file and searches for a key word which
can be a single word or a string of words. This function will read
        the first data point following the key word(s) on the text file
as a string value. The text file can also contain a comment line
following the variable being read. For example we could use this
class to read the string value `test` in the following manner.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
            > str_data = dat.read_string('String:')
> print(str_data)
'test'
"""
values = self.read_sentence(key_words)
values = values.split()
return str(values[0])
# ----------------------------------------------------------------------------
def read_string_list(self, key_words: str) -> List[str]:
"""
:param key_words: The key word that proceeds the data to be
read
:return data: The string values following the **key_word** on the
text file. This variable is returned as a List of
string values
This function reads a text file and searches for a key word which
can be a single word or a string of words. This function will read
        the data points following the key word(s) on the text file as a
string value. The text file can also contain a comment line following
the variable being read.
.. code-block:: python
> dat = ReadTextFileKeywords('test_file.txt')
> str_data = dat.read_string_list('sentence:')
> print(str_data)
['This', 'is', 'a', 'short', 'sentence!']
"""
values = self.read_sentence(key_words)
values = values.split()
values = [str(value) for value in values]
return values
# ================================================================================
# ================================================================================
class ReadRunOptionsFile(ReadTextFileKeywords):
"""
:param file_name: The name of the file containing keywords that will
be read by this class. The file_name must be input
with the path length to the file.
This class will read a RunOptions.txt file containing all information
describing how the PyFinances software package should run.
"""
def __init__(self, file_name: str):
self.file_name = file_name
ReadTextFileKeywords.__init__(self, file_name)
# --------------------------------------------------------------------------------
def read_file(self) -> Dict:
"""
:return input_dict: A dictionary containing all information necessary
to run the PyFinances software package.
This function reads the RunOptions.txt file to determine
how the software package will function.
"""
# Dictionary will act as a container to pass between programs
input_dict = {'run_hist': 'False',
'nbins': 20,
'hist_start': 'False',
'hist_end': 'False',
'sample_size': 0,
'start_date': 'False',
'end_date': 'False',
'checking_start_value': 0.0,
'savings_start_value': 0.0,
'annual_salary': 0.0,
'pay_frequency': 'Never',
'first_pay_date': 'False',
'daily_expense_file': 'False',
'total_expense_file': 'False',
'planned_expense_file': 'False',
'bills_file': 'False',
'deductions_file': 'False',
'hist_location': 'NA',
'output_file': 'NA'}
# - Read input to produce histogram files. If the user enters
# True for Run Histogram, then the code will only execute
# the histogram building functions and not the Monte Carlo
# calculation.
try:
input_dict['run_hist'] = self.read_string('Run Histogram:')
except SystemExit:
pass
if input_dict['run_hist'] == 'True':
input_dict['nbins'] = self.read_integer('Bins:')
input_dict['hist_start'] = self.read_string('Hist Start Date:')
input_dict['hist_end'] = self.read_string('Hist End Date:')
input_dict['daily_expense_file'] = \
self.read_string('Daily Expense File:')
input_dict['total_expense_file'] = \
self.read_string('Total Expense File:')
input_dict['hist_location'] = \
self.read_string('Histogram Location:')
# Read input for the Monte Carlo process
else:
input_dict['nbins'] = self.read_integer('Bins:')
input_dict['hist_start'] = self.read_string('Hist Start Date:')
input_dict['hist_end'] = self.read_string('Hist End Date:')
input_dict['daily_expense_file'] = \
self.read_string('Daily Expense File:')
input_dict['sample_size'] = self.read_integer('Sample Size:')
input_dict['start_date'] = self.read_string('Start Date:')
input_dict['end_date'] = self.read_string('End Date:')
input_dict['checking_start_value'] = \
self.read_float('Checking Start Value:')
input_dict['savings_start_value'] = \
self.read_float('Savings Start Value:')
input_dict['annual_salary'] = self.read_float('Annual Salary:')
input_dict['pay_frequency'] = self.read_string('Pay Frequency:')
input_dict['first_pay_date'] = self.read_string('First Pay Date:')
input_dict['total_expense_file'] = \
self.read_string('Total Expense File:')
input_dict['planned_expense_file'] = \
self.read_string('Planned Expense File:')
input_dict['bills_file'] = self.read_string('Bills File:')
input_dict['deductions_file'] = \
self.read_string('Deductions File:')
# Check values
self._validate_frequency(input_dict['pay_frequency'])
self._validate_first_pay_date(input_dict['first_pay_date'],
input_dict['pay_frequency'])
input_dict['hist_location'] = \
self.read_string('Histogram Location:')
input_dict['output_file'] = \
self.read_string('Output File:')
return input_dict
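# An illustrative RunOptions.txt fragment for this reader (the keyword names
# are taken from the read_* calls above; the values shown are hypothetical):
#   Run Histogram: False
#   Bins: 60
#   Hist Start Date: 2020-03-01
#   Hist End Date: 2021-02-28
#   Daily Expense File: ../data/daily_expenses.csv
#   Sample Size: 1000
#   Pay Frequency: WEEKLY
#   First Pay Date: 2021-03-05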
# ================================================================================
def _validate_frequency(self, freq: str) -> None:
"""
:param freq: The pay frequency
:return None:
This function validates the user pay_frequency input
"""
accepted = ['WEEKLY', 'MONTHLY', 'BI-WEEKLY']
msg = "FATAL ERROR: Pay Frequency must be one of the following "
message = '{}{}'.format(msg, accepted)
if freq.upper() not in accepted:
sys.exit(message)
# --------------------------------------------------------------------------------
def _validate_first_pay_date(self, first_pay_date: str,
pay_freq: str) -> None:
"""
:param first_pay_date: The user entered first pay date
:param pay_freq: The user entered pay frequency
:return None:
This function validates the user first_pay_date entry and
ensures that it matches the pay allocation dates.
"""
year = int(first_pay_date[0:4])
month = int(first_pay_date[5:7])
dates = monthrange(year, month)
acceptable_dates = [15, dates[1]]
if pay_freq.upper() == 'MONTHLY' or pay_freq.upper() == 'BI-MONTHLY':
if int(first_pay_date[8:10]) not in acceptable_dates:
msg = 'FATAL ERROR: First Pay Date must be the 15th or the '
msg += 'last day of the month if the Pay Frequency is '
msg += 'Bi-monthly or monthly'
sys.exit(msg)
# ================================================================================
# ================================================================================
class ReadCSVFile:
"""
This class contains functions that read csv files as relevant to
the PyFinances software suite.
"""
@classmethod
def read_csv_columns_by_headers(cls, file_name: str, headers: List[str],
data_type: List[type],
skip: int = 0) -> pd.DataFrame:
"""
:param file_name: The file name to include path-link
:param headers: A list of the names of the headers that contain
columns which will be read
:param data_type: A list containing the data type of each column. Data
types are limited to ``numpy.int64``, ``numpy.float64``,
and ``str``
:param skip: The number of lines to be skipped before reading data
:return df: A pandas dataframe containing all relevant information
This function assumes the file has a comma (i.e. ,) delimiter; if
it does not, then it is not a true .csv file and should be transformed
to a text file and read by the xx function. Assume we have a .csv
file titled ``test.csv`` with the following format.
.. list-table:: test.csv
:widths: 6 10 6 6
:header-rows: 1
* - ID,
- Inventory,
- Weight_per,
- Number
* - 1,
- Shoes,
- 1.5,
- 5
* - 2,
- t-shirt,
- 1.8,
- 3,
* - 3,
- coffee,
- 2.1,
- 15
* - 4,
- books,
- 3.2,
- 48
This file can be read via the following command
.. code-block:: python
> file_name = 'test.csv'
> headers = ['ID', 'Inventory', 'Weight_per', 'Number']
> dat = [int, str, float, int]
> obj = ReadCSVFile()
> df = obj.read_csv_columns_by_headers(file_name, headers, dat)
> print(df)
ID Inventory Weight_per Number
0 1 shoes 1.5 5
1 2 t-shirt 1.8 3
2 3 coffee 2.1 15
3 4 books 3.2 40
This function can also use the `skip` attribute to read data when the
headers are not on the first line. For instance, assume the following csv file:
.. list-table:: test1.csv
:widths: 16 8 5 5
:header-rows: 0
* - This line is used to provide metadata for the csv file
-
-
-
* - This line is as well
-
-
-
* - ID,
- Inventory,
- Weight_per,
- Number
* - 1,
- Shoes,
- 1.5,
- 5
* - 2,
- t-shirt,
- 1.8,
- 3,
* - 3,
- coffee,
- 2.1,
- 15
* - 4,
- books,
- 3.2,
- 48
This file can be read via the following command
.. code-block:: python
> file_name = 'test1.csv'
> headers = ['ID', 'Inventory', 'Weight_per', 'Number']
> dat = [int, str, float, int]
> obj = ReadCSVFile()
> df = obj.read_csv_columns_by_headers(file_name, headers, dat, skip=2)
> print(df)
ID Inventory Weight_per Number
0 1 shoes 1.5 5
1 2 t-shirt 1.8 3
2 3 coffee 2.1 15
3 4 books 3.2 40
"""
if not os.path.isfile(file_name):
sys.exit('{}{}{}'.format('FATAL ERROR: ', file_name, ' does not exist'))
dat = dict(zip(headers, data_type))
df = pd.read_csv(file_name, usecols=headers, dtype=dat, skiprows=skip)
return df
# ================================================================================
# ================================================================================
# eof
```
#### File: PyFinances/test/test_read_files.py
```python
import pytest
import sys
import os
import platform
import numpy as np
from math import isclose
sys.path.insert(1, os.path.abspath('PyFinances'))
from PyFinances.read_files import ReadTextFileKeywords, ReadRunOptionsFile
from PyFinances.read_files import ReadCSVFile
# ================================================================================
# ================================================================================
# Date: January 24, 2021
# Purpose: This code contains functions that test the functions and classes
# in the read_files.py file.
# Instruction: This code can be run in the following ways
# - pytest # runs all functions beginning with the word test in
# the directory
# - pytest file_name.py # Runs all functions in file_name
# beginning with the word test
# - pytest file_name.py::test_func_name # Runs only the function
# titled test_func_name in
# the file_name.py file
# - pytest -s # Runs tests and displays when a specific file
# has completed testing, and what functions failed.
# Also displays print statements
# - pytest -v # Displays test results on a function by function
# basis
# - pytest -p no:warnings # Runs tests and does not display
# warning messages
# - pytest -s -v -p no:warnings # Displays relevant information
# and supports debugging
# - pytest -s -p no:warnings # Run for record
# Source Code Metadata
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Jon Webb Inc."
__version__ = "1.0"
# ================================================================================
# ================================================================================
# Test the ReadTextFilekeywords class
def test_file_not_found():
"""
This function ensures that the ReadTextFileKeywords class fails
correctly when the file cannot be found
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/not_file_found.txt'
else:
file = r'..\data\test\not_file_found.txt'
with pytest.raises(SystemExit):
ReadTextFileKeywords(file)
# --------------------------------------------------------------------------------
def test_read_double_list():
"""
This function tests the ReadTextFileKeywords.read_double_list
function to determine if it can properly read a variable
as a list of double precision values
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
double_value = key.read_double_list('double list:')
expected = [1.12321, 344.3454453, 21.434553]
for i in range(len(double_value)):
assert isclose(double_value[i], expected[i], rel_tol=1.0e-3)
assert isinstance(double_value[i], np.float64)
# --------------------------------------------------------------------------------
def test_read_float():
"""
This function tests the ReadTextFileKeywords.read_float function to
determine if it correctly reads in a variable as a numpy.float32
variable.
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
value = key.read_float('float:')
assert isclose(value, 3.1415, rel_tol=1.0e-3)
assert isinstance(value, np.float32)
# --------------------------------------------------------------------------------
def test_float_list():
"""
This function tests the ReadTextFileKeywords.read_float_list
function to determine if it can properly read a variable
as a list of float values
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
float_value = key.read_float_list('float list:')
expected = [1.2, 3.4, 4.5, 5.6, 6.7]
for i in range(len(float_value)):
assert isclose(float_value[i], expected[i], rel_tol=1.0e-3)
assert isinstance(float_value[i], np.float32)
# --------------------------------------------------------------------------------
def test_read_integer():
"""
This function tests the ReadTextFileKeywords.read_integer function to
determine if it correctly reads in a variable as a numpy.int32
variable.
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
value = key.read_integer('Integer Value:')
assert value == 3
assert isinstance(value, np.int32)
# --------------------------------------------------------------------------------
def test_read_integer_list():
"""
This function tests the ReadTextFileKeywords.read_integer_list
function to determine if it can properly read a variable
as a list of integer values
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
int_value = key.read_integer_list('integer list:')
expected = [1, 2, 3, 4, 5, 6, 7]
for i in range(len(int_value)):
assert isclose(int_value[i], expected[i], rel_tol=1.0e-3)
assert isinstance(int_value[i], np.int32)
# --------------------------------------------------------------------------------
def test_read_sentence():
"""
This function tests the ReadTextFileKeywords.read_sentence
function to determine if it can properly read a sentence as
a string
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
sentence = key.read_sentence('sentence:')
assert sentence == "This is a short sentence!"
assert isinstance(sentence, str)
# --------------------------------------------------------------------------------
def test_read_string():
"""
This function tests the ReadTextFileKeywords.read_string
function to determine if it can properly read a variable
as a single string
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
sentence = key.read_string('String:')
assert sentence == "test"
assert isinstance(sentence, str)
# --------------------------------------------------------------------------------
def test_read_string_list():
"""
This function tests the ReadTextFileKeywords.read_string_list
function to determine if it can properly read a variable
as a list of string values
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/keywords.txt'
else:
file = r'..\data\test\keywords.txt'
key = ReadTextFileKeywords(file)
sentence = key.read_string_list('sentence:')
assert sentence == ['This', 'is', 'a', 'short', 'sentence!']
for i in sentence:
assert isinstance(i, str)
# ================================================================================
# ================================================================================
# Test the ReadRunOptionsFile class
def test_read_hist_true_info():
"""
This function tests the ReadRunOptionsFile class to see if it correctly
reads an input file when Run Histogram: is True.
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/RunOptionsHist.txt'
else:
file = r'..\data\test\RunOptionsHist.txt'
inp = ReadRunOptionsFile(file)
inp_dict = inp.read_file()
assert inp_dict['run_hist'] == 'True'
assert inp_dict['nbins'] == 60
assert inp_dict['hist_start'] == '2020-03-01'
assert inp_dict['hist_end'] == '2021-02-28'
assert inp_dict['daily_expense_file'] == \
'../data/test/daily_expenses_one.csv'
assert inp_dict['hist_location'] == \
'../data/test/'
# --------------------------------------------------------------------------------
def test_read_no_hist_info():
"""
This function tests the ReadRunOptionsFile class to see if it correctly
reads an input file when Run Histogram: is not listed.
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/RunOptionsnoHist.txt'
else:
file = r'..\data\test\RunOptionsnoHist.txt'
inp = ReadRunOptionsFile(file)
inp_dict = inp.read_file()
assert inp_dict['run_hist'] == 'False'
assert inp_dict['nbins'] == 60
assert inp_dict['hist_start'] == '2020-03-01'
assert inp_dict['hist_end'] == '2021-02-28'
assert inp_dict['daily_expense_file'] == \
'../data/test/expense_file_one.csv'
assert inp_dict['sample_size'] == 1000
assert inp_dict['start_date'] == '2021-03-01'
assert inp_dict['end_date'] == '2022-02-28'
assert isclose(inp_dict['checking_start_value'], 54000.0, rel_tol=0.0001)
assert isclose(inp_dict['savings_start_value'], 4800.0, rel_tol=0.0001)
assert isclose(inp_dict['annual_salary'], 145000.0, rel_tol=0.0001)
assert inp_dict['pay_frequency'] == 'Weekly'
assert inp_dict['first_pay_date'] == '2021-03-05'
assert inp_dict['total_expense_file'] == '../data/test/total_expenses.csv'
assert inp_dict['planned_expense_file'] == \
'../data/test/planned_expenses.csv'
assert inp_dict['bills_file'] == '../data/test/bills.csv'
assert inp_dict['deductions_file'] == '../data/test/deductions.csv'
assert inp_dict['hist_location'] == '../data/test/'
# --------------------------------------------------------------------------------
def test_read_false_hist_info():
"""
This function tests the ReadRunOptionsFile class to see if it correctly
reads an input file when Run Histogram: is False.
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/RunOptionsFalseHist.txt'
else:
file = r'..\data\test\RunOptionsFalseHist.txt'
inp = ReadRunOptionsFile(file)
inp_dict = inp.read_file()
assert inp_dict['run_hist'] == 'False'
assert inp_dict['nbins'] == 60
assert inp_dict['hist_start'] == '2020-03-01'
assert inp_dict['hist_end'] == '2021-02-28'
assert inp_dict['daily_expense_file'] == \
'../data/test/expense_file_one.csv'
assert inp_dict['sample_size'] == 1000
assert inp_dict['start_date'] == '2021-03-01'
assert inp_dict['end_date'] == '2022-02-28'
assert isclose(inp_dict['checking_start_value'], 54000.0, rel_tol=0.0001)
assert isclose(inp_dict['savings_start_value'], 4800.0, rel_tol=0.0001)
assert isclose(inp_dict['annual_salary'], 145000.0, rel_tol=0.0001)
assert inp_dict['pay_frequency'] == 'Weekly'
assert inp_dict['first_pay_date'] == '2021-03-05'
assert inp_dict['total_expense_file'] == \
'../data/test/total_expenses.csv'
assert inp_dict['planned_expense_file'] == \
'../data/test/planned_expenses.csv'
assert inp_dict['bills_file'] == '../data/test/bills.csv'
assert inp_dict['deductions_file'] == '../data/test/deductions.csv'
assert inp_dict['hist_location'] == '../data/test/'
# --------------------------------------------------------------------------------
def test_read_bad_freq():
"""
This function tests the ReadRunOptionsFile class to see if it correctly
determines that the pay_frequency was not entered correctly
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/RunOptionsbadfreq.txt'
else:
file = r'..\data\test\RunOptionsbadfreq.txt'
inp = ReadRunOptionsFile(file)
with pytest.raises(SystemExit):
inp_dict = inp.read_file()
# --------------------------------------------------------------------------------
def test_read_pay_date():
"""
This function tests the ReadRunOptionsFile class to see if it correctly
determines that the pay_frequency was not entered correctly
"""
plat = platform.system()
if plat == 'Darwin':
file = '../data/test/RunOptionsbadpaydate.txt'
else:
file = r'..\data\test\RunOptionsbadpaydate.txt'
inp = ReadRunOptionsFile(file)
with pytest.raises(SystemExit):
inp_dict = inp.read_file()
# ================================================================================
# ================================================================================
# Test read csv functions
def test_read_csv_by_headers():
"""
This function tests the read_csv_columns_by_headers function to ensure
it properly reads in a csv file with the headers placed at the top
of the file
"""
plat = platform.system()
if plat == 'Darwin':
file_name = '../data/test/test1.csv'
else:
file_name = r'..\data\test\test1.csv'
headers = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
obj = ReadCSVFile()
df = obj.read_csv_columns_by_headers(file_name, headers, dat)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
# ================================================================================
# ================================================================================
# eof
``` |
{
"source": "jonwedell/mmtf-python",
"score": 4
} |
#### File: codecs/decoders/decoders.py
```python
def run_length_decode(in_array):
"""A function to run length decode an int array.
:param in_array: the input array of integers
:return the decoded array"""
switch=False
out_array=[]
for item in in_array:
if switch==False:
this_item = item
switch=True
else:
switch=False
out_array.extend([this_item]*int(item))
return out_array
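# A minimal sketch of the expected behaviour (values are illustrative): the
# input is read as (value, count) pairs, so
# >>> run_length_decode([1, 4, 2, 2])
# [1, 1, 1, 1, 2, 2]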
def delta_decode(in_array):
"""A function to delta decode an int array.
:param in_array: the input array of integers
:return the decoded array"""
if len(in_array) == 0:
return []
this_ans = in_array[0]
out_array = [this_ans]
for i in range(1, len(in_array)):
this_ans += in_array[i]
out_array.append(this_ans)
return out_array
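# A minimal sketch of the expected behaviour (values are illustrative): each
# output element is the running sum of the input, so
# >>> delta_decode([5, 2, 1])
# [5, 7, 8]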
``` |
{
"source": "jonwedell/PyBMRB",
"score": 3
} |
#### File: jonwedell/PyBMRB/binderlog.py
```python
import requests
import json
import sys
def binderlog(date):
c=0
response = requests.get("https://archive.analytics.mybinder.org/events-{}.jsonl".format(date))
#https://archive.analytics.mybinder.org/index.jsonl
try:
data = [json.loads(l) for l in response.iter_lines()]
for i in data:
if 'PyBMRB' in i['spec'].split("/"):
#print (i)
c+=1
except json.decoder.JSONDecodeError:
c = -1
return c
def month_log(y,m):
c=0
for i in range(1,32):
date = '{}-{:02d}-{:02d}'.format(y,m,i)
n=binderlog(date)
if n > 0:
c+=n
print ("{} : {}".format(date,n))
print (" {}-{:02d} : {}".format(y,m,c))
if __name__=="__main__":
y=int(sys.argv[1])
m=int(sys.argv[2])
month_log(y,m)
``` |
{
"source": "jon-wehner/MyPantry",
"score": 3
} |
#### File: app/api/inventory_routes.py
```python
from flask import Blueprint, request
from app.models import UserItem, User, db
from app.forms import InventoryItemForm
from flask_login import login_required
from app.utils import validation_errors_to_error_messages
inventory_routes = Blueprint('inventory', __name__)
# Get all of a user's Items
@inventory_routes.route('/<int:user_id>')
@login_required
def user_inventory(user_id):
user = User.query.get(user_id)
if user:
return {"inventory": user.inventory()}
else:
return {"errors": "User Not Found"}
# Add an item to a user intentory
@inventory_routes.route('/<int:user_id>', methods=['POST'])
@login_required
def add_item(user_id):
user = User.query.get(user_id)
form = InventoryItemForm()
form['csrf_token'].data = request.cookies['csrf_token']
if form.validate_on_submit():
item_id = form.data['item_id']
measurement_id = form.data['measurement_id']
item = UserItem(
item_id=form.data['item_id'],
user_id=user_id,
expiration_date=form.data['expiration_date'],
quantity=form.data['quantity'],
measurement_id=form.data['measurement_id']
)
db.session.add(item)
if form.errors:
return {"errors": validation_errors_to_error_messages(form.errors)}
else:
db.session.commit()
return {"inventory": user.inventory()}
@inventory_routes.route('/<int:user_id>/<int:item_id>',
methods=['PUT', 'DELETE'])
@login_required
def edit_delete_item(user_id, item_id):
user = User.query.get(user_id)
item = UserItem.query.get(item_id)
form = InventoryItemForm()
if request.method == 'PUT':
form['csrf_token'].data = request.cookies['csrf_token']
form['item_id'].data = item.item.id
if form.validate_on_submit():
item.expiration_date = form.data['expiration_date']
print(item.expiration_date)
item.quantity = form.data['quantity']
item.measurement_id = form.data['measurement_id']
db.session.add(item)
if request.method == 'DELETE':
db.session.delete(item)
if form.errors:
return {"errors": validation_errors_to_error_messages(form.errors)}
else:
db.session.commit()
return {"inventory": user.inventory()}
```
#### File: app/models/shopping_list.py
```python
from .db import db
class ShoppingList(db.Model):
__tablename__ = "shopping_lists"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
user = db.relationship("User", back_populates="shopping_lists")
shopping_list_items = db.relationship("ShoppingListItem",
back_populates="shopping_list",
cascade="all, delete")
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"userId": self.user_id,
"items": [item.to_dict() for item in self.shopping_list_items]
}
```
#### File: app/tests/test_category.py
```python
import pytest
from app.models import Category
test_category = Category()
test_category.id = 1
test_category.name = "name"
def test_to_dict():
assert test_category.to_dict() == {
"id": 1,
"name": "name",
}
```
#### File: migrations/versions/20210317_140448_added_categories.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cca087112f71'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
'categories', sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.add_column('items', sa.Column(
'category_id', sa.Integer(), nullable=False))
op.create_foreign_key(None, 'items', 'categories', ['category_id'], ['id'])
op.drop_column('items', 'aisle')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('items', sa.Column(
'aisle', sa.VARCHAR(), autoincrement=False, nullable=False))
op.drop_constraint('items_category_id_fkey', 'items', type_='foreignkey')
op.drop_column('items', 'category_id')
op.drop_table('categories')
# ### end Alembic commands ###
``` |
{
"source": "jonwells90/operon-complexes",
"score": 3
} |
#### File: operon-complexes/operon_assembly/operon_null_model.py
```python
from random_operons import RandomOperons
class SignificanceTests(RandomOperons):
"""Provides methods to test the significance of trend observed in Fig S2.
"""
_strucs = 'data/dataset1.csv'
_yeast2hybrid = 'data/ecoli_y2h_pairwise.txt'
_struc_indices = [3, 4, 6, 14, 15, 16, 18]
_y2h_indices = [0, 1, 6, 4, 2, 3, 5]
_struc_sep = ','
_y2h_sep = '\t'
def __init__(self, dtype):
"""Initiates data based on either y2h or structural data."""
if dtype == 'struc':
data = self._strucs
inds = self._struc_indices
sep = self._struc_sep
elif dtype == 'y2h':
data = self._yeast2hybrid
inds = self._y2h_indices
sep = self._y2h_sep
else:
raise ValueError('use "struc" or "y2h"')
super().__init__(data, inds, sep, oplevel=True)
self.dtype = dtype
def fractional_test(self, trials):
"""Tests significance of observing fraction of interacting genes that
are also adjacent.
Returns:
observed fraction of interacting genes that are adjacent
mean expected fraction
p-value
"""
observed = self.calc_fraction()
successes = 0
mean = 0
for i in range(trials):
self.shuffle_operons()
expected = self.calc_fraction()
mean += expected
if expected >= observed:
successes += 1
observed = round(observed, 2)
mean_expected = round(mean/trials, 2)
pval = successes/trials
self.__init__(self.dtype)
return (observed, mean_expected, pval)
def total_intervening_test(self, trials):
"""Tests significance of observing at most x number of intervening
genes between all interacting pairs in dataset.
Returns:
observed number of intervening genes between interacting pairs.
mean expected number based on random positioning within operon.
p-value
"""
observed = self.calc_intervening()
successes = 0
mean = 0
for i in range(trials):
self.shuffle_operons()
expected = self.calc_intervening()
mean += expected
if expected <= observed:
successes += 1
observed = round(observed, 2)
mean_expected = round(mean/trials, 2)
pval = successes/trials
self.__init__(self.dtype) # Resets operons to initial state
return (observed, mean_expected, pval)
def intervening_distribution(self, trials):
"""See total_intervening_test(). Returns raw values of that."""
observed = self.calc_intervening(printr=True)[1]
expected = []
for i in range(trials):
self.shuffle_operons()
expected += self.calc_intervening(printr=True)[1]
for val in observed:
print('observed', val, sep='\t')
for val in expected:
print('expected', val, sep='\t')
def summary(self):
"""Summarises information about operons in data"""
num_ops = len(self.oplen.keys())
total_oplen = sum(self.oplen.values())
print(num_ops, total_oplen, total_oplen/num_ops)
if __name__ == '__main__':
struc = SignificanceTests('struc')
# complex 1 operons: '116617', '580994'
struc.filter_operons('116617', exclude=True)
struc.intervening_distribution(1000)
# print(struc.fractional_test(10000))
``` |
{
"source": "jonwells90/phylo",
"score": 3
} |
#### File: phylo/phylo/alignment.py
```python
import re
import numpy as np
class PairwiseAlignment():
"""Methods for pairwise alignments of sequences.
Accepts protein nucleic acid sequence. Currently implemented:
Needleman-Wunsch
Smith-Waterman
Needleman-Wunsch with affine gap penalties
Smith-Waterman with affine gap penalties
"""
def __init__(self, seq1, seq2, atype, scoremat):
self.seq1 = seq1
self.seq2 = seq2
self.m = len(seq1)
self.n = len(seq2)
self.atype = atype
self.scoremat = []
self.resind = {}
with open(f'../data/{scoremat}.txt') as infile:
i = 0
for line in infile:
if re.match(r'^[A-z\*]', line):
line = line.split()
res, scores = line[0], [int(i) for i in line[1:]]
self.resind[res] = i
self.scoremat.append(scores)
i += 1
self.scoremat = np.asarray(self.scoremat)
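# Note on the expected score-matrix file layout (an assumption based on the
# parsing above, e.g. a standard BLOSUM62 text file): comment and header lines
# that do not start with a residue letter or '*' are skipped, and every other
# row is '<residue> <score> <score> ...', with columns in the same residue
# order as the rows so that scoremat[resind[a], resind[b]] is valid.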
############################################################################
## Algorithm subroutines
############################################################################
def _clear_matrices(self):
self.f_mat = np.zeros((self.m + 1, self.n + 1))
self.ix_mat = np.zeros((self.m + 1, self.n + 1))
self.iy_mat = np.zeros((self.m + 1, self.n + 1))
self.f_ptr = np.zeros((self.m + 1, self.n + 1))
self.ix_ptr = np.zeros((self.m + 1, self.n + 1))
self.iy_ptr = np.zeros((self.m + 1, self.n + 1))
self.ptr_mats = {'f': self.f_ptr, 'ix': self.ix_ptr, 'iy': self.iy_ptr}
def _get_score(self, res1, res2):
"""Return score from specified scoring matrix"""
return self.scoremat[self.resind[res1], self.resind[res2]]
def _seq1_match(self, aln1, aln2, i, j):
aln1 += self.seq1[i-1]
aln2 += self.seq2[j-1]
i -= 1
j -= 1
return aln1, aln2, i, j
def _seq1_ins(self, aln1, aln2, i, j):
aln1 += self.seq1[i-1]
aln2 += '-'
i -= 1
return aln1, aln2, i, j
def _seq1_del(self, aln1, aln2, i, j):
aln1 += '-'
aln2 += self.seq2[j-1]
j -= 1
return aln1, aln2, i, j
def _set_affine_ptr(self, i, j,
fmatch, ixmatch, iymatch,
ixopen, ixextend,
iyopen, iyextend):
if self.f_mat[i, j] == fmatch:
self.f_ptr[i, j] = 2 # Go diagonal
elif self.f_mat[i, j] == ixmatch:
self.f_ptr[i, j] = 1 # Arrived from ix
elif self.f_mat[i, j] == iymatch:
self.f_ptr[i, j] = 3 # Arrived from iy
if self.ix_mat[i, j] == ixopen:
self.ix_ptr[i, j] = 4 # Arrived from f
elif self.ix_mat[i, j] == ixextend:
self.ix_ptr[i, j] = 5
if self.iy_mat[i, j] == iyopen:
self.iy_ptr[i, j] = 4 # Arrived from f
elif self.iy_mat[i, j] == iyextend:
self.iy_ptr[i, j] = 5
def _traceback(self, aln1, aln2, i, j):
"""Decides necessary pointer in traceback matrix."""
if self.f_ptr[i, j] == 2:
step = self._seq1_match(aln1, aln2, i, j)
elif self.f_ptr[i, j] == 1:
step = self._seq1_ins(aln1, aln2, i, j)
elif self.f_ptr[i, j] == 3:
step = self._seq1_del(aln1, aln2, i, j)
return step
def _affine_traceback(self, currmat, aln1, aln2, i, j):
"""Decides necessary pointer in traceback matrices."""
ptr_mat = self.ptr_mats[currmat]
step = aln1, aln2, i, j
if ptr_mat[i, j] == 2:
step = self._seq1_match(*step)
elif ptr_mat[i, j] == 1: # Go to ixtmat
step = self._seq1_match(*step)
currmat = 'ix'
elif ptr_mat[i, j] == 3:
step = self._seq1_match(*step)
currmat = 'iy'
elif ptr_mat[i, j] == 5 and currmat == 'ix':
step = self._seq1_ins(*step)
elif ptr_mat[i, j] == 5 and currmat == 'iy':
step = self._seq1_del(*step)
elif ptr_mat[i, j] == 4 and currmat == 'ix':
step = self._seq1_ins(*step)
currmat = 'f'
elif ptr_mat[i, j] == 4 and currmat == 'iy':
step = self._seq1_del(*step)
currmat = 'f'
aln1, aln2, i, j = step
return currmat, aln1, aln2, i, j
############################################################################
## Algorithm implementations
############################################################################
def nw_align(self, gap_open=8):
"""Needleman-Wunsch global alignment"""
# Initialise matrices
self._clear_matrices()
for i in range(1, self.m + 1):
self.f_mat[i, 0] = self.f_mat[i-1, 0] - gap_open
self.f_ptr[i, 0] = 1
for j in range(1, self.n+1):
self.f_mat[0, j] = self.f_mat[0, j-1] - gap_open
self.f_ptr[0, j] = 3
# Populate scoring and traceback matrices
for i in range(1, self.m + 1):
for j in range(1, self.n + 1):
a, b = self.seq1[i-1], self.seq2[j-1]
match = self.f_mat[i-1, j-1] + self._get_score(a, b)
seq1_ins = self.f_mat[i-1, j] - gap_open
seq1_del = self.f_mat[i, j-1] - gap_open
self.f_mat[i, j] = max(match, seq1_ins, seq1_del)
if self.f_mat[i, j] == match:
self.f_ptr[i, j] = 2
elif self.f_mat[i, j] == seq1_ins:
self.f_ptr[i, j] = 1
elif self.f_mat[i, j] == seq1_del:
self.f_ptr[i, j] = 3
# Traceback
aln1, aln2 = '', ''
i, j = self.m, self.n
tracestep = aln1, aln2, i, j
while i > 0 or j > 0:
tracestep = self._traceback(*tracestep)
i, j = tracestep[2:]
aln1, aln2 = tracestep[:2]
return aln1[::-1], aln2[::-1]
def sw_align(self, gap_open=8):
"""Smith-Waterman local alignment"""
# Initialise matrices
self._clear_matrices()
# Populate scoring and traceback matrices
for i in range(1, self.m + 1):
for j in range(1, self.n + 1):
a, b = self.seq1[i-1], self.seq2[j-1]
match = self.f_mat[i-1, j-1] + self._get_score(a, b)
seq1_ins = self.f_mat[i-1, j] - gap_open
seq1_del = self.f_mat[i, j-1] - gap_open
self.f_mat[i, j] = max(match, seq1_ins, seq1_del, 0)
if self.f_mat[i, j] == match:
self.f_ptr[i, j] = 2
elif self.f_mat[i, j] == seq1_ins:
self.f_ptr[i, j] = 1
elif self.f_mat[i, j] == seq1_del:
self.f_ptr[i, j] = 3
# Traceback
aln1, aln2 = '', ''
i, j = np.unravel_index(np.argmax(self.f_mat, axis=None), self.f_mat.shape)
tracestep = aln1, aln2, i, j
while self.f_ptr[i, j] != 0:
tracestep = self._traceback(*tracestep)
i, j = tracestep[2:]
aln1, aln2 = tracestep[:2]
return aln1[::-1], aln2[::-1]
def nwa_align(self, gap_open=8, gap_extend=1):
"""Needleman-Wunsch global alignment with affine gap penalty."""
# Initialise matrices
self._clear_matrices()
for i in range(1, self.m + 1):
self.f_mat[i, 0] = -gap_open - (i-1)*gap_extend
self.ix_mat[i, 0] = -gap_open - (i-1)*gap_extend
self.iy_mat[i, 0] = -gap_open - (i-1)*gap_extend
self.f_ptr[i, 0] = 1
for j in range(1, self.n + 1):
self.f_mat[0, j] = -gap_open - (j-1)*gap_extend
self.ix_mat[0, j] = -gap_open - (j-1)*gap_extend
self.iy_mat[0, j] = -gap_open - (j-1)*gap_extend
self.f_ptr[0, j] = 3
for i in range(1, self.m + 1):
for j in range(1, self.n + 1):
a, b = self.seq1[i-1], self.seq2[j-1]
ixopen = self.f_mat[i-1, j] - gap_open
ixextend = self.ix_mat[i-1, j] - gap_extend
self.ix_mat[i, j] = max(ixopen, ixextend)
iyopen = self.f_mat[i, j-1] - gap_open
iyextend = self.iy_mat[i, j-1] - gap_extend
self.iy_mat[i, j] = max(iyopen, iyextend)
fmatch = self.f_mat[i-1, j-1] + self._get_score(a, b)
ixmatch = self.ix_mat[i-1, j-1] + self._get_score(a, b)
iymatch = self.iy_mat[i-1, j-1] + self._get_score(a, b)
self.f_mat[i, j] = max(fmatch, ixmatch, iymatch)
self._set_affine_ptr(i, j,
fmatch, ixmatch, iymatch,
ixopen, ixextend,
iyopen, iyextend)
aln1, aln2 = '', ''
i, j = self.m, self.n
currmat = 'f'
tracestep = currmat, aln1, aln2, i, j
while i > 0 or j > 0:
tracestep = self._affine_traceback(*tracestep)
i, j = tracestep[-2:]
aln1, aln2 = tracestep[1:3]
return aln1[::-1], aln2[::-1]
def swa_align(self, gap_open=8, gap_extend=1):
self._clear_matrices()
for i in range(1, self.m + 1):
for j in range(1, self.n + 1):
a, b = self.seq1[i-1], self.seq2[j-1]
ixopen = self.f_mat[i-1, j] - gap_open
ixextend = self.ix_mat[i-1, j] - gap_extend
self.ix_mat[i, j] = max(ixopen, ixextend)
iyopen = self.f_mat[i, j-1] - gap_open
iyextend = self.iy_mat[i, j-1] - gap_extend
self.iy_mat[i, j] = max(iyopen, iyextend)
fmatch = self.f_mat[i-1, j-1] + self._get_score(a, b)
ixmatch = self.ix_mat[i-1, j-1] + self._get_score(a, b)
iymatch = self.iy_mat[i-1, j-1] + self._get_score(a, b)
self.f_mat[i, j] = max(fmatch, ixmatch, iymatch, 0)
self._set_affine_ptr(i, j,
fmatch, ixmatch, iymatch,
ixopen, ixextend,
iyopen, iyextend)
# Traceback
currmat = 'f'
aln1, aln2 = '', ''
i, j = np.unravel_index(np.argmax(self.f_mat, axis=None), self.f_mat.shape)
tracestep = currmat, aln1, aln2, i, j
while self.f_ptr[i, j] != 0:
tracestep = self._affine_traceback(*tracestep)
aln1, aln2, i, j = tracestep[1:]
return aln1[::-1], aln2[::-1]
def main():
seq1 = 'ALFGLKSGRNGRITCMASYKVKLITPDGPIEFLFGLKSGRNGRITCMASYKVKLITPDGPECP'
seq2 = 'ALFGLKLKRGDLAVAMASYKVDGTQEFECPLFGLKSGRNGRITCTCMASYKVKLITPDMASYKVKLITPDGP'
test = PairwiseAlignment(seq1, seq2, 'prot', 'BLOSUM62')
test.nw_align()
test.nwa_align()
test.sw_align()
test.swa_align()
if __name__ == '__main__':
main()
```
#### File: phylo/phylo/tree.py
```python
class Node(object):
def __init__(self, name=None, parent=None, brlen=None):
self.name = name
self.parent = parent
self.children = []
self.brlen = brlen
def add_child(self, child):
self.children.append(child)
def add_parent(self, parent):
self.parent = parent
def set_brlen(self, brlen):
self.brlen = brlen
def get_parent(self):
return self.parent
def get_children(self):
return self.children
@property
def isleaf(self):
if self.degree == 1:
return True
else:
return False
@property
def degree(self):
degree = 0
if self.parent:
degree += 1
degree += len(self.children)
return degree
def __repr__(self):
name = f'Name: {self.name}'
if self.parent == None:
parent = 'Parent: None'
else:
parent = f'Parent: {self.parent.name}'
children = 'Children: ['+', '.join([c.name for c in self.children])+']'
brlen = f'Branch length: {self.brlen}'
return f'{name}\n\t{parent}\n\t{children}\n\t{brlen}'
class Tree(object):
"""Simple self._tree structure - represents self._tree as linked list/dict."""
def __init__(self, nodes=None):
self.root = None
if nodes:
self.nodes = nodes
else:
self.nodes = []
def add_node(self, node):
self.nodes.append(node)
def get_root(self):
for node in self.nodes:
if node.parent == None:
return node
def post_order_traversal(self):
root = self.get_root()
return self._passdown_child(root, [])
def _passdown_child(self, node, pot):
pot.append(node)
for child in node.children:
self._passdown_child(child, pot)
return pot
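# Note: despite the method name, _passdown_child appends each node before
# visiting its children, so post_order_traversal() actually yields nodes in
# pre-order (root first). For the two-node tree built in the __main__ block
# below, the returned list is [root, A].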
@property
def isbinary(self):
for node in self.nodes:
if node.degree > 3:
return False
return True
def __repr__(self):
nodes = []
for node in self.nodes:
nodes.append(str(node))
return '\n'.join(nodes)
if __name__ == '__main__':
root = Node(name='_0')
node = Node(name='A', parent=root, brlen=0.1)
root.add_child(node)
tree = Tree()
tree.add_node(root)
tree.add_node(node)
print(tree)
print(root.degree)
print(node.degree)
print(root.isleaf)
``` |
{
"source": "jonwesneski/end2",
"score": 2
} |
#### File: package2a/package3/test_package3.py
```python
from src import RunMode
__run_mode__ = RunMode.PARALLEL
def test_1(logger, package_objects):
assert hasattr(package_objects, 'package1')
assert hasattr(package_objects, 'package2a')
assert not hasattr(package_objects, 'package2b')
assert hasattr(package_objects, 'package3')
```
#### File: examples/simple/__init__.py
```python
from src import setup, teardown
@setup
def my_smoke_setup(global_object):
global_object.cheesy = 4
@teardown
def my_smoke_teardown(global_object):
print(global_object.cheesy)
```
#### File: simple/regression/sample_skipped.py
```python
from src import (
RunMode,
setup
)
__run_mode__ = RunMode.PARALLEL
@setup
def my_setup(logger):
assert False, "FAILING SETUP ON PURPOSE"
def test_skipped(logger):
assert False, "THIS TEST SHOULD NOT RUN BECAUSE SETUP FAILED"
```
#### File: src/pattern_matchers/tag.py
```python
from src.pattern_matchers.default import DefaultModulePatternMatcher
class TagModulePatternMatcher(DefaultModulePatternMatcher):
pass
class TagTestCasePatternMatcher(DefaultModulePatternMatcher):
delimiter = ','
@classmethod
def parse_str(cls, pattern: str, include: bool = True):
return super(TagTestCasePatternMatcher, cls).parse_str(pattern, include)
def included(self, func) -> bool:
result = False
try:
for tag in func.metadata['tags']:
result = super().included(tag)
if result:
break
except (KeyError, AttributeError):
pass
return result
```
#### File: end2/src/runner.py
```python
import asyncio
import concurrent.futures
import inspect
import traceback
import sys
from typing import (
List,
Tuple
)
from src import exceptions
from src.discovery import discover_suite
from src.enums import Status
from src.logger import SuiteLogManager
from src.models.result import (
Result,
TestMethodResult,
TestModuleResult,
TestSuiteResult,
)
from src.models.test_popo import (
TestGroups,
TestMethod,
TestModule
)
from src.resource_profile import create_last_run_rc
def default_test_parameters(logger, package_object) -> Tuple[tuple, dict]:
return (logger,), {}
def create_test_run(args, test_parameters_func=default_test_parameters
, log_manager: SuiteLogManager = None) -> Tuple[TestSuiteResult, Tuple[str]]:
sequential_modules, parallel_modules, failed_imports = discover_suite(args.suite.modules)
suite_run = SuiteRun(args, test_parameters_func, sequential_modules, parallel_modules, log_manager)
return suite_run, failed_imports
def start_test_run(args, test_parameters_func=default_test_parameters
, log_manager: SuiteLogManager = None) -> Tuple[TestSuiteResult, Tuple[str]]:
suite_run, failed_imports = create_test_run(args, test_parameters_func, log_manager)
return suite_run.run(), failed_imports
class SuiteRun:
def __init__(self, args, test_parameters_func, sequential_modules: Tuple[TestModule]
, parallel_modules: Tuple[TestModule], log_manager: SuiteLogManager = None) -> None:
self.args = args
self.test_parameters_func = test_parameters_func
if self.args.no_concurrency:
self.sequential_modules = sequential_modules + parallel_modules
self.parallel_modules = tuple()
else:
self.sequential_modules = sequential_modules
self.parallel_modules = parallel_modules
self.allow_concurrency = not self.args.no_concurrency
self.name = 'suite_run'
self.results = None
self.log_manager = log_manager or SuiteLogManager(run_logger_name='suite_run', max_folders=self.args.max_log_folders)
self.logger = self.log_manager.logger
def run(self) -> TestSuiteResult:
self.log_manager.on_suite_start(self.name)
self.results = TestSuiteResult(self.name)
try:
for test_module in self.sequential_modules:
module_run = TestModuleRun(self.test_parameters_func, test_module, self.log_manager, self.args.stop_on_fail)
self.results.append(module_run.run())
with concurrent.futures.ThreadPoolExecutor(max_workers=self.args.max_workers) as executor:
futures = [
executor.submit(
TestModuleRun(self.test_parameters_func, test_module, self.log_manager, self.args.stop_on_fail, executor).run)
for test_module in self.parallel_modules
]
for future in futures:
self.results.append(future.result())
except exceptions.StopTestRunException as stre:
self.logger.critical(stre)
self.results.end()
self.log_manager.on_suite_stop(self.results)
create_last_run_rc(self.results)
return self.results
class TestModuleRun:
def __init__(self, test_parameters_func, module: TestModule, log_manager: SuiteLogManager
, stop_on_fail: bool, concurrent_executor: concurrent.futures.ThreadPoolExecutor = None) -> None:
self.test_parameters_func = test_parameters_func
self.module = module
self.log_manager = log_manager
self.stop_on_fail = stop_on_fail
self.concurrent_executor = concurrent_executor
def run(self) -> TestModuleResult:
self.module.test_package_list.setup()
result = TestModuleResult(self.module)
setup_results, test_results, teardown_results = self.run_group(self.module.groups)
result.setups = setup_results
result.test_results = test_results
result.teardowns = teardown_results
result.end()
self.log_manager.on_module_done(result)
self.module.test_package_list.teardown()
return result
def run_group(self, group: TestGroups) -> Tuple[List[Result], List[TestMethodResult], List[Result]]:
setup_results = [self.setup(group.setup_func)]
teardown_results = []
if setup_results[0].status is Status.FAILED:
test_results = self.create_skipped_results(group, setup_results[0].message)
else:
test_results = self.run_tests(group)
for group_ in group.children:
sr, tr, trr = self.run_group(group_)
setup_results.extend(sr)
test_results.extend(tr)
teardown_results.extend(trr)
teardown_results.append(self.teardown(group.teardown_func))
return setup_results, test_results, teardown_results
def create_skipped_results(self, group: TestGroups, message: str) -> List[TestMethodResult]:
test_results = [
TestMethodResult(v.name, status=Status.SKIPPED, message=message, description=v.__doc__, metadata=v.metadata)
for _, v in group.tests.items()
]
for g in group.children:
test_results.extend(self.create_skipped_results(g, message))
return test_results
def setup(self, setup_func) -> Result:
setup_logger = self.log_manager.get_setup_logger(self.module.name)
args, kwargs = self.test_parameters_func(setup_logger, self.module.test_package_list.package_object)
result = run_test_func(setup_logger, setup_func, *args, **kwargs)
self.log_manager.on_setup_module_done(self.module.name, result.to_base())
return result
def run_tests(self, group: TestGroups) -> List[TestMethodResult]:
async def as_completed(coroutines_, results_, stop_on_first_fail_):
for fs in coroutines_:
try:
result = await fs.run_async()
results_.append(result)
if result.status is Status.FAILED and stop_on_first_fail_:
[f.cancel() for f in coroutines_]
except exceptions.IgnoreTestException:
pass
routines, coroutines = [], []
for k, test in group.tests.items():
test_run = TestMethodRun(test, self.test_parameters_func, self.log_manager, self.module.name, self.module.test_package_list.package_object)
if inspect.iscoroutinefunction(test.func):
coroutines.append(test_run)
else:
routines.append(test_run)
results = []
loop = None
try:
if self.concurrent_executor:
future_results = [
self.concurrent_executor.submit(test.run)
for test in routines
]
try:
for future_result in concurrent.futures.as_completed(future_results):
try:
result = future_result.result()
results.append(result)
if self.stop_on_fail and result.status is Status.FAILED:
raise exceptions.StopTestRunException(result.message)
except exceptions.IgnoreTestException:
pass
except exceptions.StopTestRunException as stre:
raise
except:
self.log_manager.logger.error(traceback.format_exc())
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(as_completed(coroutines, results, self.stop_on_fail))
loop.close()
else:
try:
for test in routines:
try:
results.append(test.run())
if self.stop_on_fail and results[-1].status is Status.FAILED:
raise exceptions.StopTestRunException(results[-1].message)
except exceptions.IgnoreTestException:
pass
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
for test in coroutines:
try:
results.append(loop.run_until_complete(test.run_async()))
if self.stop_on_fail and results[-1].status is Status.FAILED:
raise exceptions.StopTestRunException(results[-1].message)
except exceptions.IgnoreTestException:
pass
loop.close()
except exceptions.StopTestRunException as stre:
raise
except:
self.log_manager.logger.error(traceback.format_exc())
return results
finally:
if loop is not None and loop.is_running():
loop.close()
def teardown(self, teardown_func) -> Result:
teardown_logger = self.log_manager.get_teardown_logger(self.module.name)
args, kwargs = self.test_parameters_func(teardown_logger, self.module.test_package_list.package_object)
result = run_test_func(teardown_logger, teardown_func, *args, **kwargs)
self.log_manager.on_teardown_module_done(self.module.name, result.to_base())
return result
class TestMethodRun:
def __init__(self, test_method: TestMethod, test_parameters_func
, log_manager: SuiteLogManager, module_name: str, package_object) -> None:
self.test_method = test_method
self.test_parameters_func = test_parameters_func
self.log_manager = log_manager
self.module_name = module_name
self.package_object = package_object
def run(self) -> TestMethodResult:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if inspect.iscoroutinefunction(self.test_method.setup_func):
setup_result = loop.run_until_complete(
self._intialize_args_and_setup_async()
)
else:
setup_result = self._intialize_args_and_setup()
result = self._intialize_args_and_run()
if inspect.iscoroutinefunction(self.test_method.teardown_func):
teardown_result = loop.run_until_complete(
self._intialize_args_and_teardown_async()
)
else:
teardown_result = self._intialize_args_and_teardown()
result.setup_result = setup_result
result.teardown_result = teardown_result
loop.close()
return result
async def run_async(self) -> TestMethodResult:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if inspect.iscoroutinefunction(self.test_method.setup_func):
setup_result = await self._intialize_args_and_setup_async()
else:
setup_result = self._intialize_args_and_setup()
result = await self._intialize_args_and_run_async()
if inspect.iscoroutinefunction(self.test_method.teardown_func):
teardown_result = await self._intialize_args_and_teardown_async()
else:
teardown_result = self._intialize_args_and_teardown()
result.setup_result = setup_result
result.teardown_result = teardown_result
loop.close()
return result
def _intialize_args_and_setup(self) -> Result:
logger = self.log_manager.get_setup_test_logger(self.module_name, self.test_method.name)
args, kwargs = self.test_parameters_func(logger, self.package_object)
result = run_test_func(logger, self.test_method.setup_func, *args, **kwargs)
self.log_manager.on_setup_test_done(self.module_name, self.test_method.name, result.to_base())
return result
async def _intialize_args_and_setup_async(self) -> Result:
logger = self.log_manager.get_setup_test_logger(self.module_name, self.test_method.name)
args, kwargs = self.test_parameters_func(logger, self.package_object)
result = await run_async_test_func(logger, self.test_method.setup_func, *args, **kwargs)
self.log_manager.on_setup_test_done(self.module_name, self.test_method.name, result.to_base())
return result
def _intialize_args_and_teardown(self) -> Result:
logger = self.log_manager.get_teardown_test_logger(self.module_name, self.test_method.name)
args, kwargs = self.test_parameters_func(logger, self.package_object)
result = run_test_func(logger, self.test_method.teardown_func, *args, **kwargs)
self.log_manager.on_teardown_test_done(self.module_name, self.test_method.name, result.to_base())
return result
async def _intialize_args_and_teardown_async(self) -> Result:
logger = self.log_manager.get_teardown_test_logger(self.module_name, self.test_method.name)
args, kwargs = self.test_parameters_func(logger, self.package_object)
result = await run_async_test_func(logger, self.test_method.teardown_func, *args, **kwargs)
self.log_manager.on_teardown_test_done(self.module_name, self.test_method.name, result.to_base())
return result
def _intialize_args_and_run(self) -> TestMethodResult:
logger = self.log_manager.get_test_logger(self.module_name, self.test_method.name)
args, kwargs = self.test_parameters_func(logger, self.package_object)
result = run_test_func(logger, self.test_method.func, *(args + self.test_method.parameterized_tuple), **kwargs)
result.metadata = self.test_method.metadata
self.log_manager.on_test_done(self.module_name, result)
return result
async def _intialize_args_and_run_async(self) -> TestMethodResult:
logger = self.log_manager.get_test_logger(self.module_name, self.test_method.name)
args, kwargs = self.test_parameters_func(logger, self.package_object)
result = await run_async_test_func(logger, self.test_method.func, *(args + self.test_method.parameterized_tuple), **kwargs)
result.metadata = self.test_method.metadata
self.log_manager.on_test_done(self.module_name, result)
return result
def run_test_func(logger, func, *args, **kwargs) -> TestMethodResult:
"""
>>> from src.logger import empty_logger
>>> def test_1():
... assert True
>>> result = run_test_func(empty_logger, test_1)
>>> result.status == Status.PASSED and result.message == "" and result.end_time is not None
True
>>> def test_2(a):
... assert False
>>> result = run_test_func(empty_logger, test_2, 1)
>>> result.status == Status.FAILED and result.message != "" and result.end_time is not None
True
>>> def test_3(a, b):
... raise exceptions.SkipTestException("I skip")
>>> result = run_test_func(empty_logger, test_3, a=1, b=2)
>>> result.status == Status.SKIPPED and result.message == "I skip" and result.end_time is not None
True
>>> def test_4(a, b, c):
... raise Exception("Error")
>>> result = run_test_func(empty_logger, test_4, 1, 2, 3)
>>> result.status == Status.FAILED and "Encountered an exception" in result.message and result.end_time is not None
True
"""
result = TestMethodResult(func.__name__, status=Status.FAILED)
try:
func(*args, **kwargs)
result.status = Status.PASSED
except AssertionError as ae:
_, _, tb = sys.exc_info()
tb_info = traceback.extract_tb(tb)
filename, line, func, error_text = tb_info[-1]
result.message = str(ae) if str(ae) else error_text
logger.error(result.message)
except exceptions.SkipTestException as ste:
logger.info(ste.message)
result.message = ste.message
result.status = Status.SKIPPED
except exceptions.IgnoreTestException as ite:
raise
except Exception as e:
logger.debug(traceback.format_exc())
result.message = f'Encountered an exception: {e}'
logger.error(result.message)
return result.end()
async def run_async_test_func(logger, func, *args, **kwargs) -> TestMethodResult:
"""
>>> from src.logger import empty_logger
>>> import asyncio
>>> loop = asyncio.get_event_loop()
>>> async def test_1():
... assert True
>>> result = loop.run_until_complete(run_async_test_func(empty_logger, test_1))
>>> result.status == Status.PASSED and result.message == "" and result.end_time is not None
True
>>> def test_2(a):
... assert False
>>> result = loop.run_until_complete(run_async_test_func(empty_logger, test_2, 1))
>>> result.status == Status.FAILED and result.message != "" and result.end_time is not None
True
>>> def test_3(a, b):
... raise exceptions.SkipTestException("I skip")
>>> result = loop.run_until_complete(run_async_test_func(empty_logger, test_3, a=1, b=2))
>>> result.status == Status.SKIPPED and result.message == "I skip" and result.end_time is not None
True
>>> def test_4(a, b, c):
... raise Exception("Error")
>>> result = loop.run_until_complete(run_async_test_func(empty_logger, test_4, 1, 2, 3))
>>> result.status == Status.FAILED and "Encountered an exception" in result.message and result.end_time is not None
True
"""
result = TestMethodResult(func.__name__, status=Status.FAILED)
try:
await func(*args, **kwargs)
result.status = Status.PASSED
except AssertionError as ae:
_, _, tb = sys.exc_info()
tb_info = traceback.extract_tb(tb)
filename, line, func, error_text = tb_info[-1]
result.message = str(ae) if str(ae) else error_text
logger.error(result.message)
except exceptions.SkipTestException as ste:
logger.info(ste.message)
result.message = ste.message
result.status = Status.SKIPPED
except exceptions.IgnoreTestException as ite:
raise
except asyncio.CancelledError:
result.message = 'I got cancelled'
result.status = Status.SKIPPED
logger.info(result.message)
except Exception as e:
logger.debug(traceback.format_exc())
result.message = f'Encountered an exception: {e}'
logger.error(result.message)
return result.end()
``` |
{
"source": "jon-whit/airU-pi",
"score": 4
} |
#### File: lib/airu/utils.py
```python
import threading
import time
import gps
from functools import wraps
class GpsPoller(threading.Thread):
def __init__(self):
"""
Constructs a new GpsPoller object.
A GpsPoller is invoked on a separate thread and runs continuously
until the thread is terminated. The GpsPoller should be used to
get the latest GPS data at any given time.
"""
threading.Thread.__init__(self)
self.gpsd = gps.gps("localhost", "2947")
self.gpsd.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)
self.current_value = None
self.running = True
def get_gps_data(self):
"""
Gets the latest GPS data from the GPS client.
:return: A dictionary of fixtures from the GPS client.
"""
return self.current_value
def run(self):
"""
Overrides the Thread's 'run' method. This method is invoked when the
thread begins, and it continues to run until this GpsPoller's 'running'
field is set to False.
"""
while self.running:
report = self.gpsd.next()
if (self.gpsd.fix.mode != 1): # 1 = NO_FIX, 2 = 2D, 3 = 3D
self.current_value = report
time.sleep(1) # Wait 1 second between updates
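def _gps_poller_usage_sketch():
    """
    Minimal usage sketch (illustrative only; assumes a local gpsd is listening
    on localhost:2947 and that blocking briefly is acceptable).
    """
    poller = GpsPoller()
    poller.start()              # run() begins polling on a background thread
    try:
        time.sleep(2)           # give the poller time to collect at least one report
        print(poller.get_gps_data())
    finally:
        poller.running = False  # ask the run() loop to exit
        poller.join()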
def retry(e, retries=4, delay=1, logger=None):
"""
    A decorator that retries the function it decorates a specified number of
    times, with a delay between each attempt. If the decorated function never
    returns a non-None result within the retry count, this decorator raises
    the supplied exception.
:param e: The exception to raise if the retry count is exceeded.
    :param retries: The number of retries that should be carried out.
:param delay: The time delay between retries (seconds).
:param logger: An optional logger to use.
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries = retries
while mtries > 0:
result = f(*args, **kwargs)
if result is None:
time.sleep(delay)
mtries -= 1
if logger:
msg = "Call to '{0}' failed to acquire a value... Retrying in {1} seconds.".format(f.__name__,
delay)
logger.warning(msg)
else:
return result
raise e("Call to '{0}' failed to acquire a value in the retry period.".format(f.__name__))
return f_retry
return deco_retry
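# Minimal usage sketch (illustrative): '_poll_sensor_sketch' and its 'sensor'
# argument are hypothetical. The decorated call is retried while it returns
# None and raises TimeoutError once the retries are exhausted.
@retry(TimeoutError, retries=3, delay=0.5)
def _poll_sensor_sketch(sensor=None):
    return getattr(sensor, 'value', None)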
def get_mac(interface):
"""
Gets the MAC address for the supplied interface or None if the MAC could
not be read from the system.
:param interface: The network interface whose MAC should be returned
:return: The unique MAC address, or None otherwise.
"""
try:
result = open('/sys/class/net/{0}/address'.format(interface)).readline()[0:17]
except IOError:
result = None
return result
``` |
{
"source": "jonwhittlestone/customervault",
"score": 2
} |
#### File: api/app/database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import as_declarative, declared_attr
from sqlalchemy.orm import sessionmaker
from app.core.config import settings
engine = create_engine(settings.DATABASE_URI, pool_pre_ping=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
@as_declarative()
class Base:
@declared_attr
def __tablename__(cls) -> str:
return cls.__name__.lower()
``` |
{
"source": "jonwhittlestone/kaizen-async",
"score": 2
} |
#### File: kaizen-async/app/main.py
```python
import json
import os
from pathlib import Path
from app.services import (sjd_service, email_service, book_service, article_service)
import fastapi
import uvicorn
from app.api import kaizen_api
from app.views import home
app = fastapi.FastAPI()
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def configure():
configure_routing()
configure_settings()
def configure_routing():
app.include_router(home.router)
app.include_router(kaizen_api.router)
def configure_settings():
file = Path(os.path.join(PROJECT_DIR, 'settings.json')).absolute()
if not file.exists():
print(f"WARNING: {file} file not found, you cannot continue. Please see settings_template.json")
raise Exception("settings.json file not found. Please see settings_template.json.")
with open(file) as fin:
settings = json.load(fin)
sjd_service.username = settings.get('sjd_username')
sjd_service.password = <PASSWORD>('<PASSWORD>')
email_service.username = settings.get('email_username')
email_service.password = <PASSWORD>('<PASSWORD>')
book_service.key = settings.get('goodreads_key')
book_service.secret = settings.get('goodreads_secret')
article_service.consumer = settings.get('pocket_consumer')
article_service.access = settings.get('pocket_access')
if __name__ == '__main__':
configure()
uvicorn.run(app, port=8000, host='127.0.0.1')
else:
configure()
```
#### File: app/services/book_service.py
```python
import datetime
from goodreads import client
from typing import Optional
API_DOCS = 'https://www.goodreads.com/api'
USER_ID = '28892852'
key: Optional[str] = None
secret: Optional[str] = None
now = datetime.datetime.now()
class Client():
def __init__(self):
self.gc = client.GoodreadsClient(
key, secret)
async def books_on_shelf_count(self, shelf_label=f'read-{now.year}'):
res = self.gc.request('shelf/list.xml',{'page':'1', 'user_id':USER_ID})
for shelf in res['shelves']['user_shelf']:
if shelf['name'] == shelf_label:
return int(shelf['book_count']['#text'])
return self.no_shelf_code
@property
def no_shelf_code(self):
return '[no shelf]'
```
#### File: kaizen-async/tests/test_app.py
```python
def test_home(test_app):
response = test_app.get("/")
assert response.status_code == 200
assert response.json() == {"Kaizen": f"Personal metrics server - {response.url}api/kaizen"}
``` |
{
"source": "jonwhittlestone/lambda-api-boto3",
"score": 2
} |
#### File: lambda-api-boto3/src/lambda_functions.py
```python
import boto3
import json
import os
from helpers import Zipper
from helpers import lambda_client, iam_client
from settings import PYTHON_LAMBDA_API_PERMISSION_STATEMENT_ID, LAMBDA_POLICY_NAME, LAMBDA_ROLE, LAMBDA_TIMEOUT, LAMBDA_MEMORY, PYTHON_36_RUNTIME, PYTHON_LAMBDA_NAME
def remove_permission():
try:
response = lambda_client().remove_permission(
FunctionName=PYTHON_LAMBDA_NAME,
StatementId=PYTHON_LAMBDA_API_PERMISSION_STATEMENT_ID,
)
return response
except lambda_client().exceptions.ResourceNotFoundException as e:
pass
def get_or_create_access_policy_for_lambda(policy_name):
policy = find_policy(policy_name)
if not policy:
s3_access_policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:*",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"cloudwatch:PutMetricData",
],
"Effect": "Allow",
"Resource": "*",
}
],
}
return iam_client().create_policy(
PolicyName=policy_name,
PolicyDocument=json.dumps(s3_access_policy_document),
Description="Allows lambda function to access s3 resources",
)["Policy"]
return policy
def find_policy(policy_name):
for p in iam_client().list_policies(Scope="Local").get("Policies"):
if p.get("PolicyName") == policy_name:
return p
def find_role(lambda_role):
try:
role = iam_client().get_role(RoleName=lambda_role)
if role.get("Role", False):
return role.get("Role")
except iam_client().exceptions.NoSuchEntityException:
pass
def get_or_create_execution_role_lambda(arn, lambda_role):
role = find_role(lambda_role)
if role:
return role
lambda_execution_assumption_role = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"Service": "lambda.amazonaws.com"},
"Action": "sts:AssumeRole",
}
],
}
return iam_client().create_role(
RoleName=lambda_role,
AssumeRolePolicyDocument=json.dumps(lambda_execution_assumption_role),
Description="Gives necessary permissions for lambda to be executed",
)["Role"]
def attach_access_policy_to_execution_role(lambda_role, policy_arn):
return iam_client().attach_role_policy(RoleName=lambda_role, PolicyArn=policy_arn)
def deploy_lambda_function(function_name, runtime, handler, role_arn, source_folder):
folder_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), source_folder
)
zip_file = Zipper().make_bytes(path=folder_path)
return lambda_client().create_function(
FunctionName=function_name,
Runtime=runtime,
Role=role_arn,
Handler=handler,
Code={"ZipFile": zip_file,},
Timeout=LAMBDA_TIMEOUT,
MemorySize=LAMBDA_MEMORY,
Publish=False,
)
def remove_function(lambda_name):
try:
return lambda_client().delete_function(FunctionName=lambda_name)
except lambda_client().exceptions.ResourceNotFoundException:
pass
def invoke_function(lambda_name):
return lambda_client().invoke(FunctionName=lambda_name)
def create_lambda():
policy = get_or_create_access_policy_for_lambda(LAMBDA_POLICY_NAME)
policy_arn = policy.get("Arn")
role = get_or_create_execution_role_lambda(policy_arn, LAMBDA_ROLE)
role_arn = role.get("Arn")
role_policy = attach_access_policy_to_execution_role(LAMBDA_ROLE, policy_arn)
return role_arn
def deploy_lambda(role_arn):
return deploy_lambda_function(
PYTHON_LAMBDA_NAME,
PYTHON_36_RUNTIME,
"hello_world.handler",
role_arn,
"python_lambdas",
)
``` |
{
"source": "jonwhittlestone/mongo-csfle-beanie-fastapi",
"score": 2
} |
#### File: src/db/mongo.py
```python
from icecream import ic
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorClientEncryption
from src.config import settings
async def start_mongo(encrypted=True):
from src.main import app
ic('starting mongo..')
try:
app.mongodb_client = AsyncIOMotorClient(settings.DB_URL)
app.mongodb = app.mongodb_client[settings.DB_NAME]
ic(settings.DB_URL)
await app.mongodb[settings.DB_NAME].update_one({'connection': True}, {"$set": {'connection': True}}, upsert=True)
except Exception as e:
raise Exception(
'There has been an error. Remember: Environment Variables must be set.')
``` |
{
"source": "jonwhittlestone/streaming-form-data",
"score": 2
} |
#### File: examples/tornado/stream_request_body.py
```python
import os.path
import tempfile
from time import time
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget, ValueTarget
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler, stream_request_body
max_body_size = 100 * 1024 * 1024 * 1024  # upper bound for streamed uploads (100 GiB)
@stream_request_body
class UploadHandler(RequestHandler):
def prepare(self):
        self.request.connection.set_max_body_size(max_body_size)
name = 'uploaded-file-tornado-{}.dat'.format(int(time()))
self.value = ValueTarget()
self.file_ = FileTarget(os.path.join(tempfile.gettempdir(), name))
self._parser = StreamingFormDataParser(headers=self.request.headers)
self._parser.register('name', self.value)
self._parser.register('file', self.file_)
def data_received(self, chunk):
self._parser.data_received(chunk)
def post(self):
self.render(
'upload.html', name=self.value.value, filename=self.file_.filename
)
class IndexHandler(RequestHandler):
def get(self):
self.render('index.html')
def main():
handlers = [(r'/', IndexHandler), (r'/upload', UploadHandler)]
settings = dict(debug=True, template_path=os.path.dirname(__file__))
app = Application(handlers, **settings)
app.listen(9999, address='localhost')
IOLoop().current().start()
if __name__ == '__main__':
print('Listening on localhost:9999')
main()
```
#### File: streaming-form-data/streaming_form_data/parser.py
```python
import cgi
from typing import Mapping, Type
from streaming_form_data._parser import ErrorGroup, _Parser # type: ignore
from streaming_form_data.targets import BaseTarget
class ParseFailedException(Exception):
pass
def parse_content_boundary(headers: Mapping[str, str]) -> bytes:
content_type = None
for key in headers.keys():
if key.lower() == 'content-type':
content_type = headers.get(key)
break
if not content_type:
raise ParseFailedException('Missing Content-Type header')
value, params = cgi.parse_header(content_type)
if not value or value.lower() != 'multipart/form-data':
raise ParseFailedException('Content-Type is not multipart/form-data')
boundary = params.get('boundary')
if not boundary:
raise ParseFailedException('Boundary not found')
return boundary.encode('utf-8')
class StreamingFormDataParser:
def __init__(self, headers: Mapping[str, str]):
self.headers = headers
raw_boundary = parse_content_boundary(headers)
delimiter = b'\r\n--' + raw_boundary + b'\r\n'
ender = b'\r\n--' + raw_boundary + b'--'
self._parser = _Parser(delimiter, ender)
self._running = False
def register(self, name: str, target: Type[BaseTarget]):
if self._running:
raise ParseFailedException(
'Registering parts not allowed while parser is running'
)
self._parser.register(name, target)
def data_received(self, data: bytes):
if not self._running:
self._running = True
result = self._parser.data_received(data)
if result > 0:
if ErrorGroup.Internal <= result < ErrorGroup.Delimiting:
message = 'internal errors'
elif ErrorGroup.Delimiting <= result < ErrorGroup.PartHeaders:
message = 'delimiting multipart stream into parts'
elif ErrorGroup.PartHeaders <= result:
message = 'parsing specific part headers'
raise ParseFailedException(
'_parser.data_received failed with {}'.format(message)
)
```
#### File: streaming-form-data/tests/test_validators.py
```python
import pytest
from streaming_form_data.validators import MaxSizeValidator, ValidationError
def test_max_size_validator_empty_input():
validator = MaxSizeValidator(0)
with pytest.raises(ValidationError):
validator('x')
def test_max_size_validator_normal():
validator = MaxSizeValidator(5)
for char in 'hello':
validator(char)
with pytest.raises(ValidationError):
validator('x')
``` |
{
"source": "jonwhuang/diana-api",
"score": 2
} |
#### File: diana-api/mongoenginetest/views.py
```python
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework_mongoengine import viewsets
from mongoenginetest.serializers import TestSerializer
from mongoenginetest.models import Test
# Create your views here.
class TestViewSet(viewsets.ModelViewSet):
lookup_field = 'id'
serializer_class = TestSerializer
def get_queryset(self):
return Test.objects.all()
``` |
{
"source": "JonWiggins/pepperon.ai",
"score": 4
} |
#### File: pepperonai/data/utils.py
```python
__author__ = "<NAME>"
import math
import numpy as np
import pandas as pd
import itertools
import random
def fowlkesmallowsindex(
first_clustering: "Set[object]",
second_clustering: "Set[object]",
all_data_points: "Set[object]",
) -> float:
"""
Given two clusterings and a list of all the points, calculates the Fowlkes-Mallows Index
:param first_clustering: the first set of iterables to compare
:param second_clustering: the second set of iterables to compare
:param all_data_points: all of the datapoints in the two clusterings as an iterable
:return: the Fowlkes Mallows Index as a float
"""
# TP = the number of points that are present in the same cluster in both clusterings
# FP = the number of points that are present in the same cluster in clustering1 but not clustering2
# FN = the number of points that are present in the same cluster in clustering2 but not clustering1
# TN = the number of points that are in different clusters in both clusterings
TP = 0
FP = 0
FN = 0
TN = 0
for element in all_data_points:
elements_first_location = None
for cluster in first_clustering:
if element in cluster:
elements_first_location = cluster
break
elements_second_location = None
for cluster in second_clustering:
if element in cluster:
elements_second_location = cluster
break
for comparison_element in all_data_points:
comparisons_first_cluster = None
for cluster in first_clustering:
if comparison_element in cluster:
comparisons_first_cluster = cluster
comparisons_second_cluster = None
for cluster in second_clustering:
if comparison_element in cluster:
comparisons_second_cluster = cluster
if (
elements_first_location == comparisons_first_cluster
and elements_second_location == comparisons_second_cluster
):
TP += 1
elif (
elements_first_location == comparisons_first_cluster
and not elements_second_location == comparisons_second_cluster
):
FP += 1
elif (
not elements_first_location == comparisons_first_cluster
and elements_second_location == comparisons_second_cluster
):
FN += 1
elif (
not elements_first_location == comparisons_first_cluster
and not elements_second_location == comparisons_second_cluster
):
TN += 1
if TP + FP == 0 or TP + FN == 0:
return 0
    return math.sqrt((TP / (TP + FP)) * (TP / (TP + FN)))
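def _fowlkes_mallows_sketch():
    """
    Tiny worked example (illustrative): two identical clusterings of three
    points agree on every pair, so the index is 1.0.
    """
    first = {frozenset({"a", "b"}), frozenset({"c"})}
    second = {frozenset({"a", "b"}), frozenset({"c"})}
    return fowlkesmallowsindex(first, second, {"a", "b", "c"})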
def purity(first_clustering: "Set[object]", second_clustering: "Set[object]") -> float:
"""
Returns the purity of the given two clusterings
:param first_clusterings: a set of iterables to compare
:param second_clusterings: a set of iterables to compare
:return: the purity index as a float
"""
summation = 0
for cluster in first_clustering:
highest = 0
for comparer in second_clustering:
next_element = len(cluster.intersection(comparer))
if next_element > highest:
highest = next_element
summation += highest
# find total number of data points
N = sum(len(cluster) for cluster in first_clustering)
if N == 0:
return 0
return summation / N
def jackknife(
data: "dataframe",
fold_count: int,
save: bool,
save_dir: str,
experiment_id: str = "",
) -> "List[dataframe]":
"""
Jackknifes the given dataframe into fold_count number of dataframes
Randomly shuffles the dataframe before splitting
Will also save the folds to a given directory
:param data: a dataframe source
:param fold_count: the number of folds to create
:param save: if true, saves the folds to the given directory
    :param save_dir: a directory as a string where to save the folds as a csv
    :param experiment_id: an identifier to use when saving the folds to identify them later
:return: a list of dataframes
"""
data = data.sample(frac=1)
folds = np.array_split(data, fold_count)
np.random.shuffle(folds)
if save:
for count, fold in enumerate(folds):
fold.to_csv(save_dir + "fold" + str(count) + "_" + experiment_id + ".csv")
return folds
def random_unit_vector(dimensions: int, seed: int = None) -> "List[float]":
"""
Returns a random unit vector in the given number of dimensions
Created using Gausian Random vars
:param dimensions: desired dimensions
:param seed: nullable, random var seed
:return: random unit vecotor
"""
raw = []
magnitude = 0
if seed:
random.seed(seed)
for count in range(dimensions):
uniform1 = random.uniform(0, 1)
uniform2 = random.uniform(0, 1)
toadd = math.sqrt(-2 * math.log(uniform1)) * math.cos(2 * math.pi * uniform2)
magnitude += toadd ** 2
raw.append(toadd)
magnitude = math.sqrt(magnitude)
return [element / magnitude for element in raw]
def model_accuracy(
model: "Model",
probe_method: "function",
test_set: "dataframe",
test_answer: "List[object]",
) -> float:
"""
Tests the model on the given set and returns the accuracy
:param model: a trained model
    :param probe_method: the model's query method, takes an instance as a parameter and returns the prediction
    :param test_set: an iterable object to probe with
    :param test_answer: an iterable object to correlate with the model's predictions
    :return: the number of correct predictions / total number of probes
"""
correct_count = 0
for index, element in enumerate(test_set):
        if probe_method(element) == test_answer[index]:
correct_count += 1
return correct_count / len(test_set)
def model_error(
model: "Model",
probe_method: "function",
test_set: "dataframe",
test_answer: "List[object]",
) -> float:
"""
Tests the model on the given set and returns the error
:param model: a trained model
    :param probe_method: the model's query method, takes an instance as a parameter and returns the prediction
    :param test_set: an iterable object to probe with
    :param test_answer: an iterable object to correlate with the model's predictions
    :return: 1 - the number of correct predictions / total number of probes
"""
return 1 - model_accuracy(model, probe_method, test_set, test_answer)
def get_average_accuracy_and_sd(
model: "Model", probe_method: "function", folds: "dataframe"
) -> "Tuple(float, float)":
"""
*incomplete*
Tests the given model on each fold
"""
# TODO generalize this for all different models
accs = []
for test_fold in folds:
if test_fold == folds[0]:
train_set = folds[1]
else:
train_set = folds[0]
for element in folds:
if element == test_fold:
continue
train_set = train_set.append(element)
# TODO generalize and train here
accs.append(
model_accuracy(
model, probe_method, test_set=test_fold, test_answer=test_fold
)
)
return np.mean(accs, axis=0), np.std(accs, axis=0)
def enumerate_hyperparameter_combinations(parameter_to_options: dict) -> "List[dict]":
"""
Returns a list of dictionaries of all hyperparameter options
:param parameter_to_options: a dictionary that maps parameter name to a list of possible values
:return: a list of dictionaries that map parameter names to set values
"""
keys, values = zip(*parameter_to_options.items())
return [dict(zip(keys, v)) for v in itertools.product(*values)]
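# Minimal usage sketch (illustrative): two lists of options expand to their
# full cross product of parameter dictionaries.
# enumerate_hyperparameter_combinations({"lr": [0.1, 0.01], "epochs": [5, 10]})
# -> [{'lr': 0.1, 'epochs': 5}, {'lr': 0.1, 'epochs': 10},
#     {'lr': 0.01, 'epochs': 5}, {'lr': 0.01, 'epochs': 10}]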
def grid_search(
model_type: "Model",
probe_method: "function",
parameter_to_options: dict,
train_set: "dataframe",
train_answers: "List[object]",
test_set: "dataframe",
test_answers: "List[object]",
fold_count: int,
print_results: bool,
) -> "Tuple[Model, dict, float, float]":
"""
*incomplete*
Trains a model with every possible set of given hyperparameters and returns the best performing one
:param model_type: a class of model to train
:param probe_method: the method in model_type to probe after training
    :param parameter_to_options: a dictionary that maps each hyperparameter to a list of possible values
    :param train_set: a set of training input
    :param train_answers: a set of training set answers to correlate with train_set
:param test_set: a set of testing input
:param test_answers: a set of testing answers to correlate with test_answers
:param fold_count: the number of jackknife folds to evaluate with
:param print_results: prints the accuracy and standard deviation of each experiment if true
:return: the model and info as; (the model, the hyperparameter dict, the average accuracy, the standard deviation)
"""
best_params = None
best_model = None
best_acc = None
best_std = None
for parameter_set in enumerate_hyperparameter_combinations(parameter_to_options):
# TODO generalize this for all different models
current_model = model_type(**parameter_set)
# TODO possibly move where training takes place
current_model.train(train_set, train_answers)
acc, sd = get_average_accuracy_and_sd(current_model, probe_method, fold_count)
if print_results:
print(type(model_type))
print("Parameters:", parameter_set)
print("Average Accuracy:", acc)
print("Average Standard Deviation:", sd)
if not best_acc or best_acc < acc:
best_model = current_model
best_params = parameter_set
best_acc = acc
best_std = sd
return best_model, best_params, best_acc, best_std
```
#### File: pepperonai/supervised/decisiontree.py
```python
__author__ = "<NAME>"
import math
def entropy(data: "dataframe", target_label: str) -> float:
"""
    Calculates the entropy of the given dataframe with respect to the column given by the target_label
    :param data: A dataframe of examples
    :param target_label: A name of a column in the dataframe
    :returns: the entropy
"""
to_return = 0
value_probs = [count / data.shape[0] for count in data[target_label].value_counts()]
to_return += sum(value_prob * math.log2(value_prob) for value_prob in value_probs)
return to_return * -1
def gini_index(data: "dataframe", target_label: str) -> float:
"""
Calculates the gini index of the given dataframe with respect to the column given by the target_label
:param data: A dataframe of examples
:param target_label: A name of a column in the dataframe
:returns: the gini index
"""
to_return = 1
for value in data[target_label].unique():
subset = data[data[target_label] == value]
value_prob = subset.shape[0] / data.shape[0]
to_return -= value_prob * value_prob
return to_return
def information_gain(
data: "dataframe",
target_label: str,
attribute: str,
impurity_metric: "function" = entropy,
) -> float:
"""
Calculates the information gain of the given attribute with respect to the target_label in the given dataframe
:param data: A dataframe of examples
:param target_label: A name of a column in the dataframe
:param attribute: A name of a column in the dataframe
:param impurity_metric: A function for calculating impurity, defaults to entropy
:returns: the information gain of the attribute
"""
to_return = impurity_metric(data, target_label)
for value in data[attribute].unique():
subset = data[data[attribute] == value]
value_prob = subset.shape[0] / data.shape[0]
to_return -= value_prob * impurity_metric(subset, target_label)
return to_return
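def _impurity_sketch():
    """
    Tiny worked example (illustrative): in this 4-row frame 'windy' fully
    determines 'play', so the information gain of 'windy' equals the full
    entropy of 'play' (1 bit) and the gini index of 'play' is 0.5.
    """
    import pandas as pd
    toy = pd.DataFrame({"windy": ["y", "y", "n", "n"],
                        "play": ["no", "no", "yes", "yes"]})
    return entropy(toy, "play"), gini_index(toy, "play"), information_gain(toy, "play", "windy")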
def fast_information_gain_on_entropy(
data: "dataframe", target_label: str, attribute: str
) -> float:
"""
Calculates the information gain of the given attribute with respect to the target_label in the given dataframe
Does this a bit quicker than the above method as it only uses entropy
:param data: A dataframe of examples
:param target_label: A name of a column in the dataframe
:param attribute: A name of a column in the dataframe
:returns: the information gain of the attribute
"""
to_return = 0
value_probs = [count / data.shape[0] for count in data[target_label].value_counts()]
to_return += sum(value_prob * math.log2(value_prob) for value_prob in value_probs)
to_return *= -1
    counts = data[attribute].value_counts()
    for index, count in zip(counts.index, counts):
        # conditional entropy of the target within this subset, weighted by the
        # subset probability: gain = H(target) - sum_x p(x) * H(target | x)
        subset_counts = data[data[attribute] == index][target_label].value_counts()
        value_probs = [subset_count / count for subset_count in subset_counts]
        to_return += (count / data.shape[0]) * sum(
            value_prob * math.log2(value_prob) for value_prob in value_probs
        )
return to_return
class DecisionTree:
"""
    This class acts as a single node in a Decision Tree.
It holds:
- attribute: The attribute this node acts on
- is_leaf: a flag for if this is a leaf node
- label: if this node is a leaf, this will contain the target label value predicted
- gain: the gain calculated by ID3 when creating this node
"""
def __init__(
self,
attribute: str = None,
is_leaf: bool = False,
label: str = None,
gain: float = None,
):
"""
Create a new Decision Tree
"""
self.is_leaf = is_leaf
self.label = label
self.attribute = attribute
self.branches = {}
self.gain = gain
def add_branch(self, label: str, decision_tree: "DecisionTree"):
"""
Adds a new branch to this node's children
:param label: the value of this node's attribute that this child should be called on
        :param decision_tree: a decision tree to add as a child
"""
self.branches[label] = decision_tree
def traverse(self, values: "dataframe") -> str:
"""
        Probes this decision tree and its children recursively for the target_label
        predicted by the given values
:param values: A dictionary of the examples attributes
:returns: The predicted target label for this set of values
"""
if self.is_leaf or values[self.attribute] not in self.branches:
return self.label
return self.branches[values[self.attribute]].traverse(values)
def find_max_depth(self) -> int:
"""
        A helper method that finds the depth of this tree from its children
:returns: The depth as an int
"""
if len(self.branches) == 0:
return 0
return max(branch.find_max_depth() for branch in self.branches.values()) + 1
def ID3(
examples: "dataframe",
target_label: str,
metric: "function" = fast_information_gain_on_entropy,
depth_budget: int = None,
) -> DecisionTree:
"""
This function implements the ID3 algorithm
    It will return a DecisionTree constructed for the target_label using the given dataframe of examples
    :param examples: A dataframe of examples
    :param target_label: The name of the target attribute column
    :param metric: gain metric, defaults to fast_information_gain_on_entropy
    :param depth_budget: max depth of the tree
:return: a DecisionTree
"""
# if all examples have the same value, or the budget is expired, or there are no attributes left to pick
# return a node with the most popular label
if (
depth_budget == 0
or len(examples[target_label].unique()) == 1
or examples.shape[1] == 2
):
return DecisionTree(
is_leaf=True, label=examples[target_label].value_counts().idxmax()
)
examples = examples.sample(frac=1)
max_metric = None
attribute = None
for element in examples.columns.values:
if element == target_label:
continue
# next_metric = metric(examples, target_label, element, entropy)
next_metric = metric(examples, target_label, element)
if attribute is None or next_metric > max_metric:
max_metric = next_metric
attribute = element
most_common_label = examples[target_label].value_counts().idxmax()
to_return = DecisionTree(
attribute=attribute, label=most_common_label, gain=max_metric
)
# for each possible value of this attribute
for value in examples[attribute].unique():
example_subset = examples[examples[attribute] == value]
example_subset = example_subset.drop(attribute, axis=1)
if depth_budget is None:
to_return.add_branch(value, ID3(example_subset, target_label))
else:
to_return.add_branch(
value, ID3(example_subset, target_label, depth_budget=depth_budget - 1)
)
return to_return
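def _id3_usage_sketch():
    """
    Minimal usage sketch (illustrative): fit a small tree and probe it with a
    dict of attribute values; 'windy' carries all the signal here, so the
    prediction for a windy day is expected to be 'no'.
    """
    import pandas as pd
    toy = pd.DataFrame({"windy": ["y", "y", "n", "n"],
                        "sunny": ["y", "n", "y", "n"],
                        "play": ["no", "no", "yes", "yes"]})
    tree = ID3(toy, target_label="play", metric=information_gain, depth_budget=2)
    return tree.traverse({"windy": "y", "sunny": "n"})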
```
#### File: pepperonai/supervised/perceptron.py
```python
__author__ = "<NAME>"
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
def sign(number: float) -> int:
"""
This helper method returns 0, 1, or -1 based on the sign of the given number
:param number: a number
:return: 0 iff number is 0, -1 iff number is negative, and 1 iff number is positive
"""
if number == 0:
return 0
if number < 0:
return -1
else:
return 1
class SimplePerceptron:
"""
This class implements a perceptron algorithm - with no funny business
"""
def __init__(self):
self.weight = None
self.bias = None
self.update_count = 0
self.epoch_saves = []
def train(
self,
examples: "dataframe",
target_labels: "List[float]",
dimens: int,
learning_rate: float = 0.1,
epochs: int = 1,
rate_decay: bool = True,
):
"""
Trains a Simple perceptron with the given input
        :param examples: A sparse matrix wherein each row is a vector example corresponding to target_labels
        :param target_labels: An array wherein each element is a label corresponding to the rows of examples
        :param dimens: An int indicating the dimensions of the example space
        :param learning_rate: The rate at which the model should adjust its weight/bias on incorrect guesses, default is 0.1
        :param epochs: The number of training epochs
:param rate_decay: Boolean, true for decayed rate of learning in later epochs, default is true
"""
decay = learning_rate
self.weight = np.random.uniform(low=-0.01, high=0.01, size=(1, dimens))
self.bias = np.random.uniform(low=-0.01, high=0.01)
self.update_count = 0
self.epoch_saves = []
for count in range(epochs):
indicese = np.arange(len(target_labels))
np.random.shuffle(indicese)
if rate_decay:
learning_rate = decay / (1 + count)
for index in indicese:
x = examples.getrow(index).todense()
y = target_labels[index]
prediction = sign(self.weight.dot(x.T) + self.bias)
if prediction != y:
# update if prediction is incorrect
self.weight = self.weight + (learning_rate * (y * x))
self.bias = self.bias + (learning_rate * y)
self.update_count += 1
self.epoch_saves.append((self.weight, self.bias))
def probe(self, x: "List[float]", epoch: int = None) -> int:
"""
Probes the model for a guess on the given input
:param x: An array with which to probe the model
:param epoch: If desired, give an epoch number and this will probe the state of the model after that training epoch
"""
if epoch:
return sign(
self.epoch_saves[epoch][0].dot(x.T) + self.epoch_saves[epoch][1]
)
else:
return sign(self.weight.dot(x.T) + self.bias)
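def _simple_perceptron_sketch():
    """
    Minimal usage sketch (illustrative): four linearly separable examples whose
    label is the sign of the first feature; after a few epochs, probing a
    training point with a positive first feature typically returns +1.
    """
    examples = csr_matrix(np.array([[1.0, 0.0], [-1.0, 0.5], [2.0, 1.0], [-2.0, 0.25]]))
    labels = [1, -1, 1, -1]
    model = SimplePerceptron()
    model.train(examples, labels, dimens=2, epochs=5)
    return model.probe(examples.getrow(2).todense())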
class AveragedPerceptron:
"""
This class implements an averaged perceptron algorithm
"""
def __init__(self):
self.weight = None
self.model = None
self.bias = None
self.update_count = 0
self.epoch_saves = []
def train(
self,
examples: "dataframe",
target_labels: "List[float]",
dimens: int,
learning_rate: float = 0.1,
epochs: int = 1,
rate_decay: bool = True,
):
"""
Trains an averaged perceptron with the given input
        :param examples: A sparse matrix wherein each row is a vector example corresponding to target_labels
        :param target_labels: An array wherein each element is a label corresponding to the rows of examples
        :param dimens: An int indicating the dimensions of the example space
        :param learning_rate: The rate at which the model should adjust its weight/bias on incorrect guesses, default is 0.1
        :param epochs: The number of training epochs
:param rate_decay: Boolean, true for decayed rate of learning in later epochs, default is true
"""
self.bias = np.random.uniform(low=-0.01, high=0.01)
self.weight = np.random.uniform(low=-0.01, high=0.01, size=(1, dimens))
weight_summation = self.weight
bias_summation = 0
self.update_count = 0
self.epoch_saves = []
decay = learning_rate
for count in range(epochs):
indicese = np.arange(len(target_labels))
np.random.shuffle(indicese)
if rate_decay:
learning_rate = decay / (1 + count)
for index in indicese:
x = examples.getrow(index).todense()
y = target_labels[index]
prediction = sign(self.weight.dot(x.T) + self.bias)
if prediction != y:
                    # update if prediction is incorrect
self.weight = self.weight + (learning_rate * (y * x))
self.bias = self.bias + (learning_rate * y)
self.update_count += 1
# update average
weight_summation = weight_summation + self.weight
bias_summation += self.bias
self.epoch_saves.append(
(
weight_summation / len(target_labels),
bias_summation / len(target_labels),
)
)
self.model = weight_summation / len(target_labels)
self.bias = bias_summation / len(target_labels)
def probe(self, x: "List[float]", epoch: int = None) -> float:
"""
Probes the model for a guess on the given input
:param x: An array with which to probe the model
:param epoch: If desired, give an epoch number and this will probe the state of the model after that training epoch
"""
if epoch:
return sign(
self.epoch_saves[epoch][0].dot(x.T) + self.epoch_saves[epoch][1]
)
else:
return sign(self.model.dot(x.T) + self.bias)
```
#### File: pepperonai/supervised/regression.py
```python
__author__ = "<NAME>"
import numpy as np
class LinearRegression:
def __init__(self):
self.fitted = False
pass
def train(self, X: np.array, y: np.array, lmbda: int):
"""
Fit a linear regression model
Args:
X (np.array): (N, d) array
            y (np.array): (N,) array
lmbda (int): regularization coefficient
"""
N = X.shape[0]
d = X.shape[1]
        y_col = y.reshape(-1, 1)
# add column for coeffs
matrix = np.hstack((np.ones((N, 1)), X))
const = N * lmbda * np.eye(d + 1)
self.beta = (
            np.linalg.pinv(matrix.T.dot(matrix) + const).dot(matrix.T).dot(y_col)
)
self.fitted = True
def probe(self, X: np.array) -> np.array:
"""
Returns prediction for examples
Args:
            X (np.array): Input examples with shape (N, d)
Returns:
np.array: Predicted results of shape (N, )
"""
if not self.fitted:
raise Exception("This model has not been fitted")
N = X.shape[0]
d = X.shape[1]
matrix = np.hstack((np.ones((N, 1)), X))
results = matrix.dot(self.beta).reshape(-1)
return results
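def _linear_regression_sketch():
    """
    Minimal usage sketch (illustrative): with no regularization the model
    recovers y = 2x, so probing at x = 4 returns roughly 8.
    """
    X = np.arange(10, dtype=float).reshape(-1, 1)
    y = 2.0 * X.reshape(-1)
    model = LinearRegression()
    model.train(X, y, lmbda=0)
    return model.probe(np.array([[4.0]]))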
```
#### File: pepperonai/supervised/svm.py
```python
__author__ = "<NAME>"
import pandas as pd
import numpy as np
def sign(number: int) -> int:
""" Maps the sign of the number to -1 or 1"""
if number < 0:
return -1
else:
return 1
class SVM:
"""
SVM Via SGD
"""
def __init__(self):
self.weight = None
self.bias = None
self.update_count = 0
self.epoch_saves = []
def train(
self,
examples: "dataframe",
target_labels: "List[float]",
initial_learning_rate: float = 0.1,
loss_tradeoff: float = 0,
epochs: int = 1,
rate_decay: bool = True,
):
"""
Trains a SVM on examples
        :param examples: a matrix wherein each row is one training example
        :param target_labels: an array where each element's index corresponds to the label for examples
        :param initial_learning_rate: the initial learning rate as a double
        :param loss_tradeoff: the regularization tradeoff as a double
        :param epochs: the number of epochs to train the model for
        :param rate_decay: decreases learning_rate with each epoch iff true
"""
self.weight = np.random.uniform(low=-0.01, high=0.01, size=(len(examples[0]),))
self.bias = np.random.uniform(low=-0.01, high=0.01)
self.update_count = 0
self.epoch_saves = []
for count in range(epochs):
indicese = np.arange(len(target_labels))
np.random.shuffle(indicese)
            if rate_decay:
                learning_rate = initial_learning_rate / (1 + count)
            else:
                learning_rate = initial_learning_rate
for index in indicese:
x = examples[index].T
y = target_labels[index]
                prediction = y * (self.weight.T.dot(x) + self.bias)
if prediction <= 1:
self.weight = (1 - learning_rate) * self.weight + (
learning_rate * loss_tradeoff * y * x
)
self.bias = self.bias + (learning_rate * y)
else:
self.weight = (1 - learning_rate) * self.weight
self.epoch_saves.append((self.weight, self.bias))
def probe(self, x: "List[float]", epoch: int = None) -> int:
"""
Probes the model for a guess on the given input
:param x: An array with which to probe the model
:param epoch: If desired, give an epoch number and this will probe the state of the model after that training epoch
"""
if epoch:
return sign(
self.epoch_saves[epoch][0].dot(x.T) + self.epoch_saves[epoch][1]
)
else:
return sign(self.weight.T.dot(x) + self.bias)
``` |
{
"source": "JonWiggins/spaCy-entity-linker",
"score": 2
} |
#### File: spaCy-entity-linker/spacy_entity_linker/DatabaseConnection.py
```python
import sqlite3
import os
MAX_DEPTH_CHAIN = 10
P_INSTANCE_OF = 31
P_SUBCLASS = 279
MAX_ITEMS_CACHE = 100000
conn = None
entity_cache = {}
chain_cache = {}
DB_DEFAULT_PATH = os.path.abspath(__file__ + '/../../data_spacy_entity_linker/wikidb_filtered.db')
wikidata_instance = None
def get_wikidata_instance():
global wikidata_instance
if wikidata_instance is None:
wikidata_instance = WikidataQueryController()
return wikidata_instance
class WikidataQueryController:
def __init__(self):
self.conn = None
self.cache = {
"entity": {},
"chain": {},
"name": {}
}
self.init_database_connection()
def _get_cached_value(self, cache_type, key):
return self.cache[cache_type][key]
def _is_cached(self, cache_type, key):
return key in self.cache[cache_type]
def _add_to_cache(self, cache_type, key, value):
if len(self.cache[cache_type]) < MAX_ITEMS_CACHE:
self.cache[cache_type][key] = value
def init_database_connection(self, path=DB_DEFAULT_PATH):
self.conn = sqlite3.connect(path)
def clear_cache(self):
self.cache["entity"].clear()
self.cache["chain"].clear()
self.cache["name"].clear()
def get_entities_from_alias(self, alias):
c = self.conn.cursor()
if self._is_cached("entity", alias):
return self._get_cached_value("entity", alias).copy()
query_alias = """SELECT j.item_id,j.en_label, j.en_description,j.views,j.inlinks,a.en_alias from aliases as a
LEFT JOIN joined as j ON a.item_id = j.item_id
WHERE a.en_alias_lowercase = ? and j.item_id NOT NULL"""
c.execute(query_alias, [alias.lower()])
fetched_rows = c.fetchall()
self._add_to_cache("entity", alias, fetched_rows)
return fetched_rows
def get_instances_of(self, item_id, properties=[P_INSTANCE_OF, P_SUBCLASS], count=1000):
query = "SELECT source_item_id from statements where target_item_id={} and edge_property_id IN ({}) LIMIT {}".format(
item_id, ",".join([str(prop) for prop in properties]), count)
c = self.conn.cursor()
c.execute(query)
res = c.fetchall()
return [e[0] for e in res]
def get_entity_name(self, item_id):
if self._is_cached("name", item_id):
return self._get_cached_value("name", item_id)
c = self.conn.cursor()
query = "SELECT en_label from joined WHERE item_id=?"
c.execute(query, [item_id])
res = c.fetchone()
if res and len(res):
if res[0] == None:
self._add_to_cache("name", item_id, 'no label')
else:
self._add_to_cache("name", item_id, res[0])
else:
self._add_to_cache("name", item_id, '<none>')
return self._get_cached_value("name", item_id)
def get_entity(self, item_id):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"WHERE j.item_id=={}".format(item_id)
res = c.execute(query)
return res.fetchone()
def get_children(self, item_id, limit=100):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"JOIN statements as s on j.item_id=s.source_item_id " \
"WHERE s.target_item_id={} and s.edge_property_id IN (279,31) LIMIT {}".format(item_id, limit)
res = c.execute(query)
return res.fetchall()
def get_parents(self, item_id, limit=100):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"JOIN statements as s on j.item_id=s.target_item_id " \
"WHERE s.source_item_id={} and s.edge_property_id IN (279,31) LIMIT {}".format(item_id, limit)
res = c.execute(query)
return res.fetchall()
def get_categories(self, item_id, max_depth=10):
chain = []
edges = []
self._append_chain_elements(item_id, 0, chain, edges, max_depth, [P_INSTANCE_OF, P_SUBCLASS])
return [el[0] for el in chain]
def get_chain(self, item_id, max_depth=10, property=P_INSTANCE_OF):
chain = []
edges = []
self._append_chain_elements(item_id, 0, chain, edges, max_depth, property)
return chain
def get_recursive_edges(self, item_id):
chain = []
edges = []
        self._append_chain_elements(item_id, 0, chain, edges)
return edges
def _append_chain_elements(self, item_id, level=0, chain=[], edges=[], max_depth=10, property=P_INSTANCE_OF):
properties = property
if type(property) != list:
properties = [property]
if self._is_cached("chain", (item_id, max_depth)):
chain += self._get_cached_value("chain", (item_id, max_depth)).copy()
return
# prevent infinite recursion
if level >= max_depth:
return
c = self.conn.cursor()
query = "SELECT target_item_id,edge_property_id from statements where source_item_id={} and edge_property_id IN ({})".format(
item_id, ",".join([str(prop) for prop in properties]))
# set value for current item in order to prevent infinite recursion
self._add_to_cache("chain", (item_id, max_depth), [])
for target_item in c.execute(query):
chain_ids = [el[0] for el in chain]
if not (target_item[0] in chain_ids):
chain += [(target_item[0], level + 1)]
edges.append((item_id, target_item[0], target_item[1]))
self._append_chain_elements(target_item[0], level=level + 1, chain=chain, edges=edges,
max_depth=max_depth,
property=property)
self._add_to_cache("chain", (item_id, max_depth), chain)
if __name__ == '__main__':
queryInstance = WikidataQueryController()
queryInstance.init_database_connection()
print(queryInstance.get_categories(13191, max_depth=1))
print(queryInstance.get_categories(13191, max_depth=1))
```
#### File: spaCy-entity-linker/spacy_entity_linker/EntityCollection.py
```python
from collections import Counter, defaultdict
from .DatabaseConnection import get_wikidata_instance
MAX_ITEMS_PREVIEW=20
class EntityCollection:
def __init__(self, entities=[]):
self.entities = entities
def __iter__(self):
for entity in self.entities:
yield entity
def __getitem__(self, item):
return self.entities[item]
def __len__(self):
return len(self.entities)
def append(self, entity):
self.entities.append(entity)
def get_categories(self, max_depth=1):
categories = []
for entity in self.entities:
categories += entity.get_categories(max_depth)
return categories
def print_super_entities(self, max_depth=1, limit=10):
wikidataInstance = get_wikidata_instance()
all_categories = []
category_to_entites = defaultdict(list)
for e in self.entities:
for category in e.get_categories(max_depth):
category_to_entites[category].append(e)
all_categories.append(category)
counter = Counter()
counter.update(all_categories)
for category, frequency in counter.most_common(limit):
print("{} ({}) : {}".format(wikidataInstance.get_entity_name(category), frequency,
','.join([str(e) for e in category_to_entites[category]])))
def __repr__(self) -> str:
preview_str="<EntityCollection ({} entities):".format(len(self))
for index,entity_element in enumerate(self):
if index>MAX_ITEMS_PREVIEW:
preview_str+="\n...{} more".format(len(self)-MAX_ITEMS_PREVIEW)
break
preview_str+="\n-{}".format(entity_element.get_preview_string())
preview_str+=">"
return preview_str
def pretty_print(self):
for entity in self.entities:
entity.pretty_print()
def grouped_by_super_entities(self, max_depth=1):
counter = Counter()
counter.update(self.get_categories(max_depth))
return counter
def get_distinct_categories(self, max_depth=1):
return list(set(self.get_categories(max_depth)))
``` |
{
"source": "jonwilami323/TypePrism",
"score": 2
} |
#### File: test/test_bots/test_redditbot.py
```python
import unittest
import sys
sys.path.append(".")
from rmtibot.redditbot import test_unit
class TestRedditInstance(unittest.TestCase):
def test_should_pass(self):
        self.assertEqual(test_unit(3), 4, "Set up Reddit Instance.")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jonwilkey/chem-eng-solver",
"score": 3
} |
#### File: src/chem_eng_solver/units.py
```python
import re
from typing import Tuple, Union
import unyt as u
MAX_SIGFIGS = 6
class Patterns:
"""Define regular expressions used for parsing strings here."""
initial = re.compile(r"^([-0-9\.eE]+)\s+([a-zA-Z)^/*0-9\s-]+)")
base_units = re.compile(r"([a-zA-Z]+)")
operators = r"(^[\*/]?{unit}\^?(-?\d+)?)"
sigfigs = re.compile(r"[-0]*([0-9]*)?\.?([0-9]*)?[eE]?[0-9-]*")
@staticmethod
def sigfigs_repl(x: re.Match) -> str:
"""Removes trailing-zeros (10000 -> 1) and leading zeros (000.001 -> .001).
Args:
x (re.Match): Match object containing left/right-side of decimal point.
Returns:
str: String with trailing/leading zeros removed.
"""
left, right = x.groups()
left = left.rstrip("0") if right == "" else left
return left + right if left != "0" else right
class Units:
"""Class for parsing and converting units from input strings."""
def __init__(self, max_sigfigs: int = MAX_SIGFIGS) -> None:
"""Initialize class.
Args:
max_sigfigs (int, optional): How many sig figs to report (at most). Defaults
to MAX_SIGFIGS.
"""
self.sigfigs = max_sigfigs
def _initial_parser(self, input_str: str) -> Tuple[float, str]:
"""Parses input into numeric value string and units string.
Args:
input_str (str): Input string.
Raises:
Exception: If input string doesn't match expected pattern.
Returns:
Tuple[str, str]: Value and units of parsed input string.
"""
found = Patterns.initial.findall(input_str)
if not found:
raise Exception(
f"Input string '{input_str}' doesn't match expected parsing "
"pattern. Please input value again using the pattern: "
"'[-0-9\\.]+ [units]' where units are any set of character "
"strings with '/' to indicate division and '**' or '^' are used"
" to indicate exponentiation."
)
value, units = found[0]
self.count_sigfigs(value)
return float(value), units
@staticmethod
def _units_parser(units: str) -> u.unyt_quantity:
"""Parses given units string, returning equivalent unyt_quantity object.
Args:
units (str): Units string, e.g. "kg/m/s**2".
Raises:
Exception: If unknown pattern is encountered that prevents function from
building final_unit object (expects just multiplication, division, and
exponentiation without paranthesis or brackets).
Returns:
u.unyt_quantity: unyt_quantity object equivalent to the input string.
"""
units = units.replace(" ", "").replace("**", "^")
final_units = 1.0
for unit in Patterns.base_units.findall(units):
base_unit = getattr(u, unit)
parsed = re.findall(Patterns.operators.format(unit=unit), units)
if not parsed:
raise Exception(
f"Cannot determine how to build '{unit}' into final_units "
f"from first occurence in remaining unit string: {units}"
)
base_str, exponent = parsed[0]
if base_str.startswith("/"):
if exponent:
final_units /= base_unit ** int(exponent)
else:
final_units /= base_unit
else:
if exponent:
final_units *= base_unit ** int(exponent)
else:
final_units *= base_unit
units = units.replace(base_str, "", 1)
return final_units
def count_sigfigs(self, value: str) -> None:
"""Count number of significant figures in input value.
        Tracks the minimum number of sigfigs among the values that this instance
        of :class:`Units` has encountered so far.
Args:
value (str): Input value.
"""
sigfig_count = len(Patterns.sigfigs.sub(Patterns.sigfigs_repl, value))
self.sigfigs = min(self.sigfigs, sigfig_count)
def unit_converter(
self, input_str: str, include_units: bool = False
) -> Union[float, u.unyt_quantity]:
"""Parse input value plus string and convert into equivalent SI units.
This function assumes that inputs match the following pattern:
:regexp:`[0-9]+ [a-z]+([*/])?`
Args:
input_str (str): Input value plus units, e.g. "212 degF".
include_units (bool, optional): Whether or not to include units in the
return object. Defaults to False.
Returns:
float: Value converted to SI units, e.g. "212 degF" --> 373.15 (the
equivalent value in Kelvin).
"""
value, units_str = self._initial_parser(input_str)
units = self._units_parser(units_str)
quantity = value * units
quantity.convert_to_mks()
return quantity if include_units else quantity.to_value()
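if __name__ == "__main__":
    # Minimal usage sketch (illustrative), mirroring the docstring examples above:
    # "212 degF" converts to roughly 373.15 K, and "10 kg/m/s**2" is already in
    # SI, so it comes back as 10.0 (pascals when units are included).
    converter = Units()
    print(converter.unit_converter("212 degF"))
    print(converter.unit_converter("10 kg/m/s**2", include_units=True))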
``` |
{
"source": "Jonwodi/Django-PostgreSQL-Docker",
"score": 2
} |
#### File: app/table_booker/models.py
```python
from django.contrib.auth.models import User
from django.db import models
class Restaurant(models.Model):
name = models.CharField(max_length=150)
address1 = models.CharField(max_length=250)
address2 = models.CharField(max_length=250)
postcode = models.CharField(max_length=12)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Table(models.Model):
restaurant = models.ForeignKey(
Restaurant, on_delete=models.CASCADE, related_name="tables"
)
name = models.CharField(max_length=250)
capacity = models.IntegerField()
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.name} capacity: {self.capacity}"
class Booking(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
restaurant = models.ForeignKey(Restaurant, on_delete=models.CASCADE)
table = models.ForeignKey(Table, on_delete=models.CASCADE)
date = models.DateTimeField()
total_guests = models.IntegerField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
DAYS_OF_WEEK = (
(0, "Monday"),
(1, "Tuesday"),
(2, "Wednesday"),
(3, "Thursday"),
(4, "Friday"),
(5, "Saturday"),
(6, "Sunday"),
)
class BusinessHour(models.Model):
restaurant = models.ForeignKey(Restaurant, on_delete=models.CASCADE)
day = models.IntegerField(choices=DAYS_OF_WEEK)
start_time = models.TimeField()
finish_time = models.TimeField()
closed = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Setting(models.Model):
restaurant = models.OneToOneField(
Restaurant, on_delete=models.CASCADE, related_name="setting"
)
min_guest = models.IntegerField()
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
``` |
{
"source": "jonwright/PolyXSim",
"score": 2
} |
#### File: PolyXSim/polyxsim/make_imagestack.py
```python
from __future__ import absolute_import
from __future__ import print_function
import numpy as n
from xfab import tools
from xfab import detector
from fabio import edfimage,tifimage
import gzip
from scipy import ndimage
from . import variables,check_input
from . import generate_grains
import time
import sys
A_id = variables.refarray().A_id
class make_image:
def __init__(self,graindata,killfile):
self.graindata = graindata
self.killfile = killfile
# wedge NB! wedge is in degrees
# The sign is reversed for wedge as the parameter in
# tools.find_omega_general is right handed and in ImageD11
# it is left-handed (at this point wedge is defined as in ImageD11)
self.wy = -1.*self.graindata.param['wedge']*n.pi/180.
self.wx = 0.
def setup_odf(self):
odf_scale = self.graindata.param['odf_scale']
if self.graindata.param['odf_type'] == 1:
odf_spread = self.graindata.param['mosaicity']/4
odf_spread_grid = odf_spread/odf_scale
sigma = odf_spread_grid*n.ones(3)
r1_max = int(n.ceil(3*odf_spread_grid))
r1_range = r1_max*2 + 1
r2_range = r1_max*2 + 1
r3_range = r1_max*2 + 1
mapsize = r1_range*n.ones(3)
odf_center = r1_max*n.ones(3)
print('size of ODF map', mapsize)
self.odf = generate_grains.gen_odf(sigma,odf_center,mapsize)
#from pylab import *
#imshow(self.odf[:,:,odf_center[2]])
#show()
elif self.graindata.param['odf_type'] == 3:
odf_spread = self.graindata.param['mosaicity']/4
odf_spread_grid = odf_spread/odf_scale
            r1_max = int(n.ceil(3*odf_spread_grid))
            r2_max = int(n.ceil(3*odf_spread_grid))
            r3_max = int(n.ceil(3*odf_spread_grid))
r1_range = r1_max*2 + 1
r2_range = r2_max*2 + 1
r3_range = r3_max*2 + 1
print('size of ODF map', r1_range*n.ones(3))
odf_center = r1_max*n.ones(3)
self.odf= n.zeros((r1_range,r2_range,r3_range))
            # Makes a spherical ODF for debugging purposes
for i in range(self.odf.shape[0]):
for j in range(self.odf.shape[1]):
for k in range(self.odf.shape[2]):
r = [i-(r1_max), j-(r2_max), k-(r3_max)]
if n.linalg.norm(r) > r1_max:
self.odf[i,j,k] = 0
else:
self.odf[i,j,k] = 1
#from pylab import *
#imshow(self.odf[:,:,r3_max],interpolation=None)
#show()
elif self.graindata.param['odf_type'] == 2:
file = self.graindata.param['odf_file']
            print('Read ODF from file %s' %file)
file = open(file,'r')
(r1_range, r2_range, r3_range) = file.readline()[9:].split()
r1_range = int(r1_range)
r2_range = int(r2_range)
r3_range = int(r3_range)
odf_scale = float(file.readline()[10:])
oneD_odf = n.fromstring(file.readline(),sep=' ')
elements = r1_range*r2_range*r3_range
self.odf = oneD_odf[:elements].reshape(r1_range,r2_range,r3_range)
if self.graindata.param['odf_sub_sample'] > 1:
sub =self.graindata.param['odf_sub_sample']
print('subscale =',sub)
r1_range_sub = r1_range * self.graindata.param['odf_sub_sample']
r2_range_sub = r2_range * self.graindata.param['odf_sub_sample']
r3_range_sub = r3_range * self.graindata.param['odf_sub_sample']
odf_fine = n.zeros((r1_range_sub,r2_range_sub,r3_range_sub))
for i in range(r1_range):
for j in range(r2_range):
for k in range(r3_range):
odf_fine[i*sub:(i+1)*sub,
j*sub:(j+1)*sub,
k*sub:(k+1)*sub] = self.odf[i,j,k]
self.odf = odf_fine.copy()/(sub*sub*sub)
r1_range = r1_range_sub
r2_range = r2_range_sub
r3_range = r3_range_sub
odf_scale = odf_scale/sub
print('odf_scale', odf_scale)
#[r1_range, r2_range, r3_range] = self.odf.shape
odf_center = [(r1_range)/2, r2_range/2, r3_range/2]
print(odf_center)
#self.odf[:,:,:] = 0.05
print(self.odf.shape)
#from pylab import *
#imshow(self.odf[:,:,odf_center[2]])
#show()
self.Uodf = n.zeros(r1_range*r2_range*r3_range*9).\
reshape(r1_range,r2_range,r3_range,3,3)
if self.graindata.param['odf_cut'] != None:
self.odf_cut = self.odf.max()*self.graindata.param['odf_cut']
else:
self.odf_cut = 0.0
for i in range(self.odf.shape[0]):
for j in range(self.odf.shape[1]):
for k in range(self.odf.shape[2]):
r = odf_scale*n.pi/180.*\
n.array([i-odf_center[0],
j-odf_center[1],
k-odf_center[2]])
self.Uodf[i,j,k,:,:] = tools.rod_to_u(r)
if self.graindata.param['odf_type'] != 2:
file = open(self.graindata.param['stem']+'.odf','w')
file.write('ODF size: %i %i %i\n' %(r1_range,r2_range,r3_range))
file.write('ODF scale: %f\n' %(odf_scale))
for i in range(int(r1_range)):
self.odf[i,:,:].tofile(file,sep=' ',format='%f')
file.write(' ')
file.close()
return self.Uodf
def make_image_array(self):
from scipy import sparse
#make stack of empty images as a dictionary of sparse matrices
print('Build sparse image stack')
stacksize = len(self.graindata.frameinfo)
self.frames = {}
for i in range(stacksize):
self.frames[i]=sparse.lil_matrix((int(self.graindata.param['dety_size']),
int(self.graindata.param['detz_size'])))
def make_image(self,grainno=None,refl = None):
from scipy import ndimage
if grainno == None:
do_grains = list(range(self.graindata.param['no_grains']))
else:
do_grains = [grainno]
# loop over grains
for grainno in do_grains:
gr_pos = n.array(self.graindata.param['pos_grains_%s' \
%(self.graindata.param['grain_list'][grainno])])
B = self.graindata.grain[grainno].B
SU = n.dot(self.graindata.S,self.graindata.grain[grainno].U)
if refl == None:
do_refs = list(range(len(self.graindata.grain[grainno].refs)))
else:
do_refs = [refl]
# loop over reflections for each grain
for nref in do_refs:
# exploit that the reflection list is sorted according to omega
print('\rDoing reflection %i of %i for grain %i of %i' %(nref+1,
len(self.graindata.grain[grainno].refs),
grainno+1,self.graindata.param['no_grains']), end=' ')
sys.stdout.flush()
#print 'Doing reflection: %i' %nref
if self.graindata.param['odf_type'] == 3:
intensity = 1
else:
intensity = self.graindata.grain[grainno].refs[nref,A_id['Int']]
hkl = n.array([self.graindata.grain[grainno].refs[nref,A_id['h']],
self.graindata.grain[grainno].refs[nref,A_id['k']],
self.graindata.grain[grainno].refs[nref,A_id['l']]])
Gc = n.dot(B,hkl)
for i in range(self.odf.shape[0]):
for j in range(self.odf.shape[1]):
for k in range(self.odf.shape[2]):
check_input.interrupt(self.killfile)
if self.odf[i,j,k] > self.odf_cut:
Gtmp = n.dot(self.Uodf[i,j,k],Gc)
Gw = n.dot(SU,Gtmp)
Glen = n.sqrt(n.dot(Gw,Gw))
tth = 2*n.arcsin(Glen/(2*abs(self.graindata.K)))
costth = n.cos(tth)
Qw = Gw*self.graindata.param['wavelength']/(4.*n.pi)
(Omega, eta) = tools.find_omega_general(Qw,
tth,
self.wx,
self.wy)
try:
minpos = n.argmin(n.abs(Omega-self.graindata.grain[grainno].refs[nref,A_id['omega']]))
except:
print(Omega)
if len(Omega) == 0:
continue
omega = Omega[minpos]
# if omega not in rotation range continue to next step
if (self.graindata.param['omega_start']*n.pi/180) > omega or\
omega > (self.graindata.param['omega_end']*n.pi/180):
continue
Om = tools.form_omega_mat_general(omega,self.wx,self.wy)
Gt = n.dot(Om,Gw)
# Calc crystal position at present omega
[tx,ty,tz]= n.dot(Om,gr_pos)
(dety, detz) = detector.det_coor(Gt,
costth,
self.graindata.param['wavelength'],
self.graindata.param['distance'],
self.graindata.param['y_size'],
self.graindata.param['z_size'],
self.graindata.param['dety_center'],
self.graindata.param['detz_center'],
self.graindata.R,
tx,ty,tz)
if self.graindata.param['spatial'] != None :
# To match the coordinate system of the spline file
# SPLINE(i,j): i = detz; j = (dety_size-1)-dety
# Well at least if the spline file is for frelon2k
(x,y) = detector.detyz_to_xy([dety,detz],
self.graindata.param['o11'],
self.graindata.param['o12'],
self.graindata.param['o21'],
self.graindata.param['o22'],
self.graindata.param['dety_size'],
self.graindata.param['detz_size'])
# Do the spatial distortion
(xd,yd) = self.spatial.distort(x,y)
# transform coordinates back to dety,detz
(dety,detz) = detector.xy_to_detyz([xd,yd],
self.graindata.param['o11'],
self.graindata.param['o12'],
self.graindata.param['o21'],
self.graindata.param['o22'],
self.graindata.param['dety_size'],
self.graindata.param['detz_size'])
if dety > -0.5 and dety <= self.graindata.param['dety_size']-0.5 and\
detz > -0.5 and detz <= self.graindata.param['detz_size']-0.5:
dety = int(round(dety))
detz = int(round(detz))
frame_no = int(n.floor((omega*180/n.pi-self.graindata.param['omega_start'])/\
self.graindata.param['omega_step']))
self.frames[frame_no][dety,detz] = self.frames[frame_no][dety,detz]+ intensity*self.odf[i,j,k]
def correct_image(self):
from scipy import ndimage
no_frames = len(self.graindata.frameinfo)
print('\nGenerating ', no_frames, 'frames')
for frame_no in self.frames:
t1 = time.perf_counter()
frame = self.frames[frame_no].toarray()
if self.graindata.param['bg'] > 0:
frame = frame + self.graindata.param['bg']*n.ones((self.graindata.param['dety_size'],
self.graindata.param['detz_size']))
# add noise
if self.graindata.param['noise'] != 0:
frame = n.random.poisson(frame)
# apply psf
if self.graindata.param['psf'] != 0:
frame = ndimage.gaussian_filter(frame,self.graindata.param['psf']*0.5)
# limit values above 16 bit to be 16bit
frame = n.clip(frame,0,2**16-1)
# convert to integers
frame = n.uint16(frame)
#flip detector orientation according to input: o11, o12, o21, o22
frame = detector.trans_orientation(frame,
self.graindata.param['o11'],
self.graindata.param['o12'],
self.graindata.param['o21'],
self.graindata.param['o22'],
'inverse')
# Output frames
if '.edf' in self.graindata.param['output']:
self.write_edf(frame_no,frame)
if '.edf.gz' in self.graindata.param['output']:
self.write_edf(frame_no,frame,usegzip=True)
if '.tif' in self.graindata.param['output']:
self.write_tif(frame_no,frame)
if '.tif16bit' in self.graindata.param['output']:
self.write_tif16bit(frame_no,frame)
print('\rDone frame %i took %8f s' %(frame_no+1,time.perf_counter()-t1), end=' ')
sys.stdout.flush()
def write_edf(self,framenumber,frame,usegzip=False):
e=edfimage.edfimage()
e.data=frame
edim2,edim1=frame.shape
e.header = {}
e.header['origin']='PolyXSim'
e.header['Dim_1']=edim1
e.header['Dim_2']=edim2
e.header['col_end']=edim1-1
e.header['row_end']=edim2-1
e.header['DataType']='UnsignedShort'
e.header['Image']=1
e.header['ByteOrder']='Low'
e.header['time']=time.asctime()
e.header['Omega']= self.graindata.frameinfo[framenumber].omega +\
self.graindata.param['omega_step']/2.0
e.header['OmegaStep']=self.graindata.param['omega_step']
e.header['grainfile']='%s/%s_%0.4dgrains.txt' \
%(self.graindata.param['direc'],self.graindata.param['stem'],self.graindata.param['no_grains'])
fname = '%s%s' %(self.graindata.frameinfo[framenumber].name,'.edf')
if usegzip:
fobj = gzip.GzipFile( fname + ".gz", "wb" )
e.write( fobj )
fobj.close()
else:
e.write(fname)
def write_tif(self,framenumber,frame):
e=tifimage.tifimage()
e.data=frame
e.write('%s%s' %(self.graindata.frameinfo[framenumber].name,'.tif'))
def write_tif16bit(self,framenumber,frame):
size = frame.shape[:2][::-1]
pilimage = Image.frombuffer('I',size,frame.tobytes(),"raw",'I;16',0,1)
pilimage.save('%s%s' %(self.graindata.frameinfo[framenumber].name,'.tif'))
```
#### File: PolyXSim/test/test_input.py
```python
from __future__ import absolute_import
import unittest
import numpy as n
from polyxsim import check_input
class test_input(unittest.TestCase):
def test_reading(self): ## test method names begin 'test*'
myinput = check_input.parse_input(input_file='simul.inp')
myinput.read()
def test_checking(self):
myinput = check_input.parse_input(input_file='simul.inp')
myinput.read()
myinput.check()
def test_initialize(self):
myinput = check_input.parse_input(input_file='simul.inp')
myinput.read()
myinput.check()
myinput.initialize()
if __name__ == '__main__':
unittest.main()
```
#### File: PolyXSim/test/test_miller.py
```python
from __future__ import absolute_import
import unittest
from polyxsim import check_input
from polyxsim import reflections
import numpy as n
class test_gen_miller(unittest.TestCase):
def test_(self): ## test method names begin 'test*'
param = {}
param['theta_min'] = 0
param['theta_max'] = 5
param['wavelength'] = 0.2647
param['unit_cell_phase_0'] = [8.5312,4.8321,10.125,90.00,92.031,90.00]
param['sgno'] = 4
param['sgname_phase_0'] = 'P21'
param['cell_choice_phase_0'] = 'standard'
hkl = reflections.gen_miller(param,0)
self.assertEqual(len(hkl),498)
def test_open_structure(self):
param = {}
param['structure_phase_0'] = 'oPPA.cif'
# param['structure_datablock'] = 'oPPA'
structure = reflections.open_structure(param,0)
self.assertEqual(param['sgno_phase_0'],4)
self.assertEqual([8.5312,4.8321,10.125,90.00,92.031,90.00],
param['unit_cell_phase_0'])
def test_calc_intensity(self):
myinput = check_input.parse_input(input_file='simul.inp')
myinput.read()
myinput.check()
myinput.initialize()
myinput.param['structure_phase_0'] = 'oPPA.cif'
myinput.param['structure_datablock'] = 'oPPA'
xtal_structure = reflections.open_structure(myinput.param,0)
hkl = reflections.gen_miller(myinput.param,0)
hkl = reflections.calc_intensity(hkl,xtal_structure)
def test_add_intensity(self):
param = {}
param['theta_min'] = 0
param['theta_max'] = 3
param['wavelength'] = 0.2647
param['unit_cell_phase_0'] = [8.5312,4.8321,10.125,90.00,92.031,90.00]
param['cell_choice_phase_0']='standard'
param['sgno_phase_0'] = 4
param['sgname_phase_0'] = 'p21'
hkl = reflections.gen_miller(param,0)
hkl2 = reflections.add_intensity(hkl,param)
self.assertEqual(n.sum(hkl2[:,3]),len(hkl2)*2**15)
param['structure_int'] = 2**14
hkl2 = reflections.add_intensity(hkl,param)
self.assertEqual(n.sum(hkl2[:,3]),len(hkl2)*2**14)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonwright/pyopengltk",
"score": 2
} |
#### File: pyopengltk/pyopengltk/win32.py
```python
from ctypes import WinDLL, c_void_p
from ctypes.wintypes import HDC
from OpenGL.WGL import PIXELFORMATDESCRIPTOR, ChoosePixelFormat, \
SetPixelFormat, SwapBuffers, wglCreateContext, wglMakeCurrent
from pyopengltk.base import BaseOpenGLFrame
_user32 = WinDLL('user32')
GetDC = _user32.GetDC
GetDC.restype = HDC
GetDC.argtypes = [c_void_p]
pfd = PIXELFORMATDESCRIPTOR()
PFD_TYPE_RGBA = 0
PFD_MAIN_PLANE = 0
PFD_DOUBLEBUFFER = 0x00000001
PFD_DRAW_TO_WINDOW = 0x00000004
PFD_SUPPORT_OPENGL = 0x00000020
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER
pfd.iPixelType = PFD_TYPE_RGBA
pfd.cColorBits = 24
pfd.cDepthBits = 16
pfd.iLayerType = PFD_MAIN_PLANE
# Inherits the base and fills in the 3 platform dependent functions
class OpenGLFrame(BaseOpenGLFrame):
def tkCreateContext(self):
self.__window = GetDC(self.winfo_id())
pixelformat = ChoosePixelFormat(self.__window, pfd)
SetPixelFormat(self.__window, pixelformat, pfd)
self.__context = wglCreateContext(self.__window)
wglMakeCurrent(self.__window, self.__context)
def tkMakeCurrent(self):
if self.winfo_ismapped():
wglMakeCurrent(self.__window, self.__context)
def tkSwapBuffers(self):
if self.winfo_ismapped():
SwapBuffers(self.__window)
``` |
{
"source": "Jonxslays/analytix",
"score": 2
} |
#### File: analytix/analytix/features.py
```python
from __future__ import annotations
import typing as t
from analytix import abc, data, errors
class CompareMixin:
values: set[str]
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.values == other.values
def __ne__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.values != other.values
def __hash__(self) -> int:
return hash(self.__class__.__name__)
class NestedCompareMixin:
values: set[abc.SetType]
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.values == other.values
def __ne__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.values != other.values
def __hash__(self) -> int:
return hash(self.__class__.__name__)
class Metrics(abc.FeatureType, CompareMixin):
def validate(self, inputs: t.Collection[str]) -> None:
if not len(inputs):
raise errors.MissingMetrics()
if not isinstance(inputs, set):
inputs = set(inputs)
diff = inputs - data.ALL_METRICS
if diff:
raise errors.InvalidMetrics(diff)
diff = inputs - self.values
if diff:
raise errors.UnsupportedMetrics(diff)
class SortOptions(abc.FeatureType, CompareMixin):
def __init__(self, *args: str, descending_only: bool = False) -> None:
super().__init__(*args)
self.descending_only = descending_only
def validate(self, inputs: t.Collection[str]) -> None:
raw_inputs = set(i.strip("-") for i in inputs)
if not isinstance(inputs, set):
inputs = set(inputs)
diff = raw_inputs - data.ALL_METRICS
if diff:
raise errors.InvalidSortOptions(diff)
diff = raw_inputs - self.values
if diff:
raise errors.UnsupportedSortOptions(diff)
if self.descending_only:
diff = {i for i in inputs if not i.startswith("-")}
if diff:
raise errors.UnsupportedSortOptions(diff, descending_only=True)
class Dimensions(abc.SegmentedFeatureType, NestedCompareMixin):
def validate(self, inputs: t.Collection[str]) -> None:
if not isinstance(inputs, set):
inputs = set(inputs)
diff = inputs - data.ALL_DIMENSIONS
if diff:
depr = inputs & data.DEPRECATED_DIMENSIONS
raise errors.InvalidDimensions(diff, depr)
diff = inputs - self.every
if diff:
raise errors.UnsupportedDimensions(diff)
for set_type in self.values:
set_type.validate_dimensions(inputs)
class Filters(abc.MappingFeatureType, NestedCompareMixin):
@property
def every_key(self) -> set[str]:
return {v[: v.index("=")] if "==" in v else v for v in self.every}
@property
def locked(self) -> dict[str, str]:
locked = {}
for set_type in self.values:
for value in filter(lambda v: "==" in v, set_type.values):
k, v = value.split("==")
locked.update({k: v})
return locked
def validate(self, inputs: dict[str, str]) -> None:
keys = set(inputs.keys())
locked = self.locked
diff = keys - data.ALL_FILTERS
if diff:
raise errors.InvalidFilters(diff)
for k, v in inputs.items():
valid = data.VALID_FILTER_OPTIONS[k]
if valid and (v not in valid):
raise errors.InvalidFilterValue(k, v)
if k in locked.keys():
if v != locked[k]:
raise errors.UnsupportedFilterValue(k, v)
diff = keys - self.every_key
if diff:
raise errors.UnsupportedFilters(diff)
for set_type in self.values:
set_type.validate_filters(keys)
class Required(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
if self.values & inputs == self.values:
return
common = len(inputs & self.values)
raise errors.InvalidSetOfDimensions("all", common, self.values)
def validate_filters(self, keys: set[str]) -> None:
if self.expd_keys & keys == self.expd_keys:
return
common = len(keys & self.expd_keys)
raise errors.InvalidSetOfFilters("all", common, self.values)
class ExactlyOne(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
if len(self.values & inputs) == 1:
return
common = len(inputs & self.values)
raise errors.InvalidSetOfDimensions("1", common, self.values)
def validate_filters(self, keys: set[str]) -> None:
if len(self.expd_keys & keys) == 1:
return
common = len(keys & self.expd_keys)
raise errors.InvalidSetOfFilters("1", common, self.values)
class OneOrMore(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
if len(self.values & inputs) > 0:
return
common = len(inputs & self.values)
raise errors.InvalidSetOfDimensions("at least 1", common, self.values)
def validate_filters(self, keys: set[str]) -> None:
if len(self.expd_keys & keys) > 0:
return
common = len(keys & self.expd_keys)
raise errors.InvalidSetOfFilters("at least 1", common, self.values)
class Optional(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
# No verification required.
...
def validate_filters(self, keys: set[str]) -> None:
# No verification required.
...
class ZeroOrOne(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
if len(self.values & inputs) < 2:
return
common = len(inputs & self.values)
raise errors.InvalidSetOfDimensions("0 or 1", common, self.values)
def validate_filters(self, keys: set[str]) -> None:
if len(self.expd_keys & keys) < 2:
return
common = len(keys & self.expd_keys)
raise errors.InvalidSetOfFilters("0 or 1", common, self.values)
class ZeroOrMore(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
# No verification required.
...
def validate_filters(self, keys: set[str]) -> None:
# No verification required.
...
``` |
{
"source": "Jonxslays/Blackjack",
"score": 4
} |
#### File: blackjack/models/cards.py
```python
class Card:
"""Represents a playing card."""
RANKS = (2, 3, 4, 5, 6, 7, 8, 9, 10, "jack", "queen", "king", "ace")
SUITS = ("clubs", "diamonds", "hearts", "spades")
def __init__(self, rank: str | int, suit: str, hidden: bool = False) -> None:
self._rank = rank
self._suit = suit
self._hidden = hidden
@property
def rank(self) -> str | int:
"""The rank of the card."""
return self._rank
@property
def suit(self) -> str:
"""The suit of the card."""
return self._suit
@property
def hidden(self) -> bool:
"""Whether or not the card is hidden."""
return self._hidden
def flip(self) -> "Card":
"""Flip the card to the opposite hidden state."""
self._hidden = not self.hidden
return self
def value(self, score: int) -> int:
"""Returns the cards value."""
if isinstance(self.rank, int):
return self.rank
match self.rank:
case "king" | "queen" | "jack":
return 10
case _:
return 1 if score + 11 > 21 else 11
def __repr__(self) -> str:
return "**HIDDEN**" if self.hidden else f"{self.rank} of {self.suit}"
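
# --- Hypothetical usage sketch (not part of the original file) ---
# Illustrates how Card.value() scores an ace relative to the current hand score.
if __name__ == "__main__":
    ace = Card("ace", "spades")
    print(ace.value(score=5))    # -> 11, because 5 + 11 does not bust
    print(ace.value(score=15))   # -> 1, because 15 + 11 would exceed 21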
```
#### File: blackjack/models/decks.py
```python
import random
from blackjack.models import Card
class Deck:
"""Represents a deck of playing cards"""
def __init__(self) -> None:
self.new_deck()
@property
def cards(self) -> list[Card]:
"""The cards in the deck."""
return self._cards
def new_deck(self) -> None:
"""Generates a new deck of 52 cards and shuffles them."""
self._cards: list[Card] = []
for r in Card.RANKS:
self.cards.extend(Card(r, s) for s in Card.SUITS)
self.shuffle()
def shuffle(self) -> None:
"""Shuffles the deck."""
random.shuffle(self.cards)
def draw(self) -> Card:
"""Draws one card from the deck."""
return self.cards.pop()
def draw_many(self, amount: int) -> list[Card]:
"""Draws multiple cards from the deck."""
drawn = self.cards[-amount:]
self._cards = self.cards[:-amount]
return drawn
def __repr__(self) -> str:
"""Representation of a deck."""
return "\n".join(str(c) for c in self.cards)
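
# --- Hypothetical usage sketch (not part of the original file) ---
# Deals a two-card hand and scores it with Card.value(), which needs the
# running score so an ace can fall back from 11 to 1.
if __name__ == "__main__":
    deck = Deck()
    hand = deck.draw_many(2)
    score = 0
    for card in hand:
        score += card.value(score)
    print(f"Hand: {hand} -> {score} ({len(deck.cards)} cards left in the deck)")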
``` |
{
"source": "Jonxslays/Carberretta",
"score": 2
} |
#### File: bot/cogs/gateway.py
```python
import datetime as dt
import discord
from discord.ext import commands
from carberretta import Config
TIMEOUT = 600
class Gateway(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
async def schedule_action(self, member: discord.Member, secs: int = TIMEOUT) -> None:
async def _take_action(member: discord.Member) -> None:
if member.pending:
return await member.kick(
reason=(
"Member failed to accept the server rules before "
"being timed out."
)
)
await member.add_roles(
self.announcements_role,
self.videos_role,
reason="Member accepted the server rules.",
atomic=False
)
self.bot.scheduler.add_job(
_take_action,
id=f"{member.id}",
next_run_time=dt.datetime.utcnow() + dt.timedelta(seconds=secs),
args=[member],
)
@commands.Cog.listener()
async def on_ready(self) -> None:
if not self.bot.ready.booted:
self.gateway_channel = self.bot.get_channel(Config.GATEWAY_ID)
self.announcements_role = self.gateway_channel.guild.get_role(Config.ANNOUNCEMENTS_ROLE_ID)
self.videos_role = self.gateway_channel.guild.get_role(Config.VIDEOS_ROLE_ID)
for m in self.gateway_channel.guild.members:
# total_seconds() avoids day wrap-around for members who joined more than a day ago
if (secs := int((dt.datetime.utcnow() - m.joined_at).total_seconds())) <= TIMEOUT:
await self.schedule_action(m, secs=TIMEOUT - secs)
elif m.pending:
await m.kick(
reason=(
"Member failed to accept the server rules before "
"being timed out."
)
)
self.bot.ready.up(self)
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member) -> None:
await self.schedule_action(member)
@commands.Cog.listener()
async def on_member_remove(self, member: discord.Member) -> None:
if member.pending:
try:
return self.bot.scheduler.get_job(f"{member.id}").remove()
except AttributeError:
pass
else:
await self.gateway_channel.send(
f"{member.display_name} is no longer in the server. "
f"(ID: {member.id})"
)
@commands.Cog.listener()
async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:
if before.pending != after.pending:
humans = len([m for m in after.guild.members if not m.bot])
await self.gateway_channel.send(
f"Welcome {after.mention}! You are member nº {humans:,} of "
"Carberra Tutorials (excluding bots). Make yourself at home "
"in <#626608699942764548>, and look at <#739572184745377813> "
"to find out how to get support."
)
def setup(bot: commands.Bot) -> None:
bot.add_cog(Gateway(bot))
```
#### File: bot/cogs/support.py
```python
import asyncio
import datetime as dt
import io
import json
import os
import re
import typing as t
from enum import Enum
from inspect import Parameter
import aiofiles
import aiofiles.os
import aiohttp
import discord
from apscheduler.jobstores.base import ConflictingIdError
from discord.ext import commands
from carberretta import Config
from carberretta.utils import string
INACTIVE_TIME: t.Final = 3600
NAMES: t.Final = [
"alpha",
"beta",
"gamma",
"delta",
"epsilon",
"zeta",
"eta",
"theta",
"iota",
"kappa",
"lambda",
"mu",
"nu",
"xi",
"omicron",
"pi",
"rho",
"sigma",
"tau",
"upsilon",
"phi",
"chi",
"psi",
"omega",
]
class SupportState(Enum):
UNAVAILABLE = 0
OCCUPIED = 1
AVAILABLE = 2
STATES: t.Final = {
Config.UNAVAILABLE_SUPPORT_ID: SupportState.UNAVAILABLE,
Config.OCCUPIED_SUPPORT_ID: SupportState.OCCUPIED,
Config.AVAILABLE_SUPPORT_ID: SupportState.AVAILABLE,
}
class SupportChannel:
def __init__(self, channel: discord.TextChannel, message: t.Optional[discord.Message] = None):
self.channel = channel
self._message = message
self._previous_message = None
self.get_channel = self.channel.guild.get_channel
@property
def id(self) -> int:
return self.channel.id
@property
def state(self) -> SupportState:
return STATES.get(self.channel.category.id, SupportState.UNAVAILABLE)
@property
def message(self) -> discord.Message:
return self._message
@message.setter
def message(self, value: discord.Message) -> None:
self._previous_message = self._message
self._message = value
@property
def previous_message(self) -> discord.Message:
return self._previous_message
@property
def occupied_from(self) -> t.Optional[dt.datetime]:
return getattr(self.message, "created_at", None)
@property
def claimant(self) -> discord.Member:
return getattr(self.message, "author", None)
def determine_position_in(self, category: discord.CategoryChannel) -> int:
return sorted([self.channel, *category.text_channels], key=lambda c: c.id).index(self.channel) + 1
async def send_to_available(self) -> None:
self.message = None
category = self.channel.guild.get_channel(Config.AVAILABLE_SUPPORT_ID)
await self.channel.edit(
category=category,
reason="Support channel is now available.",
sync_permissions=True,
position=self.determine_position_in(category),
)
async def send_to_occupied(self, message: discord.Message) -> None:
self.message = message
category = self.channel.guild.get_channel(Config.OCCUPIED_SUPPORT_ID)
await self.channel.edit(
category=category,
reason="Support channel is now occupied.",
sync_permissions=True,
position=self.determine_position_in(category),
)
try:
await self.channel.send(f"This channel is now occupied by {self.claimant.mention}.")
except AttributeError:
# Accounting for Kelsier's blasted macro d:
pass
async def send_to_unavailable(self) -> None:
self.message = None
category = self.channel.guild.get_channel(Config.UNAVAILABLE_SUPPORT_ID)
await self.channel.edit(
category=category,
reason="Support channel is now unavailable.",
sync_permissions=True,
position=self.determine_position_in(category),
)
class Support(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self.state_path = f"{self.bot._dynamic}/support.json"
self._channels: t.List[SupportChannel] = []
@property
def available_channels(self) -> t.List[SupportChannel]:
return [c for c in self._channels if c.state == SupportState.AVAILABLE]
@property
def occupied_channels(self) -> t.List[SupportChannel]:
return [c for c in self._channels if c.state == SupportState.OCCUPIED]
@property
def usable_channels(self) -> t.List[SupportChannel]:
return [c for c in self._channels if c.state != SupportState.UNAVAILABLE]
@property
def max_total(self) -> int:
return min(max(4, len(self.helper_role.members)), 24)
@property
def max_usable(self) -> int:
return max(4, len([m for m in self.helper_role.members if m.status != discord.Status.offline]))
@staticmethod
def idle_timeout(offset: int = 0) -> dt.datetime:
return dt.datetime.utcnow() + dt.timedelta(seconds=INACTIVE_TIME + offset)
@commands.Cog.listener()
async def on_ready(self) -> None:
if not self.bot.ready.booted:
self.available_category = self.bot.get_channel(Config.AVAILABLE_SUPPORT_ID)
self.occupied_category = self.bot.get_channel(Config.OCCUPIED_SUPPORT_ID)
self.unavailable_category = self.bot.get_channel(Config.UNAVAILABLE_SUPPORT_ID)
self.redirect_channel = self.bot.get_channel(Config.REDIRECT_ID)
self.info_channel = self.bot.get_channel(Config.INFO_ID)
self.staff_role = self.available_category.guild.get_role(Config.STAFF_ROLE_ID)
self.helper_role = self.available_category.guild.get_role(Config.HELPER_ROLE_ID)
data = await self.load_states()
for channel in [*self.available_category.text_channels, *self.unavailable_category.text_channels]:
self._channels.append(SupportChannel(channel))
for channel in self.occupied_category.text_channels:
try:
message = await channel.fetch_message(data[f"{channel.id}"])
except discord.NotFound:
message = await channel.history(
limit=None, after=dt.datetime.utcnow() - dt.timedelta(seconds=7200)
).get(author__id=self.bot.user.id)
self._channels.append(sc := SupportChannel(channel, message))
if message is None:
return await self.determine_channel_destination(sc)
last_message = (await sc.channel.history(limit=1).flatten())[0]
secs_since_activity = int((dt.datetime.utcnow() - last_message.created_at).total_seconds())
if secs_since_activity > INACTIVE_TIME:
await self.determine_channel_destination(sc)
else:
await self.schedule(sc, -secs_since_activity)
self.bot.ready.up(self)
@commands.Cog.listener()
async def on_disconnect(self) -> None:
data = {f"{sc.id}": getattr(sc.message, "id", 0) for sc in self._channels}
await self.save_states(data)
async def on_shutdown(self) -> None:
data = {f"{sc.id}": getattr(sc.message, "id", 0) for sc in self._channels}
await self.save_states(data)
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
if self.bot.ready.support and not message.author.bot:
if (sc := self.get_support_channel(message.channel)) is None or message.content.startswith(Config.PREFIX):
return
if sc.state == SupportState.AVAILABLE:
if (
claimed := self.get_claimed_channel(message.author)
) is not None and claimed is not message.channel:
await claimed.send(
f"{message.author.mention}, you're still occupying this channel. If you have further questions, ask them here."
)
try:
await message.delete()
except discord.NotFound:
pass
else:
await self.open_case(sc, message)
else:
await self.reschedule(sc)
@commands.Cog.listener()
async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:
if self.bot.ready.support:
if (
self.helper_role in after.roles
and before.status != after.status
and after.status == discord.Status.online
and len(self.usable_channels) < self.max_usable
):
await self.try_get_from_unavailable("Helper came online.")
# Attempt to avoid caching problems.
if before.roles != after.roles and (set(after.roles) ^ set(before.roles)).pop() == self.helper_role:
self.helper_role = discord.utils.get(await self.bot.guild.fetch_roles(), id=Config.HELPER_ROLE_ID)
async def open_case(self, sc: SupportChannel, message: discord.Message) -> None:
await sc.send_to_occupied(message)
await self.schedule(sc)
if not self.available_category.text_channels:
await self.update_available()
async def schedule(self, sc: SupportChannel, offset: int = 0) -> None:
try:
self.bot.scheduler.add_job(
self.close_case, id=f"{sc.channel.id}", next_run_time=self.idle_timeout(offset), args=[sc]
)
except ConflictingIdError:
await self.reschedule(sc, offset)
async def reschedule(self, sc: SupportChannel, offset: int = 0) -> None:
try:
self.bot.scheduler.get_job(f"{sc.channel.id}").modify(next_run_time=self.idle_timeout(offset))
except AttributeError:
pass
async def unschedule(self, sc: SupportChannel) -> None:
try:
self.bot.scheduler.get_job(f"{sc.channel.id}").remove()
except AttributeError:
pass
async def close_case(self, sc: SupportChannel) -> None:
if sc.claimant == self.bot.user or sc.claimant == None:
claimant = "The"
else:
claimant = f"{sc.claimant.display_name}'{'s' if not sc.claimant.display_name.endswith('s') else ''}"
await self.determine_channel_destination(sc)
await sc.channel.send(f"{claimant} support case timed out.")
async def load_states(self) -> t.Mapping[str, int]:
if os.path.isfile(self.state_path):
async with aiofiles.open(f"{self.bot._dynamic}/support.json", "r", encoding="utf-8") as f:
data = json.loads(await f.read())
await aiofiles.os.remove(self.state_path)
return data
else:
return {}
async def save_states(self, data: t.Mapping[str, int]) -> None:
async with aiofiles.open(f"{self.bot._dynamic}/support.json", "w", encoding="utf-8") as f:
await f.write(json.dumps(data, ensure_ascii=False))
def get_support_channel(self, tc: discord.TextChannel) -> t.Optional[SupportChannel]:
for sc in self._channels:
if sc.channel == tc:
return sc
return None
def get_claimed_channel(self, member: discord.Member) -> t.Optional[discord.TextChannel]:
for channel in self._channels:
if member == channel.claimant:
return channel.channel
return None
async def try_get_available_channel(self) -> t.Optional[discord.TextChannel]:
try:
return self.available_category.text_channels[0]
except IndexError:
return await self.update_available()
async def try_get_from_unavailable(self, reason: str) -> t.Optional[discord.TextChannel]:
try:
await (tc := self.unavailable_category.text_channels[0]).edit(
category=self.available_category, reason=reason, sync_permissions=True
)
return tc
except IndexError:
return None
async def try_create_new_channel(self, reason: str) -> t.Optional[discord.TextChannel]:
if len(self._channels) < self.max_total:
tc = await self.available_category.create_text_channel(
f"support-{NAMES[len(self._channels)]}",
topic=f"Need help? Ask your question here. Read {self.info_channel.mention} for more information.",
reason=reason,
)
self._channels.append(SupportChannel(tc))
return tc
return None
async def determine_channel_destination(self, sc: SupportChannel) -> None:
if len(self.usable_channels) > self.max_usable:
return await sc.send_to_unavailable()
await sc.send_to_available()
async def update_available(self) -> t.Optional[discord.TextChannel]:
if len(self.usable_channels) < self.max_usable:
reason = "All usable support channels are occupied."
return await self.try_get_from_unavailable(reason) or await self.try_create_new_channel(reason)
return None
@commands.command(name="close")
async def close_command(self, ctx: commands.Context) -> None:
if (sc := self.get_support_channel(ctx.channel)) is None:
return await ctx.message.delete()
if not (ctx.author == sc.claimant or self.staff_role in ctx.author.roles):
return await ctx.send(f"{ctx.author.mention}, you can't close this support case.")
if sc.claimant == self.bot.user or sc.claimant == None:
claimant = "The"
else:
claimant = string.possessive(sc.claimant)
await self.determine_channel_destination(sc)
await self.unschedule(sc)
await ctx.send(f"{claimant} support case was closed.")
@commands.command(name="reopen")
async def reopen_command(self, ctx: commands.Context, target: t.Optional[discord.Member]) -> None:
if (sc := self.get_support_channel(ctx.channel)) is None:
return await ctx.message.delete()
if sc.state == SupportState.OCCUPIED:
return await ctx.send("There is already a support case open in this channel.")
if target is not None:
message = await ctx.channel.history(
limit=None, after=dt.datetime.utcnow() - dt.timedelta(seconds=86400),
).get(author__id=target.id)
else:
message = sc.previous_message
if message is None:
return await ctx.send("No case could be reopened.")
await self.open_case(sc, message)
@commands.command(name="claimant", aliases=["client"])
async def claimant_command(self, ctx: commands.Context) -> None:
if (sc := self.get_support_channel(ctx.channel)) is None:
return await ctx.message.delete()
if sc.claimant == self.bot.user:
return await ctx.send("A channel claimant could not be identified.")
await ctx.send(f"This channel is currently claimed by {sc.claimant.display_name}.")
@commands.command(name="redirect")
async def redirect_command(self, ctx: commands.Context, target: discord.Member) -> None:
await ctx.message.delete()
if (sc := self.get_support_channel(ctx.channel)) is None:
return
if sc.state == SupportState.AVAILABLE:
return
if not (ctx.author == sc.claimant or self.staff_role in ctx.author.roles):
return await ctx.send(
f"{ctx.author.mention}, you can't redirect members in this support case.", delete_after=10
)
if target == sc.claimant or target.bot:
return await ctx.send(f"{ctx.author.mention}, that member can't be redirected.", delete_after=10)
if not (purged := await ctx.channel.purge(after=sc.message, check=lambda m: m.author == target)):
return await ctx.send(f"{target.display_name} doesn't appear to have been here recently.", delete_after=10)
# Redirection valid:
async def culminate(messages) -> t.List[str]:
big_message = "**Your previous messages:**"
for message in reversed(messages):
content = f"`{message.created_at.strftime('%H:%M:%S')}Z` {message.clean_content}"
big_message += f"\n{await string.binify(self.bot.session, content)}"
return [big_message[i : i + 2000] for i in range(0, len(big_message), 2000)]
if channel := self.get_claimed_channel(target):
await channel.send(f"{target.mention}, you're still occupying this channel.")
for message in await culminate(purged):
await channel.send(message)
elif (channel := await self.try_get_available_channel()) and (sc := self.get_support_channel(channel)):
await sc.send_to_occupied(purged[0])
await self.schedule(sc)
await channel.send(
f"You were redirected as the channel you attempted to open a support case in is already occupied."
)
for message in await culminate(purged):
await channel.send(message)
else:
await self.redirect_channel.send(
f"{target.name}, you were redirected as the channel you attempted to open a support case in is already occupied. Unfortunately, there are no available support channels, so you will need to wait for a channel to become available."
)
@commands.command(name="binify")
async def binify_command(self, ctx: commands.Context, *, obj: t.Union[discord.Message, str]):
async with ctx.typing():
if isinstance(obj, discord.Message):
if obj.attachments:
await obj.attachments[0].save(data := io.BytesIO())
file_contents = data.read().decode(encoding="utf-8")
else:
file_contents = ""
content = (
f"{await string.binify(self.bot.session, obj.clean_content, only_codeblocks=False)}" + "\n\n"
if obj.clean_content
else ""
)
file = f"{await string.binify(self.bot.session, file_contents) if file_contents else ''}"
await ctx.send(f"**{string.possessive(obj.author)} message:**\n{content}{file}")
await ctx.message.delete()
else:
await ctx.send(
f"{ctx.author.mention}:\n{await string.binify(self.bot.session, discord.utils.escape_mentions(obj), only_codeblocks=False)}"
)
await ctx.message.delete()
# @commands.command(name="call")
# @commands.cooldown(1, 21600, commands.BucketType.member)
# async def call_command(self, ctx: commands.Context) -> None:
# # Calls a specified role. Useful for preventing mention spamming.
# pass
def setup(bot: commands.Bot) -> None:
bot.add_cog(Support(bot))
``` |
{
"source": "Jonxslays/hikari-miru",
"score": 2
} |
#### File: hikari-miru/examples/basic.py
```python
import hikari
import miru
# This is a basic example demonstrating how to create a simple component-based menu.
# This example uses decorators to create the components, if you want to use variables
# instead of static values for the component properties, check out the subclassed example.
class BasicView(miru.View):
# Define a new Select menu with two options
@miru.select(
placeholder="Select me!",
options=[
miru.SelectOption(label="Option 1"),
miru.SelectOption(label="Option 2"),
],
)
async def basic_select(self, select: miru.Select, ctx: miru.Context) -> None:
await ctx.respond(f"You've chosen {select.values[0]}!")
# Define a new Button with the Style of success (Green)
@miru.button(label="Click me!", style=hikari.ButtonStyle.SUCCESS)
async def basic_button(self, button: miru.Button, ctx: miru.Context) -> None:
await ctx.respond("You clicked me!")
# Define a new Button that when pressed will stop the view & invalidate all the buttons in this view
@miru.button(label="Stop me!", style=hikari.ButtonStyle.DANGER)
async def stop_button(self, button: miru.Button, ctx: miru.Context) -> None:
self.stop() # Called to stop the view
bot = hikari.GatewayBot("...")
miru.load(bot) # Start miru
@bot.listen()
async def buttons(event: hikari.GuildMessageCreateEvent) -> None:
# Do not process messages from bots or empty messages
if event.is_bot or not event.content:
return
if event.content.startswith("miru"):
view = BasicView() # Create an instance of our newly created BasicView
# Build the components defined in the view and attach them to our message
# View.build() returns a list of the built action-rows, ready to be sent in a message
message = await event.message.respond(
"This is a basic component menu built with miru!", components=view.build()
)
view.start(message) # Start listening for interactions
await view.wait() # Wait until the view is stopped or times out
print("View stopped or timed out!")
bot.run()
```
#### File: hikari-miru/miru/context.py
```python
from __future__ import annotations
import typing
import hikari
from hikari.snowflakes import Snowflake
from .interaction import Interaction
from .traits import ViewsAware
if typing.TYPE_CHECKING:
from .view import View
__all__ = ["Context"]
class Context:
"""
A context object proxying a component interaction.
"""
def __init__(self, view: View, interaction: Interaction) -> None:
self._view: View = view
self._interaction: Interaction = interaction
@property
def interaction(self) -> Interaction:
"""The underlying interaction object."""
return self._interaction
@property
def view(self) -> View:
"""The view this context originates from."""
return self._view
@property
def app(self) -> ViewsAware:
"""The application that loaded miru."""
return self._view.app
@property
def message(self) -> hikari.Message:
"""The message object this context is proxying."""
return self._interaction.message
@property
def user(self) -> hikari.User:
"""The user who triggered this interaction."""
return self._interaction.user
@property
def member(self) -> typing.Optional[hikari.InteractionMember]:
"""The member who triggered this interaction. Will be None in DMs."""
return self._interaction.member
@property
def locale(self) -> str:
"""The locale of this context."""
return self._interaction.locale
@property
def guild_locale(self) -> typing.Optional[str]:
"""
The guild locale of this context, if in a guild.
This will default to `en-US` if not a community guild.
"""
return self._interaction.guild_locale
@property
def channel_id(self) -> Snowflake:
"""The ID of the channel the context represents."""
return self._interaction.channel_id
@property
def guild_id(self) -> typing.Optional[Snowflake]:
"""The ID of the guild the context represents. Will be None in DMs."""
return self._interaction.guild_id
def get_guild(self) -> typing.Optional[hikari.GatewayGuild]:
"""Gets the guild this context represents, if any. Requires application cache."""
return self.interaction.get_guild()
def get_channel(self) -> typing.Union[hikari.GuildTextChannel, hikari.GuildNewsChannel, None]:
"""Gets the channel this context represents, None if in a DM. Requires application cache."""
return self.interaction.get_channel()
async def respond(
self,
content: hikari.UndefinedOr[typing.Any] = hikari.UNDEFINED,
*,
flags: typing.Union[int, hikari.MessageFlag, hikari.UndefinedType] = hikari.UNDEFINED,
tts: hikari.UndefinedOr[bool] = hikari.UNDEFINED,
component: hikari.UndefinedOr[hikari.api.ComponentBuilder] = hikari.UNDEFINED,
components: hikari.UndefinedOr[typing.Sequence[hikari.api.ComponentBuilder]] = hikari.UNDEFINED,
embed: hikari.UndefinedOr[hikari.Embed] = hikari.UNDEFINED,
embeds: hikari.UndefinedOr[typing.Sequence[hikari.Embed]] = hikari.UNDEFINED,
mentions_everyone: hikari.UndefinedOr[bool] = hikari.UNDEFINED,
user_mentions: hikari.UndefinedOr[
typing.Union[hikari.SnowflakeishSequence[hikari.PartialUser], bool]
] = hikari.UNDEFINED,
role_mentions: hikari.UndefinedOr[
typing.Union[hikari.SnowflakeishSequence[hikari.PartialRole], bool]
] = hikari.UNDEFINED,
) -> None:
"""Short-hand method to respond to the interaction this context represents.
Parameters
----------
content : undefined.UndefinedOr[typing.Any], optional
The content of the message. Anything passed here will be cast to str.
tts : undefined.UndefinedOr[bool], optional
If the message should be tts or not.
attachment : undefined.UndefinedOr[hikari.Resourceish], optional
An attachment to add to this message.
attachments : undefined.UndefinedOr[typing.Sequence[hikari.Resourceish]], optional
A sequence of attachments to add to this message.
component : undefined.UndefinedOr[hikari.api.special_endpoints.ComponentBuilder], optional
A component to add to this message.
components : undefined.UndefinedOr[typing.Sequence[hikari.api.special_endpoints.ComponentBuilder]], optional
A sequence of components to add to this message.
embed : undefined.UndefinedOr[hikari.Embed], optional
An embed to add to this message.
embeds : undefined.UndefinedOr[typing.Sequence[hikari.Embed]], optional
A sequence of embeds to add to this message.
mentions_everyone : undefined.UndefinedOr[bool], optional
If True, mentioning @everyone will be allowed.
user_mentions : undefined.UndefinedOr[typing.Union[hikari.SnowflakeishSequence[hikari.PartialUser], bool]], optional
The set of allowed user mentions in this message. Set to True to allow all.
role_mentions : undefined.UndefinedOr[typing.Union[hikari.SnowflakeishSequence[hikari.PartialRole], bool]], optional
The set of allowed role mentions in this message. Set to True to allow all.
flags : typing.Union[undefined.UndefinedType, int, hikari.MessageFlag], optional
Message flags that should be included with this message.
"""
if self.interaction._issued_response:
await self.interaction.execute(
content,
tts=tts,
component=component,
components=components,
embed=embed,
embeds=embeds,
mentions_everyone=mentions_everyone,
user_mentions=user_mentions,
role_mentions=role_mentions,
flags=flags,
)
else:
await self.interaction.create_initial_response(
hikari.ResponseType.MESSAGE_CREATE,
content,
tts=tts,
component=component,
components=components,
embed=embed,
embeds=embeds,
mentions_everyone=mentions_everyone,
user_mentions=user_mentions,
role_mentions=role_mentions,
flags=flags,
)
async def edit_response(
self,
content: hikari.UndefinedOr[typing.Any] = hikari.UNDEFINED,
*,
flags: typing.Union[int, hikari.MessageFlag, hikari.UndefinedType] = hikari.UNDEFINED,
tts: hikari.UndefinedOr[bool] = hikari.UNDEFINED,
component: hikari.UndefinedOr[hikari.api.ComponentBuilder] = hikari.UNDEFINED,
components: hikari.UndefinedOr[typing.Sequence[hikari.api.ComponentBuilder]] = hikari.UNDEFINED,
embed: hikari.UndefinedOr[hikari.Embed] = hikari.UNDEFINED,
embeds: hikari.UndefinedOr[typing.Sequence[hikari.Embed]] = hikari.UNDEFINED,
mentions_everyone: hikari.UndefinedOr[bool] = hikari.UNDEFINED,
user_mentions: hikari.UndefinedOr[
typing.Union[hikari.SnowflakeishSequence[hikari.PartialUser], bool]
] = hikari.UNDEFINED,
role_mentions: hikari.UndefinedOr[
typing.Union[hikari.SnowflakeishSequence[hikari.PartialRole], bool]
] = hikari.UNDEFINED,
) -> None:
"""A short-hand method to edit the message belonging to this interaction.
Parameters
----------
content : undefined.UndefinedOr[typing.Any], optional
The content of the message. Anything passed here will be cast to str.
tts : undefined.UndefinedOr[bool], optional
If the message should be tts or not.
attachment : undefined.UndefinedOr[hikari.Resourceish], optional
An attachment to add to this message.
attachments : undefined.UndefinedOr[typing.Sequence[hikari.Resourceish]], optional
A sequence of attachments to add to this message.
component : undefined.UndefinedOr[hikari.api.special_endpoints.ComponentBuilder], optional
A component to add to this message.
components : undefined.UndefinedOr[typing.Sequence[hikari.api.special_endpoints.ComponentBuilder]], optional
A sequence of components to add to this message.
embed : undefined.UndefinedOr[hikari.Embed], optional
An embed to add to this message.
embeds : undefined.UndefinedOr[typing.Sequence[hikari.Embed]], optional
A sequence of embeds to add to this message.
mentions_everyone : undefined.UndefinedOr[bool], optional
If True, mentioning @everyone will be allowed.
user_mentions : undefined.UndefinedOr[typing.Union[hikari.SnowflakeishSequence[hikari.PartialUser], bool]], optional
The set of allowed user mentions in this message. Set to True to allow all.
role_mentions : undefined.UndefinedOr[typing.Union[hikari.SnowflakeishSequence[hikari.PartialRole], bool]], optional
The set of allowed role mentions in this message. Set to True to allow all.
flags : typing.Union[undefined.UndefinedType, int, hikari.MessageFlag], optional
Message flags that should be included with this message.
Raises
------
RuntimeError
The interaction was already responded to.
"""
if self.interaction._issued_response:
await self.interaction.edit_initial_response(
content,
component=component,
components=components,
embed=embed,
embeds=embeds,
mentions_everyone=mentions_everyone,
user_mentions=user_mentions,
role_mentions=role_mentions,
)
else:
await self.interaction.create_initial_response(
hikari.ResponseType.MESSAGE_UPDATE,
content,
component=component,
components=components,
tts=tts,
embed=embed,
embeds=embeds,
mentions_everyone=mentions_everyone,
user_mentions=user_mentions,
role_mentions=role_mentions,
flags=flags,
)
async def defer(self, flags: typing.Union[int, hikari.MessageFlag, None] = None) -> None:
"""Short-hand method to defer an interaction response. Raises RuntimeError if the interaction was already responded to.
Parameters
----------
flags : typing.Union[int, hikari.MessageFlag, None], optional
Message flags that should be included with this defer request, by default None
Raises
------
RuntimeError
The interaction was already responded to.
"""
if self.interaction._issued_response:
raise RuntimeError("Interaction was already responded to.")
await self.interaction.create_initial_response(hikari.ResponseType.DEFERRED_MESSAGE_UPDATE, flags=flags)
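
# --- Hypothetical usage sketch (not part of the original file) ---
# A view item callback receives a Context; a long-running handler would
# typically defer first and then send a follow-up once the work is done.
async def _example_callback(ctx: Context) -> None:
    await ctx.defer()                     # deferred initial response
    result = 40 + 2                       # stand-in for slow work
    await ctx.respond(f"Done: {result}")  # follow-up sent via interaction.execute()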
``` |
{
"source": "Jonxslays/Jonxhikari",
"score": 2
} |
#### File: core/plugins/code.py
```python
import re
import lightbulb
import hikari
import jonxhikari
class Compile(lightbulb.Plugin):
"""Runs code through the Piston api."""
def __init__(self, bot: jonxhikari.Bot) -> None:
super().__init__()
self.bot = bot
self.langs: list[str] = []
self.uri = "https://emkc.org/api/v2/piston"
async def get_langs(self) -> None:
"""Gets available language details from Piston api."""
uri = self.uri + "/runtimes"
async with self.bot.session.get(uri) as response:
if not 200 <= response.status <= 299:
return None
if not (data := await response.json()):
return None
self.resolve_langs(*data)
def resolve_langs(self, *data: dict[str, str]) -> None:
"""Saves raw language data to cache."""
for lang in data:
self.langs.append(lang["language"])
self.langs.extend(a for a in lang["aliases"])
@lightbulb.command(name="run")
async def run_cmd(self, ctx: lightbulb.Context, *, code: str) -> None:
"""Sends code to the Piston api to be executed."""
uri = self.uri + "/execute"
if not self.langs:
await self.get_langs()
if not (matches := re.match(r"```(\w+)\s([\w\W]+)[\s*]?```", code)):
output = f"{await self.bot.resolve_prefix(self.bot, ctx.message)}run \`\`\`python\nprint('This is a test')\`\`\`" #type: ignore
await ctx.respond(
f"Wrong format. Use a code block.\nSpecify lang inside first set of triple backticks. Example:\n\n{output}",
reply=True,
)
return None
lang = matches.group(1)
source = matches.group(2)
if lang not in self.langs:
await ctx.respond(f"{lang} is not a supported language.", reply=True)
return None
data = {
"language": lang,
"version": "*",
"files": [{"content": source}],
}
async with self.bot.session.post(uri, json=data) as response:
if not 200 <= response.status <= 299:
return None
if not (data := await response.json()):
return None
fields = [
("Language:", f"```{data['language'].title()}```", True),
("Version:", f"```{data['version']}```", True),
]
if stdout := data["run"]["stdout"]:
color = hikari.Color.from_rgb(0, 210, 0)
fields.append(("Output:", f"```{stdout}```", False))
if stderr := data["run"]["stderr"]:
color = hikari.Color.from_rgb(210, 0, 0)
fields.append(("Errors:", f"```{stderr}```", False))
await ctx.respond(
embed=self.bot.embeds.build(
ctx=ctx,
fields=fields,
color=color, #type: ignore
header="Source code evaluation results",
),
reply=True,
)
def load(bot: jonxhikari.Bot) -> None:
bot.add_plugin(Compile(bot))
def unload(bot: jonxhikari.Bot) -> None:
bot.remove_plugin("Compile")
```
#### File: core/plugins/events.py
```python
import datetime
import hikari
import lightbulb
import tanjun
from hikari.messages import Attachment
import jonxhikari
class Events(lightbulb.Plugin):
"""Jonxhikaris pseudo event handler."""
def __init__(self, bot: jonxhikari.Bot) -> None:
self.bot = bot
super().__init__()
@lightbulb.plugins.listener()
async def on_cmd(self, _: lightbulb.CommandCompletionEvent) -> None:
"""Fires on completion of a command."""
self.bot.invokes += 1
@lightbulb.plugins.listener()
async def on_interaction(self, event: hikari.InteractionCreateEvent) -> None:
"""Fires on creations of an interaction."""
self.bot.invokes += 1
# TODO research interaction events on a lower level to
# prevent this from firing multiple times per command
@lightbulb.plugins.listener()
async def on_cmd_exc(self, event: lightbulb.CommandErrorEvent) -> None:
"""Handles Lightbulb command exception events."""
await self.bot.errors.parse_lightbulb(event.exception, event.context)
@lightbulb.plugins.listener()
async def on_exc(self, event: hikari.ExceptionEvent) -> None: # type: ignore
"""Handles other exception events."""
await self.bot.errors.parse(event.exception)
def load(bot: jonxhikari.Bot) -> None:
bot.add_plugin(Events(bot))
def unload(bot: jonxhikari.Bot) -> None:
bot.remove_plugin("Events")
```
#### File: core/utils/embeds.py
```python
import datetime
import typing as t
import lightbulb
import hikari
import tanjun
FieldsT = t.Optional[list[tuple[t.Union[str, int], t.Union[str, int], bool]]]
CtxT = t.Union[lightbulb.Context, tanjun.abc.Context]
ResourceishT = t.Optional[hikari.Resourceish]
class Embeds:
"""Embed constructor class."""
def build(self, **kwargs: t.Any) -> hikari.Embed:
"""Builds an embed from given kwargs.
kwargs:
- ctx: required
- title: optional
- description: optional
- fields: optional
- footer: optional
- header: optional
- header_icon: optional
- thumbnail: optional
- image: optional
- color: optional
Returns:
- hikari.Embed
"""
self.fields: FieldsT = kwargs.get("fields")
self._ctx: CtxT = kwargs.get("ctx")
self.title: t.Optional[str] = kwargs.get("title")
self.desc: t.Optional[str] = kwargs.get("description")
self.footer: t.Optional[str] = kwargs.get("footer")
self.header: t.Optional[str] = kwargs.get("header")
self.header_url: t.Optional[str] = kwargs.get("header_url")
self.header_icon: ResourceishT = kwargs.get("header_icon")
self.thumbnail: ResourceishT = kwargs.get("thumbnail")
self.image: ResourceishT = kwargs.get("image")
self.color: t.Optional[hikari.Colorish] = kwargs.get("color")
self.time: datetime.datetime = kwargs.get(
"timestamp", datetime.datetime.now().astimezone()
)
assert self._ctx is not None # You happy now mypy???
embed = (
hikari.Embed(
title=self.title,
description=self.desc,
timestamp=self.time,
color=self.color or hikari.Color.from_hex_code("#713dc7"),
)
.set_thumbnail(self.thumbnail)
.set_image(self.image)
.set_author(name=self.header, url=self.header_url, icon=self.header_icon)
.set_footer(
text=(
None
if self.footer == "BYPASS"
else (self.footer or f"Invoked by: {self._ctx.author.username}")
),
icon=(
None
if self.footer == "BYPASS"
else self._ctx.author.avatar_url
),
)
)
if self.fields:
for name, value, inline in self.fields:
embed.add_field(name=str(name), value=str(value), inline=inline)
return embed
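
# --- Hypothetical usage sketch (not part of the original file) ---
# `ctx` stands for whatever lightbulb/tanjun context a command callback received.
def _example_embed(ctx: CtxT) -> hikari.Embed:
    return Embeds().build(
        ctx=ctx,
        title="Stats",
        description="A short description.",
        fields=[("Uptime", "3h 2m", True), ("Guilds", 42, False)],
    )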
```
#### File: core/utils/errors.py
```python
import typing as t
import lightbulb
import hikari
import tanjun
from lightbulb import errors as lb_errors
from .embeds import Embeds
DualCtxT = t.Union[lightbulb.Context, tanjun.abc.Context]
class WTFError(Exception):
pass
class Errors:
embeds = Embeds()
def embed(self, ctx: DualCtxT, message: str) -> hikari.Embed:
embed = self.embeds.build(
ctx=ctx,
description=message,
footer="BYPASS",
)
return embed
@staticmethod
def wtf(message: str) -> WTFError:
return WTFError(message)
@staticmethod
async def parse(exc: Exception) -> None:
print(exc)
raise exc
async def parse_tanjun(
self, exc: t.Union[tanjun.CommandError, Exception], ctx: tanjun.abc.Context
) -> None:
if isinstance(exc, (tanjun.NotEnoughArgumentsError, tanjun.TooManyArgumentsError)):
await ctx.respond(self.embed(ctx, f"**ERROR**```{exc.message}```"))
raise exc
elif isinstance(exc, tanjun.MissingDependencyError):
await ctx.respond(self.embed(ctx, f"**ERROR**```{exc.message}```"))
raise exc
else:
print(exc)
raise exc
async def parse_lightbulb(
self, exc: t.Union[lb_errors.CommandError, Exception], ctx: lightbulb.Context
) -> None:
if isinstance(exc, lb_errors.CommandNotFound):
pass
elif isinstance(exc, lb_errors.NotEnoughArguments):
args = "\n".join(f" > {a}" for a in exc.args[1])
await ctx.respond(
self.embed(ctx, f"**ERROR**\nRequired argument(s) were missing:\n```{args}```")
)
raise exc
elif isinstance(exc, lb_errors.MissingRequiredPermission):
perms = "\n".join(f" > {p}" for p in exc.args[1:]).replace("_", " ")
await ctx.respond(self.embed(ctx, f"**ERROR**\nMissing permissions.```{perms}```"))
raise exc
elif isinstance(exc, lb_errors.ConverterFailure):
await ctx.respond(
self.embed(
ctx,
(
"**ERROR**\nConversion of arguments failed during "
f"`{ctx.command.qualified_name}` command."
),
)
)
raise exc
else:
print(exc)
raise exc
``` |
{
"source": "Jonxslays/kit-api",
"score": 2
} |
#### File: kit-api/kitapi/main.py
```python
import uvloop
from fastapi.staticfiles import StaticFiles
from starlette.responses import FileResponse
from tortoise.contrib.fastapi import register_tortoise
from kitapi.core import settings
from kitapi.v1 import api
uvloop.install()
app = api.app
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/", include_in_schema=False)
async def index() -> FileResponse:
return FileResponse("static/index.html")
register_tortoise(
app, generate_schemas=True, add_exception_handlers=True, config=settings.tortoise_config()
)
``` |
{
"source": "Jonxslays/modmail",
"score": 2
} |
#### File: modmail/bot/bot.py
```python
from pathlib import Path
import discord
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord.ext import commands
from pytz import utc
import modmail
from modmail import Config
class Bot(commands.Bot):
__slots__ = ("extensions", "scheduler")
def __init__(self) -> None:
self.extensions = [p.stem for p in Path(".").glob("./modmail/bot/extensions/*.py")]
self.scheduler = AsyncIOScheduler()
self.scheduler.configure(timezone=utc)
super().__init__(
command_prefix=Config.PREFIX,
status=discord.Status.online,
intents=discord.Intents.all(),
)
def __call__(self) -> None:
self.run()
def setup(self) -> None:
print("Running setup...")
for ext in self.extensions:
self.load_extension(f"modmail.bot.extensions.{ext}")
print(f" `{ext}` extension loaded.")
def run(self) -> None:
self.setup()
print("Running bot...")
super().run(Config.TOKEN, reconnect=True)
async def close(self) -> None:
print("Shutting down...")
if stdout := self.get_cog("Hub").stdout:
await stdout.send(f"Modmail v{modmail.__version__} is shutting down.")
await super().close()
print(" Bot shut down.")
async def on_connect(self) -> None:
print(f" Bot connected. DWSP latency: {self.latency * 1000:,.0f} ms")
async def on_disconnect(self) -> None:
print(f" Bot disconnected.")
async def on_ready(self) -> None:
self.scheduler.start()
print(f" Scheduler started ({len(self.scheduler.get_jobs())} jobs scheduled).")
await self.change_presence(
activity=discord.Activity(
name=f"DM reports • Version {modmail.__version__}", type=discord.ActivityType.listening
)
)
print(f" Presence set.")
self.guild = self.get_guild(Config.GUILD_ID)
self.staff_role = self.guild.get_role(Config.STAFF_ROLE_ID)
print(f" Bot ready.")
async def on_message(self, message: discord.Message) -> None:
if message.guild and message.guild.me in message.mentions:
if self.staff_role in message.author.roles:
return
await message.delete()
try:
await message.author.send(
f"Hey {message.author.name}! This bot is only for discrete reporting in DMs, and thus has no server functionality."
)
except discord.Forbidden:
await message.channel.send(
f"Hey {message.author.name}! This bot is only for discrete reporting in DMs, and thus has no server functionality.",
delete_after=10,
)
async def process_commands(self, message) -> None:
pass
```
#### File: bot/extensions/mail.py
```python
import datetime as dt
import random
import typing as t
import discord
from discord.ext import commands
import modmail
from modmail import Config
from modmail.utils import chron, string
class Mail(commands.Cog):
__slots__ = ("bot", "cooldown")
def __init__(self, bot: modmail.bot.Bot) -> None:
self.bot = bot
self.cooldown: t.List[discord.Member] = []
async def handle_modmail(self, message: discord.Message) -> None:
if message.author in self.cooldown:
retry = self.bot.scheduler.get_job(f"{message.author.id}").next_run_time - dt.datetime.now(dt.timezone.utc)
return await message.channel.send(
f"Sorry, you're on cooldown! You can send another message in {chron.long_delta(retry)}."
)
if not 50 <= len(message.content) <= 1000:
return await message.channel.send("Your message should be between 50 and 1,000 characters long.")
member = self.bot.guild.get_member(message.author.id)
fields = [
{"name": "Member", "value": f"{member.name} (ID: {member.id})", "inline": False},
{"name": "Message", "value": message.content, "inline": False},
]
if message.mentions:
fields.append(
{
"name": "Other Mentions",
"value": "\n".join((f"{m.name}: {m.id}") for m in message.mentions),
"inline": False,
}
)
struc = {
"title": "Modmail",
"color": discord.Colour.random().value,
"thumbnail": {"url": f"{member.avatar_url}"},
"footer": {"text": f"ID: {message.id}"},
"image": {"url": att[0].url if len(att := message.attachments) else None},
"fields": fields,
}
embed = discord.Embed.from_dict(struc)
await self.output.send(embed=embed)
await message.channel.send(
"Message sent. If needed, a moderator will DM you regarding this issue. You'll need to wait 1 hour before sending another modmail."
)
self.cooldown.append(message.author)
self.bot.scheduler.add_job(
lambda m: self.cooldown.remove(m),
id=f"{message.author.id}",
next_run_time=dt.datetime.utcnow() + dt.timedelta(seconds=3600),
args=[message.author],
)
@commands.Cog.listener()
async def on_ready(self) -> None:
self.output = self.bot.get_channel(Config.MODMAIL_ID)
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
if message.author.bot or not isinstance(message.channel, discord.DMChannel):
return
await self.handle_modmail(message)
def setup(bot: modmail.bot.Bot) -> None:
bot.add_cog(Mail(bot))
```
#### File: modmail/utils/chron.py
```python
import datetime as dt
from time import strftime
from modmail.utils import string
def sys_time():
return strftime("%H:%M:%S")
def utc_time():
return dt.datetime.utcnow().strftime("%H:%M:%S")
def short_date(dt):
return dt.strftime("%d/%m/%y")
def short_date_and_time(dt):
return dt.strftime("%d/%m/%y %H:%M:%S")
def long_date(dt):
return dt.strftime("%d %b %Y")
def long_date_and_time(dt):
return dt.strftime("%d %b %Y at %H:%M:%S")
def short_delta(td, milliseconds=False):
parts = []
if td.days != 0:
parts.append(f"{td.days:,}d")
if (h := td.seconds // 3600) != 0:
parts.append(f"{h}h")
if (m := td.seconds // 60 - (60 * h)) != 0:
parts.append(f"{m}m")
if (s := td.seconds - (60 * m) - (3600 * h)) != 0 or not parts:
if milliseconds:
ms = round(td.microseconds / 1000)
parts.append(f"{s}.{ms}s")
else:
parts.append(f"{s}s")
return ", ".join(parts)
def long_delta(td, milliseconds=False):
parts = []
if (d := td.days) != 0:
parts.append(f"{d:,} day{'s' if d > 1 else ''}")
if (h := td.seconds // 3600) != 0:
parts.append(f"{h} hour{'s' if h > 1 else ''}")
if (m := td.seconds // 60 - (60 * h)) != 0:
parts.append(f"{m} minute{'s' if m > 1 else ''}")
if (s := td.seconds - (60 * m) - (3600 * h)) != 0 or not parts:
if milliseconds:
ms = round(td.microseconds / 1000)
parts.append(f"{s}.{ms} seconds")
else:
parts.append(f"{s} second{'s' if s > 1 else ''}")
return string.list_of(parts)
``` |
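A quick usage sketch of the delta formatters above. The import path mirrors the file location (`modmail/utils/chron.py`) and the import already used in `mail.py`; the exact `long_delta()` output depends on `string.list_of()`, which is not shown here.
```python
# Hypothetical usage of the chron helpers above; assumes the package is
# importable as modmail.utils.chron, matching the file path and the import
# already used in mail.py.
import datetime as dt

from modmail.utils import chron

delta = dt.timedelta(days=1, seconds=3725)  # 1 day, 1 hour, 2 minutes, 5 seconds
print(chron.short_delta(delta))  # -> "1d, 1h, 2m, 5s"
print(chron.utc_time())          # -> current UTC time as "HH:MM:SS"
```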
{
"source": "Jonxslays/nusex",
"score": 2
} |
#### File: nusex/spec/nsc.py
```python
from nusex import CONFIG_FILE
from nusex.errors import UnsupportedFile
SPEC_ID = b"\x99\x63"
class NSCSpecIO:
__slots__ = ("defaults",)
def __init__(self):
self.defaults = {
"profile": "default",
"last_update": "000101",
"use_wildmatch_ignore": False,
"auto_update": False,
}
def read(self):
data = self.defaults.copy()
with open(CONFIG_FILE, "rb") as f:
# Validate format.
if f.read(2) != SPEC_ID:
raise UnsupportedFile("Not a valid NSC file")
# Load profile data.
data["profile"] = f.read(24).decode().strip()
date = f.read(6).decode()
try:
int(date)
data["last_update"] = date
except ValueError:
# Some invalid or broken config.
...
# Not guaranteed from here.
attrs = ("use_wildmatch_ignore", "auto_update")
for attr in attrs:
try:
data[attr] = f.read(1) == b"\x01"
except Exception as exc:
# Most likely no more options to read, so exit.
break
return data
def write(self, data):
with open(CONFIG_FILE, "wb") as f:
# Identify format.
f.write(SPEC_ID)
# Write data.
f.write(data["profile"].ljust(24).encode())
f.write(data["last_update"].encode())
# Not guaranteed, so write a default value if not present.
f.write((b"\x00", b"\x01")[data.get("use_wildmatch_ignore", 0)])
f.write((b"\x00", b"\x01")[data.get("auto_update", 0)])
``` |
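A hedged round-trip sketch of the binary config format implemented above. The import path (`nusex.spec.nsc`) is inferred from the file path, and a writable `CONFIG_FILE` location is assumed; neither is confirmed by this excerpt.
```python
# Hypothetical round trip of the NSC format above; the import path and a
# writable nusex.CONFIG_FILE are assumptions, not taken from the source.
from nusex.spec.nsc import NSCSpecIO

io = NSCSpecIO()
io.write(
    {
        "profile": "default",
        "last_update": "220101",
        "use_wildmatch_ignore": False,
        "auto_update": True,
    }
)
# The file is 34 bytes: a 2-byte magic number, a 24-byte padded profile name,
# a 6-byte date, then one flag byte per boolean option.
print(io.read())
# -> {'profile': 'default', 'last_update': '220101',
#     'use_wildmatch_ignore': False, 'auto_update': True}
```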
{
"source": "Jonxslays/sqlite2pg",
"score": 2
} |
#### File: sqlite2pg/sqlite2pg/cli.py
```python
import asyncio
import json
import pathlib
import typing
import click
from sqlite2pg import DEV_HOME_CONFIG, DEV_LOG_CONFIG
from sqlite2pg.modules import S2PLogger
__all__: typing.List[str] = [
"CommandHandler",
"CONFIG_SCHEMA",
]
COMMANDS_DIR: str = "./sqlite2pg/commands"
COMMANDS_FILES: typing.List[pathlib.Path] = [*pathlib.Path(".").glob(f"{COMMANDS_DIR}/*.py")]
ConfigSchemaT = typing.Mapping[str, typing.Mapping[str, typing.Union[str, bool, int]]]
CONFIG_SCHEMA: ConfigSchemaT = {
"logging": {
"enable": True,
"level": "INFO",
"to_cwd": False,
"retention": 7,
}
}
class CommandHandler(click.MultiCommand):
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().__init__(*args, **kwargs)
def list_commands(self, ctx: click.Context) -> typing.List[str]:
commands: typing.List[str] = []
commands.extend(p.stem for p in COMMANDS_FILES)
commands.sort()
return commands
def get_command(self, ctx: click.Context, name: str) -> typing.Optional[click.Command]:
namespace: typing.Dict[str, object] = {}
filepath = pathlib.Path(f"{COMMANDS_DIR}/{name}.py")
with open(filepath, "r") as f:
code = compile(f.read(), filepath, "exec")
eval(code, namespace, namespace)
cmd = namespace.get(name)
return cmd if isinstance(cmd, click.Command) else None
@staticmethod
def init_logging() -> None:
try:
with open(DEV_HOME_CONFIG / "config.json", "r") as f:
data: typing.MutableMapping[str, typing.Any] = json.loads(f.read())
except FileNotFoundError:
config_path = DEV_HOME_CONFIG / "config.json"
DEV_LOG_CONFIG.mkdir(parents=True, exist_ok=True)
config_path.touch()
schema = json.dumps(CONFIG_SCHEMA, indent=4, sort_keys=True)
with open(config_path, "w") as f2:
f2.write(schema)
except PermissionError:
click.echo(
f"{click.style('unable to access config file.', fg='red', bold=True)}\n"
"this is likely a permissions issue. please make sure the\n"
"sqlite2pg config.json exists, and has the correct permissions.\nusing bash: "
f"`{click.style('find / -wholename *sqlite2pg/config.json', fg='yellow', bold=True)}`"
)
click.secho("continuing anyways...", bold=True)
S2PLogger.configure(enable=False, to_cwd=False, log_level="", retention=0)
else:
S2PLogger.configure(
enable=data["logging"]["enable"],
to_cwd=data["logging"]["to_cwd"],
log_level=data["logging"]["level"],
retention=data["logging"]["retention"],
)
async def async_main() -> None:
await asyncio.sleep(0)
cli = CommandHandler(help="An SQLite3 to PostgreSQL database migration tool.")
cli.init_logging()
cli()
def main() -> None:
asyncio.run(async_main())
``` |
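For reference, a first run of `init_logging()` writes the default schema to `config.json`; the snippet below only repeats the serialization step from above so the resulting file content is visible.
```python
# Reproduces the config.json content that init_logging() writes when no
# config file exists yet (same CONFIG_SCHEMA and json.dumps call as above).
import json

CONFIG_SCHEMA = {
    "logging": {
        "enable": True,
        "level": "INFO",
        "to_cwd": False,
        "retention": 7,
    }
}

print(json.dumps(CONFIG_SCHEMA, indent=4, sort_keys=True))
```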
{
"source": "jony0917/tensorflow-extend-framework",
"score": 2
} |
#### File: tef/ops/variable.py
```python
import tef
import tef.pywrap
import tef.utils
def variable(name, shape, dtype):
v = tef.pywrap.ps_pull(name, shape, dtype)
tef.utils.add_to_collection(tef.utils.TEF_TRAINABLE_COLLECTION,
tef.utils.VariableSub(v, name, shape, dtype, None, "dense"))
return v
```
#### File: tef/training/optimizer.py
```python
import tensorflow as tf
import tef
import tef.pywrap
import tef.utils
class BaseOptimizer(object):
def __init__(self):
pass
def compute_gradients(self, loss):
tef_trainable = tef.utils.get_collection(tef.utils.TEF_TRAINABLE_COLLECTION)
gs = []
stubs = []
for stub in tef_trainable:
gradient = tf.gradients(loss, stub.var)
assert len(gradient) == 1
gs.append(gradient[0])
stubs.append(stub)
return gs, stubs
def apply_gradients(self, gs, stubs):
"""
To be implemented in subclass.
:param gs: gradients, list of tf.Tensor or tf.IndexedSlices object.
:param stubs: tef variable stubs
:return: train operation
"""
assert False
def minimize(self, loss):
gs, stubs = self.compute_gradients(loss)
return self.apply_gradients(gs, stubs)
class GradientDescentOptimizer(BaseOptimizer):
def __init__(self, learning_rate):
super(GradientDescentOptimizer, self).__init__()
self.learning_rate = learning_rate
def apply_gradients(self, gs, stubs):
assert len(gs) == len(stubs)
push_ops = []
for i in range(len(gs)):
gradient = gs[i]
stub = stubs[i]
if stub.category == "dense":
assert isinstance(gradient, tf.Tensor)
push_op = tef.pywrap.ps_push(gradient,
stub.name,
stub.shape,
stub.dtype,
"SGD",
self.learning_rate)
elif stub.category == "index":
assert isinstance(gradient, tf.IndexedSlices)
ids = tf.gather(stub.ids, gradient.indices)
push_op = tef.pywrap.ps_sparse_push(ids,
gradient.values,
stub.name,
stub.shape,
stub.dtype,
"SGD",
self.learning_rate)
elif stub.category == "hash":
assert isinstance(gradient, tf.IndexedSlices)
ids = tf.gather(stub.ids, gradient.indices)
push_op = tef.pywrap.ps_hash_push(ids,
gradient.values,
stub.name,
stub.shape,
stub.dtype,
"SGD",
self.learning_rate)
else:
assert False
push_ops.append(push_op)
return tf.group(push_ops)
``` |
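A rough sketch of how the optimizer above could be wired into a TensorFlow 1.x graph. Everything here is an assumption: that the `tef` native ops are built, that `variable()` is importable from `tef.ops.variable` (matching its file path), and that a TF1 `Session` is used, as `tf.gradients` in the optimizer implies.
```python
# Hypothetical TF1-style wiring for GradientDescentOptimizer above; assumes
# the tef parameter-server ops are available and importable as shown.
import tensorflow as tf

from tef.ops.variable import variable
from tef.training.optimizer import GradientDescentOptimizer

w = variable("w", shape=[16, 4], dtype=tf.float32)  # pulled from the PS
loss = tf.reduce_sum(tf.square(w))
train_op = GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

with tf.Session() as sess:
    sess.run(train_op)  # pushes SGD updates back to the parameter server
```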
{
"source": "jony67/telegram-aiogram",
"score": 3
} |
#### File: jony67/telegram-aiogram/bot.py
```python
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
from openweather import сurrent_weather, status_response, print_weather
from config import TOKEN
# To pass the token via an environment variable instead
#import os
#TOKEN = os.getenv("TOKEN")
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def process_start_command(message: types.Message):
await message.reply("Привет! Я Вася. Могу рассказать тебе о текущей погоде в любом городе. Набери /help, чтобы узнать как это сделать:)")
@dp.message_handler(commands=['help'])
async def process_help_command(message: types.Message):
await message.reply("Введи название города на русском языке или на латинице, например: Москва или Moscow")
@dp.message_handler()
async def echo_message(msg: types.Message):
city = msg.text
weather = print_weather(city)
await bot.send_message(msg.from_user.id, weather)
if __name__ == '__main__':
executor.start_polling(dp)
``` |
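The bot only needs a `config.py` module exposing `TOKEN` (see the import above). A minimal placeholder might look like this; the value is purely illustrative and must be replaced with a real token from @BotFather.
```python
# Hypothetical config.py implied by `from config import TOKEN` above;
# the token value below is a placeholder, not a real credential.
TOKEN = "123456789:AAExampleTelegramBotToken"
```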
{
"source": "jonyB0B/MemoriaTFG-JonatanSantanaPero",
"score": 3
} |
#### File: MemoriaTFG-JonatanSantanaPero/microbit/microIU.py
```python
import pyautogui
import microbit
import msvcrt, sys
#GLOBAL
THRESHOLD=200.0
SENX=180.0
SENY=280.0
AYUDA=""" HELP
Press A to move the camera,
Press B to move,
Press A & B to interact"""
#FUNCTIONS
def Help_Windows():
ch = msvcrt.getwch()
if ch == 'q':
sys.exit()
elif ch == 'h':
print(AYUDA)
else:
print ("Wrong Key Pressed")
print("Press H to help, Q to exit")
def Help_Linux():
ch = input("Press H to help, Q to exit")
if ch == 'q':
sys.exit()
elif ch == 'h':
print(AYUDA)
else:
print ("Wrong Key Pressed")
print("Press H to help, Q to exit")
def Camera(Width):
while True:
x = microbit.accelerometer.get_x()
y = microbit.accelerometer.get_y()
if x > SENX:
pyautogui.moveRel(Height*x, None)
elif x < -SENX:
pyautogui.moveRel(Height*x, None)
if y > SENY:
pyautogui.moveRel(None, y*Width)
elif y < -SENY:
pyautogui.moveRel(None, y*Width)
if microbit.button_b.is_pressed():
pyautogui.mouseUp()
break
def Move():
while True:
x = microbit.accelerometer.get_x()
y = microbit.accelerometer.get_y()
if x > THRESHOLD:
pyautogui.keyDown("right")
elif x < -THRESHOLD:
pyautogui.keyDown("left")
else:
pyautogui.keyUp("right")
pyautogui.keyUp("left")
if y > THRESHOLD:
pyautogui.keyDown("up")
elif y < -THRESHOLD:
pyautogui.keyDown("down")
else:
pyautogui.keyUp("down")
pyautogui.keyUp("up")
if microbit.button_a.is_pressed():
break
def Center_Cam():
screenWidth, screenHeight = pyautogui.size()
pyautogui.mouseDown()
pyautogui.moveTo(screenWidth / 2, screenHeight / 2)
pyautogui.mouseUp()
#MAIN
while True:
if msvcrt.kbhit():
Help_Windows()
#Help_Linux()
elif microbit.button_a.is_pressed() and microbit.button_b.is_pressed():
print("Button A & B pressed, click mode")
pyautogui.click()
elif microbit.button_a.is_pressed():
print("Button A pressed, camera mode, press B to exit")
screenWidth, screenHeight = pyautogui.size()
Factor_y = 1920*6 # Factor of 6 because the screen is 4K
Factor_x = 1080*6
Width = screenWidth / Factor_y # Adjust the sensitivity
Height = screenHeight/ Factor_x
pyautogui.mouseDown()
Camera(Width)
elif microbit.button_b.is_pressed():
print("Button B pressed, movement mode, press A to exit")
# Center the camera position before moving
Center_Cam()
Move()
#print(microbit.accelerometer.get_values())
microbit.sleep(500)
``` |
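A standalone sketch of the pyautogui primitives the script above relies on. It can run on any desktop session without a micro:bit; note that it moves the real cursor.
```python
# Minimal pyautogui sketch of the calls used above: size()/moveTo() as in
# Center_Cam(), keyDown()/keyUp() as in Move(). Moves the real mouse cursor.
import pyautogui

width, height = pyautogui.size()         # screen size in pixels
pyautogui.moveTo(width / 2, height / 2)  # center the pointer
pyautogui.keyDown("right")               # hold an arrow key
pyautogui.keyUp("right")                 # release it again
```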
{
"source": "JonyBepary/hysterical-hammer-under-construction",
"score": 3
} |
#### File: hysterical-hammer-under-construction/file_lib/jpeg.py
```python
from __future__ import absolute_import, division, print_function
import copy
try:
from settings import Settings
except ModuleNotFoundError:
from file_lib.settings import Settings
try:
import extern
except ModuleNotFoundError:
from file_lib import extern
pass
_JPEG_FORMAT = 'JPEG'
FORMATS = set([_JPEG_FORMAT])
OUT_EXT = '.jpg'
_MOZJPEG_ARGS = ['jpegtran', '-optimize']
_JPEGOPTIM_ARGS = ['jpegoptim']
_JPEGTRAN_ARGS = ['jpegtran', '-optimize']
_JPEGRESCAN_ARGS = ['jpegrescan']
test = False
filename = 'jhone.jpg'
new_filename = 'jhone.jpg'
def mozjpeg(ext_args, root=None):
"""Create argument list for mozjpeg."""
args = copy.copy(_MOZJPEG_ARGS)
if Settings.destroy_metadata:
args += ["-copy", "none"]
else:
args += ["-copy", "all"]
if Settings.jpegtran_prog:
args += ["-progressive"]
args += ['-outfile']
args += [ext_args.new_filename, ext_args.old_filename]
if test:
print("mozjpeg: ", args)
return 0
extern.run_ext(args, root)
return _JPEG_FORMAT
def jpegoptim(filename, quality=None, root=None):
# jpegoptim -s -f -o --all-progressive ./test.png
"""Create argument list for jpegoptim."""
args = copy.copy(_JPEGOPTIM_ARGS)
if Settings.destroy_metadata:
args += ["-s"]
if Settings.force:
args += ["-f"]
if Settings.verbose:
args += ["-v"]
if quality:
args += ["-m {}".format(quality)]
if Settings.jpegtran_prog:
args += ["--all-progressive"]
args += [filename]
if test:
print("jpegoptim: ", args)
return 0
extern.run_ext(args, root)
return _JPEG_FORMAT
def jpegtran(ext_args, root=None):
"""Create argument list for jpegtran."""
args = copy.copy(_JPEGTRAN_ARGS)
if Settings.destroy_metadata:
args += ["-copy", "none"]
else:
args += ["-copy", "all"]
if Settings.jpegtran_prog:
args += ["-progressive"]
args += ['-outfile']
args += [ext_args.new_filename, ext_args.old_filename]
if test:
print("jpegtran: ", args)
return 0
extern.run_ext(args, root)
return _JPEG_FORMAT
_JPEGRESCAN_ARGS = ['jpegrescan']
def jpegrescan(ext_args, root=None):
"""Run the EXTERNAL program jpegrescan."""
args = copy.copy(_JPEGRESCAN_ARGS)
if Settings.jpegrescan_multithread:
args += ['-t']
if Settings.destroy_metadata:
args += ['-s']
args += [ext_args.old_filename, ext_args.new_filename]
if test:
print("jpegrescan: ", args)
return 0
extern.run_ext(args, root)
return _JPEG_FORMAT
```
#### File: hysterical-hammer-under-construction/file_lib/png.py
```python
try:
import extern
except ModuleNotFoundError:
from file_lib import extern
_PNG_FORMAT = 'PNG'
FORMATS = set([_PNG_FORMAT])
LOSSLESS_FORMATS = set(('PNM', 'PPM', 'BMP', 'GIF'))
CONVERTABLE_FORMATS = LOSSLESS_FORMATS | FORMATS
OUT_EXT = '.' + _PNG_FORMAT.lower()
_PINFGO_ARGS = ['pingo', '-pngpalette=100', '-s9', 'test2.png']
_OPTIPNG_ARGS = ['optipng', '-o6', '-fix', '-preserve', '-force', '-quiet']
_ADVPNG_ARGS = ['advpng', '-z', '-4', '-f']
_PNGOUT_ARGS = ['pngout', '-force', '-v']
def pingo(file, root=None):
"""Run the external program pingo on the file."""
args = _PINFGO_ARGS + [file]
extern.run_ext(args, root)
return _PNG_FORMAT
def optipng(file, root=None):
"""Run the external program optipng on the file."""
args = _OPTIPNG_ARGS + [file]
extern.run_ext(args, root)
return _PNG_FORMAT
def advpng(file, root=None):
"""Run the external program advpng on the file."""
args = _ADVPNG_ARGS + [file]
extern.run_ext(args, root)
return _PNG_FORMAT
def pngout(file, root=None):
"""Run the external program pngout on the file."""
args = _PNGOUT_ARGS + [file]
extern.run_ext(args, root)
return _PNG_FORMAT
```
#### File: hysterical-hammer-under-construction/library/file_controler.py
```python
import os
file_name = "filehash.db"
def writer(file, data, path=None, hash1=None):
with open(file, 'w') as f:
for line in data:
f.write(line)
# Returns index of x in data if present, else -1
def binarySearch(data, l, r, x):
mid = int()
# Check base case
mid = int(l + (r - l) / 2)
if r >= l:
try:
# If element is present at the middle itself
if data[mid] == x:
matched = "matched"
return matched
# If element is smaller than mid, then it
# can only be present in left subarray
elif data[mid] > x:
return binarySearch(data, l, mid - 1, x)
# Else the element can only be present
# in right subarray
else:
return binarySearch(data, mid + 1, r, x)
except TypeError:
# print("There is a Un")
return TypeError
else:
# Element is not present in the dataay
# print("mid: ", mid)
return mid
def binsert(data, hash1):
high = len(data) - 1
try:
if high < 1:
if data == []:
return data
elif data[high] > hash1:
mid = 1
return mid
elif data[high] < hash1:
mid = 0
return mid
elif data[high] == hash1:
matched = "matched"
return matched
except TypeError:
return TypeError
# print("high: ", high)
low = 0
mid = binarySearch(data, low, high, hash1)
# print("return of def: ", mid)
return mid
def main_file_strike(xxhash):
file = os.path.join(os.getcwd(), file_name)
# print("File Opening: {}".format(file))
try:
with open(file, "r+") as fp:
data = fp.readlines()
except FileNotFoundError:
print("{0} File Not Found !!!".format(file))
with open(file, "w") as fp:
print("File Created.......")
finally:
with open(file, "r+") as fp:
data = fp.readlines()
try:
xxhash = xxhash + "\n"
except TypeError:
return -1
mid = binsert(data, xxhash)
if mid == "matched":
# print("xxhash is already present!!!")
return "FILE_NOT_MODIFIED"
elif mid == []:
writer(file, ['\n'], 0, xxhash)
else:
mid = mid + 1
data.insert(mid, xxhash)
writer(file, data, mid, xxhash)
# print(type(data))
# print(data)
def check_strike(xxhash1, xxhash2=None):
file = os.path.join(os.getcwd(), file_name)
# print("File Opening: {}".format(file))
try:
with open(file, "r+") as fp:
data = fp.readlines()
except FileNotFoundError:
print("{0} File Not Found !!!".format(file))
with open(file, "w") as fp:
print("File Created.......")
finally:
with open(file, "r+") as fp:
data = fp.readlines()
try:
xxhash1 = xxhash1 + "\n"
except TypeError:
return -1
mid = binsert(data, xxhash1)
if mid == "matched":
# print("xxhash is already present!!!")
return "FILE_NOT_MODIFIED"
else:
return "FILE_MODIFIED"
# print(type(data))
# print(data)
# xxhash = "ba918c89"
# xxhash = "dae03bab"
# xxhash = "4cc82932"
# # xxhash = "24284847"
# main_file_strike(xxhash)
```
#### File: hysterical-hammer-under-construction/library/test.py
```python
import os
import sys
import xxhash
from file_controler import main_file_strike
def hasher(file):
try:
with open(file, "r") as fp:
data = fp.read()
x = xxhash.xxh32(data).hexdigest()
# print(x)
return x
except FileNotFoundError:
print("File Not Found!!! Please Check if ({0}) path is Valid!!!".format(file))
def hash_saver(hash1):
pass
def is_hash_equal(hash1, hash2):
return hash1 == hash2
def File_Probe():
for root, dirs, files in os.walk(os.getcwd()):
for file in files:
if root == os.path.join(os.getcwd(), "__pycache__"):
continue
# print(os.path.join(os.getcwd(), "__pycache__"))
xxhash = hasher(file)
main_file_strike(xxhash)
li = list()
print("start\n")
File_Probe()
# print(is_hash_equal(hasher("/home/jony/PYC/hho/md5/main.js"), hasher("/home/jony/PYC/hho/md5/main.min.js")))
# file = "/home/jony/PYC/hho/md5/maidn.js"
# hasher = Hasher()
# sha_hash = hasher.md5(file)
# print("MD5 (" + file + ") :")
# print(sha_hash)
``` |
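A small sketch of the hashing step behind `hasher()` above; it assumes the third-party `xxhash` package is installed and hashes an in-memory payload instead of a file.
```python
# Minimal xxhash sketch mirroring hasher() above, but on bytes rather than
# file contents read from disk.
import xxhash

digest = xxhash.xxh32(b"hello world").hexdigest()
print(digest)  # 8-character hex string, the format stored per line in filehash.db
```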
{
"source": "jonyboi396825/BikeDashboardPlus",
"score": 2
} |
#### File: BikeDashboardPlus/raspberrypi/bike_mode.py
```python
import datetime
import json
import math
import os
import subprocess
import sys
import threading
import time
import traceback
import typing as t
from copy import deepcopy
import Adafruit_SSD1306
import gps
import pytz
import RPi.GPIO as GPIO
import serial
from PIL import Image, ImageDraw, ImageFont
# config data sent to Arduino during setup and other cfg data
cfg_file = open("raspberrypi/cfg.json", 'r')
cfg_ard = json.load(cfg_file)
cfg = deepcopy(cfg_ard)
cur_tz = cfg["TMZ"]
del cfg_ard["24H"], cfg_ard["TMZ"]
cfg_file.close()
# data sent to Arduino during loop
send = {
"GPS": [-1]*6,
"LED": [0, 0],
"B1RCV": False,
"B2RCV": False
}
# data from GPS
curdata = {}
# tracking and buttons
tracking = 0 # 0 = stopped, 1 = paused, 2 = tracking
wastracking = False # continues tracking even if disconnected
prevbstate1 = False
prevbstate2 = False
prevTimeEpoch = 0
fileName = "ERROR"
msg = "ERROR"
# interval of plotting data during tracking
INTERVAL = 2
# what to put onto disp
disp_data_g = {
"speed": 0,
"unit": cfg_ard["UNT"],
"datetime": datetime.datetime(1970, 1, 1, 0, 0, 0),
"mode": 'D',
"track": ''
}
# sync LED panel speed to OLED speed because OLED is slow and cannot do other way around
oled_speed = 0
# Arduino serial port
port_file = open("raspberrypi/port", 'r')
port = port_file.read().strip()
port_file.close()
def err(ex_type, value, tb):
"""
Custom error (for debugging)
"""
print(f"Exception occured at: {datetime.datetime.now()}")
print(ex_type.__name__)
traceback.print_tb(tb)
def get_gps_data() -> None:
"""
Thread that gets positional, time, and speed data from GPS since it is blocking
"""
global curdata
session = gps.gps("localhost")
session.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)
while True:
try:
report = session.next()
if (report["class"] == "TPV"):
curdata = report
# print(curdata)
except KeyError:
pass
except KeyboardInterrupt:
quit()
except StopIteration:
session = None
print("GPSD has terminated")
# converts dt as str to time zone
def _conv_tmz(dt: t.Union[str, datetime.datetime], fmt: t.Union[str, None], tmz: str) -> datetime.datetime:
d_temp = None
if (isinstance(dt, str)):
d_temp = datetime.datetime.strptime(
dt, fmt).replace(tzinfo=pytz.utc)
else:
d_temp = dt.replace(tzinfo=pytz.utc)
timezone = pytz.timezone(tmz)
d_localized = d_temp.astimezone(timezone)
return d_localized
def new_track_file(tm: datetime.datetime, tmz: str) -> None:
global fileName
n_tm = _conv_tmz(tm, None, tmz)
fileName = datetime.datetime.strftime(n_tm, "%Y-%m-%d_%H:%M:%S_track_path")
print(f"creating new track file with time: {fileName}")
def tracker(lat: int, lng: int, tm: datetime.datetime) -> None:
global tracking, fileName, prevTimeEpoch, msg
# makes sure doesn't print "PAUSED" multiple times
# print coordinates every 2 seconds
if (tracking == 0 or (tracking == 1 and msg.strip().upper() == "PAUSED") or math.floor(time.time())-prevTimeEpoch < INTERVAL):
return
else:
prevTimeEpoch = math.floor(time.time())
if (tracking == 1):
msg = "PAUSED\n"
else:
msg = str(lat) + "," + str(lng) + "\n"
print(f"writing {msg[:-1]} to {fileName}")
with open(os.path.join("tracking", fileName), 'a') as f:
f.write(msg)
def conv_unit(val: int, unit: int) -> int:
# given in m/s
assert(isinstance(unit, int) and unit >= 0 and unit < 3)
if (unit == 0): # mph
return int((val/1.0)*2.237)
elif (unit == 1): # km/h
return int((val/1.0)*3.6)
else: # m/s
return int(val)
def draw_on_display(disp: Adafruit_SSD1306.SSD1306_128_64, img: Image.Image,
drawing: ImageDraw.ImageDraw, fonts: "list[ImageFont.ImageFont]", data: dict) -> None:
"""
draws data on screen according to plan doc
data: {
"speed": x in m/s
"unit": {0, 1, 2}; 0 = mph, 1 = km/h, 2 = m/s
"datetime": datetime obj
"mode": String of: {'D', '2', '3'}
"track": String of: {'T', 'P', ''}
refer to plan doc for what each means
}
"""
global oled_speed
unit_to_str = ["mph", "km/h", "m/s"]
mode_font = fonts[0]
sp_font = fonts[1]
unit_font = fonts[2]
track_font = fonts[3]
# dd-mm or mm-dd
dayfmt = "%m/%d "
if (cfg["DTM"] == 1):
dayfmt = "%d-%m "
# date_x is to shift to account for lack of AM/PM
# 23:00 or 11:00PM
tmfmt = "%I:%M%p"
date_x = 30
if (cfg["24H"] == 1):
date_x = 40
tmfmt = "%H:%M"
# conv all to disp strings
disp_speed = str(conv_unit(data["speed"], int(data["unit"])))
disp_dt = str(datetime.datetime.strftime(data["datetime"], dayfmt + tmfmt))
disp_mode = "M:" + str(data["mode"])
disp_unit = str(unit_to_str[data["unit"]])
disp_track = str(data["track"])
# draw black rectangle the size of screen to clear the screen
drawing.rectangle((0, 0, 128, 128), fill=0)
drawing.text((0, 0), disp_mode, font=mode_font, fill=255)
drawing.text((date_x, 0), disp_dt, font=mode_font, fill=255)
drawing.text((0, 16), disp_speed, font=sp_font, fill=255)
drawing.text((84, 16), disp_unit, font=unit_font, fill=255)
drawing.text((84, 48), disp_track, font=track_font, fill=255)
disp.image(img)
disp.display()
time.sleep(0.2)
# this is the speed currently displayed on the oled
oled_speed = data["speed"]
def disp_th() -> None:
# writing to OLED causes delay so I'm putting it in a thread
global disp_data_g
display = Adafruit_SSD1306.SSD1306_128_64(rst=None)
display.begin()
time.sleep(2)
display.clear()
display.display()
time.sleep(1)
img = Image.new('1', (display.width, display.height))
drawing = ImageDraw.Draw(img)
FONTFILE = "raspberrypi/fonts/Gidole-Regular.ttf"
fonts = [
ImageFont.truetype(FONTFILE, 15),
ImageFont.truetype(FONTFILE, 45),
ImageFont.truetype(FONTFILE, 20),
ImageFont.truetype(FONTFILE, 15)
]
while True:
try:
draw_on_display(display, img, drawing, fonts, disp_data_g)
except OSError:
# ask user to reconnect by exiting out of program
# refer to __main__.py under handle_bike_mode()
print(f"OLED disconnected", file=sys.stderr)
os._exit(1)
def main_ser_connect(ser: serial.Serial) -> None:
global cfg_ard, send, curdata, tracking, prevbstate1, prevbstate2, disp_data_g, cur_tz, oled_speed, wastracking
while True:
while (ser.is_open):
# what to put onto display
display_dict = {
"speed": 0,
"unit": cfg_ard["UNT"],
"datetime": datetime.datetime(1970, 1, 1, 0, 0, 0),
"mode": 'D',
"track": ''
}
# get data from Arduino
rcv = {}
if (ser.in_waiting):
temp = ser.readline()
if (temp != b"\r\n"):
rcv = json.loads(temp.decode('utf-8').rstrip())
# get data from curdata (from thread) and alter send
if ("mode" not in curdata or curdata["mode"] < 2):
# disconnected
send["GPS"] = [-1]*6
send["LED"][1] = 2
tracking = 0
else:
# GPS: [lat, long, speed, month, day, hr, min]
# turn red LED off since there is communcation with GPS
send["LED"][1] = 0
# keep tracking if GPS was disconnected
if (tracking == 0 and wastracking):
tracking = 2
# time
curtime = curdata["time"]
d_localized = _conv_tmz(curtime[:-5], "%Y-%m-%dT%H:%M:%S", cur_tz)
# speed, given in m/s
speed = curdata["speed"]
# send speed currently displayed on oled to panel so it is synced with oled
send["GPS"] = [curdata["lat"], curdata["lon"], conv_unit(oled_speed, unit=display_dict["unit"]),
d_localized.month, d_localized.day, d_localized.hour, d_localized.minute]
# update what to display
t = ['', 'P', 'T']
display_dict["speed"] = speed
display_dict["mode"] = curdata["mode"]
display_dict["datetime"] = d_localized
display_dict["track"] = t[tracking]
# TRACKING
# if button 1 pressed
if ("BUTTON1" in rcv and rcv["BUTTON1"] and not prevbstate1):
if (tracking == 1 or tracking == 2):
wastracking = False
tracking = 0
else:
tracking = 2
# create new file when button pressed
if (send["LED"][1] == 0):
wastracking = True
tm = datetime.datetime.strptime(
curdata["time"][:-5], "%Y-%m-%dT%H:%M:%S")
new_track_file(tm, cur_tz)
# if button 2 pressed
if ("BUTTON2" in rcv and rcv["BUTTON2"] and not prevbstate2 and tracking != 0):
# switch between pausing it and resuming it (states 1 and 2)
tracking = (tracking % 2)+1
# adjust green LED
# off = tracking stopped
# flashing = tracking paused
# on = tracking on
send["LED"][0] = tracking
# put tracking to file
# make sure there is data from GPS since red LED would be on if no data
if (send["LED"][1] == 0):
tm = datetime.datetime.strptime(
curdata["time"][:-5], "%Y-%m-%dT%H:%M:%S")
tracker(curdata["lat"], curdata["lon"], tm)
# Process received data and prepare sending data
send_str = ""
if ("REQ" in rcv and rcv["REQ"] == 0):
send_str = json.dumps(cfg_ard)
elif ("REQ" in rcv and rcv["REQ"] == 1):
# B1RCV/B2RCV alg
if (rcv["BUTTON1"]):
send["B1RCV"] = True
else:
send["B1RCV"] = False
if (rcv["BUTTON2"]):
send["B2RCV"] = True
else:
send["B2RCV"] = False
send_str = json.dumps(send)
send_str += "\n"
# print what was received and what we are sending (debug)
# if (rcv != {}):
# print(f"received: {rcv}")
# print(f"sending: {send_str}\n")
# send data
ser.write(send_str.encode("utf-8"))
# display on OLED
disp_data_g = display_dict
prevbstate1 = send["B1RCV"]
prevbstate2 = send["B2RCV"]
time.sleep(0.01)
assert(tracking < 3 and tracking >= 0)
time.sleep(1)
def main() -> None:
global cfg_ard, send, curdata, tracking, prevbstate1, prevbstate2, disp_data_g, cur_tz, port
# debug
# sys.excepthook = err
# GPS command
CMD = "sudo gpsd /dev/serial0 -F /var/run/gpsd.sock"
subprocess.run(CMD.split())
# threads
th1 = threading.Thread(target=get_gps_data, name="gps_thread", daemon=True)
th1.start()
th2 = threading.Thread(target=disp_th, name="oled_thread", daemon=True)
th2.start()
# serial init
ser = serial.Serial(port, 115200, timeout=1,
stopbits=2, parity=serial.PARITY_NONE)
ser.flush()
# main program
main_ser_connect(ser)
GPIO.cleanup()
if (__name__ == "__main__"):
main()
```
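A standalone sketch of the timezone conversion `_conv_tmz()` performs above; it only needs `pytz` and repeats the same `replace(tzinfo=...)` / `astimezone()` steps on a fixed example timestamp.
```python
# Reproduces the _conv_tmz() conversion above with a fixed example timestamp.
import datetime

import pytz

utc_dt = datetime.datetime.strptime(
    "2021-07-04T16:30:00", "%Y-%m-%dT%H:%M:%S"
).replace(tzinfo=pytz.utc)
local = utc_dt.astimezone(pytz.timezone("America/Los_Angeles"))
print(local.strftime("%d %b %Y %H:%M"))  # -> "04 Jul 2021 09:30" (PDT, UTC-7)
```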
#### File: BikeDashboardPlus/raspberrypi/__main__.py
```python
import datetime
import subprocess
import threading
import time
import Adafruit_SSD1306
import requests
import serial
from gpiozero import Button
from PIL import Image, ImageDraw, ImageFont
BUTTON_PIN = 17
BUTTON_SH_PIN = 18
CMD_BIKE_MODE = "python3 raspberrypi/bike_mode.py 2>> errors.txt && printf \"Happened at $(date)\\n\\n\" >> errors.txt;"
CMD_SERVER_MODE = "python3 raspberrypi/server_mode.py 2>> errors.txt && printf \"Happened at $(date)\\n\\n\" >> errors.txt;"
class InitiationError(Exception):
"""
This error is raised if could not initialize OLED or Arduino.
"""
def __init__(self, code, *args, **kwargs):
self.code = code
self.which = kwargs["which"]
self.msg = f"{self.which} could not be initialized."
super().__init__(self.msg)
def __repr__(self):
return f"{self.msg} Exiting with code {self.code}."
def handle_bike_mode() -> None:
global display, img, draw, font, b
draw.rectangle((0, 0, 128, 128), fill=0)
draw.text((0, 0), "Restart to switch mode ", fill=255, font=font)
draw.text((0, 16), "No press detected \nEntering bike mode", fill=255, font=font)
display.image(img)
display.display()
time.sleep(2)
while True:
# using subprocess instead of import so an error would not exit out of the whole program and the process would be easier to kill
subprocess.call(CMD_BIKE_MODE.split())
# if exits out here, means that OS error happened/Arduino disc or OLED disc
try:
# put in try because OLED may be disconnected
draw.rectangle((0, 0, 128, 128), fill=0)
draw.text((0, 0), "Oh no!", fill=255, font=font)
draw.text((0, 16), "OLED or Arduino \ndisconnected. Reconn., \npress B1 try again.", fill=255, font=font)
display.image(img)
display.display()
time.sleep(1)
except:
pass
# let user press button after reconnected and then try again
b.wait_for_press()
time.sleep(2) # wait 2 seconds just in case of conflict with writing to OLED
def handle_server_mode() -> None:
global display, img, draw, font
# checks for internet connection in order to enter server mode
try:
requests.get("https://google.com")
# display the pi IP as website
website_name = _get_pi_ip()
draw.rectangle((0, 0, 128, 128), fill=0)
draw.text((0, 0), "Restart to switch mode ", fill=255, font=font)
draw.text((0, 16), f"In server mode \nVisit website: \n{website_name}:7123", fill=255, font=font)
display.image(img)
display.display()
time.sleep(1)
# using subprocess instead of import so an error would not exit out of the whole program and the process would be easier to kill
subprocess.call(CMD_SERVER_MODE.split())
except requests.exceptions.ConnectionError:
# enter bike mode if no internet connection
draw.rectangle((0, 0, 128, 128), fill=0)
draw.text((0, 0), "No connection", fill=255, font=font)
draw.text((0, 16), "Going into \nbike mode", fill=255, font=font)
display.image(img)
display.display()
time.sleep(1)
handle_bike_mode()
return
def shutdown_button() -> None:
global display, img, draw, font
# wait 30 seconds so the button wouldn't interfere with any of the setup stuff
time.sleep(30)
# shuts down pi when this is pressed
sh_b = Button(BUTTON_SH_PIN)
sh_b.wait_for_press()
# stop all sub programs
STOP_CMD_BK = "pkill -f raspberrypi/bike_mode.py"
STOP_CMD_SV = "pkill -f raspberrypi/server_mode.py"
subprocess.call(STOP_CMD_BK.split())
subprocess.call(STOP_CMD_SV.split())
time.sleep(1)
draw.rectangle((0, 0, 128, 128), fill=0)
draw.text((0, 0), "Powering off", fill=255, font=font)
draw.text((0, 16), "Wait for green LED \non RPi to turn off \nbefore switching off.", fill=255, font=font)
display.image(img)
display.display()
time.sleep(1)
# power off
POWER_OFF_CMD = "sudo shutdown -h now"
subprocess.call(POWER_OFF_CMD.split())
def _check_components() -> str:
"""
Checks that all components are connected properly before starting the program.
Returns an empty string on success, or the name of the component that failed.
"""
try:
# check display
display = Adafruit_SSD1306.SSD1306_128_64(rst=None)
display.begin()
# check serial port
pt_f = open("raspberrypi/port", 'r')
pt = pt_f.read().strip()
ser = serial.Serial(pt, 115200)
ser.flush()
pt_f.close()
except OSError as e:
if (e.errno == 2):
print("Serial port could not be opened.")
return "Serial port"
elif (e.errno == 121):
print("OLED could not be initialized.")
return "OLED"
return "Something"
return ""
def _get_pi_ip() -> str:
s_p = subprocess.Popen("hostname -I".split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = s_p.communicate()
return output.decode("utf-8").rstrip()
def main() -> None:
global display, img, draw, font, b
print(f"Started program at {datetime.datetime.now()}")
try:
try:
_s = _check_components()
if (_s != ""):
raise InitiationError(1, which=_s)
except InitiationError as e:
with open("errors.txt", 'a') as f:
f.write(f"{e}\nHappened at {datetime.datetime.now()} \n\n")
quit(e.code)
th1 = threading.Thread(target=shutdown_button)
th1.start()
mode = None
# init display
display = Adafruit_SSD1306.SSD1306_128_64(rst=None)
display.begin()
time.sleep(2)
display.clear()
display.display()
time.sleep(1)
# init python PIL
img = Image.new('1', (display.width, display.height))
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("raspberrypi/fonts/Gidole-Regular.ttf", 12)
# draw setup text
draw.text((0, 0), "Setup", font=font, fill=255)
draw.multiline_text((0, 16), "Press button 1 on RPi \nto enter server mode. \nOtherwise, do nothing.", font=font, fill=255)
display.image(img)
display.display()
time.sleep(1)
# wait for button press to go into server mode
b = Button(BUTTON_PIN)
# wait 5 seconds for user to press button, otherwise enter bike mode
b.wait_for_press(timeout=5)
# display if button pressed/button not pressed
if (b.is_pressed):
mode = "server"
else:
mode = "bike"
except KeyboardInterrupt:
display.clear()
display.display()
quit()
try:
# run respective programs
if (mode == "bike"):
handle_bike_mode()
elif (mode == "server"):
handle_server_mode()
except (KeyboardInterrupt):
# clear display if keyboard interrupt
display.clear()
display.display()
display.clear()
display.display()
quit()
if (__name__ == "__main__"):
main()
``` |
{
"source": "jonyboi396825/COM-Server",
"score": 3
} |
#### File: src/com_server/base_connection.py
```python
import abc
import json
import os
import threading
import time
import typing as t
from types import TracebackType
import serial
from . import constants, tools
SEND_QUEUE_MAX_SIZE = 65536
class ConnectException(Exception):
"""
Connecting/disconnecting errors
"""
def __init__(self, msg: str) -> None:
super().__init__(msg)
class BaseConnection(abc.ABC):
"""A base connection object with a serial or COM port.
Base class that contains implemented basic methods: `send()`, `receive()`, `connect()`,
and `disconnect()`, properties of the connection, and an abstract IO thread method.
"""
def __init__(
self,
baud: int,
port: str,
*ports: str,
exception: bool = True,
timeout: float = 1,
send_interval: float = 1,
queue_size: int = constants.RCV_QUEUE_SIZE_NORMAL,
exit_on_disconnect: bool = False,
rest_cpu: bool = True,
**kwargs: t.Any,
) -> None:
"""Initializes BaseConnection and Connection-like classes
`baud`, `port` (or a port within `ports`), `timeout`, and `kwargs` will be passed to pyserial.
For more information, see [here](https://pyserial.readthedocs.io/en/latest/pyserial_api.html#serial.Serial).
Args:
baud (int): The baud rate of the serial connection
port (str): The default port of the serial connection
*ports (str): Alternative ports to try if the default port does not work
exception (bool, optional): **DEPRECATED**. Defaults to True.
timeout (float, optional): How long the program should wait, in seconds, for serial data before exiting. Defaults to 1.
send_interval (float, optional): Indicates how much time, in seconds, the program should wait before sending another message. \
Note that this does NOT mean that it will be able to send every `send_interval` seconds. It means that the `send()` method will \
exit if the interval has not reached `send_interval` seconds. NOT recommended to set to small values. Defaults to 1.
queue_size (int, optional): The number of previous data that was received that the program should keep. Must be nonnegative. Defaults to 256.
exit_on_disconnect (bool, optional): If True, sends `SIGTERM` signal to the main thread if the serial port is disconnected. Does not work on Windows. Defaults to False.
rest_cpu (bool, optional): If True, will add 0.01 second delay to end of IO thread. Otherwise, removes those delays but will result in increased CPU usage. \
Not recommended to set to False with the default IO thread. Defaults to True.
**kwargs (Any): Passed to pyserial
Raises:
EnvironmentError: Raised if `exit_on_disconnect` is True and it is running on a Windows machine.
"""
# from above
self._baud = int(baud)
self._port = str(port)
self._ports = ports
self._ports_list = tuple([self._port] + list(self._ports))
self._exception = bool(exception)
self._timeout = abs(float(timeout)) # make sure positive
self._pass_to_pyserial = kwargs
self._queue_size = abs(int(queue_size)) # make sure positive
self._send_interval = abs(float(send_interval)) # make sure positive
self._exit_on_disconnect = exit_on_disconnect
self._rest_cpu = rest_cpu
if os.name == "nt" and self._exit_on_disconnect:
raise EnvironmentError("exit_on_disconnect is not supported on Windows")
# initialize Serial object
self._conn: t.Optional[serial.Serial] = None
# other
self._last_sent = time.time() # prevents from sending too rapidly
self._last_rcv = (
0.0,
b"",
) # stores the data that the user previously received
# IO variables
self._rcv_queue: t.List[
t.Tuple[float, bytes]
] = (
[]
) # stores previous received strings and timestamps, tuple (timestamp, str)
self._to_send: t.List[bytes] = [] # queue data to send
# this lock makes sure data from the receive queue
# and send queue are written to and read safely
self._lock = threading.Lock()
def __repr__(self) -> str:
"""
Returns string representation of self
"""
return (
f"Connection<id=0x{hex(id(self))}>"
f"{{Serial={self._conn}, "
f"timeout={self._timeout}, max_queue_size={self._queue_size}, send_interval={self._send_interval}}}"
)
def __enter__(self) -> "BaseConnection":
"""Context manager
When in a context manager, it will automatically connect itself
to its serial port and return itself.
"""
if not self.connected:
self.connect()
return self
def __exit__(
self,
exc_type: type,
exc_value: BaseException,
exc_tb: t.Optional[TracebackType],
) -> None:
"""Context manager
When exiting from the `with` statement, it will automatically close itself.
"""
self.disconnect()
def connect(self) -> None:
"""Begins connection to the serial port.
When called, initializes a serial instance if not initialized already. Also starts the IO thread.
Raises:
ConnectException: If the connection is already established.
"""
if self._conn is not None:
if self._exception:
# raise exception if true
raise ConnectException("Connection already established")
# return if initialized already
return
# timeout should be None in pyserial
pyser_timeout = None if self._timeout == constants.NO_TIMEOUT else self._timeout
# user-given ports
_all_ports = self._ports_list
# available ports
_all_avail_ports = [port for port, _, _ in tools.all_ports()]
# actual used port
_used_port = "No port found"
for port in _all_ports:
if port in _all_avail_ports:
_used_port = port
break
# set port attribute to new port (useful when printing)
self._port = _used_port
self._conn = serial.Serial(
port=self._port,
baudrate=self._baud,
timeout=pyser_timeout,
**self._pass_to_pyserial,
)
# clear buffers
self._conn.flush()
self._conn.flushInput()
self._conn.flushOutput()
time.sleep(2) # wait for other end to start up properly
# start receive thread
threading.Thread(
name="Serial-IO-thread", target=self._io_thread, daemon=True
).start()
def disconnect(self) -> None:
"""Closes connection to the serial port.
When called, calls `Serial.close()` then makes the connection `None`.
If it is currently closed then just returns.
Forces the IO thread to close.
**NOTE**: This method should be called if the object will not be used anymore
or before the object goes out of scope, as deleting the object without calling
this will lead to stray threads.
"""
if self._conn is None:
# return if not open, as threads are already closed
return
self._conn.close()
self._reset()
self._conn = None
def send(
self,
*data: t.Any,
check_type: bool = True,
ending: str = "\r\n",
concatenate: str = " ",
) -> bool:
"""Sends data to the port
If the connection is open and the interval between sending is large enough,
then concatenates args with a space (or what was given in `concatenate`) in between them,
encodes to an `utf-8` `bytes` object, adds a carriage return and a newline to the end
(i.e. "\\r\\n") (or what was given as `ending`), then sends to the serial port.
Note that the data does not send immediately and instead will be added to a queue.
The queue size limit is 65536 byte objects. Calling `send()` more than this limit will not add objects to the queue.
Sending data too rapidly (e.g. making `send_interval` too small, varies from computer to computer) is not recommended,
as the queue will get too large and the send data will get backed up and delayed,
because it takes a considerable amount of time for data to be sent through the serial port.
Additionally, parts of the send queue will all be sent together until it reaches 0.5 seconds,
which may end up with unexpected behavior in some programs.
To prevent these problems, either make the value of `send_interval` larger,
or add a delay within the main thread.
If the program has not waited long enough before sending, then the method will return `False`.
If `check_type` is True, then it will process each argument, then concatenate, encode, and send.
- If the argument is `bytes` then decodes to `str`
- If argument is `list` or `dict` then passes through `json.dumps`
- If argument is `set` or `tuple` then converts to list and passes through `json.dumps`
- Otherwise, directly convert to `str` and strip
Otherwise, converts each argument directly to `str` and then concatenates, encodes, and sends.
Args:
`*data` (Any): Everything that is to be sent, each as a separate parameter. Must have at least one parameter.
ending (str, optional): The ending of the bytes object to be sent through the serial port. Defaults to "\\r\\n".
concatenate (str, optional): What the strings in args should be concatenated by. Defaults to a space (" ").
Raises:
ConnectException: If serial port not connected.
Returns:
bool: true on success, false if send interval not reached.
"""
# check if connection open
if not self.connected:
raise ConnectException("No connection established")
# check if it should send by using send_interval.
if time.time() - self._last_sent <= self._send_interval:
return False
self._last_sent = time.time()
# check `check_type`, then converts each element
send_data: str = ""
if check_type:
send_data = concatenate.join([self._check_output(i) for i in data])
else:
send_data = concatenate.join([str(i) for i in data])
# add ending to string
send_data_bytes = (send_data + ending).encode("utf-8")
# make sure nothing is reading/writing to the receive queue
# while reading/assigning the variable
with self._lock:
if len(self._to_send) < SEND_QUEUE_MAX_SIZE:
# only append if limit has not been reached
self._to_send.append(send_data_bytes)
return True
def receive(self, num_before: int = 0) -> t.Optional[t.Tuple[float, bytes]]:
"""Returns the most recent receive object.
The IO thread will continuously detect data from the serial port and put the `bytes` objects in the `rcv_queue`.
If there are no parameters, the method will return the most recent received data.
If `num_before` is greater than 0, then will return `num_before`th previous data.
- Note: `num_before` must be less than the current size of the queue and greater or equal to 0
- If not, returns None (no data)
- Example:
- 0 will return the most recent received data
- 1 will return the 2nd most recent received data
- ...
Args:
num_before (int, optional): The position in the receive queue to return data from. Defaults to 0.
Raises:
ConnectException: If serial port not connected.
ValueError: If num_before is negative.
Returns:
Optional[Tuple[float, bytes]]: A `tuple` representing the `(timestamp received, data in bytes)` and \
None if no data was found (receive queue empty)
"""
if not self.connected:
raise ConnectException("No connection established")
if num_before < 0:
raise ValueError("num_before has to be nonnegative")
try:
# make sure nothing is reading/writing to the receive queue
# while reading/assigning the variable
with self._lock:
self._last_rcv = self._rcv_queue[-1 - num_before] # last received data
return self._last_rcv
except IndexError:
return None
@property
def connected(self) -> bool:
"""A property to determine if the connection object is currently connected to a serial port or not.
This also can determine if the IO thread for this object
is currently running or not.
"""
return self._conn is not None
@property
def timeout(self) -> float:
"""A property to determine the timeout of this object.
Getter:
- Gets the timeout of this object.
Setter:
- Sets the timeout of this object after checking if convertible to nonnegative float.
Then, sets the timeout to the same value on the `pyserial` object of this class.
If the value is `float('inf')`, then sets the value of the `pyserial` object to None.
"""
return self._timeout
@timeout.setter
def timeout(self, value: float) -> None:
self._timeout = abs(float(value))
if self._conn is not None:
self._conn.timeout = (
self._timeout if self._timeout != constants.NO_TIMEOUT else None
)
@property
def send_interval(self) -> float:
"""A property to determine the send interval of this object.
Getter:
- Gets the send interval of this object.
Setter:
- Sets the send interval of this object after checking if convertible to nonnegative float.
"""
return self._send_interval
@send_interval.setter
def send_interval(self, value: float) -> None:
self._send_interval = abs(float(value))
@property
def conn_obj(self) -> serial.Serial:
"""A property to get the Serial object that handles sending and receiving.
Getter:
- Gets the Serial object.
"""
return self._conn
@property
def available(self) -> int:
"""A property indicating how much new data there is in the receive queue.
Getter:
- Gets the number of additional data received since the user last called the `receive()` method.
"""
if not self.connected:
# check if connected
if self._exception:
raise ConnectException("No connection established")
return 0
last_rcv_ind = self._binary_search_rcv(self._last_rcv[0])
return len(self._rcv_queue) - last_rcv_ind - 1
@property
def port(self) -> str:
"""Returns the current port of the connection
Getter:
- Gets the current port of the connection
"""
return self._port
def _check_output(self, output: t.Any) -> str:
"""Argument processing
- If the argument is `bytes` then decodes to `str`
- If argument is `list` or `dict` then passes through `json.dumps`
- If argument is `set` or `tuple` then converts to list, passes through `json.dumps`
- Otherwise, directly convert to `str`
"""
ret = ""
if isinstance(output, bytes):
ret = output.decode("utf-8").strip()
elif isinstance(output, list) or isinstance(output, dict):
ret = json.dumps(output).strip()
elif isinstance(output, tuple) or isinstance(output, set):
ret = json.dumps(list(output)).strip()
else:
ret = str(output).strip()
return ret
def _reset(self) -> None:
"""
Resets all IO variables
"""
self._last_sent = time.time() # prevents from sending too rapidly
self._rcv_queue = [] # stores previous received strings
self._to_send = [] # queue data to send
def _binary_search_rcv(self, target: float) -> int:
"""
Binary searches a timestamp in the receive queue and returns the index of that timestamp.
Works because the timestamps in the receive queue are sorted by default.
When comparing, rounds to 4 digits.
"""
with self._lock:
_tmp_q = self._rcv_queue.copy()
if len(_tmp_q) <= 0:
# not found if no size
return -1
low = 0
high = len(_tmp_q)
while low <= high:
mid = (low + high) // 2 # integer division
# compare timestamps rounded to 4 digits
cmp1 = round(_tmp_q[mid][0], 4)
cmp2 = round(target, 4)
if cmp1 == cmp2:
return mid
elif cmp1 < cmp2:
low = mid + 1
else:
high = mid - 1
# return -1 if not found
return -1
@abc.abstractmethod
def _io_thread(self) -> None:
"""Thread that interacts with serial port.
Implemented in `Connection` class.
"""
```
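A hedged usage sketch of the API documented above. `BaseConnection` is abstract, so this assumes the concrete `Connection` subclass referenced by the other modules keeps the same constructor, that it is re-exported from `com_server`, and that a device is attached at the given port.
```python
# Hypothetical usage of the connection API above; the port name, constructor
# arguments and the com_server re-export are assumptions.
from com_server import Connection

with Connection(115200, "/dev/ttyUSB0", timeout=1, send_interval=1) as conn:
    conn.send("ping")     # queued, joined with spaces, terminated with "\r\n"
    rcv = conn.receive()  # most recent (timestamp, bytes) or None
    if rcv is not None:
        timestamp, raw = rcv
        print(raw.decode("utf-8"))
```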
#### File: src/com_server/disconnect.py
```python
import logging
import threading
import time
import typing as t
from .connection import Connection
class BaseReconnector(threading.Thread):
"""Base reconnector class"""
_logger: logging.Logger
_logf: t.Optional[str]
def _init_logger(self) -> None:
"""Initializes logger to stdout"""
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
fmt = logging.Formatter("%(levelname)s [%(asctime)s] - %(message)s")
handler.setFormatter(fmt)
self._logger.addHandler(handler)
def _init_logger_file(self) -> None:
"""Initializes logger to file"""
assert self._logf is not None, "No logfile provided" # mypy
handler = logging.FileHandler(self._logf)
handler.setLevel(logging.INFO)
fmt = logging.Formatter("%(levelname)s [%(asctime)s] - %(message)s")
handler.setFormatter(fmt)
self._logger.addHandler(handler)
class Reconnector(BaseReconnector):
"""
Object that detects whenever a single connection is disconnected and reconnects
"""
def __init__(
self,
conn: Connection,
logger: logging.Logger,
logfile: t.Optional[str] = None,
) -> None:
"""Constructor
Takes in the connection object to watch and reconnect if it is disconnected.
This is NOT thread safe and not meant to be used outside of the `RestApiHandler`.
It is recommended to implement your own disconnect/reconnect handler.
Arguments:
- `conn` (Connection): connection to watch and reconnect to.
- `logger` (Logger): logger object
- `logfile` (str, None): the path to the file to log disconnects to
"""
self._conn = conn
self._logf = logfile
self._logger = logger
self._logger.propagate = (
False # prevents from logging twice because waitress calls basicConfig()
)
self._logger.setLevel(logging.INFO)
self._init_logger()
if self._logf:
self._init_logger_file()
# threading
super().__init__(daemon=True)
def run(self) -> None:
"""What to run in thread
In this case, checks if the serial port every 0.01 seconds.
"""
while True:
if not self._conn.connected:
self._logger.warning("Device disconnected")
self._logger.info("Attempting to reconnect...")
self._conn.reconnect()
self._logger.info(f"Device reconnected at {self._conn.port}")
time.sleep(0.01)
class MultiReconnector(BaseReconnector):
"""
Object that detects whenever any given connection is disconnected and reconnects
"""
def __init__(
self,
logger: logging.Logger,
*conns: Connection,
logfile: t.Optional[str] = None,
) -> None:
"""
Takes in the connection objects to watch and reconnect if any are disconnected.
This is NOT thread safe and not meant to be used outside of other connection objects.
You should implement your own disconnect/reconnect handler.
Arguments:
- `logger` (Logger): logger object
- `*conns` (Connection): connections to watch and reconnect to.
- `logfile` (str, None): the path to the file to log disconnects to
"""
self._conns = conns
self._logf = logfile
self._logger = logger
self._logger.propagate = (
False # prevents from logging twice because waitress calls basicConfig()
)
self._logger.setLevel(logging.INFO)
self._init_logger()
if self._logf:
self._init_logger_file()
# threading
super().__init__(daemon=True)
def run(self) -> None:
"""What to run in thread
In this case, checks if the serial port every 0.01 seconds.
"""
cur_reconn = set()
def _reconn(conn: Connection) -> None:
"""
reconnect thread
"""
if conn in cur_reconn:
# if currently reconnecting, then exit
return
cur_reconn.add(conn)
self._logger.warning(f"Device at {conn.port} disconnected")
self._logger.info("Attempting to reconnect...")
conn.reconnect()
self._logger.info(f"Device reconnected at {conn.port}")
# remove of set of currently reconnecting objects after reconnected
cur_reconn.remove(conn)
while True:
for conn in self._conns:
if conn in cur_reconn:
continue
if not conn.connected:
# start thread to reconnect
threading.Thread(target=_reconn, args=(conn,), daemon=True).start()
time.sleep(0.01)
```
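A rough sketch of how a `MultiReconnector` gets wired to connections; the class warns it is meant for internal use, so this only mirrors the wiring done by the server module that follows, under assumed imports and port names.
```python
# Hypothetical wiring of MultiReconnector; the Connection constructor
# arguments, port name and com_server re-export are assumptions.
import logging

from com_server import Connection
from com_server.disconnect import MultiReconnector

conn = Connection(115200, "/dev/ttyUSB0")
conn.connect()
MultiReconnector(logging.getLogger(__name__), conn, logfile=None).start()
```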
#### File: src/com_server/server.py
```python
import logging
import sys
import threading
import typing as t
from concurrent.futures import ThreadPoolExecutor
import waitress
from flask import Flask
from flask_restful import Api, abort
from .api_server import ConnectionResource
from .base_connection import ConnectException
from .connection import Connection
from .constants import SUPPORTED_HTTP_METHODS
from .disconnect import MultiReconnector
class DuplicatePortException(Exception):
pass
class ConnectionRoutes:
"""A wrapper for Flask objects for adding routes involving a `Connection` object
This class allows the user to easily add REST API routes that interact
with a serial connection by using `flask_restful`.
When the connection is disconnected, a `500 Internal Server Error`
will occur when a route relating to the connection is visited.
A thread will detect this event and will try to reconnect the serial port.
Note that this will cause the send and receive queues to **reset**.
If a resource is accessed while it is being used by another process,
then it will respond with `503 Service Unavailable`.
More information on [Flask](https://flask.palletsprojects.com/en/2.0.x/) and [flask-restful](https://flask-restful.readthedocs.io/en/latest/).
"""
def __init__(self, conn: Connection) -> None:
"""Constructor
There should only be one `ConnectionRoutes` object that wraps each `Connection` object.
Having multiple may result in an error.
Note that `conn` needs to be connected when starting
the server or else an error will be raised.
Args:
conn (Connection): The `Connection` object the API is going to be associated with.
"""
self._conn = conn
# dictionary of all resource paths mapped to resource classes
self._all_resources: t.Dict[str, t.Type[ConnectionResource]] = dict()
# for making sure only one thread is accessing Connection obj at a time
self._lock = threading.Lock()
def __repr__(self) -> str:
"""Printing `ConnectionRoutes`"""
return f"ConnectionRoutes<id={hex(id(self))}>" f"{{Connection={self._conn}}}"
def add_resource(self, resource: str) -> t.Callable:
"""Decorator that adds a resource
The resource should interact with the serial port.
If not, use `Api.add_resource()` instead.
This decorator works the same as [Api.resource()](https://flask-restful.readthedocs.io/en/latest/api.html#flask_restful.Api.resource).
However, the class under the decorator should
not extend `flask_restful.Resource` but
instead `com_server.ConnectionResource`. This is
because `ConnectionResource` contains `Connection`
attributes that can be used in the resource.
Unlike a resource added using `Api.add_resource()`,
if a process accesses this resource while it is
currently being used by another process, then it will
respond with `503 Service Unavailable`.
Currently, supported methods are:
- `GET`
- `POST`
- `PUT`
- `PATCH`
- `DELETE`
- `OPTIONS`
- `HEAD`
Make sure to put method names in lowercase
Args:
endpoint (str): The endpoint to the resource.
"""
# outer wrapper
def _outer(
resource_cls: t.Type[ConnectionResource],
) -> t.Type[ConnectionResource]:
# check if resource is subclass of ConnectionResource
if not issubclass(resource_cls, ConnectionResource):
raise TypeError("resource has to extend com_server.ConnectionResource")
# assign connection obj
resource_cls.conn = self._conn
# req methods; _self is needed as these will be part of class functions
def _dec(func: t.Callable) -> t.Callable:
def _inner(_self, *args: t.Any, **kwargs: t.Any) -> t.Any:
if self._lock.locked():
# if another endpoint is currently being used
abort(
503,
message="An endpoint is currently in use by another process.",
)
elif not _self.conn.connected:
# if not connected
abort(500, message="Serial port disconnected.")
else:
with self._lock:
val = func(_self, *args, **kwargs)
return val
return _inner
# replace functions in class with new functions that check if registered
for method in SUPPORTED_HTTP_METHODS:
if hasattr(resource_cls, method):
meth_attr = getattr(resource_cls, method)
setattr(resource_cls, method, _dec(meth_attr))
self._all_resources[resource] = resource_cls
return resource_cls
return _outer
@property
def all_resources(self) -> t.Dict[str, t.Type]:
"""
Returns a dictionary of resource paths mapped
to resource classes.
"""
return self._all_resources
def add_resources(api: Api, *routes: ConnectionRoutes) -> None:
"""Adds all resources given in `servers` to the given `Api`.
This has to be called along with `start_conns()` **before** calling `start_app()` or running a flask app.
Args:
api (Api): The `flask_restful` `Api` object that adds the resources
*routes (ConnectionRoutes): The `ConnectionRoutes` objects to add to the server
"""
res = [route.all_resources for route in routes]
for routes_obj in res:
for endpoint in routes_obj:
api.add_resource(routes_obj[endpoint], endpoint)
def start_conns(
logger: logging.Logger, *routes: ConnectionRoutes, logfile: t.Optional[str] = None
) -> None:
"""Initializes serial connections and disconnect handler
Args:
*routes (ConnectionRoutes): The `ConnectionRoutes` objects to initialize connections from
logger (Logger): A python logging object
logfile (str, None, optional): Path of file to log messages to. Defaults to None.
Raises:
DuplicatePortException: If any ports in `ConnectionRoutes` have ports in common
ConnectException: If any of the connections failed in the `ConnectionRoutes` objects.
"""
# check no duplicate serial ports
tot: tuple = tuple()
for route in routes:
tot += route._conn._ports_list
tot_s = set(tot)
if len(tot) != len(tot_s):
raise DuplicatePortException(
"Connection objects cannot have any ports in common"
)
# start threads
def _initializer(route: ConnectionRoutes) -> None:
if not route._conn.connected:
route._conn.connect()
with ThreadPoolExecutor() as executor:
futures = executor.map(_initializer, routes)
for _ in futures:
# prints out errors
pass
# check that all connections are connected
# if not, raise ConnectException
for route in routes:
if not route._conn.connected:
raise ConnectException("Connection failed.")
conns = (route._conn for route in routes)
reconnector = MultiReconnector(logger, *conns, logfile=logfile)
# start disconnect/reconnect thread
reconnector.start()
def disconnect_conns(*routes: ConnectionRoutes) -> None:
"""Disconnects all `Connection` objects in provided `ConnectionRoutes` objects
It is recommended to call this after `start_app()` to make sure that the serial
connections are closed.
Note that calling this will exit the program using `sys.exit()`.
    Args:
*routes (ConnectionRoutes): The `ConnectionRoutes` objects to disconnect connections from
"""
for route in routes:
route._conn.disconnect()
sys.exit()
def start_app(
app: Flask,
api: Api,
*routes: ConnectionRoutes,
logfile: t.Optional[str] = None,
host: str = "0.0.0.0",
port: int = 8080,
cleanup: t.Optional[t.Callable] = None,
**kwargs: t.Any,
) -> None:
"""Starts a waitress production server that serves the app
Note that connection objects between `ConnectionRoutes`
can share no ports in common.
Using this is recommended over calling `add_resources()`,
`start_conns()`, `serve_app()`, and `disconnect_conns()`
separately.
**Also note that adding multiple `ConnectionRoutes` is
not tested and may result in very unexpected behavior
when disconnecting and reconnecting**.
Lastly, note that `sys.exit()` will be called in this,
so add any cleanup operations to the `cleanup` parameter.
Args:
app (Flask): The flask object that runs the server
        api (Api): The `flask_restful` `Api` object that adds the resources
*routes (ConnectionRoutes): The `ConnectionRoutes` objects to add to the server
logfile (str, None, optional): The path to the file to log serial disconnect/reconnect events to \
Leave as None if you do not want to log to a file. Defaults to None.
host (str, optional): The host of the server (e.g. 0.0.0.0 or 127.0.0.1). Defaults to "0.0.0.0".
port (int, optional): The port to host the server on (e.g. 8080, 8000, 5000). Defaults to 8080.
cleanup (Callable, optional): Cleanup function to be called after waitress is done serving app. Defaults to None.
**kwargs (Any): will be passed to `waitress.serve()`
"""
    # initialize app by adding resources, starting connections, and starting the disconnect handlers
add_resources(api, *routes)
# get waitress logger
_logger = logging.getLogger("waitress")
start_conns(_logger, *routes, logfile=logfile)
# serve on waitress
waitress.serve(app, host=host, port=port, **kwargs)
# call cleanup function
if cleanup:
cleanup()
# destroy connection objects and exit the program
disconnect_conns(*routes)
```
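A minimal usage sketch of the routing API documented above, assuming `Connection`, `ConnectionResource`, `ConnectionRoutes`, and `start_app` are importable from `com_server` and that a serial device is attached; the port, baud rate, and endpoint name are placeholders:
```python
from flask import Flask
from flask_restful import Api
from com_server import Connection, ConnectionResource, ConnectionRoutes, start_app

app = Flask(__name__)
api = Api(app)

conn = Connection(115200, "/dev/ttyUSB0")  # baud rate and port are illustrative
handler = ConnectionRoutes(conn)

@handler.add_resource("/status")
class Status(ConnectionResource):
    def get(self):
        # `conn` is attached to the resource class by add_resource()
        return {"connected": self.conn.connected}

if __name__ == "__main__":
    # adds the resources, connects the serial port, starts the reconnect
    # thread, serves with waitress, and disconnects on exit
    start_app(app, api, handler)
```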
#### File: COM-Server/tests/active_test.py
```python
import os
import pytest
import requests
import sys
try:
requests.get("http://127.0.0.1:8080")
except requests.exceptions.ConnectionError:
print("Server not found. Skipping tests and exiting.")
sys.exit(1)
def main() -> int:
return pytest.main([os.path.join("tests", "active")] + sys.argv[1:])
if __name__ == "__main__":
sys.exit(main())
```
#### File: tests/active/test_receive_get.py
```python
import requests
import json
import pytest
import time
SERVER = "http://127.0.0.1:8080/v1"
class Test_Receive:
"""
tests the /receive and /receive/x resources
"""
@pytest.fixture
def example_data(self):
return {"data": [1, 2, 3, 4, time.time()], "ending": "\n", "concatenate": ";"}
res = f"{SERVER}/receive"
@pytest.mark.parametrize(
"http_method",
[
requests.post,
requests.put,
requests.patch,
],
)
def test_http_method_with_data(self, example_data, http_method):
"""Tests that only given request works (also tests that HTTP methods are working)"""
r = http_method(self.res, example_data)
assert r.status_code == 405
r = http_method(f"{self.res}/23", example_data)
assert r.status_code == 405
@pytest.mark.parametrize("http_method", [requests.delete])
def test_http_method_no_data(self, http_method):
"""Tests that only given request works (but with options, head, delete requests)"""
r = http_method(self.res)
assert r.status_code == 405
r = http_method(f"{self.res}/23")
assert r.status_code == 405
def test_receive_first_good(self, example_data):
"""Tests that the first thing sent is received correctly"""
curt = example_data["data"][4]
requests.post(f"{SERVER}/send", data=example_data)
time.sleep(1)
r = requests.get(f"{SERVER}/receive/0")
loaded = json.loads(r.text)
assert r.status_code == 200 and loaded["message"] == "OK"
assert loaded["data"] == f'Got: "1;2;3;4;{curt}"'
def test_receive_second_good(self, example_data):
"""Tests that the 2nd most recent received object is correct"""
curt = example_data["data"][4]
requests.post(f"{SERVER}/send", data=example_data)
time.sleep(1)
requests.post(f"{SERVER}/send", data=example_data)
time.sleep(1)
r = requests.get(f"{SERVER}/receive/1")
loaded = json.loads(r.text)
assert r.status_code == 200 and loaded["message"] == "OK"
assert loaded["data"] == f'Got: "1;2;3;4;{curt}"'
def test_receive_all(self, example_data):
"""Tests that things are being received in the order they should be"""
curt = example_data["data"][4]
requests.post(f"{SERVER}/send", data=example_data)
time.sleep(1)
r = requests.get(f"{SERVER}/receive")
loaded = json.loads(r.text)
assert r.status_code == 200 and loaded["message"] == "OK"
assert loaded["data"][-1] == f'Got: "1;2;3;4;{curt}"'
class Test_Get:
"""
tests the /get resource
"""
@pytest.fixture
def example_data(self):
return {"data": [1, 2, 3, 4, time.time()], "ending": "\n", "concatenate": ";"}
res = f"{SERVER}/get"
@pytest.mark.parametrize(
"http_method",
[
requests.post,
requests.put,
requests.patch,
],
)
def test_http_method_with_data(self, example_data, http_method):
"""Tests that only given request works (also tests that HTTP methods are working)"""
r = http_method(self.res, example_data)
assert r.status_code == 405
@pytest.mark.parametrize("http_method", [requests.delete])
def test_http_method_no_data(self, http_method):
"""Tests that only given request works (but with options, head, delete requests)"""
r = http_method(self.res)
assert r.status_code == 405
def test_get_working(self, example_data):
"""tests that get works with example data"""
curt = example_data["data"][4]
requests.post(f"{SERVER}/send", data=example_data)
r = requests.get(f"{SERVER}/get")
loaded = json.loads(r.text)
assert r.status_code == 200 and loaded["message"] == "OK"
assert loaded["data"] == f'Got: "1;2;3;4;{curt}"'
time.sleep(1)
```
#### File: methods/v0/test_connections.py
```python
import json
import time
from com_server import all_ports
import pytest
import requests
SERVER = "http://127.0.0.1:8080"
V = "http://127.0.0.1:8080/v0"
# don't start unless running
try:
requests.get(SERVER + "/recall")
except requests.exceptions.ConnectionError:
pytestmark = pytest.mark.skip(
reason='Server not launched. Make sure it is running on 0.0.0.0 with port 8080, or run "com_server run <baud> <serport>".'
)
def test_register() -> None:
r = requests.get(SERVER + "/register")
assert r.status_code == 200
def test_connected():
"""
Arduino should be connected
"""
r = requests.get(V + "/connected")
loaded = json.loads(r.text)
assert r.status_code == 200
assert loaded["connected"] == True
def test_list_ports():
"""
Tests that com_server.list_ports() is the same as the data from request
"""
r = requests.get(V + "/list_ports")
loaded = json.loads(r.text)
a = all_ports()
for i in range(len(a)):
for j in range(len(loaded["ports"][i])):
assert loaded["ports"][i][j] == a[i][j]
assert r.status_code == 200
def test_available():
"""
Tests available property
"""
requests.get(V + "/receive")
data = {"data": [1, 2, 3, 4], "ending": "\n", "concatenate": ";"}
requests.post(V + "/send", data=data)
time.sleep(1) # for send interval
r = requests.get(V + "/connection_state")
assert r.status_code == 200
loaded = json.loads(r.text)
state = loaded["state"]
assert state["available"] == 1
def test_timeout_sendint():
"""
Tests that timeout and send interval are 1.0
"""
r = requests.get(V + "/connection_state")
assert r.status_code == 200
loaded = json.loads(r.text)
state = loaded["state"]
assert state["send_interval"] == 1.0 and state["timeout"] == 1.0
def test_unregister() -> None:
r = requests.get(SERVER + "/recall")
assert r.status_code == 200
```
#### File: tests/passive/test_v1.py
```python
from com_server import Connection, ConnectionRoutes, RestApiHandler
from com_server.api import V1
import pytest
def test_all_routes_being_added() -> None:
"""Tests that all routes from V1 API are being added with correct prefix"""
conn = Connection(115200, "/dev/ttyUSB0")
handler = ConnectionRoutes(conn)
pref = "testpref"
# should not raise anything
V1(handler, pref)
_endpoints = [
"/send",
"/receive/<int:num_before>",
"/receive",
"/get",
"/first_response",
"/send_until",
"/connection_state",
"/all_ports",
]
for i in _endpoints:
assert f"/{pref}{i}" in handler.all_resources
def test_not_connection_routes_raises_exception() -> None:
"""Tests that wrapping other class should raise an exception"""
conn = Connection(115200, "/dev/ttyUSB0")
handler = RestApiHandler(conn)
with pytest.raises(TypeError):
V1(handler)
```
#### File: COM-Server/tests/_test_version_greater.py
```python
import configparser
import requests
from com_server import __version__
from passive.cmp_version import Version
def test_version_greater() -> None:
"""Tests if current version is greater than version on master branch on github"""
req = requests.get(
"https://raw.githubusercontent.com/jonyboi396825/COM-Server/master/setup.cfg"
)
cfg = configparser.ConfigParser()
cfg.read_string(req.text)
master_vers = Version(cfg["metadata"]["version"])
cur_vers = Version(__version__)
assert cur_vers > master_vers
``` |
{
"source": "jo-ny/CarND-Advanced-Lane-Lines",
"score": 3
} |
#### File: CarND-Advanced-Lane-Lines/lane/color_threshold.py
```python
import glob
import numpy as np
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def hls_select_h(img, thresh=(0, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
h_channel = hls[:, :, 0]
binary_output = np.zeros_like(h_channel)
binary_output[(h_channel > thresh[0]) & (h_channel <= thresh[1])] = 1
return binary_output
def hls_select_l(img, thresh=(0, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
l_channel = hls[:, :, 1]
binary_output = np.zeros_like(l_channel)
binary_output[(l_channel > thresh[0]) & (l_channel <= thresh[1])] = 1
return binary_output
def hls_select_s(img, thresh=(0, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]
binary_output = np.zeros_like(s_channel)
binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1
return binary_output
def lab_select_l(img, thresh=(0, 255)):
lab = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
l_channel = lab[:, :, 0]
binary_output = np.zeros_like(l_channel)
binary_output[(l_channel > thresh[0]) & (l_channel <= thresh[1])] = 1
return binary_output
def lab_select_a(img, thresh=(0, 255)):
lab = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
a_channel = lab[:, :, 1]
binary_output = np.zeros_like(a_channel)
binary_output[(a_channel > thresh[0]) & (a_channel <= thresh[1])] = 1
return binary_output
def lab_select_b(img, thresh=(0, 255)):
lab = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
b_channel = lab[:, :, 2]
binary_output = np.zeros_like(b_channel)
binary_output[(b_channel > thresh[0]) & (b_channel <= thresh[1])] = 1
return binary_output
def luv_select_l(img, thresh=(0, 255)):
luv = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
l_channel = luv[:, :, 0]
binary_output = np.zeros_like(l_channel)
binary_output[(l_channel > thresh[0]) & (l_channel <= thresh[1])] = 1
return binary_output
def luv_select_u(img, thresh=(0, 255)):
luv = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
u_channel = luv[:, :, 1]
binary_output = np.zeros_like(u_channel)
binary_output[(u_channel > thresh[0]) & (u_channel <= thresh[1])] = 1
return binary_output
def luv_select_v(img, thresh=(0, 255)):
luv = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
v_channel = luv[:, :, 2]
binary_output = np.zeros_like(v_channel)
binary_output[(v_channel > thresh[0]) & (v_channel <= thresh[1])] = 1
return binary_output
def adp_thresh_grayscale(image, thresh=250):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY).astype(np.uint8)
img = cv2.equalizeHist(gray)
ret, thrs = cv2.threshold(img, thresh=thresh, maxval=255, type=cv2.THRESH_BINARY)
return thrs
if __name__ == "__main__":
images = glob.glob('./output_images/undistorted/*.jpg')
for fname in images:
image = mpimg.imread(fname)
hls_binary = adp_thresh_grayscale(image, thresh=250)
plt.imshow(hls_binary, cmap='gray')
plt.title("S")
plt.show()
images = glob.glob('./output_images/undistorted/*.jpg')
for fname in images:
image = mpimg.imread(fname)
hls_binary = hls_select_s(image, thresh=(140, 254))
plt.imshow(hls_binary, cmap='gray')
plt.title("S")
plt.show()
# images = glob.glob('../error_images/first/*.jpg')
# for fname in images:
# image = mpimg.imread(fname)
# luv_binary = luv_select_l(image, thresh=[225,255])
# plt.imshow(luv_binary, cmap='gray')
# plt.title("LUV_L")
# plt.show()
# images = glob.glob('../error_images/first/*.jpg')
# for fname in images:
# image = mpimg.imread(fname)
# lab_binary = lab_select_b(image, thresh=[155, 200])
# plt.imshow(lab_binary, cmap='gray')
# plt.title("lab_b")
# plt.show()
# images = glob.glob('../error_images/first/*.jpg')
# for fname in images:
# image = mpimg.imread(fname)
# hls_binary = hls_select_s(image, thresh=[240, 255])
# plt.imshow(hls_binary, cmap='gray')
# plt.title("lab_b")
# plt.show()
```
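The nine channel-selection functions above differ only in the colour-space conversion code and the channel index. A compact equivalent is sketched below as an optional refactoring; the helper name `channel_select` is ours and not part of the original module:
```python
import cv2
import numpy as np

_CSPACES = {"hls": cv2.COLOR_RGB2HLS, "lab": cv2.COLOR_RGB2Lab, "luv": cv2.COLOR_RGB2LUV}

def channel_select(img, cspace, channel, thresh=(0, 255)):
    # binary mask where the chosen channel falls inside (low, high]
    converted = cv2.cvtColor(img, _CSPACES[cspace])
    ch = converted[:, :, channel]
    binary_output = np.zeros_like(ch)
    binary_output[(ch > thresh[0]) & (ch <= thresh[1])] = 1
    return binary_output

# e.g. channel_select(image, "hls", 2, (140, 254)) matches hls_select_s(image, (140, 254))
```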
#### File: CarND-Advanced-Lane-Lines/lane/combined_threshold.py
```python
from lane.color_threshold import hls_select_s, lab_select_b, luv_select_l, adp_thresh_grayscale
from lane.gradient_threshold import abs_sobel_thresh, mag_thresh, dir_threshold
from lane.gaussian_blur import gaussian_blur
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def combined_threshold(image, ksize=3, th=None):
if th is None:
th = [[20, 100], # gradx
[20, 100], # grady
[20, 100], # mag_binary
[0.7, 1.3], # dir_binary
[90, 255], # hls_binary
[155, 200], # lab_binary
[225, 255], # luv_binary
[250, 0]] # adp_binary
gradx = abs_sobel_thresh(image, orient='x', sobel_kernel=ksize, thresh=(th[0][0], th[0][1]))
grady = abs_sobel_thresh(image, orient='y', sobel_kernel=ksize, thresh=(th[1][0], th[1][1]))
mag_binary = mag_thresh(image, sobel_kernel=ksize, mag_thresh=(th[2][0], th[2][1]))
dir_binary = dir_threshold(image, sobel_kernel=ksize, thresh=(th[3][0], th[3][1]))
hls_binary = hls_select_s(image, thresh=(th[4][0], th[4][1]))
lab_binary = lab_select_b(image, thresh=(th[5][0], th[5][1]))
luv_binary = luv_select_l(image, thresh=(th[6][0], th[6][1]))
    adp_binary = adp_thresh_grayscale(image, thresh=th[7][0])  # index 7 holds the adaptive-grayscale threshold (index 5 is the Lab one)
combined = np.zeros_like(dir_binary)
# combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | (hls_binary == 1) | (adp_binary == 1)] = 1
# combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | (hls_binary == 1)] = 1
# combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
# combined[(gradx == 1) & (grady == 1)] = 1 # x = 5 y = 5
# combined[((mag_binary == 1) & (dir_binary == 1))] = 1
# combined[(hls_binary == 1 )] = 1
# combined[(lab_binary == 1) | (luv_binary == 1)] = 1
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | (
(lab_binary == 1) | (luv_binary == 1))] = 1
return combined
if __name__ == "__main__":
import glob
images = glob.glob('../output_images/undistorted/*.jpg')
for fname in images:
image = mpimg.imread(fname)
image = gaussian_blur(image, 3)
combined = combined_threshold(image, ksize=3, th=[[20, 100],
[25, 254],
[100, 250],
[0.6, 1.2],
[180, 254],
[155, 200],
[225, 255],
[250, 0]])
fig, ax = plt.subplots(figsize=(20, 7))
ax.imshow(combined, cmap='gray')
plt.show()
mpimg.imsave("../output_images/threshold/" + fname.split("\\")[-1].split("-")[0] + "-threshold.jpg", combined,
cmap="gray")
# import glob
# images = glob.glob('../error_images/first/*.jpg')
# for fname in images:
# image = mpimg.imread(fname)
# image = gaussian_blur(image, 3)
# combined = combined_threshold(image, ksize=3,
# th=[[20, 100],
# [25, 100],
# [100, 200],
# [0.7, 1.2],
# [180, 250],
# [155, 200],
# [225, 255],
# [250, 0]])
# plt.imshow(combined, cmap='gray')
# plt.show()
    # # mpimg.imsave("../output_images/threshold/" + fname.split("\\")[-1].split("-")[0] + "-threshold.jpg", combined, cmap="gray")
```
#### File: CarND-Advanced-Lane-Lines/lane/draw_lane.py
```python
import glob
import numpy as np
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def draw_lane(image, binary_warped, dst, src, left_fitx, right_fitx, ploty):
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    # used to draw the filled region between the lane lines
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # used to draw the lane lines themselves
lane_wrap = np.zeros_like(color_warp).astype(np.uint8)
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
lane_left_1 = np.array([np.transpose(np.vstack([left_fitx - 20, ploty]))])
lane_left_2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + 20, ploty])))])
lane_left = np.hstack((lane_left_1, lane_left_2))
# Draw the lane onto the warped blank image
cv2.fillPoly(lane_wrap, np.int_([lane_left]), (255, 0, 0))
# Recast the x and y points into usable format for cv2.fillPoly()
lane_right_1 = np.array([np.transpose(np.vstack([right_fitx - 20, ploty]))])
lane_right_2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + 20, ploty])))])
lane_right = np.hstack((lane_right_1, lane_right_2))
# Draw the lane onto the warped blank image
cv2.fillPoly(lane_wrap, np.int_([lane_right]), (0, 0, 255))
    M = cv2.getPerspectiveTransform(dst, src)  # inverse perspective: dst and src are swapped to map back to the camera view
# Warp the blank back to original image space using inverse perspective matrix (Minv)
new_color_warp = cv2.warpPerspective(color_warp, M, (image.shape[1], image.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(image, 1, new_color_warp, 0.3, 0)
new_line_warp = cv2.warpPerspective(lane_wrap, M, (image.shape[1], image.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(result, 1, new_line_warp, 1.0, 0)
return result
```
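A sketch of how `draw_lane` is typically called: `ploty` covers every image row and the x positions come from second-order polynomial fits of each lane. The fit coefficients below are invented for illustration; in the real pipeline they come from the lane-finding step:
```python
import numpy as np
# image, binary_warped, and the src/dst arrays come from the earlier pipeline stages

ploty = np.linspace(0, 719, 720)              # one y value per row of a 720-pixel-high image
left_fit = np.array([2e-4, -0.3, 350.0])      # placeholder polynomial coefficients
right_fit = np.array([2e-4, -0.3, 1000.0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# result = draw_lane(image, binary_warped, dst, src, left_fitx, right_fitx, ploty)
```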
#### File: CarND-Advanced-Lane-Lines/lane/output_images.py
```python
import matplotlib.pyplot as plt
from matplotlib.image import imsave
from moviepy.editor import VideoFileClip
count = 0
start = 950
end = 1100
def pipeline(image):
global count
count += 1
if start <= count <= end:
print(count)
if count%5 == 0:
plt.imshow(image)
plt.show()
# if count%10 == 0:
imsave("../error_images/second/error_image_"+str(count)+".jpg",image)
return image
white_output = "../output_images/project_video.mp4"
clip1 = VideoFileClip("../project_video.mp4")
white_clip = clip1.fl_image(pipeline)
white_clip.write_videofile(white_output, audio=False)
```
#### File: CarND-Advanced-Lane-Lines/lane/perspective.py
```python
from lane.combined_threshold import combined_threshold
from lane.gaussian_blur import gaussian_blur
import numpy as np
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
src = np.float32([[700, 460], # right_top
[1080, 720], # right_bottom
[200, 720], # left_bottom
[580, 460]]) # left_top
dst = np.float32([[980, 0], # right_top
[980, 720], # right_bottom
[300, 720], # left_bottom
[300, 0]]) # left_top
def perspective(img,src=src, dst=dst):
# Compute and apply perpective transform
img_size = (img.shape[1], img.shape[0])
    M = cv2.getPerspectiveTransform(src, dst)  # forward perspective transform; swap src and dst to invert
warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_NEAREST) # keep same size as input image
return warped
if __name__ == "__main__":
import glob
images = glob.glob('../output_images/threshold/*.jpg')
for fname in images:
image = mpimg.imread(fname)
image = perspective(image)
plt.imshow(image, cmap='gray')
plt.show()
mpimg.imsave("../output_images/perspective/" + fname.split("\\")[-1].split("-")[0] + "-perspective.jpg", image,
cmap="gray")
``` |
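`perspective()` applies the forward transform from `src` to `dst`; unwarping (as `draw_lane` does) uses the matrix built with the arguments swapped. A quick consistency check against the module's own point sets:
```python
import cv2
import numpy as np
from lane.perspective import src, dst

M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
pts = src.reshape(-1, 1, 2)
warped_pts = cv2.perspectiveTransform(pts, M)          # ~ dst corners
restored = cv2.perspectiveTransform(warped_pts, Minv)  # ~ src corners again
print(np.allclose(warped_pts.reshape(-1, 2), dst, atol=1e-3))
print(np.allclose(restored, pts, atol=1e-3))
```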
{
"source": "jonycgn/clic2021-devkit",
"score": 3
} |
#### File: jonycgn/clic2021-devkit/psnr.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import cv2
import concurrent.futures
import numpy as np
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS
flags.DEFINE_string('input_file', None,
"""Eval CSV file. This has triplets.""")
def load_image(fname):
im = cv2.imread(fname)
return np.asarray(im)
def read_csv(file_name):
"""Read CSV file.
The CSV file contains 4 columns:
FileA,FileB,OriginalFile,BinaryScore
FileA/FileB: paths to images generated by the two methods will be compared.
OriginalFile: path to the original (uncompressed) image filed.
BinaryScore: 0/1. This should be 0 if FileA is closer to the original than
FileB.
Args:
file_name: file name to read.
Returns:
        list of [FileA, FileB, OriginalFile] rows.
"""
contents = []
with open(file_name) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
contents.append(row[:3])
return contents
def psnr(a, b):
mse = np.mean((a.flatten() - b.flatten()) ** 2)
return 10 * np.log10(255**2 / mse)
def process_triplet(item):
o = item[0]
a = item[1]
b = item[2]
oi = load_image(o).astype(np.float32)
ai = load_image(a).astype(np.float32)
bi = load_image(b).astype(np.float32)
apsnr = psnr(oi, ai)
bpsnr = psnr(oi, bi)
return o,a,b,1 if apsnr < bpsnr else 0
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
inputs = read_csv(FLAGS.input_file)
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for (o, a, b, s) in executor.map(process_triplet, inputs):
print('{},{},{},{}'.format(o,a,b,s))
if __name__ == '__main__':
app.run(main)
``` |
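The `psnr` helper computes 10·log10(255² / MSE) for 8-bit image data. A tiny self-contained check with hand-picked values (not taken from the challenge data):
```python
import numpy as np

def psnr(a, b):
    mse = np.mean((a.flatten() - b.flatten()) ** 2)
    return 10 * np.log10(255 ** 2 / mse)

a = np.zeros((4, 4), dtype=np.float32)
b = np.full((4, 4), 5.0, dtype=np.float32)  # every pixel off by 5, so MSE = 25
print(psnr(a, b))                           # 10 * log10(65025 / 25) ≈ 34.15 dB
```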
{
"source": "jonydaimary/level",
"score": 3
} |
#### File: jonydaimary/level/level.py
```python
import discord
from discord.ext import commands
import os
import random
import json
import asyncio
import time
client = commands.Bot(description="marcos Official Bot", command_prefix=commands.when_mentioned_or("/"), pm_help = True)
client.remove_command('help')
@client.event
async def on_message(message):
with open("users.json", "r") as f:
users = json.load(f)
if message.author.bot:
return
if message.channel.is_private:
return
else:
await update_data(users, message.author, message.server)
number = random.randint(5,10)
await add_experience(users, message.author, number, message.server)
await level_up(users, message.author, message.channel, message.server)
with open("users.json", "w") as f:
json.dump(users, f)
await client.process_commands(message)
async def update_data(users, user, server):
if not user.id + "-" + server.id in users:
users[user.id + "-" + server.id] = {}
users[user.id + "-" + server.id]["experience"] = 0
users[user.id + "-" + server.id]["level"] = 1
users[user.id + "-" + server.id]["last_message"] = 0
async def add_experience(users, user, exp, server):
if time.time() - users[user.id + "-" + server.id]["last_message"] > 5:
users[user.id + "-" + server.id]["experience"] += exp
users[user.id + "-" + server.id]["last_message"] = time.time()
else:
return
async def level_up(users, user, channel, server):
experience = users[user.id + "-" + server.id]["experience"]
lvl_start = users[user.id + "-" + server.id]["level"]
lvl_end = int(experience ** (1/4))
if lvl_start < lvl_end:
await client.send_message(channel, f":tada: Congrats {user.name}, you levelled up to level {lvl_end}!")
users[user.id + "-" + server.id]["level"] = lvl_end
#show when it connects to discord
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
@client.event
async def on_reaction_add(reaction, user):
    roleChannelId = discord.utils.get(reaction.message.server.channels, name="★verify-for-chatting★")  # match by name; the original also filtered on type="ChannelType.voice", which never matches the text channel created below
if reaction.message.channel != roleChannelId:
return #So it only happens in the specified channel
if str(reaction.emoji) == "🇻":
role = discord.utils.get(reaction.message.server.roles, name="Verified")
await client.add_roles(user, role)
@client.event
async def on_reaction_remove(reaction, user):
verifychannel = "★verify-for-chatting★"
for channel in user.server.channels:
if channel.name != verifychannel:
return
if str(reaction.emoji) == "🇻":
role = discord.utils.get(user.server.roles, name="Verified")
await client.remove_roles(user, role)
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def setreactionverify(ctx):
author = ctx.message.author
server = ctx.message.server
everyone_perms = discord.PermissionOverwrite(send_messages=False,read_messages=True)
everyone = discord.ChannelPermissions(target=server.default_role, overwrite=everyone_perms)
await client.create_channel(server, '★verify-for-chatting★',everyone)
for channel in author.server.channels:
if channel.name == '★verify-for-chatting★':
            react_message = await client.send_message(channel, 'React with 🇻 to Verify | If the reaction does not work, you can also use mv!verify anywhere you can send messages')
reaction = '🇻'
await client.add_reaction(react_message, reaction)
client.run(os.getenv('Token'))
``` |
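The level curve in `level_up` is `level = int(experience ** (1/4))`, so (floating-point rounding aside) reaching level n takes roughly n**4 XP. A quick table under that assumption:
```python
# XP roughly needed to first reach each level, given level = int(experience ** (1/4))
for level in range(2, 11):
    print(level, level ** 4)  # level 2 at ~16 XP, level 5 at ~625 XP, level 10 at ~10000 XP
```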
{
"source": "jonyejin/YourBench",
"score": 2
} |
#### File: jonyejin/YourBench/main.py
```python
import numpy as np
import json
import os
import sys
import time
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.utils
from torchvision import models
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torchattacks
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import argparse
from Source.utils import stitch_images
parser = argparse.ArgumentParser(description='✨Welcome to YourBench-Adversarial Attack Robustness Benchmarking & Reporting tools.✨')
parser.add_argument('-a', '--attack_method', required=True, type=str, nargs='*', choices=['FGSM', 'CW', 'PGD', 'DeepFool'], dest='parsedAttackMethod', action='store')
parser.add_argument('-m', '--model', required=True, type=str, choices=['ResNet101_2', 'ResNet18', 'Custom'], dest='parsedModel')
parser.add_argument('-d', '--dataset', required=True, type=str, choices=['CIFAR-10', 'CIFAR-100', 'ImageNet', 'Custom'], dest='parsedDataset')
args = parser.parse_args()
simple_data = False
print(args.parsedAttackMethod) # ['FGSM']
print(args.parsedModel) # WRN
print(args.parsedDataset)
# Hyper Parameter settings
use_cuda = torch.cuda.is_available()
print("PyTorch", torch.__version__)
print("Torchvision", torchvision.__version__)
print("Torchattacks", torchattacks.__version__)
print("Numpy", np.__version__)
# CUDA Settings
USE_CUDA = torch.cuda.is_available()
device = torch.device('cuda:0' if USE_CUDA else 'cpu')
print('Device used for computation:', device)
# 1. Load Data
# https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json
# class_idx = json.load(open("./data/imagenet_class_index.json"))
# idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
transform = transforms.Compose([
transforms.Resize((299, 299)),
transforms.ToTensor(), # ToTensor : [0, 255] -> [0, 1]
])
# imagnet_data = image_folder_custom_label(root='./data/oneImage', transform=transform, idx2label=idx2label)
# data_loader = torch.utils.data.DataLoader(imagnet_data, batch_size=10, shuffle=False)
if args.parsedDataset == 'CIFAR-10':
cifar10_data = torchvision.datasets.CIFAR10('Data/CIFAR10', download=True, transform=transform)
data_loader = torch.utils.data.DataLoader(cifar10_data, batch_size=5)
simple_data = True
elif args.parsedDataset == 'CIFAR-100':
cifar100_data = torchvision.datasets.CIFAR100('Data/CIFAR100', download=True, transform=transform)
data_loader = torch.utils.data.DataLoader(cifar100_data, batch_size=5)
simple_data = True
elif args.parsedDataset == 'ImageNet':
imagenet_data = torchvision.datasets.ImageNet('Data/ImageNet', download=True, transform=transform)
data_loader = torch.utils.data.DataLoader(imagenet_data, batch_size=5)
class Normalize(nn.Module) :
def __init__(self, mean, std) :
super(Normalize, self).__init__()
self.register_buffer('mean', torch.Tensor(mean))
self.register_buffer('std', torch.Tensor(std))
def forward(self, input):
# Broadcasting
mean = self.mean.reshape(1, 3, 1, 1)
std = self.std.reshape(1, 3, 1, 1)
mean.to(device)
std.to(device)
input.to(device)
return (input - mean) / std
if args.parsedModel == 'ResNet101_2':
norm_layer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
model = nn.Sequential(
norm_layer,
models.wide_resnet101_2(pretrained=True)
).to(device)
model = model.eval()
elif args.parsedModel == 'ResNet18':
norm_layer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
model = nn.Sequential(
norm_layer,
models.resnet18(pretrained=True)
).to(device)
model = model.eval()
elif args.parsedModel == 'Custom':
pkg = __import__('custom_net')
model_custom = pkg.my_model(pretrained = False)
    # path to the saved state_dict
model_custom.load_state_dict(torch.load('./my_model.pth'))
norm_layer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
model = nn.Sequential(
norm_layer,
model_custom
).cuda()
model = model.eval()
# 3. Attacks
from torchattacks import *
attackMethodDict = {'FGSM': FGSM(model, eps=8/255),
'CW' : CW(model, c=1, lr=0.01, steps=100, kappa=0),
                    'PGD' : PGD(model, eps=8/255, alpha=2/255, steps=100, random_start=True),
'DeepFool': DeepFool(model, steps=100)}
atks = [
VANILA(model),
#FGSM(model, eps=8/255),
#CW(model, c=1, lr=0.01, steps=100, kappa=0),
#PGD(model, eps=8/255, alpha=2/225, steps=100, random_start=True),
#DeepFool(model, steps=100),
]
# only add the attack methods parsed from the command line
for atk in args.parsedAttackMethod:
atks.append(attackMethodDict[atk])
print(atks)
print("Adversarial Image & Predicted Label")
# +
vanilla_output = []
untargeted_output= []
targeted_output= []
for atk in atks :
print("-"*70)
print(atk)
correct = 0
top5_correct = 0
total = 0
for images, labels in data_loader:
# images : torch.Size([1,3,299,299])
# labels: torch.Size([10]),[7, 5, 388, 1, ...] -> cock, electric_ray, giant_panda...
atk.set_mode_default()
#print(images.shape)
start = time.time()
adv_images = atk(images, labels)
labels = labels.to(device)
        outputs = model(adv_images)  # outputs: torch.Size([batch_size, 1000]); result of feeding the adversarial images to the model
#print(outputs.shape)
#print(outputs.data.shape)
        _, pre = torch.max(outputs.data, 1)  # largest logit over the 1000 classes; returns (values, indices), one entry per image in the batch
_, top_5 = torch.topk(outputs.data, 5)
#print(top_5)
#print(labels.shape)
total += len(images)
correct += (pre == labels).sum()
break
print('Total elapsed time (sec): %.2f' % (time.time() - start))
print('Robust accuracy: %.2f %%' % (100 * float(correct) / total))
if atk.attack == "VANILA":
for i in range (len(atks) - 1):
vanilla_output.append(100 * float(correct) / total)
else:
untargeted_output.append(100 * float(correct) / total)
if atk.attack == ("FGSM" or "CW"):
print("-"*70)
print(atk)
for images, labels in data_loader:
atk.set_mode_targeted_least_likely()
start = time.time()
adv_images = atk(images, labels)
labels = labels.to(device)
            outputs = model(adv_images)  # result of feeding the adversarial images to the model; shape [batch_size, 1000]
            _, pre = torch.max(outputs.data, 1)  # largest logit over the 1000 classes; returns (values, indices) per image
_, top_5 = torch.topk(outputs.data, 5)
total += len(images)
correct += (pre == labels).sum()
break
print('Total elapsed time (sec): %.2f' % (time.time() - start))
print('Robust accuracy: %.2f %%' % (100 * float(correct) / total))
targeted_output.append(100 * float(correct) / total)
elif atk.attack != "VANILA" and (atk.attack == "PGD" or "DeepFool"):
targeted_output.append(-10)
# -
print("==================")
print(adv_images.shape)
# The default `log_dir` is "runs"; a more specific subdirectory is used here
writer = SummaryWriter('Tutorials/runs/white_box_attack_image_net')
images = images.cuda()
writer.add_graph(model, images)
writer.close()
#make Data/Generated directory
os.makedirs("./Data/Generated", exist_ok=True)
# Save Image in Folder
for i in range(adv_images.shape[0]):
torchvision.utils.save_image(images[i], fp=f"./Data/Generated/image_original_{i+1}.jpg", normalize=True)
torchvision.utils.save_image(adv_images[i], fp=f"./Data/Generated/image_adv_{i+1}.jpg", normalize=True)
# 4. Report Generating
# draw the accuracy comparison graph with matplotlib
x_val =[]
for atk in atks:
if atk.attack == "VANILA":
continue
x_val.append(atk.attack)
plt.plot(x_val, vanilla_output, color='green', label = 'VANILLA')
plt.plot(x_val, untargeted_output, color='blue', label = 'DEFAULT')
plt.plot(x_val, targeted_output, color='red', label = 'TARGETED')
#plt.legend(loc=(0.73,0.775))
plt.legend(loc=(0.0,0.775))
plt.xlabel('Attack Method')
plt.ylabel('Accuracy (%)\nnegative value for unsupported attacks')
plt.savefig(f'./Data/Generated/graph.jpg', dpi=300)
# compute the overall robustness score
total_result = 0
for atk in untargeted_output:
total_result = total_result + atk
total_result = total_result / 0.001 if vanilla_output[0] == 0 else vanilla_output[0] / len(untargeted_output)
total_grade = ""
if simple_data == True:
if total_result >= 45.0:
total_grade = "A"
elif total_result >= 35.0:
total_grade = "B"
elif total_result >= 25.0:
total_grade = "C"
elif total_result >= 15.0:
total_grade = "D"
else:
total_grade = "F"
from fpdf import FPDF
from torchvision.transforms.functional import to_pil_image
from PIL.Image import Image
import PIL
class PDF(FPDF):
def header(self):
self.set_font("Times", "B", 20)
# Moving cursor to the right:
self.cell(80)
self.cell(30, 10, "Benchmark Result", 0, 0, "C")
self.cell(0, 10, ("Grade : " + total_grade if simple_data else "Score : " + str(total_result)),0, 0, "R")
        # Performing a line break:
self.ln(20)
def footer(self):
# Position cursor at 1.5 cm from bottom:
self.set_y(-15)
# Setting font: helvetica italic 8
self.set_font("helvetica", "I", 8)
# Printing page number:
self.cell(0, 10, f"Page {self.page_no()}/{{nb}}", 0, 0, "C")
# Instantiation of inherited class
pdf = PDF()
pdf.set_display_mode(zoom='fullwidth',layout='two')
pdf.alias_nb_pages()  # alias for the total page count used in the footer
pdf.add_page()
pdf.set_auto_page_break(True)
# Mapped Network
top_y = pdf.get_y()
#pdf.set_font("Times", "B", size=12)
#pdf.cell(0, 10, f"Mapped Network", 0, 1)
#pdf.set_font("Helvetica", "I", 12)
#pdf.cell(0, 10, f"<This function is still working in process.>", 0, 1)
# 1. Successful adversarial examples
pdf.set_font("Times", "B", size=12)
pdf.cell(0, 10, f"Succeeded Adversarial examples", 0, 1)
# Effective page width, or just epw
epw = pdf.w - 2*pdf.l_margin
img_size = epw/2 - 20
np_images = (images[0:5].cpu().numpy().transpose(0,2,3,1) * 255).astype(np.uint8)
np_adv_images = (adv_images[0:5].cpu().numpy().transpose(0,2,3,1) * 255).astype(np.uint8)
original_labels = [str(l) for l in labels.cpu().numpy()[0:5]]
predicted_labels = [str(l) for l in pre.cpu().numpy()[0:5]]
outputImage = stitch_images(np_images, np_adv_images, original_labels, predicted_labels)
import cv2
cv2.imwrite("./Data/Generated/stitchedImage.jpg", outputImage)
#torchvision.utils.save_image(outputImage, fp=f"./Data/Generated/stitchedImage.jpg", normalize=True)
pdf.image(f'./Data/Generated/stitchedImage.jpg', w=img_size)
# for i in range(max(5, adv_images.shape[0])):
# pdf.image(f'./Data/Generated/image_original_{i+1}.jpg', w=img_size, h=img_size)
# pdf.set_xy(pdf.get_x() + img_size + 10, pdf.get_y() - img_size)
# pdf.image(f'./Data/Generated/image_adv_{i+1}.jpg', w=img_size, h=img_size)
# pdf.ln(2)
# second column
## 2. Add the accuracy table
pdf.set_xy(epw /2 +pdf.l_margin, top_y)
pdf.set_font("Times", "B", size=12)
pdf.cell(epw / 2 + 10, 10, txt=f"Top-5 Accuracy against attacks", border=0, ln=1) # ln: 커서 포지션을 다음줄로 바꾼다.
#pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
# Set column width to 1/4 of effective page width to distribute content
# evenly across table and page
col_width = epw/10
# Since we do not need to draw lines anymore, there is no need to separate
# headers from data matrix.
data = [['Vanilla']+vanilla_output,
['attacks'] + x_val,
['default'] + untargeted_output,
['targeted'] + targeted_output,]
pdf.set_font('Times','',10.0)
pdf.ln(0.5)
# Text height is the same as current font size
th = pdf.font_size
# Here we add more padding by passing 2*th as height
#pdf.set_xy(epw /2 +pdf.l_margin, top_y)
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
for row in data:
for datum in row:
# Enter data in colums
pdf.cell(col_width, 2*th, str(datum), border=1)
pdf.ln(2*th)
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
#####################
# 3. attack result graph
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
#pdf.set_xy(epw /2 +pdf.l_margin, top_y)
pdf.set_font("Times", "B", size=12)
pdf.cell(epw / 2 + 10, 10, f"Attack Results with graph", 0, 1)
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
pdf.image(f'./Data/Generated/graph.jpg', w=epw /2)
# 4. Advice
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
pdf.set_font("Times", "B", size=12)
pdf.cell(0, 10, f"Advise for your model robustness", 0, 1)
pdf.set_font("Helvetica", "I", 12)
#pdf.cell(w=0, h=0, txt=f"Your model is significantly weak against CW L2 Attack.Your model is significantly weak against CW L2 Attack. Your model is significantly weak against CW L2 Attack.Your model is significantly weak against CW L2 Attack.,Your model is significantly weak against CW L2 Attack", border=0, 1)
# pdf.write(h=5, txt=f"Your model is significantly weak against CW L2 Attack.Your model is significantly weak against CW L2 Attack. Your model is significantly weak against CW L2 Attack.Your model is significantly weak against CW L2 Attack.,Your model is significantly weak against CW L2 Attack")
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
advice_data={'0to10 accuracy attacks' : 'None1', '10to100 accuracy attacks' : ''}
advice = ['The robustness of your model can vary depending on the complexity of your dataset. ']
advice.append('Your model cannot defend against ' + advice_data['0to10 accuracy attacks'])
if advice_data['10to100 accuracy attacks'] == '':
    advice.append(' Your model is hardly robust to the given attacks. Is it properly trained? ')
else:
    advice.append(' But it is relatively robust against ' + advice_data['10to100 accuracy attacks'])
advice.append('\nThis weakness can be caused by hyperparameter settings, input bias, or model capacity, among other things.')
advice.append('If you think none of these are the issue, we recommend adversarial training with the adversarial examples provided.')
advice.append('\nTry again with the adversarially trained model and check the result. ')
advice.append('See more info in the attached papers and linked tensorboard.')
advice_sentence = ''
for i in advice:
advice_sentence = advice_sentence + i
pdf.multi_cell(w= epw / 2, h=5, txt=advice_sentence)
"""
"robustness about your model can vary considering your data sets complexity."
Your Model cannot defend against [0~10% accuracy attacks].
if[10~100% exits]
But relatively robust against [10~100% accruacy attacks] .
else
Your model is hardly robust to given attacks. Is this properly trained?
This weakness can be caused from setting hyper parameters, matbe input bias, or input capacity and so many more.
If you think none of this are your issues we recommend adversarial training with our adverarial examples provided.
Try again with adversarilly trained model and check out the result.
See more info in the attached papers and linked tensorboard.
"""
pdf.output("Benchmark Result.pdf")
``` |
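For the `--model Custom` path, the script imports a module named `custom_net` exposing a `my_model(pretrained=...)` factory and loads weights from `./my_model.pth`. Neither file is included in this dump, so the stub below is only illustrative; the architecture is an arbitrary placeholder:
```python
# custom_net.py - hypothetical module matching what main.py expects to import
import torch.nn as nn
from torchvision import models

def my_model(pretrained: bool = False) -> nn.Module:
    # any classifier would work here; a torchvision ResNet-18 is used purely as a placeholder
    return models.resnet18(pretrained=pretrained)
```
The matching weights file would then be produced with `torch.save(model.state_dict(), "my_model.pth")` after training.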
{
"source": "jonyfive/web-spider",
"score": 3
} |
#### File: jonyfive/web-spider/spider.py
```python
import os
import re
import openpyxl
import requests
from requests import adapters
import string
from docx import Document
from docx.enum.text import WD_BREAK
from bs4 import BeautifulSoup
# End
def extractor(): # Extract url strings from Excel file
# Set up openpyxl (Excel package for Python)
wb = openpyxl.load_workbook('MyWorkbook.xlsx')
sheet = wb['SheetName']
index = 0
count = 0
    for index in range(2, sheet.max_row + 1):  # iterate over data rows (assumes row 1 is a header); the original used a placeholder range(0, 0, 0), which raises ValueError
flag = 0
print('\n\n\n\n')
print('ITERATION:', index) # Print iteration in terminal
url = sheet['B%s' % index].value # Extract url
title = sheet['A%s' % index].value # Extract title
print(title) # Print title in terminal
if url != None: # Verify url is valid
s = requests.Session() # Send request to server
a = requests.adapters.HTTPAdapter(
max_retries=5) # Configure retries
s.mount('http://', a)
response = s.get(url)
html = response.text # Convert response format
# Initiate BeautifulSoup package for Python
soup = BeautifulSoup(html, "html.parser")
text = soup.get_text() # Get text from raw data
# Verify data is valid by title name
valid = bool(re.search('(?i)MyData ', title))
if valid == True:
print('IS VALID')
word_builder(soup, url, title) # Send raw data to be cleaned
return
def word_builder(soup, url, title): # Extract relevant content & clean
    flag = 0  # becomes 1 when content is found by the text-based search below
    # Remove html style elements
    for script in soup(["script", "style"]):
script.extract()
soup.prettify()
# Locate specific content within response in various ways
temp_text = soup.find_all(True, class_="ClassName") # By class name
if not temp_text:
temp_text = soup.find_all(id=['IDname']) # By ID name
if not temp_text:
temp_text = soup.find_all(text=re.compile(
'(?i)ContentName')) # By content name
flag = 1
if not temp_text:
return
# Some more clean up (Lines 78-83 based on StackOverflow post)
text = ''
if not flag:
for objects in temp_text:
text += objects.get_text(separator=u'\n')
# Remove spaces
lines = (line.strip() for line in text.splitlines())
# Reorder in line format
chunks = (phrase.strip()
for line in lines for phrase in line.split(" "))
# Remove blank lines
text = ' '.join(chunk for chunk in chunks if chunk)
word_function(title, url, text)
# Build word document
def word_function(title, url, text):
doc = Document()
doc.add_heading(title)
doc.add_paragraph('\n')
doc.add_paragraph(url)
doc.add_paragraph(text)
doc.add_paragraph('\n')
# Clean filename before saving
clean_filename = re.sub('[\\\\\r\n/:*?–"<>|]', '', title)
doc.save(clean_filename[:173] + '.docx')
print('DOCUMENT IS DONE\n\n\n\n')
return
if __name__ == '__main__':
extractor()
# ============================================================
``` |
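The whitespace clean-up in `word_builder` (split into lines, split lines into chunks, rejoin non-empty chunks) can be exercised on its own; the HTML snippet below is made up for the demonstration:
```python
from bs4 import BeautifulSoup

html = "<html><body><h1> Title </h1><p>  first   line </p><p> second line </p></body></html>"
soup = BeautifulSoup(html, "html.parser")
text = soup.get_text(separator=u'\n')
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
print(' '.join(chunk for chunk in chunks if chunk))  # -> "Title first line second line"
```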
{
"source": "jonyg80/youtube-dl",
"score": 2
} |
#### File: youtube_dl/extractor/abcnews.py
```python
from __future__ import unicode_literals
import re
from .amp import AMPIE
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
try_get,
)
class AbcNewsVideoIE(AMPIE):
IE_NAME = 'abcnews:video'
_VALID_URL = r'''(?x)
https?://
(?:
abcnews\.go\.com/
(?:
(?:[^/]+/)*video/(?P<display_id>[0-9a-z-]+)-|
video/(?:embed|itemfeed)\?.*?\bid=
)|
fivethirtyeight\.abcnews\.go\.com/video/embed/\d+/
)
(?P<id>\d+)
'''
_TESTS = [{
'url': 'http://abcnews.go.com/ThisWeek/video/week-exclusive-irans-foreign-minister-zarif-20411932',
'info_dict': {
'id': '20411932',
'ext': 'mp4',
'display_id': 'week-exclusive-irans-foreign-minister-zarif',
'title': '\'This Week\' Exclusive: Iran\'s Foreign Minister Zarif',
'description': '<NAME> goes one-on-one with Iranian Foreign Minister Dr. <NAME>.',
'duration': 180,
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1380454200,
'upload_date': '20130929',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://abcnews.go.com/video/embed?id=46979033',
'only_matching': True,
}, {
'url': 'http://abcnews.go.com/2020/video/2020-husband-stands-teacher-jail-student-affairs-26119478',
'only_matching': True,
}, {
'url': 'http://abcnews.go.com/video/itemfeed?id=46979033',
'only_matching': True,
}, {
'url': 'https://abcnews.go.com/GMA/News/video/history-christmas-story-67894761',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
video_id = mobj.group('id')
info_dict = self._extract_feed_info(
'http://abcnews.go.com/video/itemfeed?id=%s' % video_id)
info_dict.update({
'id': video_id,
'display_id': display_id,
})
return info_dict
class AbcNewsIE(InfoExtractor):
IE_NAME = 'abcnews'
_VALID_URL = r'https?://abcnews\.go\.com/(?:[^/]+/)+(?P<display_id>[0-9a-z-]+)/story\?id=(?P<id>\d+)'
_TESTS = [{
# Youtube Embeds
'url': 'https://abcnews.go.com/Entertainment/peter-billingsley-child-actor-christmas-story-hollywood-power/story?id=51286501',
'info_dict': {
'id': '51286501',
'title': "<NAME>: From child actor in 'A Christmas Story' to Hollywood power player",
'description': 'Billingsley went from a child actor to Hollywood power player.',
},
'playlist_count': 5,
}, {
'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818',
'info_dict': {
'id': '38897857',
'ext': 'mp4',
'title': '<NAME> Drops Hints For Secret Single',
'description': 'Lara Spencer reports the buzziest stories of the day in "GMA" Pop News.',
'upload_date': '20160505',
'timestamp': 1462442280,
},
'params': {
# m3u8 download
'skip_download': True,
# The embedded YouTube video is blocked due to copyright issues
'playlist_items': '1',
},
'add_ie': ['AbcNewsVideo'],
}, {
'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
'only_matching': True,
}, {
# inline.type == 'video'
'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
'only_matching': True,
}]
def _real_extract(self, url):
story_id = self._match_id(url)
webpage = self._download_webpage(url, story_id)
story = self._parse_json(self._search_regex(
r"window\['__abcnews__'\]\s*=\s*({.+?});",
webpage, 'data'), story_id)['page']['content']['story']['everscroll'][0]
article_contents = story.get('articleContents') or {}
def entries():
featured_video = story.get('featuredVideo') or {}
feed = try_get(featured_video, lambda x: x['video']['feed'])
if feed:
yield {
'_type': 'url',
'id': featured_video.get('id'),
'title': featured_video.get('name'),
'url': feed,
'thumbnail': featured_video.get('images'),
'description': featured_video.get('description'),
'timestamp': parse_iso8601(featured_video.get('uploadDate')),
'duration': parse_duration(featured_video.get('duration')),
'ie_key': AbcNewsVideoIE.ie_key(),
}
for inline in (article_contents.get('inlines') or []):
inline_type = inline.get('type')
if inline_type == 'iframe':
iframe_url = try_get(inline, lambda x: x['attrs']['src'])
if iframe_url:
yield self.url_result(iframe_url)
elif inline_type == 'video':
video_id = inline.get('id')
if video_id:
yield {
'_type': 'url',
'id': video_id,
'url': 'http://abcnews.go.com/video/embed?id=' + video_id,
'thumbnail': inline.get('imgSrc') or inline.get('imgDefault'),
'description': inline.get('description'),
'duration': parse_duration(inline.get('duration')),
'ie_key': AbcNewsVideoIE.ie_key(),
}
return self.playlist_result(
entries(), story_id, article_contents.get('headline'),
article_contents.get('subHead'))
```
#### File: youtube_dl/extractor/acast.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
clean_podcast_url,
int_or_none,
parse_iso8601,
)
class ACastBaseIE(InfoExtractor):
def _extract_episode(self, episode, show_info):
title = episode['title']
info = {
'id': episode['id'],
'display_id': episode.get('episodeUrl'),
'url': clean_podcast_url(episode['url']),
'title': title,
'description': clean_html(episode.get('description') or episode.get('summary')),
'thumbnail': episode.get('image'),
'timestamp': parse_iso8601(episode.get('publishDate')),
'duration': int_or_none(episode.get('duration')),
'filesize': int_or_none(episode.get('contentLength')),
'season_number': int_or_none(episode.get('season')),
'episode': title,
'episode_number': int_or_none(episode.get('episode')),
}
info.update(show_info)
return info
def _extract_show_info(self, show):
return {
'creator': show.get('author'),
'series': show.get('title'),
}
def _call_api(self, path, video_id, query=None):
return self._download_json(
'https://feeder.acast.com/api/v1/shows/' + path, video_id, query=query)
class ACastIE(ACastBaseIE):
IE_NAME = 'acast'
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?:embed|www)\.)?acast\.com/|
play\.acast\.com/s/
)
(?P<channel>[^/]+)/(?P<id>[^/#?]+)
'''
_TESTS = [{
'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
'md5': 'f5598f3ad1e4776fed12ec1407153e4b',
'info_dict': {
'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
'ext': 'mp3',
'title': '2. Raggarmordet - Röster ur det förflutna',
'description': 'md5:a992ae67f4d98f1c0141598f7bebbf67',
'timestamp': 1477346700,
'upload_date': '20161024',
'duration': 2766,
'creator': '<NAME> & <NAME>',
'series': 'Spår',
'episode': '2. Raggarmordet - Röster ur det förflutna',
}
}, {
'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
'only_matching': True,
}, {
'url': 'https://play.acast.com/s/rattegangspodden/s04e09styckmordetihelenelund-del2-2',
'only_matching': True,
}, {
'url': 'https://play.acast.com/s/sparpodcast/2a92b283-1a75-4ad8-8396-499c641de0d9',
'only_matching': True,
}]
def _real_extract(self, url):
channel, display_id = re.match(self._VALID_URL, url).groups()
episode = self._call_api(
'%s/episodes/%s' % (channel, display_id),
display_id, {'showInfo': 'true'})
return self._extract_episode(
episode, self._extract_show_info(episode.get('show') or {}))
class ACastChannelIE(ACastBaseIE):
IE_NAME = 'acast:channel'
_VALID_URL = r'''(?x)
https?://
(?:
(?:www\.)?acast\.com/|
play\.acast\.com/s/
)
(?P<id>[^/#?]+)
'''
_TESTS = [{
'url': 'https://www.acast.com/todayinfocus',
'info_dict': {
'id': '4efc5294-5385-4847-98bd-519799ce5786',
'title': 'Today in Focus',
'description': 'md5:c09ce28c91002ce4ffce71d6504abaae',
},
'playlist_mincount': 200,
}, {
'url': 'http://play.acast.com/s/ft-banking-weekly',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
def _real_extract(self, url):
show_slug = self._match_id(url)
show = self._call_api(show_slug, show_slug)
show_info = self._extract_show_info(show)
entries = []
for episode in (show.get('episodes') or []):
entries.append(self._extract_episode(episode, show_info))
return self.playlist_result(
entries, show.get('id'), show.get('title'), show.get('description'))
```
#### File: youtube_dl/extractor/animeondemand.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
extract_attributes,
ExtractorError,
url_or_none,
urlencode_postdata,
urljoin,
)
class AnimeOnDemandIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
_LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
_APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
_NETRC_MACHINE = 'animeondemand'
# German-speaking countries of Europe
_GEO_COUNTRIES = ['AT', 'CH', 'DE', 'LI', 'LU']
_TESTS = [{
# jap, OmU
'url': 'https://www.anime-on-demand.de/anime/161',
'info_dict': {
'id': '161',
'title': 'Grimgar, Ashes and Illusions (OmU)',
'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
},
'playlist_mincount': 4,
}, {
# Film wording is used instead of Episode, ger/jap, Dub/OmU
'url': 'https://www.anime-on-demand.de/anime/39',
'only_matching': True,
}, {
# Episodes without titles, jap, OmU
'url': 'https://www.anime-on-demand.de/anime/162',
'only_matching': True,
}, {
# ger/jap, Dub/OmU, account required
'url': 'https://www.anime-on-demand.de/anime/169',
'only_matching': True,
}, {
# Full length film, non-series, ger/jap, Dub/OmU, account required
'url': 'https://www.anime-on-demand.de/anime/185',
'only_matching': True,
}, {
# Flash videos
'url': 'https://www.anime-on-demand.de/anime/12',
'only_matching': True,
}]
def _login(self):
username, password = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
if '>Our licensing terms allow the distribution of animes only to German-speaking countries of Europe' in login_page:
self.raise_geo_restricted(
'%s is only available in German-speaking countries of Europe' % self.IE_NAME)
login_form = self._form_hidden_inputs('new_user', login_page)
login_form.update({
'user[login]': username,
'user[password]': password,
})
post_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
'post url', default=self._LOGIN_URL, group='url')
if not post_url.startswith('http'):
post_url = urljoin(self._LOGIN_URL, post_url)
response = self._download_webpage(
post_url, None, 'Logging in',
data=urlencode_postdata(login_form), headers={
'Referer': self._LOGIN_URL,
})
if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
error = self._search_regex(
r'<p[^>]+\bclass=(["\'])(?:(?!\1).)*\balert\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</p>',
response, 'error', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
def _real_initialize(self):
self._login()
def _real_extract(self, url):
anime_id = self._match_id(url)
webpage = self._download_webpage(url, anime_id)
if 'data-playlist=' not in webpage:
self._download_webpage(
self._APPLY_HTML5_URL, anime_id,
'Activating HTML5 beta', 'Unable to apply HTML5 beta')
webpage = self._download_webpage(url, anime_id)
csrf_token = self._html_search_meta(
'csrf-token', webpage, 'csrf token', fatal=True)
anime_title = self._html_search_regex(
r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
webpage, 'anime name')
anime_description = self._html_search_regex(
r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
webpage, 'anime description', default=None)
def extract_info(html, video_id, num=None):
title, description = [None] * 2
formats = []
for input_ in re.findall(
r'<input[^>]+class=["\'].*?streamstarter[^>]+>', html):
attributes = extract_attributes(input_)
title = attributes.get('data-dialog-header')
playlist_urls = []
for playlist_key in ('data-playlist', 'data-otherplaylist', 'data-stream'):
playlist_url = attributes.get(playlist_key)
if isinstance(playlist_url, compat_str) and re.match(
r'/?[\da-zA-Z]+', playlist_url):
playlist_urls.append(attributes[playlist_key])
if not playlist_urls:
continue
lang = attributes.get('data-lang')
lang_note = attributes.get('value')
for playlist_url in playlist_urls:
kind = self._search_regex(
r'videomaterialurl/\d+/([^/]+)/',
playlist_url, 'media kind', default=None)
format_id_list = []
if lang:
format_id_list.append(lang)
if kind:
format_id_list.append(kind)
if not format_id_list and num is not None:
format_id_list.append(compat_str(num))
format_id = '-'.join(format_id_list)
format_note = ', '.join(filter(None, (kind, lang_note)))
item_id_list = []
if format_id:
item_id_list.append(format_id)
item_id_list.append('videomaterial')
playlist = self._download_json(
urljoin(url, playlist_url), video_id,
'Downloading %s JSON' % ' '.join(item_id_list),
headers={
'X-Requested-With': 'XMLHttpRequest',
'X-CSRF-Token': csrf_token,
'Referer': url,
'Accept': 'application/json, text/javascript, */*; q=0.01',
}, fatal=False)
if not playlist:
continue
stream_url = url_or_none(playlist.get('streamurl'))
if stream_url:
rtmp = re.search(
r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+/))(?P<playpath>mp[34]:.+)',
stream_url)
if rtmp:
formats.append({
'url': rtmp.group('url'),
'app': rtmp.group('app'),
'play_path': rtmp.group('playpath'),
'page_url': url,
'player_url': 'https://www.anime-on-demand.de/assets/jwplayer.flash-55abfb34080700304d49125ce9ffb4a6.swf',
'rtmp_real_time': True,
'format_id': 'rtmp',
'ext': 'flv',
})
continue
start_video = playlist.get('startvideo', 0)
playlist = playlist.get('playlist')
if not playlist or not isinstance(playlist, list):
continue
playlist = playlist[start_video]
title = playlist.get('title')
if not title:
continue
description = playlist.get('description')
for source in playlist.get('sources', []):
file_ = source.get('file')
if not file_:
continue
ext = determine_ext(file_)
format_id_list = [lang, kind]
if ext == 'm3u8':
format_id_list.append('hls')
elif source.get('type') == 'video/dash' or ext == 'mpd':
format_id_list.append('dash')
format_id = '-'.join(filter(None, format_id_list))
if ext == 'm3u8':
file_formats = self._extract_m3u8_formats(
file_, video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False)
elif source.get('type') == 'video/dash' or ext == 'mpd':
continue
file_formats = self._extract_mpd_formats(
file_, video_id, mpd_id=format_id, fatal=False)
else:
continue
for f in file_formats:
f.update({
'language': lang,
'format_note': format_note,
})
formats.extend(file_formats)
return {
'title': title,
'description': description,
'formats': formats,
}
def extract_entries(html, video_id, common_info, num=None):
info = extract_info(html, video_id, num)
if info['formats']:
self._sort_formats(info['formats'])
f = common_info.copy()
f.update(info)
yield f
# Extract teaser/trailer only when full episode is not available
if not info['formats']:
m = re.search(
r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>(?P<kind>Teaser|Trailer)<',
html)
if m:
f = common_info.copy()
f.update({
'id': '%s-%s' % (f['id'], m.group('kind').lower()),
'title': m.group('title'),
'url': urljoin(url, m.group('href')),
})
yield f
def extract_episodes(html):
for num, episode_html in enumerate(re.findall(
r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', html), 1):
episodebox_title = self._search_regex(
(r'class="episodebox-title"[^>]+title=(["\'])(?P<title>.+?)\1',
r'class="episodebox-title"[^>]+>(?P<title>.+?)<'),
episode_html, 'episodebox title', default=None, group='title')
if not episodebox_title:
continue
episode_number = int(self._search_regex(
r'(?:Episode|Film)\s*(\d+)',
episodebox_title, 'episode number', default=num))
episode_title = self._search_regex(
r'(?:Episode|Film)\s*\d+\s*-\s*(.+)',
episodebox_title, 'episode title', default=None)
video_id = 'episode-%d' % episode_number
common_info = {
'id': video_id,
'series': anime_title,
'episode': episode_title,
'episode_number': episode_number,
}
for e in extract_entries(episode_html, video_id, common_info):
yield e
def extract_film(html, video_id):
common_info = {
'id': anime_id,
'title': anime_title,
'description': anime_description,
}
for e in extract_entries(html, video_id, common_info):
yield e
def entries():
has_episodes = False
for e in extract_episodes(webpage):
has_episodes = True
yield e
if not has_episodes:
for e in extract_film(webpage, anime_id):
yield e
return self.playlist_result(
entries(), anime_id, anime_title, anime_description)
```
#### File: youtube_dl/extractor/ccma.py
```python
from __future__ import unicode_literals
import calendar
import datetime
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
extract_timezone,
int_or_none,
parse_duration,
parse_resolution,
try_get,
url_or_none,
)
class CCMAIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ccma\.cat/(?:[^/]+/)*?(?P<type>video|audio)/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.ccma.cat/tv3/alacarta/lespot-de-la-marato-de-tv3/lespot-de-la-marato-de-tv3/video/5630208/',
'md5': '7296ca43977c8ea4469e719c609b0871',
'info_dict': {
'id': '5630208',
'ext': 'mp4',
'title': 'L\'espot de La Marató de TV3',
'description': 'md5:f12987f320e2f6e988e9908e4fe97765',
'timestamp': 1478608140,
'upload_date': '20161108',
'age_limit': 0,
}
}, {
'url': 'http://www.ccma.cat/catradio/alacarta/programa/el-consell-de-savis-analitza-el-derbi/audio/943685/',
'md5': 'fa3e38f269329a278271276330261425',
'info_dict': {
'id': '943685',
'ext': 'mp3',
'title': 'El Consell de Savis analitza el derbi',
'description': 'md5:e2a3648145f3241cb9c6b4b624033e53',
'upload_date': '20170512',
'timestamp': 1494622500,
'vcodec': 'none',
'categories': ['Esports'],
}
}, {
'url': 'http://www.ccma.cat/tv3/alacarta/crims/crims-josep-tallada-lespereu-me-capitol-1/video/6031387/',
'md5': 'b43c3d3486f430f3032b5b160d80cbc3',
'info_dict': {
'id': '6031387',
'ext': 'mp4',
'title': 'Crims - <NAME>, l\'"Espereu-me" (capítol 1)',
'description': 'md5:7cbdafb640da9d0d2c0f62bad1e74e60',
'timestamp': 1582577700,
'upload_date': '20200224',
'subtitles': 'mincount:4',
'age_limit': 16,
'series': 'Crims',
}
}]
def _real_extract(self, url):
media_type, media_id = re.match(self._VALID_URL, url).groups()
media = self._download_json(
'http://dinamics.ccma.cat/pvideo/media.jsp', media_id, query={
'media': media_type,
'idint': media_id,
})
formats = []
media_url = media['media']['url']
if isinstance(media_url, list):
for format_ in media_url:
format_url = url_or_none(format_.get('file'))
if not format_url:
continue
label = format_.get('label')
f = parse_resolution(label)
f.update({
'url': format_url,
'format_id': label,
})
formats.append(f)
else:
formats.append({
'url': media_url,
'vcodec': 'none' if media_type == 'audio' else None,
})
self._sort_formats(formats)
informacio = media['informacio']
title = informacio['titol']
durada = informacio.get('durada') or {}
duration = int_or_none(durada.get('milisegons'), 1000) or parse_duration(durada.get('text'))
tematica = try_get(informacio, lambda x: x['tematica']['text'])
timestamp = None
data_utc = try_get(informacio, lambda x: x['data_emissio']['utc'])
try:
timezone, data_utc = extract_timezone(data_utc)
timestamp = calendar.timegm((datetime.datetime.strptime(
data_utc, '%Y-%d-%mT%H:%M:%S') - timezone).timetuple())
except TypeError:
pass
subtitles = {}
subtitols = media.get('subtitols') or []
if isinstance(subtitols, dict):
subtitols = [subtitols]
for st in subtitols:
sub_url = st.get('url')
if sub_url:
subtitles.setdefault(
st.get('iso') or st.get('text') or 'ca', []).append({
'url': sub_url,
})
thumbnails = []
imatges = media.get('imatges', {})
if imatges:
thumbnail_url = imatges.get('url')
if thumbnail_url:
thumbnails = [{
'url': thumbnail_url,
'width': int_or_none(imatges.get('amplada')),
'height': int_or_none(imatges.get('alcada')),
}]
age_limit = None
codi_etic = try_get(informacio, lambda x: x['codi_etic']['id'])
if codi_etic:
codi_etic_s = codi_etic.split('_')
if len(codi_etic_s) == 2:
if codi_etic_s[1] == 'TP':
age_limit = 0
else:
age_limit = int_or_none(codi_etic_s[1])
return {
'id': media_id,
'title': title,
'description': clean_html(informacio.get('descripcio')),
'duration': duration,
'timestamp': timestamp,
'thumbnails': thumbnails,
'subtitles': subtitles,
'formats': formats,
'age_limit': age_limit,
'alt_title': informacio.get('titol_complet'),
'episode_number': int_or_none(informacio.get('capitol')),
'categories': [tematica] if tematica else None,
'series': informacio.get('programa'),
}
```
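Extractors such as `CCMAIE` above are normally driven through youtube-dl's public `YoutubeDL` API rather than instantiated directly. The sketch below is illustrative only, not part of the extractor module; it assumes youtube-dl is installed and that the ccma.cat endpoints are still reachable, and it runs the first test URL with downloading disabled.
```python
# Illustrative sketch, not part of the extractor module itself.
import youtube_dl

url = 'http://www.ccma.cat/tv3/alacarta/lespot-de-la-marato-de-tv3/lespot-de-la-marato-de-tv3/video/5630208/'
with youtube_dl.YoutubeDL({'quiet': True, 'skip_download': True}) as ydl:
    # extract_info() dispatches to CCMAIE via its _VALID_URL pattern
    info = ydl.extract_info(url, download=False)
    print(info['id'], info['title'], info.get('duration'))
```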
#### File: youtube_dl/extractor/comedycentral.py
```python
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?)/(?P<id>[0-9a-z]{6})'
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TESTS = [{
'url': 'http://www.cc.com/video-clips/5ke9v2/the-daily-show-with-trevor-noah-doc-rivers-and-steve-ballmer---the-nba-player-strike',
'md5': 'b8acb347177c680ff18a292aa2166f80',
'info_dict': {
'id': '89ccc86e-1b02-4f83-b0c9-1d9592ecd025',
'ext': 'mp4',
'title': 'The Daily Show with Trevor Noah|August 28, 2020|25|25149|Doc Rivers and Steve Ballmer - The NBA Player Strike',
'description': 'md5:5334307c433892b85f4f5e5ac9ef7498',
'timestamp': 1598670000,
'upload_date': '20200829',
},
}, {
'url': 'http://www.cc.com/episodes/pnzzci/drawn-together--american-idol--parody-clip-show-season-3-ep-314',
'only_matching': True,
}, {
'url': 'https://www.cc.com/video/k3sdvm/the-daily-show-with-jon-stewart-exclusive-the-fourth-estate',
'only_matching': True,
}]
class ComedyCentralTVIE(MTVServicesInfoExtractor):
_VALID_URL = r'https?://(?:www\.)?comedycentral\.tv/folgen/(?P<id>[0-9a-z]{6})'
_TESTS = [{
'url': 'https://www.comedycentral.tv/folgen/pxdpec/josh-investigates-klimawandel-staffel-1-ep-1',
'info_dict': {
'id': '15907dc3-ec3c-11e8-a442-0e40cf2fc285',
'ext': 'mp4',
'title': 'Josh Investigates',
'description': 'Steht uns das Ende der Welt bevor?',
},
}]
_FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'
_GEO_COUNTRIES = ['DE']
def _get_feed_query(self, uri):
return {
'accountOverride': 'intl.mtvi.com',
'arcEp': 'web.cc.tv',
'ep': 'b9032c3a',
'imageEp': 'web.cc.tv',
'mgid': uri,
}
```
#### File: youtube_dl/extractor/storyfire.py
```python
from __future__ import unicode_literals
import functools
from .common import InfoExtractor
from ..utils import (
# HEADRequest,
int_or_none,
OnDemandPagedList,
smuggle_url,
)
class StoryFireBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?storyfire\.com/'
def _call_api(self, path, video_id, resource, query=None):
return self._download_json(
'https://storyfire.com/app/%s/%s' % (path, video_id), video_id,
'Downloading %s JSON metadata' % resource, query=query)
def _parse_video(self, video):
title = video['title']
vimeo_id = self._search_regex(
r'https?://player\.vimeo\.com/external/(\d+)',
video['vimeoVideoURL'], 'vimeo id')
# video_url = self._request_webpage(
# HEADRequest(video['vimeoVideoURL']), video_id).geturl()
# formats = []
# for v_url, suffix in [(video_url, '_sep'), (video_url.replace('/sep/video/', '/video/'), '')]:
# formats.extend(self._extract_m3u8_formats(
# v_url, video_id, 'mp4', 'm3u8_native',
# m3u8_id='hls' + suffix, fatal=False))
# formats.extend(self._extract_mpd_formats(
# v_url.replace('.m3u8', '.mpd'), video_id,
# mpd_id='dash' + suffix, fatal=False))
# self._sort_formats(formats)
uploader_id = video.get('hostID')
return {
'_type': 'url_transparent',
'id': vimeo_id,
'title': title,
'description': video.get('description'),
'url': smuggle_url(
'https://player.vimeo.com/video/' + vimeo_id, {
'http_headers': {
'Referer': 'https://storyfire.com/',
}
}),
# 'formats': formats,
'thumbnail': video.get('storyImage'),
'view_count': int_or_none(video.get('views')),
'like_count': int_or_none(video.get('likesCount')),
'comment_count': int_or_none(video.get('commentsCount')),
'duration': int_or_none(video.get('videoDuration')),
'timestamp': int_or_none(video.get('publishDate')),
'uploader': video.get('username'),
'uploader_id': uploader_id,
'uploader_url': 'https://storyfire.com/user/%s/video' % uploader_id if uploader_id else None,
'episode_number': int_or_none(video.get('episodeNumber') or video.get('episode_number')),
}
class StoryFireIE(StoryFireBaseIE):
_VALID_URL = StoryFireBaseIE._VALID_URL_BASE + r'video-details/(?P<id>[0-9a-f]{24})'
_TEST = {
'url': 'https://storyfire.com/video-details/5df1d132b6378700117f9181',
'md5': 'caec54b9e4621186d6079c7ec100c1eb',
'info_dict': {
'id': '378954662',
'ext': 'mp4',
'title': 'Buzzfeed Teaches You About Memes',
'uploader_id': 'ntZAJFECERSgqHSxzonV5K2E89s1',
'timestamp': 1576129028,
'description': 'md5:0b4e28021548e144bed69bb7539e62ea',
'uploader': 'whang!',
'upload_date': '20191212',
'duration': 418,
'view_count': int,
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata']
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._call_api(
'generic/video-detail', video_id, 'video')['video']
return self._parse_video(video)
class StoryFireUserIE(StoryFireBaseIE):
_VALID_URL = StoryFireBaseIE._VALID_URL_BASE + r'user/(?P<id>[^/]+)/video'
_TEST = {
'url': 'https://storyfire.com/user/UQ986nFxmAWIgnkZQ0ftVhq4nOk2/video',
'info_dict': {
'id': 'UQ986nFxmAWIgnkZQ0ftVhq4nOk2',
},
'playlist_mincount': 151,
}
_PAGE_SIZE = 20
def _fetch_page(self, user_id, page):
videos = self._call_api(
'publicVideos', user_id, 'page %d' % (page + 1), {
'skip': page * self._PAGE_SIZE,
})['videos']
for video in videos:
yield self._parse_video(video)
def _real_extract(self, url):
user_id = self._match_id(url)
entries = OnDemandPagedList(functools.partial(
self._fetch_page, user_id), self._PAGE_SIZE)
return self.playlist_result(entries, user_id)
class StoryFireSeriesIE(StoryFireBaseIE):
_VALID_URL = StoryFireBaseIE._VALID_URL_BASE + r'write/series/stories/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://storyfire.com/write/series/stories/-Lq6MsuIHLODO6d2dDkr/',
'info_dict': {
'id': '-Lq6MsuIHLODO6d2dDkr',
},
'playlist_mincount': 13,
}, {
'url': 'https://storyfire.com/write/series/stories/the_mortal_one/',
'info_dict': {
'id': 'the_mortal_one',
},
'playlist_count': 0,
}]
def _extract_videos(self, stories):
for story in stories.values():
if story.get('hasVideo'):
yield self._parse_video(story)
def _real_extract(self, url):
series_id = self._match_id(url)
stories = self._call_api(
'seriesStories', series_id, 'series stories')
return self.playlist_result(self._extract_videos(stories), series_id)
```
#### File: youtube_dl/extractor/stv.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_str,
float_or_none,
int_or_none,
smuggle_url,
str_or_none,
try_get,
)
class STVPlayerIE(InfoExtractor):
IE_NAME = 'stv:player'
_VALID_URL = r'https?://player\.stv\.tv/(?P<type>episode|video)/(?P<id>[a-z0-9]{4})'
_TESTS = [{
# shortform
'url': 'https://player.stv.tv/video/4gwd/emmerdale/60-seconds-on-set-with-laura-norton/',
'md5': '5adf9439c31d554f8be0707c7abe7e0a',
'info_dict': {
'id': '5333973339001',
'ext': 'mp4',
'upload_date': '20170301',
'title': '60 seconds on set with Laura Norton',
'description': "How many questions can Laura - a.k.a <NAME> - answer in 60 seconds? Let\'s find out!",
'timestamp': 1488388054,
'uploader_id': '1486976045',
},
'skip': 'this resource is unavailable outside of the UK',
}, {
# episodes
'url': 'https://player.stv.tv/episode/4125/jennifer-saunders-memory-lane',
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1486976045/default_default/index.html?videoId=%s'
_PTYPE_MAP = {
'episode': 'episodes',
'video': 'shortform',
}
def _real_extract(self, url):
ptype, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, video_id, fatal=False) or ''
props = (self._parse_json(self._search_regex(
r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
webpage, 'next data', default='{}'), video_id,
fatal=False) or {}).get('props') or {}
player_api_cache = try_get(
props, lambda x: x['initialReduxState']['playerApiCache']) or {}
api_path, resp = None, {}
for k, v in player_api_cache.items():
if k.startswith('/episodes/') or k.startswith('/shortform/'):
api_path, resp = k, v
break
else:
episode_id = str_or_none(try_get(
props, lambda x: x['pageProps']['episodeId']))
api_path = '/%s/%s' % (self._PTYPE_MAP[ptype], episode_id or video_id)
result = resp.get('results')
if not result:
resp = self._download_json(
'https://player.api.stv.tv/v1' + api_path, video_id)
result = resp['results']
video = result['video']
video_id = compat_str(video['id'])
subtitles = {}
_subtitles = result.get('_subtitles') or {}
for ext, sub_url in _subtitles.items():
subtitles.setdefault('en', []).append({
'ext': 'vtt' if ext == 'webvtt' else ext,
'url': sub_url,
})
programme = result.get('programme') or {}
return {
'_type': 'url_transparent',
'id': video_id,
'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {'geo_countries': ['GB']}),
'description': result.get('summary'),
'duration': float_or_none(video.get('length'), 1000),
'subtitles': subtitles,
'view_count': int_or_none(result.get('views')),
'series': programme.get('name') or programme.get('shortName'),
'ie_key': 'BrightcoveNew',
}
```
#### File: youtube_dl/extractor/turner.py
```python
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
xpath_text,
int_or_none,
determine_ext,
float_or_none,
parse_duration,
xpath_attr,
update_url_query,
ExtractorError,
strip_or_none,
url_or_none,
)
class TurnerBaseIE(AdobePassIE):
_AKAMAI_SPE_TOKEN_CACHE = {}
def _extract_timestamp(self, video_data):
return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
if not token:
query = {
'path': secure_path,
}
if custom_tokenizer_query:
query.update(custom_tokenizer_query)
else:
query['videoId'] = content_id
if ap_data.get('auth_required'):
query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
auth = self._download_xml(
tokenizer_src, content_id, query=query)
error_msg = xpath_text(auth, 'error/msg')
if error_msg:
raise ExtractorError(error_msg, expected=True)
token = xpath_text(auth, 'token')
if not token:
return video_url
self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
return video_url + '?hdnea=' + token
def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}, fatal=False):
video_data = self._download_xml(
data_src, video_id,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=fatal)
if not video_data:
return {}
video_id = video_data.attrib['id']
title = xpath_text(video_data, 'headline', fatal=True)
content_id = xpath_text(video_data, 'contentId') or video_id
# rtmp_src = xpath_text(video_data, 'akamai/src')
# if rtmp_src:
# split_rtmp_src = rtmp_src.split(',')
# if len(split_rtmp_src) == 2:
# rtmp_src = split_rtmp_src[1]
# aifp = xpath_text(video_data, 'akamai/aifp', default='')
urls = []
formats = []
thumbnails = []
subtitles = {}
rex = re.compile(
r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
# Possible formats locations: files/file, files/groupFiles/files
# and maybe others
for video_file in video_data.findall('.//file'):
video_url = url_or_none(video_file.text.strip())
if not video_url:
continue
ext = determine_ext(video_url)
if video_url.startswith('/mp4:protected/'):
continue
# TODO Correct extraction for these files
# protected_path_data = path_data.get('protected')
# if not protected_path_data or not rtmp_src:
# continue
# protected_path = self._search_regex(
# r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
# auth = self._download_webpage(
# protected_path_data['tokenizer_src'], query={
# 'path': protected_path,
# 'videoId': content_id,
# 'aifp': aifp,
# })
# token = xpath_text(auth, 'token')
# if not token:
# continue
# video_url = rtmp_src + video_url + '?' + token
elif video_url.startswith('/secure/'):
secure_path_data = path_data.get('secure')
if not secure_path_data:
continue
video_url = self._add_akamai_spe_token(
secure_path_data['tokenizer_src'],
secure_path_data['media_src'] + video_url,
content_id, ap_data)
elif not re.match('https?://', video_url):
base_path_data = path_data.get(ext, path_data.get('default', {}))
media_src = base_path_data.get('media_src')
if not media_src:
continue
video_url = media_src + video_url
if video_url in urls:
continue
urls.append(video_url)
format_id = video_file.get('bitrate')
if ext in ('scc', 'srt', 'vtt'):
subtitles.setdefault('en', []).append({
'ext': ext,
'url': video_url,
})
elif ext == 'png':
thumbnails.append({
'id': format_id,
'url': video_url,
})
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
video_url, video_id, fatal=False))
elif re.match(r'https?://[^/]+\.akamaihd\.net/[iz]/', video_url):
formats.extend(self._extract_akamai_formats(
video_url, video_id, {
'hds': path_data.get('f4m', {}).get('host'),
# nba.cdn.turner.com, ht.cdn.turner.com, ht2.cdn.turner.com
# ht3.cdn.turner.com, i.cdn.turner.com, s.cdn.turner.com
# ssl.cdn.turner.com
'http': 'pmd.cdn.turner.com',
}))
elif ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4',
m3u8_id=format_id or 'hls', fatal=False)
if '/secure/' in video_url and '?hdnea=' in video_url:
for f in m3u8_formats:
f['_seekable'] = False
formats.extend(m3u8_formats)
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(video_url, {'hdcore': '3.7.0'}),
video_id, f4m_id=format_id or 'hds', fatal=False))
else:
f = {
'format_id': format_id,
'url': video_url,
'ext': ext,
}
mobj = rex.search(video_url)
if mobj:
f.update({
'width': int(mobj.group('width')),
'height': int(mobj.group('height')),
'tbr': int_or_none(mobj.group('bitrate')),
})
elif isinstance(format_id, compat_str):
if format_id.isdigit():
f['tbr'] = int(format_id)
else:
mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
if mobj:
if mobj.group(1) == 'audio':
f.update({
'vcodec': 'none',
'ext': 'm4a',
})
else:
f['tbr'] = int(mobj.group(1))
formats.append(f)
self._sort_formats(formats)
for source in video_data.findall('closedCaptions/source'):
for track in source.findall('track'):
track_url = url_or_none(track.get('url'))
if not track_url or track_url.endswith('/big'):
continue
lang = track.get('lang') or track.get('label') or 'en'
subtitles.setdefault(lang, []).append({
'url': track_url,
'ext': {
'scc': 'scc',
'webvtt': 'vtt',
'smptett': 'tt',
}.get(source.get('format'))
})
thumbnails.extend({
'id': image.get('cut') or image.get('name'),
'url': image.text,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in video_data.findall('images/image'))
is_live = xpath_text(video_data, 'isLive') == 'true'
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'thumbnail': xpath_text(video_data, 'poster'),
'description': strip_or_none(xpath_text(video_data, 'description')),
'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
'timestamp': self._extract_timestamp(video_data),
'upload_date': xpath_attr(video_data, 'metas', 'version'),
'series': xpath_text(video_data, 'showTitle'),
'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
'is_live': is_live,
}
def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
streams_data = self._download_json(
'http://medium.ngtv.io/media/%s/tv' % media_id,
media_id)['media']['tv']
duration = None
chapters = []
formats = []
for supported_type in ('unprotected', 'bulkaes'):
stream_data = streams_data.get(supported_type, {})
m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
if not m3u8_url:
continue
if stream_data.get('playlistProtection') == 'spe':
m3u8_url = self._add_akamai_spe_token(
'http://token.ngtv.io/token/token_spe',
m3u8_url, media_id, ap_data or {}, tokenizer_query)
formats.extend(self._extract_m3u8_formats(
m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
duration = float_or_none(stream_data.get('totalRuntime'))
if not chapters:
for chapter in stream_data.get('contentSegments', []):
start_time = float_or_none(chapter.get('start'))
chapter_duration = float_or_none(chapter.get('duration'))
if start_time is None or chapter_duration is None:
continue
chapters.append({
'start_time': start_time,
'end_time': start_time + chapter_duration,
})
self._sort_formats(formats)
return {
'formats': formats,
'chapters': chapters,
'duration': duration,
}
```
#### File: youtube_dl/extractor/yandexvideo.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
try_get,
url_or_none,
)
class YandexVideoIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:
yandex\.ru(?:/(?:portal/(?:video|efir)|efir))?/?\?.*?stream_id=|
frontend\.vh\.yandex\.ru/player/
)
(?P<id>(?:[\da-f]{32}|[\w-]{12}))
'''
_TESTS = [{
'url': 'https://yandex.ru/portal/video?stream_id=4dbb36ec4e0526d58f9f2dc8f0ecf374',
'md5': 'e02a05bfaf0d9615ef07ae3a10f4faf4',
'info_dict': {
'id': '4dbb36ec4e0526d58f9f2dc8f0ecf374',
'ext': 'mp4',
'title': 'Русский Вудсток - главный рок-фест в истории СССР / вДудь',
'description': 'md5:7d6b8d4bc4a3b9a56499916c1ea5b5fa',
'thumbnail': r're:^https?://',
'timestamp': 1549972939,
'duration': 5575,
'age_limit': 18,
'upload_date': '20190212',
'view_count': int,
'like_count': int,
'dislike_count': int,
},
}, {
'url': 'https://yandex.ru/portal/efir?stream_id=4dbb262b4fe5cf15a215de4f34eee34d&from=morda',
'only_matching': True,
}, {
'url': 'https://yandex.ru/?stream_id=4dbb262b4fe5cf15a215de4f34eee34d',
'only_matching': True,
}, {
'url': 'https://frontend.vh.yandex.ru/player/4dbb262b4fe5cf15a215de4f34eee34d?from=morda',
'only_matching': True,
}, {
# vod-episode, series episode
'url': 'https://yandex.ru/portal/video?stream_id=45b11db6e4b68797919c93751a938cee',
'only_matching': True,
}, {
# episode, sports
'url': 'https://yandex.ru/?stream_channel=1538487871&stream_id=4132a07f71fb0396be93d74b3477131d',
'only_matching': True,
}, {
# DASH with DRM
'url': 'https://yandex.ru/portal/video?from=morda&stream_id=485a92d94518d73a9d0ff778e13505f8',
'only_matching': True,
}, {
'url': 'https://yandex.ru/efir?stream_active=watching&stream_id=v7a2dZ-v5mSI&from_block=efir_newtab',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
player = try_get((self._download_json(
'https://frontend.vh.yandex.ru/graphql', video_id, data=('''{
player(content_id: "%s") {
computed_title
content_url
description
dislikes
duration
likes
program_title
release_date
release_date_ut
release_year
restriction_age
season
start_time
streams
thumbnail
title
views_count
}
}''' % video_id).encode(), fatal=False)), lambda x: x['player']['content'])
if not player or player.get('error'):
player = self._download_json(
'https://frontend.vh.yandex.ru/v23/player/%s.json' % video_id,
video_id, query={
'stream_options': 'hires',
'disable_trackings': 1,
})
content = player['content']
title = content.get('title') or content['computed_title']
formats = []
streams = content.get('streams') or []
streams.append({'url': content.get('content_url')})
for stream in streams:
content_url = url_or_none(stream.get('url'))
if not content_url:
continue
ext = determine_ext(content_url)
if ext == 'ismc':
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
content_url, video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
content_url, video_id, mpd_id='dash', fatal=False))
else:
formats.append({'url': content_url})
self._sort_formats(formats)
timestamp = (int_or_none(content.get('release_date'))
or int_or_none(content.get('release_date_ut'))
or int_or_none(content.get('start_time')))
season = content.get('season') or {}
return {
'id': video_id,
'title': title,
'description': content.get('description'),
'thumbnail': content.get('thumbnail'),
'timestamp': timestamp,
'duration': int_or_none(content.get('duration')),
'series': content.get('program_title'),
'age_limit': int_or_none(content.get('restriction_age')),
'view_count': int_or_none(content.get('views_count')),
'like_count': int_or_none(content.get('likes')),
'dislike_count': int_or_none(content.get('dislikes')),
'season_number': int_or_none(season.get('season_number')),
'season_id': season.get('id'),
'release_year': int_or_none(content.get('release_year')),
'formats': formats,
}
```
#### File: youtube_dl/extractor/youku.py
```python
from __future__ import unicode_literals
import random
import re
import string
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
get_element_by_class,
js_to_json,
str_or_none,
strip_jsonp,
)
class YoukuIE(InfoExtractor):
IE_NAME = 'youku'
IE_DESC = '优酷'
_VALID_URL = r'''(?x)
(?:
https?://(
(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
video\.tudou\.com/v/)|
youku:)
(?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
'''
_TESTS = [{
# MD5 is unstable
'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html',
'info_dict': {
'id': 'XMTc1ODE5Njcy',
'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.',
'ext': 'mp4',
'duration': 74.73,
'thumbnail': r're:^https?://.*',
'uploader': '。躲猫猫、',
'uploader_id': '36017967',
'uploader_url': 'http://i.youku.com/u/UMTQ0MDcxODY4',
'tags': list,
}
}, {
'url': 'http://player.youku.com/player.php/sid/XNDgyMDQ2NTQw/v.swf',
'only_matching': True,
}, {
'url': 'http://v.youku.com/v_show/id_XODgxNjg1Mzk2_ev_1.html',
'info_dict': {
'id': 'XODgxNjg1Mzk2',
'ext': 'mp4',
'title': '武媚娘传奇 85',
'duration': 1999.61,
'thumbnail': r're:^https?://.*',
'uploader': '疯狂豆花',
'uploader_id': '62583473',
'uploader_url': 'http://i.youku.com/u/UMjUwMzMzODky',
'tags': list,
},
}, {
'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html',
'info_dict': {
'id': 'XMTI1OTczNDM5Mg',
'ext': 'mp4',
'title': '花千骨 04',
'duration': 2363,
'thumbnail': r're:^https?://.*',
'uploader': '放剧场-花千骨',
'uploader_id': '772849359',
'uploader_url': 'http://i.youku.com/u/UMzA5MTM5NzQzNg==',
'tags': list,
},
}, {
'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html',
'note': 'Video protected with password',
'info_dict': {
'id': 'XNjA1NzA2Njgw',
'ext': 'mp4',
'title': '邢義田复旦讲座之想象中的胡人—从“左衽孔子”说起',
'duration': 7264.5,
'thumbnail': r're:^https?://.*',
'uploader': 'FoxJin1006',
'uploader_id': '322014285',
'uploader_url': 'http://i.youku.com/u/UMTI4ODA1NzE0MA==',
'tags': list,
},
'params': {
'videopassword': '<PASSWORD>',
},
}, {
# /play/get.json contains streams with "channel_type":"tail"
'url': 'http://v.youku.com/v_show/id_XOTUxMzg4NDMy.html',
'info_dict': {
'id': 'XOTUxMzg4NDMy',
'ext': 'mp4',
'title': '我的世界☆明月庄主☆车震猎杀☆杀人艺术Minecraft',
'duration': 702.08,
'thumbnail': r're:^https?://.*',
'uploader': '明月庄主moon',
'uploader_id': '38465621',
'uploader_url': 'http://i.youku.com/u/UMTUzODYyNDg0',
'tags': list,
},
}, {
'url': 'http://video.tudou.com/v/XMjIyNzAzMTQ4NA==.html?f=46177805',
'info_dict': {
'id': 'XMjIyNzAzMTQ4NA',
'ext': 'mp4',
'title': '卡马乔国足开大脚长传冲吊集锦',
'duration': 289,
'thumbnail': r're:^https?://.*',
'uploader': '阿卜杜拉之星',
'uploader_id': '2382249',
'uploader_url': 'http://i.youku.com/u/UOTUyODk5Ng==',
'tags': list,
},
}, {
'url': 'http://video.tudou.com/v/XMjE4ODI3OTg2MA==.html',
'only_matching': True,
}]
@staticmethod
def get_ysuid():
return '%d%s' % (int(time.time()), ''.join([
random.choice(string.ascii_letters) for i in range(3)]))
def get_format_name(self, fm):
_dict = {
'3gp': 'h6',
'3gphd': 'h5',
'flv': 'h4',
'flvhd': 'h4',
'mp4': 'h3',
'mp4hd': 'h3',
'mp4hd2': 'h4',
'mp4hd3': 'h4',
'hd2': 'h2',
'hd3': 'h1',
}
return _dict.get(fm)
def _real_extract(self, url):
video_id = self._match_id(url)
self._set_cookie('youku.com', '__ysuid', self.get_ysuid())
self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com')
_, urlh = self._download_webpage_handle(
'https://log.mmstat.com/eg.js', video_id, 'Retrieving cna info')
# The etag header is '"foobar"'; let's remove the double quotes
cna = urlh.headers['etag'][1:-1]
# request basic data
basic_data_params = {
'vid': video_id,
'ccode': '0532',
'client_ip': '192.168.1.1',
'utid': cna,
'client_ts': time.time() / 1000,
}
video_password = self._downloader.params.get('videopassword')
if video_password:
basic_data_params['password'] = video_password
headers = {
'Referer': url,
}
headers.update(self.geo_verification_headers())
data = self._download_json(
'https://ups.youku.com/ups/get.json', video_id,
'Downloading JSON metadata',
query=basic_data_params, headers=headers)['data']
error = data.get('error')
if error:
error_note = error.get('note')
if error_note is not None and '因版权原因无法观看此视频' in error_note:
raise ExtractorError(
'Youku said: Sorry, this video is available in China only', expected=True)
elif error_note and '该视频被设为私密' in error_note:
raise ExtractorError(
'Youku said: Sorry, this video is private', expected=True)
else:
msg = 'Youku server reported error %i' % error.get('code')
if error_note is not None:
msg += ': ' + error_note
raise ExtractorError(msg)
# get video title
video_data = data['video']
title = video_data['title']
formats = [{
'url': stream['m3u8_url'],
'format_id': self.get_format_name(stream.get('stream_type')),
'ext': 'mp4',
'protocol': 'm3u8_native',
'filesize': int(stream.get('size')),
'width': stream.get('width'),
'height': stream.get('height'),
} for stream in data['stream'] if stream.get('channel_type') != 'tail']
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'duration': video_data.get('seconds'),
'thumbnail': video_data.get('logo'),
'uploader': video_data.get('username'),
'uploader_id': str_or_none(video_data.get('userid')),
'uploader_url': data.get('uploader', {}).get('homepage'),
'tags': video_data.get('tags'),
}
class YoukuShowIE(InfoExtractor):
_VALID_URL = r'https?://list\.youku\.com/show/id_(?P<id>[0-9a-z]+)\.html'
IE_NAME = 'youku:show'
_TESTS = [{
'url': 'http://list.youku.com/show/id_zc7c670be07ff11e48b3f.html',
'info_dict': {
'id': 'zc7c670be07ff11e48b3f',
'title': '花千骨 DVD版',
'description': 'md5:a1ae6f5618571bbeb5c9821f9c81b558',
},
'playlist_count': 50,
}, {
# Episode number not starting from 1
'url': 'http://list.youku.com/show/id_zefbfbd70efbfbd780bef.html',
'info_dict': {
'id': 'zefbfbd70efbfbd780bef',
'title': '超级飞侠3',
'description': 'md5:275715156abebe5ccc2a1992e9d56b98',
},
'playlist_count': 24,
}, {
# Ongoing playlist. The initial page is the last one
'url': 'http://list.youku.com/show/id_za7c275ecd7b411e1a19e.html',
'only_matching': True,
}, {
# No data-id value.
'url': 'http://list.youku.com/show/id_zefbfbd61237fefbfbdef.html',
'only_matching': True,
}, {
# Wrong number of reload_id.
'url': 'http://list.youku.com/show/id_z20eb4acaf5c211e3b2ad.html',
'only_matching': True,
}]
def _extract_entries(self, playlist_data_url, show_id, note, query):
query['callback'] = 'cb'
playlist_data = self._download_json(
playlist_data_url, show_id, query=query, note=note,
transform_source=lambda s: js_to_json(strip_jsonp(s))).get('html')
if playlist_data is None:
return [None, None]
drama_list = (get_element_by_class('p-drama-grid', playlist_data)
or get_element_by_class('p-drama-half-row', playlist_data))
if drama_list is None:
raise ExtractorError('No episodes found')
video_urls = re.findall(r'<a[^>]+href="([^"]+)"', drama_list)
return playlist_data, [
self.url_result(self._proto_relative_url(video_url, 'http:'), YoukuIE.ie_key())
for video_url in video_urls]
def _real_extract(self, url):
show_id = self._match_id(url)
webpage = self._download_webpage(url, show_id)
entries = []
page_config = self._parse_json(self._search_regex(
r'var\s+PageConfig\s*=\s*({.+});', webpage, 'page config'),
show_id, transform_source=js_to_json)
first_page, initial_entries = self._extract_entries(
'http://list.youku.com/show/module', show_id,
note='Downloading initial playlist data page',
query={
'id': page_config['showid'],
'tab': 'showInfo',
})
first_page_reload_id = self._html_search_regex(
r'<div[^>]+id="(reload_\d+)', first_page, 'first page reload id')
# The first reload_id has the same items as first_page
reload_ids = re.findall('<li[^>]+data-id="([^"]+)">', first_page)
entries.extend(initial_entries)
for idx, reload_id in enumerate(reload_ids):
if reload_id == first_page_reload_id:
continue
_, new_entries = self._extract_entries(
'http://list.youku.com/show/episode', show_id,
note='Downloading playlist data page %d' % (idx + 1),
query={
'id': page_config['showid'],
'stage': reload_id,
})
if new_entries is not None:
entries.extend(new_entries)
desc = self._html_search_meta('description', webpage, fatal=False)
playlist_title = desc.split(',')[0] if desc else None
detail_li = get_element_by_class('p-intro', webpage)
playlist_description = get_element_by_class(
'intro-more', detail_li) if detail_li else None
return self.playlist_result(
entries, show_id, playlist_title, playlist_description)
``` |
{
"source": "jonyii/gacoxc",
"score": 2
} |
#### File: gacoxc/gacoxc/garminConnect.py
```python
from urllib.parse import urlencode
import urllib.request, urllib.error, urllib.parse, http.cookiejar
import json
from collections import OrderedDict
class garminConnect(object):
# Maximum number of activities you can request at once. Set and enforced by Garmin.
gc_max_req_limit = 100
# URLs for various services.
gc_url_login = 'https://sso.garmin.com/sso/login?service=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&webhost=olaxpw-connect04&source=https%3A%2F%2Fconnect.garmin.com%2Fen-US%2Fsignin&redirectAfterAccountLoginUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&redirectAfterAccountCreationUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&gauthHost=https%3A%2F%2Fsso.garmin.com%2Fsso&locale=en_US&id=gauth-widget&cssUrl=https%3A%2F%2Fstatic.garmincdn.com%2Fcom.garmin.connect%2Fui%2Fcss%2Fgauth-custom-v1.1-min.css&clientId=GarminConnect&rememberMeShown=true&rememberMeChecked=false&createAccountShown=true&openCreateAccount=false&usernameShown=false&displayNameShown=false&consumeServiceTicket=false&initialFocus=true&embedWidget=false&generateExtraServiceTicket=false'
gc_url_post_auth = 'https://connect.garmin.com/post-auth/login?'
gc_url_logout = 'https://connect.garmin.com/auth/logout'
gc_url_search = 'https://connect.garmin.com/proxy/activity-search-service-1.0/json/activities?'
gc_url_gpx_activity = 'https://connect.garmin.com/proxy/download-service/export/gpx/activity/'
gc_url_tcx_activity = 'https://connect.garmin.com/proxy/download-service/export/tcx/activity/'
gc_url_kml_activity = 'https://connect.garmin.com/proxy/download-service/export/kml/activity/'
gc_url_csv_activity = 'https://connect.garmin.com/proxy/download-service/export/csv/activity/'
gc_url_original_activity = 'https://connect.garmin.com/proxy/download-service/files/activity/'
gc_type_to_download_url = {
'gpx': gc_url_gpx_activity,
'tcx': gc_url_tcx_activity,
'kml': gc_url_kml_activity,
'csv': gc_url_csv_activity,
'orig': gc_url_original_activity,
}
def __init__(self, username, password, verbose=0, login=True):
self.cookie_jar = http.cookiejar.CookieJar()
self.url_opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookie_jar))
self.v = verbose
self.username = username
self.password = password
self.sum = []
self.activities = []
self.records = {}
self.loggedin = False
if login:
self.login()
def login(self):
if self.v: print("Logging in...")
# prep the session and log in
self.loggedin = True
try:
self._http_req(self.gc_url_login) # init cookies
self._http_req(self.gc_url_login, {
'username': self.username,
'password': <PASSWORD>,
'embed': 'true',
'lt': 'e1s1',
'_eventId': 'submit',
'displayNameRequired': 'false'} )
login_ticket = 'ST-0' + ([c.value for c in self.cookie_jar if c.name == 'CASTGC'][0])[4:]
except:
self.loggedin = False
raise Exception('Did not get a ticket cookie. Cannot log in. Did you enter the correct username and password?')
self._http_req(self.gc_url_post_auth + 'ticket=' + login_ticket)
if self.v: print("Logged in!")
self.loggedin = True
def logout(self):
if self.loggedin:
if self.v: print("Logging out...")
self._http_req(self.gc_url_logout)
self.loggedin = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.logout()
def __iter__(self):
for a in self.activities:
yield a
def __len__(self):
return len(self.sum)
def __getitem__(self, key):
return self.activities[key]
# url is a string, post is a dictionary of POST parameters, headers is a dictionary of headers.
def _http_req(self, url, post=None, headers={}):
if not self.loggedin:
self.login()
request = urllib.request.Request(url)
request.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/1337 Safari/537.36') # Tell Garmin we're some supported browser.
for header_key, header_value in list(headers.items()):
request.add_header(header_key, header_value)
if post:
post = urlencode(post).encode("utf-8") # Convert dictionary to POST parameter string.
if self.v > 1: print("requesting url %s with post data %s" %(url, post))
response = self.url_opener.open(request, data=post)
#if response.getcode() != 200:
# raise Exception('Bad return code (' + str(response.getcode()) + ') for: ' + url)
return response.read()
def download_activities(self, how_many=1, getAll=False, ignoreNonGPS=False, sortOrder='DESC', sortField='activitySummaryBeginTimestampGmt'):
"""
if sortOrder='DESC': start downloading from the latest activity
"""
self.activities = []
downloaded = 0
if getAll:
how_many = self.gc_max_req_limit
while downloaded < how_many:
to_download = min(how_many-downloaded, self.gc_max_req_limit)
result = self._http_req(self.gc_url_search + urlencode({
'start': downloaded,
'limit': to_download,
'ignoreNonGPS': ignoreNonGPS,
'sortOrder': sortOrder,
'sortField': sortField,
})
)
json_results = json.loads(result) # TODO: Catch possible exceptions here.
if getAll:
how_many = int(json_results['results']['search']['totalFound'])
getAll = False
actv = json_results['results']['activities']
self.activities.extend(actv)
downloaded += len(actv)
if len(actv) < to_download:
if self.v>1: print("Got only %d activities, orig requested %d, total got %d / expected %d" %(len(actv), to_download, downloaded, how_many))
break # we are done here
if self.v>1: print("Downloaded %d / %d activities" %(downloaded, how_many))
if self.v: print("Downloaded %d activities" %(len(self.activities)))
self.sum = self._gen_summary()
return self.activities
def summary(self, firstn=None, as_string=True):
"return first n activities"
firstn = len(self.sum) if firstn is None else firstn
if as_string:
return "\n".join([str(i)+"\t"+"\t".join([v for k,v in list(a.items())]) for i,a in enumerate(self.sum[:firstn])])
else:
return self.sum[:firstn]
def _gen_summary(self):
def getfield(a, field, preferred='_'):
if field in a:
aa=a[field]
for f in [preferred, 'display', 'value']:
if f in aa:
return aa[f]
return 'n/a'
summ = []
for act in self.activities:
a = act['activity']
asum = OrderedDict([
('id', a['activityId']),
('start', getfield(a, 'beginTimestamp')),
('duration', getfield(a, 'sumElapsedDuration')),
('distance', getfield(a, 'sumDistance', 'withUnit')),
('name', getfield(a, 'activityName')),
])
summ.append(asum)
return summ
def get_cached_record(self, aid, fmt):
if fmt=='orig': fmt = 'zip'
return self.records.get(aid,{}).get(fmt)
def cache_record(self, aid, fmt, data):
if data is None:
data=''
if aid not in self.records:
self.records[aid] = {fmt: data}
else:
self.records[aid][fmt] = data
def actid(self, activity):
"get activity id from one of activity representations"
if isinstance(activity, str):
return activity
elif isinstance(activity, int):
return self.sum[activity]['id']
elif 'id' in activity: # from summary
return activity['id']
elif 'activity' in activity: # from original activity
return activity['activity']['activityId']
else:
raise Exception("No activity ID found in supplied activity")
def download_record(self, act, fmt='orig'):
"fmt could be gpx|tcx|kml|original|kml|csv|... whatever is supported or user cached"
aid = self.actid(act)
# check the cache
data = self.get_cached_record(aid, fmt)
if data is not None:
return data
if self.v: print("Downloading %s record for activity id %s" % (fmt, aid))
# we allow to cache exotic stuff, hence check format only here
downurl = self.gc_type_to_download_url.get(fmt, None)
if downurl is None:
raise Exception('Unrecognized format.')
try:
data = self._http_req(downurl + str(aid))
except urllib.error.HTTPError as e:
# Handle expected (though unfortunate) error codes; die on unexpected ones.
if e.code == 204:
print("ERROR: activity has no GPS data")
elif e.code == 500 and fmt == 'tcx':
print('ERROR: Garmin did not generate a TCX file for this activity...')
elif e.code == 404:
print('ERROR: Garmin did not provide activity data...')
elif e.code == 410 and fmt == 'gpx':
print('ERROR: Garmin did not provide gpx file...')
elif float(act['activity'].get('sumDistance', {}).get('value', 0.0)) == 0.0:
print('ERROR: This activity has zero distance in its description, so it probably has no records to download...')
else:
raise Exception('Failed. Got an unexpected HTTP error (' + str(e.code) + ').')
data = None
self.cache_record(aid, fmt, data)
return data
import os
import csv
import json
#import pprint
class cachingGarminConnect(garminConnect):
gc_sum_file='summary.csv'
def __init__(self, username, password, verbose=0, cache_dir='~/.garmin_connect_cache', update=False):
self.cache_dir = os.path.expanduser(cache_dir)
if not os.path.isdir(self.cache_dir):
if verbose: print("Creating cache directory ", self.cache_dir)
os.mkdir(self.cache_dir)
super(cachingGarminConnect, self).__init__(username, password, verbose, login=update)
self.sum_file = os.path.join(self.cache_dir, self.gc_sum_file)
self._load_sum()
if update:
self.update()
def _load_sum(self):
self.sum = []
if os.path.isfile(self.sum_file):
if self.v: print("Loading cached acitivy summary from ", self.sum_file)
with open(self.sum_file) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
self.sum.append(row)
def _load_activity(self, aid, fmt='json'):
if fmt=='orig': fmt = 'zip'
adir = os.path.join(self.cache_dir, aid)
if not os.path.isdir(adir):
print("ERROR: activity", aid, 'is not cached on disk; the cache is likely corrupted (workaround: ignore the cache)')
# TODO download the activity - files were deleted?
return
ajsonf = os.path.join(adir, aid+'.'+fmt)
if os.path.isfile(ajsonf):
if self.v: print(" >> loading cached "+aid+"."+fmt)
openlike = 'r' if fmt in ['json','igc'] else 'rb'
with open(ajsonf, openlike) as f:
if fmt=='json':
self.activities.append(json.loads(f.read()))
else:
data = f.read()
self.cache_record(aid, fmt, data)
def get_cached_record(self, aid, fmt):
if fmt=='orig': fmt = 'zip'
data = self.records.get(aid,{}).get(fmt)
if data is None: # look for cache on disk
self._load_activity(aid, fmt)
data = self.records.get(aid,{}).get(fmt)
return data
def __iter__(self):
for i,a in enumerate(self.sum):
if i >= len(self.activities):
self._load_activity(a['id'])
yield self.activities[i]
def __getitem__(self, key):
if key >= len(self.activities) and key < len(self.sum):
print(len(self.activities), key+1)
for i in range(len(self.activities), key+1):
self._load_activity(self.sum[i]['id'])
return self.activities[key]
def clear_cache(self, everything=False, activities=False, records=False):
"clear some part of cache"
if activities or everything:
self.sum=[]
self.activities = []
if records or everything:
self.records = {}
def update(self):
if not self.sum:
self.download_activities(getAll=True)
else:
# improve filtering to get activities from certain commit time
# we naively re-download activities until we get all we need
# we'll download ALL activities in case the latest one was deleted; it will stay cached on disk if it's there, but the summary will be cleaned
need=10
lastest_id = self.sum[0]['id']
tmpsum = self.sum
while need>0:
self.download_activities(need)
for i,act in enumerate(self.activities):
if lastest_id == act['activity']['activityId']:
self.activities = self.activities[:i] # rest will come from cache, TODO is it ok?
self.sum = self.sum[:i]
self.sum.extend(tmpsum)
need = 0
break
if len(self.activities) != need:
break
if need == 10:
need = self.gc_max_req_limit
elif need>0:
need += self.gc_max_req_limit
self.save()
def save(self, only_aid=None):
"if aid set, set particular activity id only"
if only_aid is None:
# save sum file
if self.v: print("Saving activity summary to ", self.sum_file)
with open(self.sum_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=[x for x in self.sum[0].keys()])
writer.writeheader()
for s in self.sum:
writer.writerow(s)
# save individual acitivities, but only ones downloaded
for act in self.activities:
aid = act['activity']['activityId']
if only_aid is not None and aid != only_aid:
continue
adir = os.path.join(self.cache_dir, aid)
if not os.path.isdir(adir):
os.mkdir(adir)
ajsonf = os.path.join(adir, aid+'.json')
if not os.path.isfile(ajsonf):
if self.v: print("Saving activity id", aid,">> details")
with open(ajsonf, 'w') as f:
f.write(json.dumps(act, indent=4))
#f.write(pprint.pformat(act))
# save downloaded records
for aid, recs in self.records.items():
if only_aid is not None and aid != only_aid:
continue
adir = os.path.join(self.cache_dir, aid)
if not os.path.isdir(adir):
os.mkdir(adir)
for k, v in recs.items():
if k=='orig': k = 'zip'
acf = os.path.join(adir, aid+'.'+k)
if not os.path.isfile(acf):
if self.v: print("Saving activity id",aid,">>",k,"record")
openlike = 'w' if isinstance(v, str) else 'wb'
with open(acf, openlike) as f:
f.write(v)
``` |
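A minimal usage sketch for the classes above, assuming the file is importable as `garminConnect` and that real Garmin Connect credentials replace the placeholder values; it is not part of the original module.
```python
# Hypothetical usage; 'user@example.com' / 'not-a-real-password' are placeholders.
from garminConnect import cachingGarminConnect

with cachingGarminConnect('user@example.com', 'not-a-real-password',
                          verbose=1, update=True) as gc:
    print(gc.summary(firstn=5))         # newest five activities, tab-separated
    gpx = gc.download_record(0, 'gpx')  # GPX record of the most recent activity
    gc.save()                           # persist summary, details and records to the cache dir
```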
{
"source": "Jony-Jas/tts-custom",
"score": 3
} |
#### File: tts-custom/toolbox/saveFile.py
```python
import json
class SaveFile:
def save():
f = open ('data.json', "r")
data = json.loads(f.read())
id = data["id"]
f.close()
fpath = "C:/Users/jonyj/Documents/Project/Ongoing/Hackathon/Voice Cloning/cloned_audios/"+id
return fpath
``` |
{
"source": "jonykarki/hamroscraper",
"score": 3
} |
#### File: hamroscraper/FB Chat Bot Mitsuku/main.py
```python
from fbchat import Client
from fbchat.models import *
import requests
import json
import time
payload = {
'Accept': "*/*",
'Accept-Encoding': "gzip, deflate, br",
'Accept-Language': "en-US, en",
'q': "0.9",
'Connection': 'keep-alive',
'Content-Length': '160',
'Content-type': 'application/x-www-form-urlencoded',
'Host': "miapi.pandorabots.com",
'Origin': "https://www.pandorabots.com",
'Referer': "https://www.pandorabots.com/mitsuku/",
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36"
}
headers = {}
URL = "https://miapi.pandorabots.com/talk"
PARAMS = {'input': 'lol',
'sessionid': 403726733,
'channel': 6,
'botkey': '<KEY>',
'client_name': 'cw16e8e9d0d10'}
def get_reply(message):
PARAMS['input'] = message
# to send the form data use session
session = requests.Session()
r = session.post(url=URL, data=PARAMS, headers=payload)
response = json.loads(r.text)['responses'][0]
print("Response: " + response)
return response
# while(True):
# message = input("Enter the message ")
# PARAMS['input'] = message
# print(PARAMS)
# # to send the form data use session
# session = requests.Session()
# r = session.post(url=URL, data=PARAMS, headers=payload)
# print(json.loads(r.text)['responses'][0])
# #########
# REPLACE WITH EMAIL AND PASSWORD
# #########
EMAIL = ""
PASSWORD = ""
class EchoBot(Client):
def onMessage(self, mid, author_id, message_object, thread_id, thread_type, **kwargs):
self.markAsRead(author_id)
msg = ""
try:
print("Message: " + message_object.text)
except:
pass
# don't reply on our own messages
if author_id == ***************:
msg = get_reply(message_object.text)
self.reactToMessage(mid, MessageReaction.ANGRY)
self.send(Message(text=msg), thread_id=thread_id,
thread_type=thread_type)
client = EchoBot(EMAIL, PASSWORD)
client.listen()
```
#### File: jonykarki/hamroscraper/ISBN_validate.py
```python
def validate_isbn(isbn):
if len(isbn) == 10:
# ten
sum = 0
for i in range(len(isbn)):
sum += int(isbn[i]) * (10-i)
if sum % 11 == 0:
print("Valid ISBN!")
else:
print("Invalid ISBN!")
elif len(isbn) == 13:
sum = 0
for i in range(len(isbn)):
if i % 2 == 0:  # digits at even (0-based) positions carry weight 1, odd positions weight 3
sum += int(isbn[i])
else:
sum += int(isbn[i]) * 3
if sum % 10 == 0:
print("Valid ISBN!")
else:
print("Invalid ISBN")
if __name__ == "__main__":
isbn = input("Enter the ISBN number: ")
validate_isbn(isbn)
```
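A few quick checks against the standard example ISBNs (illustrative only; note that the ISBN-10 branch above does not handle a trailing 'X' check digit):
```python
# Sketch only. 0-306-40615-2 and its ISBN-13 form 978-0-306-40615-7 are the usual textbook examples.
from ISBN_validate import validate_isbn  # assumes the file above is importable under this name

validate_isbn('0306406152')     # prints "Valid ISBN!"  (weighted sum 132, divisible by 11)
validate_isbn('9780306406157')  # prints "Valid ISBN!"  (weighted sum 100, divisible by 10)
validate_isbn('9780306406158')  # prints "Invalid ISBN" (check digit altered)
```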
#### File: hamroscraper/music player GUI/jk-music-player.py
```python
import webbrowser
import urllib.request
import bs4 as bs
# import all the tkinter modules as well
from tkinter import *
from tkinter import ttk
# root window for our app
root = Tk()
# title of the windows
root.title("JK Music Player")
# make the window fixed size
# the window is not resizable
root.geometry("400x100+300+300")
root.resizable(width=False, height=False) # disable resizing
# GUI PART OF THE APP
# main label
Label(root, text="Enter the Name of the song you want to play").grid(row=0, sticky=W, padx=4)
# song should be called later on to find the text inside it
# so we use the following format
song = Entry(root, width=65)
song.grid(row=1, column=0, sticky=W, padx=4)
# this is the label at last
label1 = Label(root)
def play_song(event):
# user_agent for sending headers with the request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
# initialize the header with the fake user-agent
headers={'User-Agent':user_agent,}
# here we fetch the song name
# data from the label is in string format
song_name = song.get()
# youtube query
# replace all the spaces with plus(+) sign
youtube_query = song_name.replace(' ', '+')
# find the best match
# go to youtube and select the first one
url = 'https://www.youtube.com/results?search_query=' + youtube_query
# get the html
# send the request along with the headers
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
source_code = response.read()
# format all the code
soup = bs.BeautifulSoup(source_code, 'lxml')
# get the url
# of all the soup
soup_two = soup.find(attrs={'class': 'item-section'}) #item-section is the part that contains the youtube results
#print(soup_two) ----> Test
# initialize an empty list for now which will
# be filled later on the loop below
songs_list = []
# loop through the data inside soup_two
# only search for 'a' tag
for url in soup_two.find_all('a'):
# find all the hrefs from the links found in soup_two
songs = url.get('href')
# in each of the above loop go to the empty list
# that we initialized before and add the items into it
# We'll later on get the first item from the list to display
songs_list.append(songs)
# the main url of the video we want
# Youtube Main Page
prefix = 'https://www.youtube.com'
# intent to open the link of the video in the client's web browser
webbrowser.open(prefix + songs_list[0])
# loaded message
label1['text'] = '''Enjoy the Song'''
# play_button
play_button = Button(root, text="Play The Song")
# Button_Event Handling
play_button.bind("<Button-1>", play_song) # whenever there's a single left mouse click , call the play_song function
play_button.grid(row=3, column=0, padx=6)
# Info label at the end
label1.grid(row=4, sticky=W, padx=4)
label1['text'] = '''Takes Few Seconds To Load'''
# loop the windows
root.mainloop()
# THE END #
``` |
{
"source": "Jonyker/APGeneric",
"score": 3
} |
#### File: Jonyker/APGeneric/submit.py
```python
import subprocess
# check the working tree status
def status():
archiveCmd = 'git status'
process = subprocess.Popen(archiveCmd,shell=True)
process.wait()
archiveReturnCode = process.returncode
if archiveReturnCode != 0:
print "Error while checking the working tree status"
else:
add()
return True
# add changes to the staging area
def add():
archiveCmd = 'git add --all'
process = subprocess.Popen(archiveCmd,shell=True)
process.wait()
archiveReturnCode = process.returncode
if archiveReturnCode != 0:
print "Error adding changes to the staging area"
else:
commit()
# commit to the local repository
def commit():
inputNote = raw_input("Enter the commit message: ").decode('utf-8')
archiveCmd = "git commit -m ' " + inputNote + "'"
process = subprocess.Popen(archiveCmd,shell=True)
process.wait()
archiveReturnCode = process.returncode
if archiveReturnCode != 0:
print "Commit failed"
else:
print "Commit succeeded:", inputNote
pull()
# pull
def pull():
archiveCmd = 'git pull'
process = subprocess.Popen(archiveCmd,shell=True)
process.wait()
archiveReturnCode = process.returncode
if archiveReturnCode != 0:
print "Failed to pull the remote code"
else:
push()
# push
def push():
archiveCmd = 'git push'
process = subprocess.Popen(archiveCmd,shell=True)
process.wait()
archiveReturnCode = process.returncode
if archiveReturnCode != 0:
print "Failed to push to the remote git server"
else:
print "Push succeeded"
# run
def main():
status()
if __name__ == '__main__':
main()
``` |
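The five functions above repeat the same Popen/wait/returncode pattern; one possible consolidation (a sketch only, not part of the original script) is a single helper:
```python
# Sketch of a shared helper for "run a git command, report failure" (hypothetical).
import subprocess

def run_git(cmd, error_msg):
    process = subprocess.Popen(cmd, shell=True)
    process.wait()
    if process.returncode != 0:
        print(error_msg)
        return False
    return True

# e.g. if run_git('git add --all', 'Error adding changes to the staging area'): ...
```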
{
"source": "jony-lee/tao-of-rust-codes",
"score": 3
} |
#### File: callrust/Python/database.py
```python
import sys, ctypes
from ctypes import c_char_p, c_uint32, Structure, POINTER
prefix = {'win32': ''}.get(sys.platform, '../target/debug/lib')
extension = {'darwin': '.dylib', 'win32': '.dll'} \
.get(sys.platform, '.so')
class DatabaseS(Structure):
pass
lib = ctypes.cdll.LoadLibrary(prefix + "callrust" + extension)
# DatabaseS must be used as the parameter type, otherwise a null pointer will be produced
lib.database_new.restype = POINTER(DatabaseS)
lib.database_free.argtypes = (POINTER(DatabaseS), )
lib.database_insert.argtypes = (POINTER(DatabaseS), )
lib.database_query.argtypes = (POINTER(DatabaseS), c_char_p)
lib.database_query.restype = c_uint32
class Database:
def __init__(self):
self.obj = lib.database_new()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
lib.database_free(self.obj)
def insert(self):
lib.database_insert(self.obj)
def query(self, zip):
return lib.database_query(self.obj, zip.encode('utf-8'))
with Database() as database:
database.insert()
pop1 = database.query("10186")
pop2 = database.query("10852")
print(pop2 - pop1)
``` |
{
"source": "jonyo/smart-dollhouse",
"score": 3
} |
#### File: jonyo/smart-dollhouse/iobuttons.py
```python
import board
import digitalio
class Button:
def __init__(self, pin):
self._id = pin.id
self.button = digitalio.DigitalInOut(pin)
self.button.direction = digitalio.Direction.INPUT
self.button.pull = digitalio.Pull.UP
def isPressed(self):
        return not self.button.value
buttons = [
Button(board.D17),
Button(board.D22),
Button(board.D23),
Button(board.D27),
]
``` |
{
"source": "jon-young/genetic_interact",
"score": 3
} |
#### File: genetic_interact/src/biogrid.py
```python
import os.path
def setup_filepaths(organism):
"""Setup filepaths for BIOGRID files for various organisms."""
if organism == 'cerevisiae':
biogridPath = os.path.join('..', 'data',
'BIOGRID-3.4.130-yeast-post2006.txt')
elif organism == 'pombe':
biogridPath = os.path.join('..', '..', 'DataDownload', 'BIOGRID',
'BIOGRID-ORGANISM-3.4.130.tab2',
'BIOGRID-ORGANISM-Schizosaccharomyces_pombe_972h-3.4.130.tab2'\
'.txt')
elif organism == 'melanogaster':
biogridPath = os.path.join('..', '..', 'DataDownload', 'BIOGRID',
'BIOGRID-ORGANISM-3.4.130.tab2',
'BIOGRID-ORGANISM-Drosophila_melanogaster-3.4.130.tab2.txt')
else: # organism == 'sapiens'
biogridPath = os.path.join('..', '..', 'DataDownload', 'BIOGRID',
'BIOGRID-ORGANISM-3.4.130.tab2',
'BIOGRID-ORGANISM-Homo_sapiens-3.4.130.tab2.txt')
return biogridPath
def get_interacting_genes(organism, intactType, colName):
"""Return set of gene pairs that are of given interaction type"""
intactSet = set()
biogridFile = open(setup_filepaths(organism))
header = biogridFile.readline().rstrip().split('\t')
colNum = header.index(colName)
intactTypeCol = header.index('Experimental System')
for line in biogridFile:
tokens = line.rstrip().split('\t')
if tokens[intactTypeCol] == intactType:
intactSet.add(frozenset(tokens[colNum:colNum + 2]))
biogridFile.close()
return intactSet
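# Illustrative call (the interaction-type and column names below are assumptions;
# check the header of the BIOGRID file you are using):
#   pairs = get_interacting_genes('cerevisiae', 'Synthetic Lethality',
#                                 'Systematic Name Interactor A')
#   # pairs -> {frozenset({'YAL001C', 'YBR123W'}), ...}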
```
#### File: genetic_interact/src/clustintactvis.py
```python
import itertools
import matplotlib.pyplot as plt
import networkx as nx
def btw_complex_net(pairs, clust2genes, gene2idx, adjMat):
"""Clusters are protein complexes
INPUT: 1.) pairs <- [(complex 1 name, complex 2 name)]"""
fig = plt.figure()
# assemble network edges
for p in pairs:
cmplx1genes = clust2genes[p[0]]
cmplx2genes = clust2genes[p[1]]
# get interaction edges
edges = list()
for genePair in itertools.product(cmplx1genes, cmplx2genes):
i = gene2idx[genePair[0]]
j = gene2idx[genePair[1]]
if adjMat[i,j] == 1:
edges.append((genePair[0], genePair[1]))
# draw network
plt.clf()
G_gi = nx.Graph()
G_c1 = nx.Graph()
G_c2 = nx.Graph()
for e in edges:
G_gi.add_edge(e[0], e[1])
for gene in cmplx1genes:
G_c1.add_node(gene)
for gene in cmplx2genes:
G_c2.add_node(gene)
pos1 = nx.circular_layout(G_c1)
pos2 = nx.circular_layout(G_c2)
pos2.update((k, v+2) for k,v in pos2.items())
posGI = dict()
for gene in G_gi.nodes():
if gene in pos1:
posGI[gene] = pos1[gene]
else:
posGI[gene] = pos2[gene]
nx.draw_networkx_nodes(G_c1, pos=pos1, alpha=0.25)
nx.draw_networkx_labels(G_c1, pos=pos1)
nx.draw_networkx_nodes(G_c2, pos=pos2, alpha=0.25)
nx.draw_networkx_labels(G_c2, pos=pos2)
nx.draw_networkx_edges(G_gi, pos=posGI)
plt.title(p[0] + ' and ' + p[1])
plt.axis('off')
plt.show()
plt.waitforbuttonpress()
```
#### File: genetic_interact/src/func_net_pred.py
```python
import collections
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import sys
from sklearn import metrics
def setup_filepaths(organism):
"""Setup full file paths for functional net and BIOGRID"""
if organism == 'cerevisiae':
biogridpath = os.path.join('..', 'data',
'BIOGRID-3.4.130-yeast-post2006.txt')
fnetpath = os.path.join('..', 'data', 'YeastNetDataFrame.pkl')
elif organism == 'sapiens':
biogridpath = os.path.join('..', '..', 'DataDownload', 'BIOGRID',
'BIOGRID-ORGANISM-3.4.130.tab2',
'BIOGRID-ORGANISM-Homo_sapiens-3.4.130.tab2.txt')
fnetpath = os.path.join('..', 'data', 'HumanNetDataFrame.pkl')
elif organism == 'melanogaster':
biogridpath = os.path.join('..', '..', 'DataDownload', 'BIOGRID',
'BIOGRID-ORGANISM-3.4.130.tab2',
'BIOGRID-ORGANISM-Drosophila_melanogaster-3.4.130.tab2.txt')
fnetpath = os.path.join('..', 'data', 'FlyNetDataFrame.pkl')
else:
print('ORGANISM NOT FOUND! Exiting...')
sys.exit()
return biogridpath, fnetpath
def determine_col(organism, gene):
"""Determine which gene column in the BIOGRID file to read"""
entrezRegEx = re.compile(r'\d+')
if organism == 'cerevisiae':
sysNameRegEx = re.compile(r'Y[A-Z][A-Z]\d+')
ofcSymRegEx = re.compile(r'[A-Z]+')
elif organism == 'sapiens':
sysNameRegEx = re.compile(r'\w+')
ofcSymRegEx = re.compile(r'[A-Za-z]+.')
else: # organism == 'melanogaster'
sysNameRegEx = re.compile(r'Dmel_.')
ofcSymRegEx = re.compile(r'\w+')
if entrezRegEx.match(gene) is not None:
colName = 'Entrez Gene Interactor A'
elif sysNameRegEx.match(gene) is not None:
colName = 'Systematic Name Interactor A'
elif ofcSymRegEx.match(gene) is not None:
colName = 'Official Symbol Interactor A'
else:
print('ERROR: Unable to match ID type! Exiting...')
sys.exit()
return colName
def read_biogrid(biogridpath, experimentSys, colName):
"""Read BIOGRID genetic interactions and return dictionary converting each
interactor ID to its genetic interaction partners"""
seedSets = collections.defaultdict(set)
biogridfile = open(biogridpath)
header = biogridfile.readline().split('\t')
geneColNum = header.index(colName)
expSysColNum = header.index('Experimental System')
for line in biogridfile:
tokens = line.split('\t')
if tokens[expSysColNum] == experimentSys:
seedSets[tokens[geneColNum]].add(tokens[geneColNum + 1])
seedSets[tokens[geneColNum + 1]].add(tokens[geneColNum])
return seedSets
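# Sketch of the returned structure (hypothetical gene IDs):
#   seedSets -> {'YAL001C': {'YBR123W', 'YDR001C'}, 'YBR123W': {'YAL001C'}, ...}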
def seed_set_predictability(funcNetDf, seedSets):
"""For each seed gene, measure its predictability of genetic interactions
by AUC. Also return a dictionary converting the seed gene (provided it is
in the network) to the set of its known genetic interaction partners."""
seedAUC = list()
seed2interactors = dict()
for seedGene in seedSets.keys():
interactors = [gene for gene in seedSets[seedGene]
if gene in funcNetDf.index]
if len(interactors) > 0:
llsSum = funcNetDf.loc[interactors,:].sum(axis=0)
trueLabels = pd.Series([0]*llsSum.size, index=llsSum.index)
trueLabels.loc[interactors] = 1
auc = metrics.roc_auc_score(trueLabels, llsSum)
seedAUC.append((auc, seedGene))
seed2interactors[seedGene] = interactors
seedAUC.sort()
return seedAUC, seed2interactors
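# Sketch of the return values (hypothetical IDs and scores):
#   seedAUC          -> [(0.58, 'YAL001C'), (0.91, 'YBR123W'), ...]  # sorted ascending by AUC
#   seed2interactors -> {'YAL001C': ['YBR123W', ...], ...}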
def plot_aucs(seedAUC):
"""ARGUMENTS: <list> [(seed AUC, seed Entrez)]"""
aucs = [t[0] for t in seedAUC]
pos = np.arange(1, len(aucs)+1)
plt.barh(pos, aucs, height=1.0, align='center')
plt.ylim([0, len(aucs)+1])
ax = plt.axes()
ax.set_yticklabels([])
plt.xlabel('AUC')
plt.ylabel('Seed sets')
plt.tight_layout()
plt.show()
def main():
if len(sys.argv) < 3: # validate input parameters
print('Usage: python {} <organism>'\
' <genetic interaction>'.format(sys.argv[0]))
sys.exit()
organism = sys.argv[1]
intactType = sys.argv[2]
biogridpath, fnetpath = setup_filepaths(organism)
funcNetDf = pd.read_pickle(fnetpath)
numNodes = len(funcNetDf.columns)
print('\nNumber of genes in functional network: {}'.format(numNodes))
geneExample = funcNetDf.columns[0]
colName = determine_col(organism, geneExample)
seedSets = read_biogrid(biogridpath, intactType, colName)
seedAUC, seed2intacts = seed_set_predictability(funcNetDf, seedSets)
print('Number of seed sets: {}\n'.format(len(seedAUC)))
plot_aucs(seedAUC)
if __name__=="__main__":
main()
``` |
{
"source": "jon-young/medicalimage",
"score": 2
} |
#### File: jon-young/medicalimage/liversegmentation.py
```python
import matplotlib.pyplot as plt
import numpy as np
import SimpleITK as sitk
import sys
from os.path import expanduser, join
from scipy.spatial.distance import euclidean
def sitk_show(img):
"""Displays SimpleITK image from its array. Includes a function to report
the pixel value under the mouse cursor."""
X = sitk.GetArrayFromImage(img)
fig, ax = plt.subplots()
ax.imshow(X, cmap=plt.cm.Greys_r)
if X.ndim == 2:
numrows, numcols = X.shape
def format_coord(x, y):
col = int(x + 0.5)
row = int(y + 0.5)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = X[row, col]
return 'x=%1.4f, y=%1.4f, z=%1.4f' %(x, y, z)
else:
return 'x=%1.4f, y=%1.4f' %(x, y)
ax.format_coord = format_coord
plt.show()
def read_dicom():
"""Read in DICOM series"""
dicomPath = join(expanduser('~'), 'Documents', 'SlicerDICOMDatabase',
'TCIALocal', '0', 'images', '')
reader = sitk.ImageSeriesReader()
seriesIDread = reader.GetGDCMSeriesIDs(dicomPath)[1]
dicomFilenames = reader.GetGDCMSeriesFileNames(dicomPath, seriesIDread)
reader.SetFileNames(dicomFilenames)
return reader.Execute()
def anisotropic_diffusion(img, *args):
"""INPUT: arguments are time step; conductance; # of iterations"""
imgRecast = sitk.Cast(img, sitk.sitkFloat32)
timeStep_ = args[0]
conduct = args[1]
numIter = args[2]
curvDiff = sitk.CurvatureAnisotropicDiffusionImageFilter()
curvDiff.SetTimeStep(timeStep_)
curvDiff.SetConductanceParameter(conduct)
curvDiff.SetNumberOfIterations(numIter)
return curvDiff.Execute(imgRecast)
def gradient_magnitude(img, sigma_):
return sitk.GradientMagnitudeRecursiveGaussian(image1=img, sigma=sigma_)
def sigmoid_filter(img, K1, K2):
alpha_ = (K2 - K1)/6
beta_ = (K1 + K2)/2
imgSigmoid = sitk.Sigmoid(image1=img, alpha=alpha_, beta=beta_,
outputMaximum=1.0, outputMinimum=0.0)
return imgSigmoid
def input_level_set(featImg, seed2radius):
setupImg = sitk.Image(featImg.GetSize()[0], featImg.GetSize()[1], sitk.sitkUInt8)
X = sitk.GetArrayFromImage(setupImg)
for s in seed2radius.keys():
rowIni, rowEnd = s[0] - seed2radius[s], s[0] + seed2radius[s]
colIni, colEnd = s[1] - seed2radius[s], s[1] + seed2radius[s]
for i in range(rowIni, rowEnd+1):
for j in range(colIni, colEnd+1):
if euclidean((i,j), s) <= seed2radius[s]:
X[i,j] = 1
img = sitk.Cast(sitk.GetImageFromArray(X), featImg.GetPixelIDValue()) * -1 + 0.5
img.SetSpacing(featImg.GetSpacing())
img.SetOrigin(featImg.GetOrigin())
img.SetDirection(featImg.GetDirection())
return img
def fast_marching(img, seeds, stop):
return sitk.FastMarching(image1=img, trialPoints=seeds, stoppingValue=stop)
def shape_detection(imgInit, imgSigmoid, *args):
"""INPUT arguments are:
1.) RMS change in level set func.
2.) propagation scaling
3.) curvature scaling
4.) # of iterations"""
shapeDetect = sitk.ShapeDetectionLevelSetImageFilter()
shapeDetect.SetMaximumRMSError(args[0])
shapeDetect.SetPropagationScaling(args[1])
shapeDetect.SetCurvatureScaling(args[2])
shapeDetect.SetNumberOfIterations(args[3])
return shapeDetect.Execute(imgInit, imgSigmoid)
def geodesic_active_contour(imgInit, imgSigmoid, *args):
"""INPUT arguments are:
1.) propagation scaling
2.) curvature scaling
3.) advection scaling
4.) RMS change in level set func.
5.) # of iterations"""
gac = sitk.GeodesicActiveContourLevelSetImageFilter()
gac.SetPropagationScaling(args[0])
gac.SetCurvatureScaling(args[1])
gac.SetAdvectionScaling(args[2])
gac.SetMaximumRMSError(args[3])
gac.SetNumberOfIterations(args[4])
return gac.Execute(imgInit, imgSigmoid)
def binary_threshold(img, lowerThreshold_, upperThreshold_):
"""Produce binary mask representing segmented object to be overlaid over
original image."""
binaryThresh = sitk.BinaryThresholdImageFilter()
binaryThresh.SetLowerThreshold(lowerThreshold_)
binaryThresh.SetUpperThreshold(upperThreshold_)
binaryThresh.SetInsideValue(1)
binaryThresh.SetOutsideValue(255)
return binaryThresh.Execute(img)
def main():
# read in DICOM images
sliceNum = int(sys.argv[1])
imgSeries = read_dicom()
imgSlice = imgSeries[:,:,sliceNum]
imgSliceUInt8 = sitk.Cast(sitk.RescaleIntensity(imgSlice), sitk.sitkUInt8)
print('\nDisplaying image slice...')
sitk_show(imgSlice)
# image filtering
ans = -1
while ans not in range(1, 4):
print('Enter number of desired filtering method:')
print('1 - Curvature Anisotropic Diffusion')
print('2 - Recursive Gaussian IIR')
print('3 - Median')
ans = int(input())
if ans == 1:
anisoParams = (0.06, 9.0, 5)
imgFilter = anisotropic_diffusion(imgSlice, *anisoParams)
print('\nDisplaying anisotropic diffusion-smoothed image...')
sitk_show(imgFilter)
elif ans == 2:
recurGaussX = sitk.RecursiveGaussianImageFilter()
recurGaussY = sitk.RecursiveGaussianImageFilter()
recurGaussX.SetSigma(1.0)
recurGaussY.SetSigma(1.0)
recurGaussY.SetDirection(1)
imgFilter = recurGaussY.Execute(recurGaussX.Execute(imgSlice))
print('\nDisplaying recursive Gaussian-filtered image...')
sitk_show(imgFilter)
else:
med = sitk.MedianImageFilter()
med.SetRadius(3)
imgFilter = med.Execute(imgSlice)
print('\nDisplaying median-filtered image...')
sitk_show(imgFilter)
# compute edge potential with gradient magnitude recursive Gaussian
sigma = 3.0
imgGauss = gradient_magnitude(imgFilter, sigma)
print('\nImage from gradient magnitude recursive Gaussian:')
sitk_show(imgGauss)
# sigmoid mapping to create feature image
K1 = float(input('Enter value for K1: '))
K2 = float(input('Enter value for K2: '))
imgSigmoid = sigmoid_filter(imgGauss, K1, K2)
print('\nDisplaying feature image...')
sitk_show(imgSigmoid)
# get seeds and radii from user
numSeeds = int(input('Enter the desired number of seeds: '))
seed2radius = dict()
for i in range(numSeeds):
coord = input('Enter x- and y-coordinates separated by a comma: ')
radius = int(input('Enter desired radius of the seed: '))
seedTuple = tuple([int(n) for n in reversed(coord.split(','))])
seed2radius[seedTuple] = radius
# create input level set
initImg = input_level_set(imgSigmoid, seed2radius)
print('\nDisplaying input level set...')
sitk_show(initImg)
# segmentation by fast marching
seeds = list(seed2radius.keys())
stopVal = int(input('Enter the stopping value: '))
fastMarch = fast_marching(imgSigmoid, seeds, stopVal)
print('\nDisplaying label image:')
sitk_show(fastMarch)
labelLowThresh = float(input('Enter lower threshold from label image: '))
labelUpThresh = float(input('Enter upper threshold from label image: '))
binaryThresh = binary_threshold(fastMarch, labelLowThresh, labelUpThresh)
print('\nResult of fast marching segmentation:')
sitk_show(sitk.LabelOverlay(imgSliceUInt8, binaryThresh, backgroundValue=255))
# shape detection segmentation
shapeParams = (0.02, 1.0, 0.2, 500)
imgShape = shape_detection(initImg, imgSigmoid, *shapeParams)
print('\nDisplaying label image:')
sitk_show(imgShape)
labelLowThresh = float(input('Enter lower threshold from label image: '))
labelUpThresh = float(input('Enter upper threshold from label image: '))
binaryThresh = binary_threshold(imgShape, labelLowThresh, labelUpThresh)
print('\nDisplaying segmentation by shape detection...')
sitk_show(sitk.LabelOverlay(imgSliceUInt8, binaryThresh, backgroundValue=255))
# segmentation with geodesic active contours
gacParams = (1.0, 0.2, 4.0, 0.01, 600)
imgGac = geodesic_active_contour(initImg, imgSigmoid, *gacParams)
print('\nDisplaying label image:')
sitk_show(imgGac)
labelLowThresh = float(input('Enter lower threshold from label image: '))
labelUpThresh = float(input('Enter upper threshold from label image: '))
binaryThresh = binary_threshold(imgGac, labelLowThresh, labelUpThresh)
print('\nDisplaying segmentation by geodesic active contours...')
sitk_show(sitk.LabelOverlay(imgSliceUInt8, binaryThresh, backgroundValue=255))
if __name__=="__main__":
main()
``` |
{
"source": "jon-young/ParallelTest",
"score": 3
} |
#### File: jon-young/ParallelTest/parallel_test.py
```python
__author__ = 'jyoung'
import multiprocessing as mp
import numpy
import time
def sum_range_serial(start, end):
return numpy.sum(numpy.arange(start, end+1))
def sum_range_par(start, end, output):
output.put(numpy.sum(numpy.arange(start, end+1)))
sumLimit = int(input('Enter a number to sum to: '))
print('\nSerial job')
startTime = time.time()
serialSum = sum_range_serial(1, sumLimit)
print('Elapsed time:', time.time()-startTime)
print('The sum is', serialSum)
print('\nParallel job')
numProc = int(input('Enter the desired number of parallel processes: '))
output = mp.Queue()
processes = [mp.Process(target=sum_range_par,
                        args=(jobID*sumLimit//numProc+1,
                              (jobID+1)*sumLimit//numProc, output))
for jobID in range(numProc)]
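# With integer division the work splits cleanly, e.g. sumLimit=100 and numProc=4
# gives the sub-ranges (1, 25), (26, 50), (51, 75), (76, 100).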
startTime = time.time()
for p in processes:
p.start()
for p in processes:
p.join()
print('Elapsed time:', time.time()-startTime)
procResults = [output.get() for p in processes]
parSum = numpy.sum(numpy.array(procResults))
print("Each processes' results:", procResults)
print('The sum is', parSum)
``` |
{
"source": "Jonypr-code/KivyMD",
"score": 2
} |
#### File: demos/fortnightly/main.py
```python
import os
import sys
from pathlib import Path
from kivy.lang import Builder
from kivymd.app import MDApp
if getattr(sys, "frozen", False): # bundle mode with PyInstaller
os.environ["FORTNIGHTLY_ROOT"] = sys._MEIPASS
else:
os.environ["FORTNIGHTLY_ROOT"] = str(Path(__file__).parent)
KV_DIR = f"{os.environ['FORTNIGHTLY_ROOT']}/libs/kv/"
for kv_file in os.listdir(KV_DIR):
with open(os.path.join(KV_DIR, kv_file), encoding="utf-8") as kv:
Builder.load_string(kv.read())
KV = """
#:import FadeTransition kivy.uix.screenmanager.FadeTransition
#:import FortnightlyRootScreen libs.baseclass.root_screen.FortnightlyRootScreen
ScreenManager:
transition: FadeTransition()
FortnightlyRootScreen:
name: "fortnightly root screen"
"""
class MDFortnightly(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.title = "Fortnightly"
self.icon = f"{os.environ['FORTNIGHTLY_ROOT']}/assets/images/logo.png"
def build(self):
FONT_PATH = f"{os.environ['FORTNIGHTLY_ROOT']}/assets/fonts/"
self.theme_cls.font_styles.update(
{
"H1": [FONT_PATH + "Merriweather-BlackItalic", 96, False, -1.5],
"H2": [FONT_PATH + "LibreFranklin-Light", 60, False, -0.5],
"H3": [FONT_PATH + "Merriweather-BlackItalic", 48, False, 0],
"H4": [FONT_PATH + "LibreFranklin-Regular", 34, False, 0.25],
"H5": [FONT_PATH + "LibreFranklin-Regular", 24, False, 0],
"H6": [FONT_PATH + "Merriweather-BoldItalic", 20, False, 0.15],
"Subtitle1": [
FONT_PATH + "LibreFranklin-Medium",
16,
False,
0.15,
],
"Subtitle2": [
FONT_PATH + "Merriweather-Regular",
14,
False,
0.1,
],
"Body1": [FONT_PATH + "Merriweather-Regular", 16, False, 0.5],
"Body2": [FONT_PATH + "LibreFranklin-Regular", 14, False, 0.25],
"Button": [FONT_PATH + "LibreFranklin-Bold", 14, True, 1.25],
"Caption": [
FONT_PATH + "Merriweather-BlackItalic",
12,
False,
0.4,
],
"Overline": [
FONT_PATH + "LibreFranklin-Bold",
10,
True,
1.5,
],
}
)
return Builder.load_string(KV)
MDFortnightly().run()
```
#### File: libs/baseclass/elevation.py
```python
from kivymd.uix.behaviors import CircularElevationBehavior
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.screen import MDScreen
class KitchenSinkElevationScreen(MDScreen):
pass
class KitchenSinkElevationExampleCircle(CircularElevationBehavior, MDBoxLayout):
def on_size(self, *dt):
self.radius = [self.size[0] / 2]
```
#### File: libs/baseclass/box_bottom_sheet.py
```python
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.metrics import dp
from kivy.properties import BooleanProperty, ObjectProperty, StringProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image
from kivy.uix.recycleview import RecycleView
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import CircularRippleBehavior
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.button import MDIconButton
from kivymd.uix.list import TwoLineAvatarIconListItem
class BoxBottomSheetProductList(RecycleView):
pass
class TotalPriceForBoxBottomSheetProductList(MDBoxLayout):
pass
class ToolbarForBoxBottomSheetProductList(MDBoxLayout):
pass
class ItemForBoxBottomSheetProductList(TwoLineAvatarIconListItem):
pass
class PreviousImage(CircularRippleBehavior, ButtonBehavior, Image):
description = StringProperty()
_root = ObjectProperty()
class BoxBottomSheet(ThemableBehavior, MDBoxLayout):
open_sheet_box = BooleanProperty(False)
def clear_box(self):
while len(self.ids.previous_box.children) != 1:
for widget in self.ids.previous_box.children:
if widget.__class__ is not MDIconButton:
self.ids.previous_box.remove_widget(widget)
def restore_opacity_bottom_sheet(self):
Animation(opacity=1, d=0.2).start(self.ids.previous_box)
Animation(opacity=1, d=0.2).start(self)
def restore_width_bottom_sheet(self):
if len(self.ids.previous_box.children) != 1:
for widget in self.ids.previous_box.children:
self.ids.previous_box.width += widget.width
self.width += widget.width
self.ids.previous_box.height = dp(48)
if self.parent.ids.box_bottom_sheet_product_list.width == 0:
Animation(width=self.width + dp(48), d=0.2).start(self)
def remove_box_list(self, *args):
self.parent.ids.box_bottom_sheet_product_list.data = []
self.restore_width_bottom_sheet()
self.restore_opacity_bottom_sheet()
def hide_box_bottom_sheet(self):
Animation(width=0, d=0.2).start(self)
Animation(opacity=0, d=0.2).start(self)
def do_open_bottom_sheet(self, *args):
total_price = 0
count_item = 0
for widget in self.ids.previous_box.children:
if widget.__class__ is PreviousImage:
count_item += 1
total_price += int(
float(widget.description.split("\n")[1].split("$ ")[1])
)
self.parent.ids.box_bottom_sheet_product_list.data.append(
{
"viewclass": "ItemForBoxBottomSheetProductList",
"height": dp(72),
"path_to_image": widget.source,
"description": widget.description,
}
)
self.parent.ids.box_bottom_sheet_product_list.data.insert(
0,
{
"viewclass": "ToolbarForBoxBottomSheetProductList",
"count_item": count_item,
"callback": self.hide_bottom_sheet,
},
)
self.parent.ids.box_bottom_sheet_product_list.data.append(
{
"viewclass": "TotalPriceForBoxBottomSheetProductList",
"total_price": str(total_price),
}
)
Animation(opacity=1, d=0.2).start(
self.parent.ids.box_bottom_sheet_product_list
)
self.show_clear_button()
def show_clear_button(self):
self.parent.ids.clear_button.opacity = 1
self.parent.ids.clear_button.disabled = False
self.parent.ids.clear_button.grow()
def hide_clear_button(self, *args):
def hide_clear_button(interval):
self.parent.ids.clear_button.opacity = 0
self.parent.ids.clear_button.disabled = True
self.parent.ids.clear_button.grow()
Clock.schedule_once(hide_clear_button, 0.2)
def hide_bottom_sheet(self, *args):
Animation.stop_all(self)
self.hide_clear_button()
Animation(opacity=0, d=0.2).start(
self.parent.ids.box_bottom_sheet_product_list
)
animation = Animation(
height=Window.height // 3, width=Window.width // 2, d=0.1
) + Animation(height=dp(68), width=dp(68), d=0.2)
animation.bind(on_complete=self.remove_box_list)
animation.start(self)
self.open_sheet_box = False
def open_bottom_sheet(self):
Animation.stop_all(self)
anim = Animation(
height=Window.height // 2, width=Window.width, d=0.1
) + Animation(height=Window.height, d=0.1)
anim.bind(on_complete=self.do_open_bottom_sheet)
anim.start(self)
self.open_sheet_box = True
```
#### File: libs/baseclass/product_screen.py
```python
import os
from kivy.animation import Animation
from kivy.properties import ListProperty, StringProperty
from kivy.utils import get_color_from_hex
from kivymd.color_definitions import colors
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import MagicBehavior
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.expansionpanel import MDExpansionPanel, MDExpansionPanelOneLine
from kivymd.uix.screen import MDScreen
PATH_TO_IMAGES = f"{os.environ['SHRINE_ROOT']}/assets/images"
class MoreInformation(ThemableBehavior, MDBoxLayout):
pass
class PlanItem(ThemableBehavior, MagicBehavior, MDBoxLayout):
text_item = StringProperty()
border = StringProperty()
color_select = ListProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.color_select = self.theme_cls.disabled_hint_text_color
self.primary = get_color_from_hex(colors["BlueGray"]["500"])
def press_on_plan(self, instance_plan):
for widget in self.parent.parent.children[0].children:
if widget.color_select == self.primary:
widget.color_select = self.color_select
self.grow()
break
instance_plan.color_select = self.primary
class ProductScreen(ThemableBehavior, MDScreen):
has_already_opened = False
def show_product_screen(self):
Animation(y=0, opacity=1, d=0.3).start(self)
def hide_product_screen(self):
Animation(y=-self.height, opacity=0, d=0.3).start(self)
def on_enter(self):
if self.has_already_opened:
return
else:
self.has_already_opened = True
content_for_panel = MoreInformation()
md_expansion_panel = MDExpansionPanel(
content=content_for_panel,
icon=f"{PATH_TO_IMAGES}/information.png",
panel_cls=MDExpansionPanelOneLine(text="More information"),
)
self.ids.expansion_panel_box.add_widget(md_expansion_panel)
self.ids.previous_image.source = f"{PATH_TO_IMAGES}/previous.jpg"
```
#### File: libs/baseclass/toolbar.py
```python
from os import environ
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.metrics import dp
from kivy.properties import StringProperty
from kivymd.theming import ThemableBehavior
from kivymd.uix.boxlayout import MDBoxLayout
class ShrineToolbar(ThemableBehavior, MDBoxLayout):
"""`Toolbar` for `ShrineRootScreen` screen."""
bottom_manu_open = False
"""Open or closed box."""
search = False
path_to_icon_menu = StringProperty(
f"{environ['SHRINE_ROOT']}/assets/images/menu-dark.png"
)
path_to_icon_logo = StringProperty()
title = StringProperty("SHRINE")
def set_title_animation_text(self, text):
"""Animates text from `Title old` to `Title new`."""
def set_new_text(*args):
self.ids.title.text = text
Animation(color=self.theme_cls.text_color, d=0.2).start(
self.ids.title
)
anim = Animation(color=(0, 0, 0, 0), d=0.2)
anim.bind(on_complete=set_new_text)
anim.start(self.ids.title)
def set_search_field(self):
def set_focus_search_field(interval):
self.ids.search_field.focus = focus
if not self.search:
self.search = True
size = 0
opacity = 1
opacity_button_tune = 0
disabled = False
focus = True
title = "SEARCH"
else:
self.search = False
size = dp(42)
opacity = 0
opacity_button_tune = 1
disabled = True
focus = False
title = self.title
Animation(size=(size, size), opacity=opacity_button_tune, d=0.2).start(
self.ids.button_tune
)
Animation(size=(size, size), d=0.2).start(self.ids.button_logo)
Animation(size=(size, size), d=0.2).start(self.ids.button_menu)
self.set_title_animation_text(title)
self.ids.search_field.disabled = disabled
Animation(opacity=opacity, d=0.2).start(self.ids.search_field)
Clock.schedule_once(set_focus_search_field, 0.3)
```
#### File: uix/behaviors/hover_behavior.py
```python
__all__ = ("HoverBehavior",)
from kivy.core.window import Window
from kivy.properties import BooleanProperty, ObjectProperty
from kivy.uix.widget import Widget
class HoverBehavior(object):
"""
:Events:
:attr:`on_enter`
Called when mouse enters the bbox of the widget AND the widget is visible
:attr:`on_leave`
Called when the mouse exits the widget AND the widget is visible
"""
hovering = BooleanProperty(False)
"""
`True`, if the mouse cursor is within the borders of the widget.
Note that this is set and cleared even if the widget is not visible
    :attr:`hovering` is a :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
hover_visible = BooleanProperty(False)
"""
`True` if hovering is True AND is the current widget is visible
:attr:`hover_visible` is a :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
enter_point = ObjectProperty(allownone=True)
"""
Holds the last position where the mouse pointer crossed into the Widget
if the Widget is visible and is currently in a hovering state
:attr:`enter_point` is a :class:`~kivy.properties.ObjectProperty`
and defaults to `None`.
"""
detect_visible = BooleanProperty(True)
"""
Should this widget perform the visibility check?
:attr:`detect_visible` is a :class:`~kivy.properties.BooleanProperty`
and defaults to `True`.
"""
def __init__(self, **kwargs):
self.register_event_type("on_enter")
self.register_event_type("on_leave")
Window.bind(mouse_pos=self.on_mouse_update)
super(HoverBehavior, self).__init__(**kwargs)
def on_mouse_update(self, *args):
        # If the Widget is not attached to a root window yet, do nothing
if not self.get_root_window():
return
pos = args[1]
#
# is the pointer in the same position as the widget?
        # If not - then issue an on_leave event if needed
#
if not self.collide_point(*self.to_widget(*pos)):
self.hovering = False
self.enter_point = None
if self.hover_visible:
self.hover_visible = False
self.dispatch("on_leave")
return
#
# The pointer is in the same position as the widget
#
if self.hovering:
#
            # nothing to do here. Note - this does not handle the case where
# a popup comes over an existing hover event.
# This seems reasonable
#
return
#
# Otherwise - set the hovering attribute
#
self.hovering = True
#
# We need to traverse the tree to see if the Widget is visible
#
# This is a two stage process:
# - first go up the tree to the root Window.
# At each stage - check that the Widget is actually visible
# - Second - At the root Window check that there is not another branch
# covering the Widget
#
self.hover_visible = True
if self.detect_visible:
widget: Widget = self
while True:
# Walk up the Widget tree from the target Widget
parent = widget.parent
try:
# See if the mouse point collides with the parent
                    # using both local and global coordinates to cover absolute and relative layouts
pinside = parent.collide_point(
*parent.to_widget(*pos)
) or parent.collide_point(*pos)
except Exception:
# The collide_point will error when you reach the root Window
break
if not pinside:
self.hover_visible = False
break
# Iterate upwards
widget = parent
#
# parent = root window
# widget = first Widget on the current branch
#
children = parent.children
for child in children:
                # For each top level widget - check if it is the current branch
# If it is - then break.
# If not then - since we start at 0 - this widget is visible
#
# Check to see if it should take the hover
#
if child == widget:
# this means that the current widget is visible
break
if child.collide_point(*pos):
# this means that the current widget is covered by a modal or popup
self.hover_visible = False
break
if self.hover_visible:
self.enter_point = pos
self.dispatch("on_enter")
def on_enter(self):
"""Called when mouse enters the bbox of the widget AND the widget is visible."""
def on_leave(self):
"""Called when the mouse exits the widget AND the widget is visible."""
```
#### File: kivymd/uix/taptargetview.py
```python
from kivy.animation import Animation
from kivy.event import EventDispatcher
from kivy.graphics import Color, Ellipse, Rectangle
from kivy.logger import Logger
from kivy.metrics import dp
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.label import Label
from kivymd.theming import ThemableBehavior
class MDTapTargetView(ThemableBehavior, EventDispatcher):
"""Rough try to mimic the working of Android's TapTargetView.
:Events:
:attr:`on_open`
Called at the time of the start of the widget opening animation.
:attr:`on_close`
Called at the time of the start of the widget closed animation.
"""
widget = ObjectProperty()
"""
Widget to add ``TapTargetView`` upon.
:attr:`widget` is an :class:`~kivy.properties.ObjectProperty`
and defaults to `None`.
"""
outer_radius = NumericProperty(dp(200))
"""
Radius for outer circle.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-outer-radius.png
:align: center
:attr:`outer_radius` is an :class:`~kivy.properties.NumericProperty`
and defaults to `dp(200)`.
"""
outer_circle_color = ListProperty()
"""
Color for the outer circle in ``rgb`` format.
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
outer_circle_color=(1, 0, 0)
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-outer-circle-color.png
:align: center
:attr:`outer_circle_color` is an :class:`~kivy.properties.ListProperty`
and defaults to ``theme_cls.primary_color``.
"""
outer_circle_alpha = NumericProperty(0.96)
"""
Alpha value for outer circle.
:attr:`outer_circle_alpha` is an :class:`~kivy.properties.NumericProperty`
and defaults to `0.96`.
"""
target_radius = NumericProperty(dp(45))
"""
Radius for target circle.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-target-radius.png
:align: center
:attr:`target_radius` is an :class:`~kivy.properties.NumericProperty`
and defaults to `dp(45)`.
"""
target_circle_color = ListProperty([1, 1, 1])
"""
Color for target circle in ``rgb`` format.
.. code-block:: python
self.tap_target_view = MDTapTargetView(
...
target_circle_color=(1, 0, 0)
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/tap-target-view-widget-target-circle-color.png
:align: center
:attr:`target_circle_color` is an :class:`~kivy.properties.ListProperty`
and defaults to `[1, 1, 1]`.
"""
title_text = StringProperty()
"""
Title to be shown on the view.
:attr:`title_text` is an :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
title_text_size = NumericProperty(dp(25))
"""
Text size for title.
:attr:`title_text_size` is an :class:`~kivy.properties.NumericProperty`
and defaults to `dp(25)`.
"""
title_text_color = ListProperty([1, 1, 1, 1])
"""
Text color for title.
:attr:`title_text_color` is an :class:`~kivy.properties.ListProperty`
and defaults to `[1, 1, 1, 1]`.
"""
title_text_bold = BooleanProperty(True)
"""
Whether title should be bold.
:attr:`title_text_bold` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `True`.
"""
description_text = StringProperty()
"""
Description to be shown below the title (keep it short).
:attr:`description_text` is an :class:`~kivy.properties.StringProperty`
and defaults to `''`.
"""
description_text_size = NumericProperty(dp(20))
"""
Text size for description text.
:attr:`description_text_size` is an :class:`~kivy.properties.NumericProperty`
and defaults to `dp(20)`.
"""
description_text_color = ListProperty([0.9, 0.9, 0.9, 1])
"""
    Text color for the description text.
:attr:`description_text_color` is an :class:`~kivy.properties.ListProperty`
and defaults to `[0.9, 0.9, 0.9, 1]`.
"""
description_text_bold = BooleanProperty(False)
"""
Whether description should be bold.
:attr:`description_text_bold` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
draw_shadow = BooleanProperty(False)
"""
Whether to show shadow.
:attr:`draw_shadow` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
cancelable = BooleanProperty(False)
"""
Whether clicking outside the outer circle dismisses the view.
:attr:`cancelable` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
widget_position = OptionProperty(
"left",
options=[
"left",
"right",
"top",
"bottom",
"left_top",
"right_top",
"left_bottom",
"right_bottom",
"center",
],
)
"""
Sets the position of the widget on the :attr:`~outer_circle`. Available options are
`'left`', `'right`', `'top`', `'bottom`', `'left_top`', `'right_top`',
`'left_bottom`', `'right_bottom`', `'center`'.
:attr:`widget_position` is an :class:`~kivy.properties.OptionProperty`
and defaults to `'left'`.
"""
title_position = OptionProperty(
"auto",
options=[
"auto",
"left",
"right",
"top",
"bottom",
"left_top",
"right_top",
"left_bottom",
"right_bottom",
],
)
"""
Sets the position of :attr`~title_text` on the outer circle. Only works if
:attr`~widget_position` is set to `'center'`. In all other cases, it
calculates the :attr`~title_position` itself.
Must be set to other than `'auto`' when :attr`~widget_position` is set
to `'center`'.
Available options are `'auto'`, `'left`', `'right`', `'top`', `'bottom`',
`'left_top`', `'right_top`', `'left_bottom`', `'right_bottom`', `'center`'.
:attr:`title_position` is an :class:`~kivy.properties.OptionProperty`
and defaults to `'auto'`.
"""
stop_on_outer_touch = BooleanProperty(False)
"""
Whether clicking on outer circle stops the animation.
:attr:`stop_on_outer_touch` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
stop_on_target_touch = BooleanProperty(True)
"""
Whether clicking on target circle should stop the animation.
:attr:`stop_on_target_touch` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `True`.
"""
state = OptionProperty("close", options=["close", "open"])
"""
State of :class:`~MDTapTargetView`.
:attr:`state` is an :class:`~kivy.properties.OptionProperty`
and defaults to `'close'`.
"""
_outer_radius = NumericProperty(0)
_target_radius = NumericProperty(0)
def __init__(self, **kwargs):
self.ripple_max_dist = dp(90)
self.on_outer_radius(self, self.outer_radius)
self.on_target_radius(self, self.target_radius)
self.anim_ripple = None
self.core_title_text = Label(
markup=True, size_hint=(None, None), bold=self.title_text_bold
)
self.core_title_text.bind(
texture_size=self.core_title_text.setter("size")
)
self.core_description_text = Label(markup=True, size_hint=(None, None))
self.core_description_text.bind(
texture_size=self.core_description_text.setter("size")
)
super().__init__(**kwargs)
self.register_event_type("on_outer_touch")
self.register_event_type("on_target_touch")
self.register_event_type("on_outside_click")
self.register_event_type("on_open")
self.register_event_type("on_close")
if not self.outer_circle_color:
self.outer_circle_color = self.theme_cls.primary_color[:-1]
def _initialize(self):
setattr(self.widget, "_outer_radius", 0)
setattr(self.widget, "_target_radius", 0)
setattr(self.widget, "target_ripple_radius", 0)
setattr(self.widget, "target_ripple_alpha", 0)
# Bind some function on widget event when this function is called
# instead of when the class itself is initialized to prevent all
# widgets of all instances to get bind at once and start messing up.
self.widget.bind(on_touch_down=self._some_func)
def _draw_canvas(self):
_pos = self._ttv_pos()
self.widget.canvas.before.clear()
with self.widget.canvas.before:
# Outer circle.
Color(
*self.outer_circle_color,
self.outer_circle_alpha,
group="ttv_group",
)
_rad1 = self.widget._outer_radius
Ellipse(size=(_rad1, _rad1), pos=_pos[0], group="ttv_group")
# Title text.
Color(*self.title_text_color, group="ttv_group")
Rectangle(
size=self.core_title_text.texture.size,
texture=self.core_title_text.texture,
pos=_pos[1],
group="ttv_group",
)
# Description text.
Color(*self.description_text_color, group="ttv_group")
Rectangle(
size=self.core_description_text.texture.size,
texture=self.core_description_text.texture,
pos=(
_pos[1][0],
_pos[1][1] - self.core_description_text.size[1] - 5,
),
group="ttv_group",
)
# Target circle.
Color(*self.target_circle_color, group="ttv_group")
_rad2 = self.widget._target_radius
Ellipse(
size=(_rad2, _rad2),
pos=(
self.widget.x - (_rad2 / 2 - self.widget.size[0] / 2),
self.widget.y - (_rad2 / 2 - self.widget.size[0] / 2),
),
group="ttv_group",
)
# Target ripple.
Color(
*self.target_circle_color,
self.widget.target_ripple_alpha,
group="ttv_group",
)
_rad3 = self.widget.target_ripple_radius
Ellipse(
size=(_rad3, _rad3),
pos=(
self.widget.x - (_rad3 / 2 - self.widget.size[0] / 2),
self.widget.y - (_rad3 / 2 - self.widget.size[0] / 2),
),
group="ttv_group",
)
def stop(self, *args):
"""Starts widget close animation."""
# It needs a better implementation.
if self.anim_ripple is not None:
self.anim_ripple.unbind(on_complete=self._repeat_ripple)
self.core_title_text.opacity = 0
self.core_description_text.opacity = 0
anim = Animation(
d=0.15,
t="in_cubic",
**dict(
zip(
["_outer_radius", "_target_radius", "target_ripple_radius"],
[0, 0, 0],
)
),
)
anim.bind(on_complete=self._after_stop)
anim.start(self.widget)
def _after_stop(self, *args):
self.widget.canvas.before.remove_group("ttv_group")
args[0].stop_all(self.widget)
elev = getattr(self.widget, "elevation", None)
if elev:
self._fix_elev()
self.dispatch("on_close")
# Don't forget to unbind the function or it'll mess
# up with other next bindings.
self.widget.unbind(on_touch_down=self._some_func)
self.state = "close"
def _fix_elev(self):
with self.widget.canvas.before:
Color(a=self.widget._soft_shadow_a)
Rectangle(
texture=self.widget._soft_shadow_texture,
size=self.widget._soft_shadow_size,
pos=self.widget._soft_shadow_pos,
)
Color(a=self.widget._hard_shadow_a)
Rectangle(
texture=self.widget._hard_shadow_texture,
size=self.widget._hard_shadow_size,
pos=self.widget._hard_shadow_pos,
)
Color(a=1)
def start(self, *args):
"""Starts widget opening animation."""
self._initialize()
self._animate_outer()
self.state = "open"
self.core_title_text.opacity = 1
self.core_description_text.opacity = 1
self.dispatch("on_open")
def _animate_outer(self):
anim = Animation(
d=0.2,
t="out_cubic",
**dict(
zip(
["_outer_radius", "_target_radius"],
[self._outer_radius, self._target_radius],
)
),
)
anim.cancel_all(self.widget)
anim.bind(on_progress=lambda x, y, z: self._draw_canvas())
anim.bind(on_complete=self._animate_ripple)
anim.start(self.widget)
setattr(self.widget, "target_ripple_radius", self._target_radius)
setattr(self.widget, "target_ripple_alpha", 1)
def _animate_ripple(self, *args):
self.anim_ripple = Animation(
d=1,
t="in_cubic",
target_ripple_radius=self._target_radius + self.ripple_max_dist,
target_ripple_alpha=0,
)
self.anim_ripple.stop_all(self.widget)
self.anim_ripple.bind(on_progress=lambda x, y, z: self._draw_canvas())
self.anim_ripple.bind(on_complete=self._repeat_ripple)
self.anim_ripple.start(self.widget)
def _repeat_ripple(self, *args):
setattr(self.widget, "target_ripple_radius", self._target_radius)
setattr(self.widget, "target_ripple_alpha", 1)
self._animate_ripple()
def on_open(self, *args):
"""Called at the time of the start of the widget opening animation."""
def on_close(self, *args):
"""Called at the time of the start of the widget closed animation."""
def on_draw_shadow(self, instance, value):
Logger.warning(
"The shadow adding method will be implemented in future versions"
)
def on_description_text(self, instance, value):
self.core_description_text.text = value
def on_description_text_size(self, instance, value):
self.core_description_text.font_size = value
def on_description_text_bold(self, instance, value):
self.core_description_text.bold = value
def on_title_text(self, instance, value):
self.core_title_text.text = value
def on_title_text_size(self, instance, value):
self.core_title_text.font_size = value
def on_title_text_bold(self, instance, value):
self.core_title_text.bold = value
def on_outer_radius(self, instance, value):
self._outer_radius = self.outer_radius * 2
def on_target_radius(self, instance, value):
self._target_radius = self.target_radius * 2
def on_target_touch(self):
if self.stop_on_target_touch:
self.stop()
def on_outer_touch(self):
if self.stop_on_outer_touch:
self.stop()
def on_outside_click(self):
if self.cancelable:
self.stop()
def _some_func(self, wid, touch):
"""
This function decides which one to dispatch based on the touch
position.
"""
if self._check_pos_target(touch.pos):
self.dispatch("on_target_touch")
elif self._check_pos_outer(touch.pos):
self.dispatch("on_outer_touch")
else:
self.dispatch("on_outside_click")
def _check_pos_outer(self, pos):
"""
Checks if a given `pos` coordinate is within the :attr:`~outer_radius`.
"""
cx = self.circ_pos[0] + self._outer_radius / 2
cy = self.circ_pos[1] + self._outer_radius / 2
r = self._outer_radius / 2
h, k = pos
lhs = (cx - h) ** 2 + (cy - k) ** 2
rhs = r ** 2
if lhs <= rhs:
return True
return False
def _check_pos_target(self, pos):
"""
Checks if a given `pos` coordinate is within the
:attr:`~target_radius`.
"""
cx = self.widget.pos[0] + self.widget.width / 2
cy = self.widget.pos[1] + self.widget.height / 2
r = self._target_radius / 2
h, k = pos
lhs = (cx - h) ** 2 + (cy - k) ** 2
rhs = r ** 2
if lhs <= rhs:
return True
return False
def _ttv_pos(self):
"""
Calculates the `pos` value for outer circle and text
based on the position provided.
:returns: A tuple containing pos for the circle and text.
"""
_rad1 = self.widget._outer_radius
_center_x = self.widget.x - (_rad1 / 2 - self.widget.size[0] / 2)
_center_y = self.widget.y - (_rad1 / 2 - self.widget.size[0] / 2)
if self.widget_position == "left":
circ_pos = (_center_x + _rad1 / 3, _center_y)
title_pos = (_center_x + _rad1 / 1.4, _center_y + _rad1 / 1.4)
elif self.widget_position == "right":
circ_pos = (_center_x - _rad1 / 3, _center_y)
title_pos = (_center_x - _rad1 / 10, _center_y + _rad1 / 1.4)
elif self.widget_position == "top":
circ_pos = (_center_x, _center_y - _rad1 / 3)
title_pos = (_center_x + _rad1 / 4, _center_y + _rad1 / 4)
elif self.widget_position == "bottom":
circ_pos = (_center_x, _center_y + _rad1 / 3)
title_pos = (_center_x + _rad1 / 4, _center_y + _rad1 / 1.2)
# Corner ones need to be at a little smaller distance
# than edge ones that's why _rad1/4.
elif self.widget_position == "left_top":
circ_pos = (_center_x + _rad1 / 4, _center_y - _rad1 / 4)
title_pos = (_center_x + _rad1 / 2, _center_y + _rad1 / 4)
elif self.widget_position == "right_top":
circ_pos = (_center_x - _rad1 / 4, _center_y - _rad1 / 4)
title_pos = (_center_x - _rad1 / 10, _center_y + _rad1 / 4)
elif self.widget_position == "left_bottom":
circ_pos = (_center_x + _rad1 / 4, _center_y + _rad1 / 4)
title_pos = (_center_x + _rad1 / 2, _center_y + _rad1 / 1.2)
elif self.widget_position == "right_bottom":
circ_pos = (_center_x - _rad1 / 4, _center_y + _rad1 / 4)
title_pos = (_center_x, _center_y + _rad1 / 1.2)
else:
# Center.
circ_pos = (_center_x, _center_y)
if self.title_position == "auto":
raise ValueError(
"widget_position='center' requires title_position to be set."
)
elif self.title_position == "left":
title_pos = (_center_x + _rad1 / 10, _center_y + _rad1 / 2)
elif self.title_position == "right":
title_pos = (_center_x + _rad1 / 1.6, _center_y + _rad1 / 2)
elif self.title_position == "top":
title_pos = (_center_x + _rad1 / 2.5, _center_y + _rad1 / 1.3)
elif self.title_position == "bottom":
title_pos = (_center_x + _rad1 / 2.5, _center_y + _rad1 / 4)
elif self.title_position == "left_top":
title_pos = (_center_x + _rad1 / 8, _center_y + _rad1 / 1.4)
elif self.title_position == "right_top":
title_pos = (_center_x + _rad1 / 2, _center_y + _rad1 / 1.3)
elif self.title_position == "left_bottom":
title_pos = (_center_x + _rad1 / 8, _center_y + _rad1 / 4)
elif self.title_position == "right_bottom":
title_pos = (_center_x + _rad1 / 2, _center_y + _rad1 / 3.5)
else:
raise ValueError(
f"'{self.title_position}'"
f"is not a valid value for title_position"
)
self.circ_pos = circ_pos
return circ_pos, title_pos
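# Minimal usage sketch (assumed app code, not part of this module):
#
#   from kivymd.app import MDApp
#   from kivymd.uix.button import MDFloatingActionButton
#   from kivymd.uix.taptargetview import MDTapTargetView
#
#   class DemoApp(MDApp):
#       def build(self):
#           button = MDFloatingActionButton(icon="plus")
#           self.tap_target_view = MDTapTargetView(
#               widget=button,
#               title_text="Add item",
#               description_text="Tap here to create a new entry",
#               widget_position="left_bottom",
#           )
#           return button
#       def on_start(self):
#           self.tap_target_view.start()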
``` |
{
"source": "jonyrock-back/hastic-server",
"score": 2
} |
#### File: analytics/models/trough_model.py
```python
from analytic_types import TimeSeries
from models import TriangleModel
import utils
import scipy.signal
from scipy.signal import argrelextrema
from typing import Optional, List, Tuple
import numpy as np
import pandas as pd
class TroughModel(TriangleModel):
    def get_model_type(self) -> Tuple[str, bool]:
model = 'trough'
type_model = False
return (model, type_model)
def find_segment_center(self, dataframe: pd.DataFrame, start: int, end: int) -> int:
data = dataframe['value']
segment = data[start: end]
return segment.idxmin()
def get_best_pattern(self, close_patterns: TimeSeries, data: pd.Series) -> List[int]:
pattern_list = []
for val in close_patterns:
min_val = data[val[0]]
ind = val[0]
for i in val:
if data[i] < min_val:
min_val = data[i]
ind = i
pattern_list.append(ind)
return pattern_list
def get_extremum_indexes(self, data: pd.Series) -> np.ndarray:
return argrelextrema(data.values, np.less)[0]
def get_smoothed_data(self, data: pd.Series, confidence: float, alpha: float) -> pd.Series:
return utils.exponential_smoothing(data - self.state.confidence, alpha)
def get_possible_segments(self, data: pd.Series, smoothed_data: pd.Series, trough_indexes: List[int]) -> List[int]:
segments = []
for idx in trough_indexes:
if data[idx] < smoothed_data[idx]:
segments.append(idx)
return segments
```
#### File: analytics/services/data_service.py
```python
from services.server_service import ServerMessage, ServerService
import json
import asyncio
"""
This is how you can save a file:
async def test_file_save():
async with data_service.open('filename') as f:
print('write content')
await f.write('test string')
async with data_service.open('filename') as f:
content = await f.load()
print(content)
print('test file ok')
"""
LOCK_WAIT_SLEEP_TIMESPAN = 100  # ms
class FileDescriptor:
def __init__(self, filename: str, data_service):
self.filename = filename
self.data_service = data_service
async def write(self, content: str):
await self.data_service.save_file_content(self, content)
async def load(self) -> str:
return await self.data_service.load_file_content(self)
async def __aenter__(self):
await self.data_service.wait_and_lock(self)
return self
async def __aexit__(self, *exc):
await self.data_service.unlock(self)
class DataService:
def __init__(self, server_service: ServerService):
"""Creates fs over network via server_service"""
self.server_service = server_service
self.locks = set()
def open(self, filename: str) -> FileDescriptor:
return FileDescriptor(filename, self)
async def wait_and_lock(self, file_descriptor: FileDescriptor):
filename = file_descriptor.filename
while True:
if filename in self.locks:
                await asyncio.sleep(LOCK_WAIT_SLEEP_TIMESPAN / 1000)  # constant is in ms, asyncio.sleep takes seconds
continue
else:
self.locks.add(filename)
break
async def unlock(self, file_descriptor: FileDescriptor):
filename = file_descriptor.filename
self.locks.remove(filename)
async def save_file_content(self, file_descriptor: FileDescriptor, content: str):
""" Saves json - serializable obj with file_descriptor.filename """
self.__check_lock(file_descriptor)
message_payload = {
'filename': file_descriptor.filename,
'content': content
}
message = ServerMessage('FILE_SAVE', message_payload)
await self.server_service.send_request_to_server(message)
async def load_file_content(self, file_descriptor: FileDescriptor) -> str:
self.__check_lock(file_descriptor)
message_payload = { 'filename': file_descriptor.filename }
message = ServerMessage('FILE_LOAD', message_payload)
return await self.server_service.send_request_to_server(message)
def __check_lock(self, file_descriptor: FileDescriptor):
filename = file_descriptor.filename
if filename not in self.locks:
raise RuntimeError('No lock for file %s' % filename)
```
#### File: analytics/utils/dataframe.py
```python
from itertools import chain
import pandas as pd
import numpy as np
from typing import Generator
def prepare_data(data: list) -> pd.DataFrame:
"""
    Takes a list of [timestamp, value] pairs and:
    - converts it into a pd.DataFrame,
    - converts the 'timestamp' column to pd.Datetime,
    - fills missing values with NaN
"""
data = pd.DataFrame(data, columns=['timestamp', 'value'])
data['timestamp'] = pd.to_datetime(data['timestamp'], unit='ms')
data.fillna(value = np.nan, inplace = True)
return data
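# Illustrative call (hypothetical values):
#   prepare_data([[1523889000000, 1.0], [1523889000060, None]])
#   -> DataFrame with a pd.Datetime 'timestamp' column and NaN in place of the missing value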
def get_intersected_chunks(data: list, intersection: int, chunk_size: int) -> Generator[list, None, None]:
"""
    Returns a generator that splits the data into intersecting chunks.
    The intersection makes it possible to detect a pattern that lies on the border between chunks.
intersection - length of intersection.
chunk_size - length of chunk
"""
    assert chunk_size > 0, 'chunk size must be greater than zero'
    assert intersection > 0, 'intersection length must be greater than zero'
data_len = len(data)
if data_len <= chunk_size:
yield data
return
nonintersected = chunk_size - intersection
offset = 0
while True:
left_values = data_len - offset
if left_values == 0:
break
if left_values <= chunk_size:
yield data[offset : data_len]
break
else:
yield data[offset: offset + chunk_size]
offset += min(nonintersected, left_values)
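# Illustrative sketch (not part of the original module): with chunk_size=4 and
# intersection=2, ten points are split into overlapping windows, so a pattern
# sitting on a chunk border is still seen in full in one of the chunks:
#   list(get_intersected_chunks(list(range(10)), intersection=2, chunk_size=4))
#   -> [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]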
def get_chunks(data: list, chunk_size: int) -> Generator[list, None, None]:
"""
    Returns a generator that splits the data into non-intersecting chunks.
chunk_size - length of chunk
"""
    assert chunk_size > 0, 'chunk size must be greater than zero'
chunks_iterables = [iter(data)] * chunk_size
result_chunks = zip(*chunks_iterables)
partial_chunk_len = len(data) % chunk_size
if partial_chunk_len != 0:
result_chunks = chain(result_chunks, [data[-partial_chunk_len:]])
for chunk in result_chunks:
yield list(chunk)
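# Illustrative sketch (not part of the original module):
#   list(get_chunks(list(range(7)), chunk_size=3)) -> [[0, 1, 2], [3, 4, 5], [6]]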
```
#### File: analytics/utils/meta.py
```python
from inspect import signature, Parameter
from functools import wraps
from typing import Optional
import re
CAMEL_REGEX = re.compile(r'([A-Z])')
UNDERSCORE_REGEX = re.compile(r'_([a-z])')
def camel_to_underscore(name):
#TODO: need to rename 'from'/'to' to 'from_timestamp'/'to_timestamp' everywhere(in analytics, server, panel)
if name == 'from' or name == 'to':
name += '_timestamp'
return CAMEL_REGEX.sub(lambda x: '_' + x.group(1).lower(), name)
def underscore_to_camel(name):
if name == 'from_timestamp' or name == 'to_timestamp':
name = name.replace('_timestamp', '')
return UNDERSCORE_REGEX.sub(lambda x: x.group(1).upper(), name)
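# Illustrative conversions (sketch, not part of the original module):
#   camel_to_underscore('analyticUnitId')    -> 'analytic_unit_id'
#   camel_to_underscore('from')              -> 'from_timestamp'
#   underscore_to_camel('analytic_unit_id')  -> 'analyticUnitId'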
def is_field_private(field_name: str) -> Optional[str]:
m = re.match(r'_[^(__)]+__', field_name)
return m is not None
def serialize(obj):
    if hasattr(obj, 'to_json'):
return obj.to_json()
else:
return obj
def inited_params(target_init):
target_params = signature(target_init).parameters.values()
if len(target_params) < 1:
        raise ValueError('init function must have at least the self parameter')
if len(target_params) == 1:
return target_init
_, *target_params = target_params # we will not use self any more
@wraps(target_init)
def wrapped_init(wrapped_self, *wrapped_args, **wrapped_kwargs):
for tp in target_params:
if tp.default is Parameter.empty:
continue
setattr(wrapped_self, tp.name, tp.default)
for tp, v in zip(target_params, wrapped_args):
setattr(wrapped_self, tp.name, v)
for k, v in wrapped_kwargs.items():
setattr(wrapped_self, k, v)
target_init(wrapped_self, *wrapped_args, **wrapped_kwargs)
return wrapped_init
def JSONClass(target_class):
def to_json(self) -> dict:
"""
returns a json representation of the class
        where all None values and private fields are skipped
"""
return {
underscore_to_camel(k): serialize(v) for k, v in self.__dict__.items()
if v is not None and not is_field_private(k)
}
def from_json(json_object: Optional[dict]) -> target_class:
if json_object is None:
json_object = {}
init_object = { camel_to_underscore(k): v for k, v in json_object.items() }
return target_class(**init_object)
# target_class.__init__ = inited_params(target_class.__init__)
target_class.to_json = to_json
target_class.from_json = from_json
return target_class
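# Illustrative usage (hypothetical class, not part of the original module):
#   @JSONClass
#   class TimeRange:
#       def __init__(self, from_timestamp, to_timestamp):
#           self.from_timestamp = from_timestamp
#           self.to_timestamp = to_timestamp
#   TimeRange(1, 2).to_json()                                  -> {'from': 1, 'to': 2}
#   TimeRange.from_json({'from': 1, 'to': 2}).from_timestamp   -> 1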
class SerializableList(list):
def to_json(self):
return list(map(lambda s: s.to_json(), self))
```
#### File: analytics/tests/test_bucket.py
```python
import unittest
import pandas as pd
import random
from typing import List
from analytic_types.data_bucket import DataBucket
from tests.test_dataset import create_list_of_timestamps
class TestBucket(unittest.TestCase):
def test_receive_data(self):
bucket = DataBucket()
data_val = list(range(6))
timestamp_list = create_list_of_timestamps(len(data_val))
for val in data_val:
bucket.receive_data(get_pd_dataframe([val], [1523889000000 + val]))
for idx, row in bucket.data.iterrows():
self.assertEqual(data_val[idx], row['value'])
self.assertEqual(timestamp_list[idx], row['timestamp'])
def test_drop_data(self):
bucket = DataBucket()
data_val = list(range(10))
timestamp_list = create_list_of_timestamps(len(data_val))
bucket.receive_data(get_pd_dataframe(data_val, timestamp_list))
bucket.drop_data(5)
expected_data = data_val[5:]
expected_timestamp = timestamp_list[5:]
self.assertEqual(expected_data, bucket.data['value'].tolist())
self.assertEqual(expected_timestamp, bucket.data['timestamp'].tolist())
def get_pd_dataframe(value: List[int], timestamp: List[int]) -> pd.DataFrame:
    if len(value) != len(timestamp):
        raise ValueError(f'len(value) should be equal to len(timestamp)')
    return pd.DataFrame({ 'value': value, 'timestamp': timestamp })
if __name__ == '__main__':
    unittest.main()
```
#### File: analytics/tests/test_dataset.py
```python
import unittest
import pandas as pd
import numpy as np
from utils import prepare_data
import models
import random
import scipy.signal
from typing import List
from analytic_types.segment import Segment
class TestDataset(unittest.TestCase):
def test_models_with_corrupted_dataframe(self):
data = [[1523889000000 + i, float('nan')] for i in range(10)]
dataframe = pd.DataFrame(data, columns=['timestamp', 'value'])
segments = []
model_instances = [
models.JumpModel(),
models.DropModel(),
models.GeneralModel(),
models.PeakModel(),
models.TroughModel()
]
for model in model_instances:
model_name = model.__class__.__name__
model.state = model.get_state(None)
with self.assertRaises(AssertionError):
model.fit(dataframe, segments, 'test')
def test_peak_antisegments(self):
data_val = [1.0, 1.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0, 5.0, 7.0, 5.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False},
{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000003, 'to': 1523889000005, 'labeled': False, 'deleted': True}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.PeakModel()
model_name = model.__class__.__name__
model.state = model.get_state(None)
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_jump_antisegments(self):
data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 9.0, 1.0, 1.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000016, 'labeled': True, 'deleted': False},
{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000002, 'to': 1523889000008, 'labeled': False, 'deleted': True}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.JumpModel()
model_name = model.__class__.__name__
model.state = model.get_state(None)
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_trough_antisegments(self):
data_val = [9.0, 9.0, 9.0, 9.0, 7.0, 4.0, 7.0, 9.0, 9.0, 9.0, 5.0, 1.0, 5.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False},
{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000003, 'to': 1523889000005, 'labeled': False, 'deleted': True}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.TroughModel()
model_name = model.__class__.__name__
model.state = model.get_state(None)
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_drop_antisegments(self):
data_val = [9.0, 9.0, 9.0, 9.0, 9.0, 5.0, 5.0, 5.0, 5.0, 9.0, 9.0, 9.0, 9.0, 1.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000016, 'labeled': True, 'deleted': False},
{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000002, 'to': 1523889000008, 'labeled': False, 'deleted': True}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.DropModel()
model_name = model.__class__.__name__
model.state = model.get_state(None)
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_general_antisegments(self):
data_val = [1.0, 2.0, 1.0, 2.0, 5.0, 6.0, 3.0, 2.0, 1.0, 1.0, 8.0, 9.0, 8.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False},
{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000003, 'to': 1523889000005, 'labeled': False, 'deleted': True}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.GeneralModel()
model_name = model.__class__.__name__
model.state = model.get_state(None)
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_jump_empty_segment(self):
data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000019, 'to': 1523889000025, 'labeled': True, 'deleted': False},
{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000002, 'to': 1523889000008, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.JumpModel()
model_name = model.__class__.__name__
model.state = model.get_state(None)
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_drop_empty_segment(self):
data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000019, 'to': 1523889000025, 'labeled': True, 'deleted': False},
{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000002, 'to': 1523889000008, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.DropModel()
model.state = model.get_state(None)
model_name = model.__class__.__name__
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_value_error_dataset_input_should_have_multiple_elements(self):
data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 5.0, 4.0, 5.0, 5.0, 6.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0,3.0,3.0,2.0,7.0,8.0,9.0,8.0,7.0,6.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000007, 'to': 1523889000011, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.JumpModel()
model.state = model.get_state(None)
model_name = model.__class__.__name__
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_prepare_data_for_nonetype(self):
data = [[1523889000000, None], [1523889000001, None], [1523889000002, None]]
try:
data = prepare_data(data)
except ValueError:
            self.fail('prepare_data raised ValueError unexpectedly')
def test_prepare_data_for_nan(self):
data = [[1523889000000, np.nan], [1523889000001, np.nan], [1523889000002, np.nan]]
try:
data = prepare_data(data)
except ValueError:
            self.fail('prepare_data raised ValueError unexpectedly')
def test_prepare_data_output_fon_nan(self):
data_nan = [[1523889000000, np.nan], [1523889000001, np.nan], [1523889000002, np.nan]]
data_none = [[1523889000000, None], [1523889000001, None], [1523889000002, None]]
return_data_nan = prepare_data(data_nan)
return_data_none = prepare_data(data_none)
for item in return_data_nan.value:
self.assertTrue(np.isnan(item))
for item in return_data_none.value:
self.assertTrue(np.isnan(item))
def test_three_value_segment(self):
data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 2.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 2.0, 3.0, 4.0, 5.0, 4.0, 2.0, 1.0, 3.0, 4.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000004, 'to': 1523889000006, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
model_instances = [
models.GeneralModel(),
models.PeakModel(),
]
try:
for model in model_instances:
model_name = model.__class__.__name__
model.state = model.get_state(None)
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_general_for_two_labeling(self):
data_val = [1.0, 2.0, 5.0, 2.0, 1.0, 1.0, 3.0, 6.0, 4.0, 2.0, 1.0, 0, 0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000001, 'to': 1523889000003, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
model = models.GeneralModel()
model.state = model.get_state(None)
        model.fit(dataframe, segments, 'test')
result = len(data_val) + 1
for _ in range(2):
model.do_detect(dataframe)
max_pattern_index = max(model.do_detect(dataframe))
self.assertLessEqual(max_pattern_index[0], result)
def test_peak_model_for_cache(self):
cache = {
'patternCenter': [1, 6],
'patternModel': [1, 4, 0],
'confidence': 2,
'convolveMax': 8,
'convolveMin': 7,
'windowSize': 1,
'convDelMin': 0,
'convDelMax': 0,
'heightMax': 4,
'heightMin': 4,
}
data_val = [2.0, 5.0, 1.0, 1.0, 1.0, 2.0, 5.0, 1.0, 1.0, 2.0, 3.0, 7.0, 1.0, 1.0, 1.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
model = models.PeakModel()
model.state = model.get_state(cache)
result = model.fit(dataframe, segments, 'test')
self.assertEqual(len(result.pattern_center), 3)
def test_trough_model_for_cache(self):
cache = {
'patternCenter': [2, 6],
'patternModel': [5, 0.5, 4],
'confidence': 2,
'convolveMax': 8,
'convolveMin': 7,
'window_size': 1,
'convDelMin': 0,
'convDelMax': 0,
}
data_val = [5.0, 5.0, 1.0, 4.0, 5.0, 5.0, 0.0, 4.0, 5.0, 5.0, 6.0, 1.0, 5.0, 5.0, 5.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
model = models.TroughModel()
model.state = model.get_state(cache)
result = model.fit(dataframe, segments, 'test')
self.assertEqual(len(result.pattern_center), 3)
def test_jump_model_for_cache(self):
cache = {
'patternCenter': [2, 6],
'patternModel': [5, 0.5, 4],
'confidence': 2,
'convolveMax': 8,
'convolveMin': 7,
'window_size': 1,
'convDelMin': 0,
'convDelMax': 0,
}
data_val = [1.0, 1.0, 1.0, 4.0, 4.0, 0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 4.0, 4.0, 4.0, 4.0]
dataframe = create_dataframe(data_val)
        segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000009, 'to': 1523889000013, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
model = models.JumpModel()
model.state = model.get_state(cache)
result = model.fit(dataframe, segments, 'test')
self.assertEqual(len(result.pattern_center), 3)
def test_models_for_pattern_model_cache(self):
cache = {
'patternCenter': [4, 12],
'patternModel': [],
'confidence': 2,
'convolveMax': 8,
'convolveMin': 7,
'window_size': 2,
'convDelMin': 0,
'convDelMax': 0,
}
data_val = [5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 0, 0, 0, 0, 0, 0, 6.0, 6.0, 6.0, 1.0, 1.0, 1.0, 1.0, 1.0]
dataframe = create_dataframe(data_val)
segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000019, 'to': 1523889000024, 'labeled': True, 'deleted': False}]
segments = [Segment.from_json(segment) for segment in segments]
try:
model = models.DropModel()
model_name = model.__class__.__name__
model.state = model.get_state(cache)
model.fit(dataframe, segments, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly'.format(model_name))
def test_problem_data_for_random_model(self):
problem_data = [2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 5.0, 5.0, 5.0, 5.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 6.0, 7.0, 8.0, 8.0, 4.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0,
4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0,
4.0, 4.0, 4.0, 4.0, 4.0, 6.0, 5.0, 4.0, 4.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 8.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
data = create_dataframe(problem_data)
cache = {
'patternCenter': [5, 50],
'patternModel': [],
'windowSize': 2,
'convolveMin': 0,
'convolveMax': 0,
'convDelMin': 0,
'convDelMax': 0,
}
max_ws = 20
iteration = 1
for ws in range(1, max_ws):
for _ in range(iteration):
pattern_model = create_random_model(ws)
convolve = scipy.signal.fftconvolve(pattern_model, pattern_model)
cache['windowSize'] = ws
cache['patternModel'] = pattern_model
cache['convolveMin'] = max(convolve)
cache['convolveMax'] = max(convolve)
try:
model = models.GeneralModel()
model.state = model.get_state(cache)
model_name = model.__class__.__name__
model.detect(data, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly with av_model {} and window size {}'.format(model_name, pattern_model, ws))
def test_random_dataset_for_random_model(self):
data = create_random_model(random.randint(1, 100))
data = create_dataframe(data)
model_instances = [
models.PeakModel(),
models.TroughModel()
]
cache = {
'patternCenter': [5, 50],
'patternModel': [],
'windowSize': 2,
'convolveMin': 0,
'convolveMax': 0,
'confidence': 0,
'heightMax': 0,
'heightMin': 0,
'convDelMin': 0,
'convDelMax': 0,
}
        ws = random.randint(1, int(len(data['value']) / 2))
pattern_model = create_random_model(ws)
convolve = scipy.signal.fftconvolve(pattern_model, pattern_model)
confidence = 0.2 * (data['value'].max() - data['value'].min())
cache['windowSize'] = ws
cache['patternModel'] = pattern_model
cache['convolveMin'] = max(convolve)
cache['convolveMax'] = max(convolve)
cache['confidence'] = confidence
cache['heightMax'] = data['value'].max()
cache['heightMin'] = confidence
try:
for model in model_instances:
model_name = model.__class__.__name__
model.state = model.get_state(cache)
model.detect(data, 'test')
except ValueError:
self.fail('Model {} raised unexpectedly with dataset {} and cache {}'.format(model_name, data['value'], cache))
def create_dataframe(data_val: list) -> pd.DataFrame:
    data_ind = create_list_of_timestamps(len(data_val))
    data = {'timestamp': data_ind, 'value': data_val}
    dataframe = pd.DataFrame(data)
    dataframe['timestamp'] = pd.to_datetime(dataframe['timestamp'], unit='ms')
    return dataframe

def create_list_of_timestamps(length: int) -> List[int]:
    return [1523889000000 + i for i in range(length)]

def create_random_model(window_size: int) -> list:
    return [random.randint(0, 100) for _ in range(window_size * 2 + 1)]

if __name__ == '__main__':
    unittest.main()
```
#### File: analytics/tools/send_zmq_message.py
```python
import zmq
import zmq.asyncio
import asyncio
import json
from uuid import uuid4
context = zmq.asyncio.Context()
socket = context.socket(zmq.PAIR)
socket.connect('tcp://0.0.0.0:8002')
def create_message():
message = {
"method": "DATA",
"payload": {
"_id": uuid4().hex,
"analyticUnitId": uuid4().hex,
"type": "PUSH",
"payload": {
"data": [
[
1552652025000,
12.499999999999998
],
[
1552652040000,
12.500000000000002
],
[
1552652055000,
12.499999999999996
],
[
1552652070000,
12.500000000000002
],
[
1552652085000,
12.499999999999998
],
[
1552652100000,
12.5
],
[
1552652115000,
12.83261113785909
]
],
"from": 1552652025001,
"to": 1552652125541,
"analyticUnitType": "GENERAL",
"detector": "pattern",
"cache": {
"pattern_center": [
693
],
"pattern_model": [
1.7763568394002505e-15,
5.329070518200751e-15,
1.7763568394002505e-15,
1.7763568394002505e-15,
1.7763568394002505e-15,
3.552713678800501e-15,
1.7763568394002505e-15,
3.552713678800501e-15,
3.552713678800501e-15,
1.7763568394002505e-15,
1.7763568394002505e-15,
0,
1.7763568394002505e-15,
1.7763568394002505e-15,
0
],
"convolve_max": 7.573064690121713e-29,
"convolve_min": 7.573064690121713e-29,
"WINDOW_SIZE": 7,
"conv_del_min": 7,
"conv_del_max": 7
}
}
}
}
return json.dumps(message)
async def handle_loop():
while True:
received_bytes = await socket.recv()
text = received_bytes.decode('utf-8')
print(text)
async def send_detect():
data = create_message().encode('utf-8')
await socket.send(data)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
socket.send(b'PING')
detects = [send_detect() for i in range(100)]
detects_group = asyncio.gather(*detects)
handle_group = asyncio.gather(handle_loop())
common_group = asyncio.gather(handle_group, detects_group)
loop.run_until_complete(common_group)
```
#### File: tools/prometheus-hastic-exporter/prometheus-hastic-exporter.py
```python
from prometheus_client import start_http_server, Metric, REGISTRY
import json
import requests
import sys
import time
import dateutil.parser as dt
class JsonCollector(object):
def __init__(self, endpoint):
self._endpoint = endpoint
def collect(self):
response = None
try:
resp = requests.get(self._endpoint).content.decode('UTF-8')
response = json.loads(resp)
except Exception as e:
print('got exception, skip polling loop {}'.format(e))
return
commitHash = response.get('git', {}).get('commitHash')
packageVersion = response.get('packageVersion')
metrics = {
'activeWebhooks': response.get('activeWebhooks'),
'ready': int(response.get('analytics', {}).get('ready', 0)),
'tasksQueueLength': response.get('analytics', {}).get('tasksQueueLength'),
'awaitedTasksNumber': response.get('awaitedTasksNumber'),
'detectionsCount': response.get('detectionsCount')
}
for name, value in metrics.items():
if value is not None:
metric = Metric(name, name, 'gauge')
metric.add_sample(name, value=value, labels={'commitHash': commitHash, 'packageVersion': packageVersion})
yield metric
else:
print('{} value is {}, skip metric'.format(name, value))
lastAlive = response.get('analytics', {}).get('lastAlive')
if lastAlive:
lastAlive = int(dt.parse(lastAlive).timestamp()) * 1000 #ms
metric = Metric('lastAlive', 'lastAlive', 'gauge')
metric.add_sample('lastAlive', value=lastAlive, labels={'commitHash': commitHash, 'packageVersion': packageVersion})
yield metric
timestamp = response.get('timestamp')
if timestamp:
timestamp = int(dt.parse(timestamp).timestamp()) * 1000 #ms
metric = Metric('timestamp', 'timestamp', 'gauge')
metric.add_sample('timestamp', value=timestamp, labels={'commitHash': commitHash, 'packageVersion': packageVersion})
yield metric
if __name__ == '__main__':
hastic_url = sys.argv[1]
exporter_port = int(sys.argv[2])
start_http_server(exporter_port)
REGISTRY.register(JsonCollector(hastic_url))
while True: time.sleep(1)
``` |
{
"source": "jonysalgado/machine_learning_tools",
"score": 3
} |
#### File: source/nlp/metrics.py
```python
from .nlp import NLP_base
import numpy as np
import language_tool_python
from collections import Counter
from nltk.translate.bleu_score import sentence_bleu
#-------------------------------------------------------------------------------
# Class Metrics
class Metrics(NLP_base):
def __init__(self, language):
super().__init__(language)
#TODO: check if the language exists on language_tool_python
self.tool = language_tool_python.LanguageTool(language)
def analyse_errors(self, text, except_words):
self.analyse_errors = Analyse_errors(self.tool, text, except_words)
return self.analyse_errors
def errors_score(self, query, except_words, only_speel_error=True,
mean="only"):
self.errors_metric = Errors_metric(self.tool)
return self.errors_metric.score(query, except_words, only_speel_error, mean)
def mapper_errors(self, matches):
self._mapper_errors = Mapper_errors()
self._mapper_errors.update(matches)
return self._mapper_errors.counter_matches
def MBR_score(self, query):
self.mbr = MBR()
return self.mbr.score(query)
#-------------------------------------------------------------------------------
# Class Analyse_errors
class Analyse_errors:
def __init__(self, tool, text, except_words):
self.tool = tool
self.text = text
self.except_words = except_words
self.errors = []
self.speel_errors = []
self.check_speel_text()
def get_errors(self, only_speel_errors=True):
if only_speel_errors:
return self.speel_errors
else:
return self.errors
def check_speel_text(self):
self.errors = self.tool.check(self.text)
errors = []
for error in self.errors:
offset = error.offset
errorLength = error.errorLength
word = self.text[offset:offset+errorLength]
if word not in self.except_words and error.ruleId == 'HUNSPELL_RULE':
errors.append(error)
self.speel_errors = errors
def score(self, only_speel_error=True, mean="both"):
if only_speel_error:
e = len(self.speel_errors)
else:
e = len(self.errors)
if mean == "only":
return e/len(self.text.split(" "))
elif mean == "not":
return e
else:
return e/len(self.text.split(" ")), e
def corrections(self):
dict_corrections = {}
for error in self.speel_errors:
offset = error.offset
errorLength = error.errorLength
replacements = error.replacements
word = self.text[offset:offset+errorLength]
dict_corrections[word] = replacements
return dict_corrections
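
# --- Hypothetical usage sketch (not from the original repository) ---
# Analyse_errors wraps a LanguageTool checker to count and suggest fixes for
# spelling mistakes, ignoring the domain words listed in except_words:
#
# tool = language_tool_python.LanguageTool('pt-BR')          # any supported language
# analyser = Analyse_errors(tool, 'some text to check', except_words=[])
# rate = analyser.score(only_speel_error=True, mean="only")  # spelling errors per word
# fixes = analyser.corrections()                             # {misspelled word: suggested replacements}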
#-------------------------------------------------------------------------------
# Class Errors_metric
class Errors_metric:
def __init__(self, tool):
self.scores = None
        self.tool = tool
def score(self, query, except_words=[], only_speel_error=True, mean="only"):
scores = np.zeros(len(query))
for i in range(len(query)):
scores[i] = self._score(query[i], except_words, only_speel_error, mean)
        self.scores = scores
return scores
def _score(self, text, except_words, only_speel_error, mean):
analyser = Analyse_errors(self.tool, text, except_words)
return analyser.score(only_speel_error, mean)
class Mapper_errors:
def __init__(self):
self.counter_matches = Counter()
self.list_matches = []
def update(self, matches):
for match in matches:
if match.ruleId not in sorted(self.counter_matches.elements()):
self.list_matches.append(match)
self.counter_matches.update([match.ruleId])
#-------------------------------------------------------------------------------
# MBR
class MBR:
def __init__(self):
self.scores = None
def score(self, query):
scores = np.zeros(len(query))
for i in range(len(query)):
compared = query[i]
query_without_compared = query.copy()
query_without_compared.remove(compared)
scores[i] = self._score(compared, query_without_compared)
self.scores = scores
return scores
def _score(self, compared, query_without_compared):
similarity = 0
for i in range(len(query_without_compared)):
reference = query_without_compared[i]
similarity += sentence_bleu(reference, compared)
return similarity/len(query_without_compared)
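
# --- Hypothetical usage sketch (not from the original repository) ---
# MBR scores each candidate sentence by its average BLEU similarity to the
# other candidates; the highest-scoring one is the minimum-Bayes-risk choice:
#
# candidates = ['the cat sat on the mat', 'a cat sat on the mat', 'cats sit on mats']
# mbr = MBR()
# scores = mbr.score(candidates)
# best = candidates[int(np.argmax(scores))]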
``` |
{
"source": "jonysalgado/simulation-template",
"score": 3
} |
#### File: simulation-template/robot_soccer_python/agents.py
```python
import pygame
import numpy as np
from pygame.rect import Rect
from pygame.gfxdraw import pie
from math import sin, cos, fabs, acos, pi, inf
from robot_soccer_python.constants import *
from robot_soccer_python.utils import *
# ______________________________________________________________________________
# class Agent
class Agent:
def __init__(self, pose, max_linear_speed, max_angular_speed, radius):
"""
        Creates a soccer robot agent.
:param pose: the robot's initial pose.
:type pose: Pose
:param max_linear_speed: the robot's maximum linear speed.
:type max_linear_speed: float
:param max_angular_speed: the robot's maximum angular speed.
:type max_angular_speed: float
:param radius: the robot's radius.
:type radius: float
        :param bumper_state: True if the robot collides with other robots or the wall
:type bumper_state: boolean
"""
self.pose = pose
self.linear_speed = 0.0
self.angular_speed = 0.0
self.max_linear_speed = max_linear_speed
self.max_angular_speed = max_angular_speed
self.radius = radius
self.bumper_state = False
self.collision = None
self.collision_player_speed = (0,0)
def set_velocity(self, linear_speed, angular_speed):
"""
Sets the robot's velocity.
:param linear_speed: the robot's linear speed.
:type linear_speed: float
:param angular_speed: the robot's angular speed.
:type angular_speed: float
"""
self.linear_speed = clamp(linear_speed, -self.max_linear_speed,
self.max_linear_speed)
self.angular_speed = clamp(angular_speed, -self.max_angular_speed,
self.max_angular_speed)
def set_bumper_state_collision(self, bumper_state_collision):
"""
        Sets the bumper state and where the agent collided.
        :param bumper_state_collision: whether the bumper detected an obstacle and where the agent collided.
:type bumper_state_collision: tuple
"""
self.bumper_state, self.collision, self.collision_player_speed = bumper_state_collision
def get_bumper_state(self):
"""
Obtains the bumper state.
:return: the bumper state.
:rtype: bool
"""
return self.bumper_state
def get_collision(self):
"""
Obtains the collision.
        :return: where the agent collided.
:rtype: string or int or None
"""
return self.collision
def get_collision_player_speed(self):
return self.collision_player_speed
def move(self):
"""
Moves the robot during one time step.
"""
dt = SAMPLE_TIME
v = self.linear_speed
w = self.angular_speed
# If the angular speed is too low, the complete movement equation fails due to a division by zero.
        # Therefore, in this case, we use the equation obtained by taking the limit as the angular speed
        # approaches zero.
if fabs(self.angular_speed) < 1.0e-3:
self.pose.position.x += v * dt * cos(self.pose.rotation + w * dt / 2.0)
self.pose.position.y += v * dt * sin(self.pose.rotation + w * dt / 2.0)
else:
self.pose.position.x += ((2.0 * v / w) *
cos(self.pose.rotation + w * dt / 2.0) * sin(w * dt / 2.0))
self.pose.position.y += ((2.0 * v / w) *
sin(self.pose.rotation + w * dt / 2.0) * sin(w * dt / 2.0))
self.pose.rotation += w * dt
def update(self):
"""
Updates the robot, including its behavior.
"""
self.move()
# ______________________________________________________________________________
# class Player
class Player(Agent):
"""
Represents a player robot.
"""
def __init__(self, pose, max_linear_speed, max_angular_speed, radius):
Agent.__init__(self, pose, max_linear_speed, max_angular_speed, radius)
self.sensors = Sensors(self)
# ______________________________________________________________________________
# class Ball
class Ball(Agent):
"""
Represents a ball.
"""
def __init__(self, pose, max_linear_speed, max_angular_speed, radius, behavior):
Agent.__init__(self, pose, max_linear_speed, max_angular_speed, radius)
self.behavior = behavior
self.cont_friction = 0
def set_rotation(self, increase):
self.pose.rotation += increase
def set_cont_friction(self, initial, increase):
if initial:
self.cont_friction = 0
else:
self.cont_friction += increase
def update(self):
self.behavior.update(self)
# ______________________________________________________________________________
# class Sensors
class Sensors:
"""
Represents the sensors of a player.
"""
def __init__(self, agent):
self.flag_points = self.init_flag_points()
self.agent_center = agent.pose
self.full_vision = None
def set_full_vision(self, full_vision):
self.full_vision = full_vision
def init_flag_points(self):
"""
Find the flags around the field.
        :return: a list of points.
        :rtype: list
"""
points = []
for i in range(11):
points.append((round(SCREEN_WIDTH * i/10), 0))
for i in range(1,11):
points.append((SCREEN_WIDTH, round(SCREEN_HEIGHT * i/10)))
for i in range(10):
points.append((round(SCREEN_WIDTH * i/10), SCREEN_HEIGHT))
for i in range(1,10):
points.append((0, round(SCREEN_HEIGHT * i/10)))
return points
def calculate_distance(self, agent, list_centers):
"""
        Calculate the vector distances between the agent and the other players, the ball and the flags.
        :param agent: the agent for which we are calculating the distances.
        :type agent: Player
        :param list_centers: the list of center positions of the players and the ball.
        :type list_centers: Pose.position
        :return: list of distance vectors to the points
        :rtype: list
"""
self.agent_center = agent.pose
points = self.flag_points + list_centers
dirvector_list = []
for point in points:
center = Vector2(self.agent_center.position.x * M2PIX,
self.agent_center.position.y * M2PIX)
dirvector = Vector2(*point).dirvector(center)
dirvector = self.is_visible(dirvector)
dirvector_list.append(dirvector)
return dirvector_list
def is_visible(self, vector):
"""
        Checks if a point is visible for an agent.
        :param vector: vector that links the center of the agent to the point in question.
        :type vector: Vector2
        :return: the same vector if it is visible, and an infinity vector if it isn't.
        :rtype: Vector2
"""
if not self.full_vision:
vector_agent = TransformCartesian(1, self.agent_center.rotation)
vector_agent = Vector2(vector_agent.x, vector_agent.y)
angle = acos(vector_agent.dot(vector)/vector.magnitude())
if angle <= pi/4:
return vector
return Vector2(inf, inf)
return vector
# ______________________________________________________________________________
# class Environment
class Environment:
"""
Represents the environment of simulation.
"""
def __init__(self, window):
self.window = window
self.font = pygame.font.SysFont('Comic Sans MS', 20)
self.list_centers = None
self.list_radius = None
self.list_rotation = None
def draw(self, params):
"""
        This method calls all the other drawing methods.
:param params: params for drawing the window.
"""
self.update(params)
self.draw_field()
self.draw_players_and_ball()
self.draw_soccer_goal_and_scoreboard()
self.draw_vision()
def draw_players_and_ball(self):
"""
        Draws the players and the ball.
"""
# draw players
for i in range(1, len(self.list_centers)):
center = self.list_centers[i]
final_position = self.list_radius[i] * np.array([cos(self.list_rotation[i]),
sin(self.list_rotation[i])]) + center
if i <= len(self.list_centers)/2:
color = RED_COLOR
else:
color = YELLOW_COLOR
# Drawing player's inner circle
pygame.draw.circle(self.window, color, (center[0], center[1]),
self.list_radius[i], 0)
# Drawing player's outer circle
pygame.draw.circle(self.window, GRAY_COLOR, (center[0], center[1]),
self.list_radius[i], 4)
# Drawing player's orientation
pygame.draw.line(self.window, GRAY_COLOR, (center[0], center[1]),
(final_position[0], final_position[1]), 3)
# draw ball
center = self.list_centers[0]
# Drawing player's inner circle
pygame.draw.circle(self.window, WHITE_COLOR, (center[0], center[1]),
self.list_radius[0], 0)
def draw_field(self):
"""
        Draws the soccer field on the environment's window.
"""
self.window.fill((35,142,35))
pygame.draw.circle(self.window, (255,255,255), (round(SCREEN_WIDTH/2),
round(SCREEN_HEIGHT/2)), 70, 3)
pygame.draw.line(self.window, (255,255,255), (round(SCREEN_WIDTH/2), 30),
(round(SCREEN_WIDTH/2), SCREEN_HEIGHT - 30), 3)
pygame.draw.line(self.window, (255,255,255), (30, 30),
(round(SCREEN_WIDTH)-30, 30), 3)
pygame.draw.line(self.window, (255,255,255), (30, 30),
(30, round(SCREEN_HEIGHT)-30), 3)
pygame.draw.line(self.window, (255,255,255), (round(SCREEN_WIDTH)-30, 30),
(round(SCREEN_WIDTH)-30, round(SCREEN_HEIGHT)-30), 3)
pygame.draw.line(self.window, (255,255,255), (30, round(SCREEN_HEIGHT)-30),
(round(SCREEN_WIDTH)-30, round(SCREEN_HEIGHT)-30), 3)
def draw_soccer_goal_and_scoreboard(self):
"""
Drawing soccer goal and scoreboard.
"""
scoreboard="Left " + str(self.left_goal) + " x " + str(self.right_goal) + " Right"
textsurface = self.font.render(scoreboard, False, WHITE_COLOR)
# Drawing soccer goal
pygame.draw.rect(self.window, (0, 0, 0),
Rect(0, round(SCREEN_HEIGHT)/2-100, 30, 200))
pygame.draw.rect(self.window, (0, 0, 0),
Rect(round(SCREEN_WIDTH)-30, round(SCREEN_HEIGHT)/2-100, 30, 200))
# scoreboard
pygame.draw.rect(self.window, (0, 0, 0),
Rect(28, round(SCREEN_HEIGHT-30), 250, 30))
self.window.blit(textsurface, (40,round(SCREEN_HEIGHT-30)))
def draw_vision(self):
"""
        Draws the vision cone of each player.
"""
for i in range(1, len(self.list_centers)):
center = self.list_centers[i]
pie(self.window, center[0], center[1], round(2.5 * self.list_radius[i]),
(int(RADIAN_TO_DEGREE * self.list_rotation[i])-45)%360,
(int(RADIAN_TO_DEGREE * self.list_rotation[i])+45)%360 , WHITE_COLOR)
def update(self, params):
"""
Update params of environment.
:param params: params for drawing the window.
"""
self.window = params["window"]
self.list_centers = params["list_centers"]
self.list_radius = params["list_radius"]
self.list_rotation = params["list_rotation"]
self.left_goal = params["left_goal"]
self.right_goal = params["right_goal"]
``` |
{
"source": "jonywtf/grpc",
"score": 2
} |
#### File: grpc/_adapter/_links_test.py
```python
import threading
import unittest
from grpc._adapter import _proto_scenarios
from grpc._adapter import _test_links
from grpc._adapter import fore
from grpc._adapter import rear
from grpc.framework.base import interfaces
from grpc.framework.base.packets import packets as tickets
from grpc.framework.foundation import logging_pool
_IDENTITY = lambda x: x
_TIMEOUT = 2
class RoundTripTest(unittest.TestCase):
def setUp(self):
self.fore_link_pool = logging_pool.pool(80)
self.rear_link_pool = logging_pool.pool(80)
def tearDown(self):
self.rear_link_pool.shutdown(wait=True)
self.fore_link_pool.shutdown(wait=True)
def testZeroMessageRoundTrip(self):
test_operation_id = object()
test_method = 'test method'
test_fore_link = _test_links.ForeLink(None, None)
def rear_action(front_to_back_ticket, fore_link):
if front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE):
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None)
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: None}, {test_method: None}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool, {test_method: None},
{test_method: None}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
front_to_back_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
rear_link.accept_front_to_back_ticket(front_to_back_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_fore_link.condition:
self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION)
def testEntireRoundTrip(self):
test_operation_id = object()
test_method = 'test method'
test_front_to_back_datum = b'\x07'
test_back_to_front_datum = b'\x08'
test_fore_link = _test_links.ForeLink(None, None)
rear_sequence_number = [0]
def rear_action(front_to_back_ticket, fore_link):
if front_to_back_ticket.payload is None:
payload = None
else:
payload = test_back_to_front_datum
terminal = front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
if payload is not None or terminal:
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, rear_sequence_number[0],
tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
payload)
rear_sequence_number[0] += 1
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: _IDENTITY},
{test_method: _IDENTITY}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool, {test_method: _IDENTITY},
{test_method: _IDENTITY}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
front_to_back_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
interfaces.ServicedSubscription.Kind.FULL, None,
test_front_to_back_datum, _TIMEOUT)
rear_link.accept_front_to_back_ticket(front_to_back_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_rear_link.condition:
front_to_back_payloads = tuple(
ticket.payload for ticket in test_rear_link.tickets
if ticket.payload is not None)
with test_fore_link.condition:
back_to_front_payloads = tuple(
ticket.payload for ticket in test_fore_link.tickets
if ticket.payload is not None)
self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads)
self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads)
def _perform_scenario_test(self, scenario):
test_operation_id = object()
test_method = scenario.method()
test_fore_link = _test_links.ForeLink(None, None)
rear_lock = threading.Lock()
rear_sequence_number = [0]
def rear_action(front_to_back_ticket, fore_link):
with rear_lock:
if front_to_back_ticket.payload is not None:
response = scenario.response_for_request(front_to_back_ticket.payload)
else:
response = None
terminal = front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
if response is not None or terminal:
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, rear_sequence_number[0],
tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
response)
rear_sequence_number[0] += 1
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: scenario.deserialize_request},
{test_method: scenario.serialize_response}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool,
{test_method: scenario.serialize_request},
{test_method: scenario.deserialize_response}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
commencement_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method,
interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
fore_sequence_number = 1
rear_link.accept_front_to_back_ticket(commencement_ticket)
for request in scenario.requests():
continuation_ticket = tickets.FrontToBackPacket(
test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION,
None, None, None, request, None)
fore_sequence_number += 1
rear_link.accept_front_to_back_ticket(continuation_ticket)
completion_ticket = tickets.FrontToBackPacket(
test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None,
None, None, None, None)
fore_sequence_number += 1
rear_link.accept_front_to_back_ticket(completion_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_rear_link.condition:
requests = tuple(
ticket.payload for ticket in test_rear_link.tickets
if ticket.payload is not None)
with test_fore_link.condition:
responses = tuple(
ticket.payload for ticket in test_fore_link.tickets
if ticket.payload is not None)
self.assertTrue(scenario.verify_requests(requests))
self.assertTrue(scenario.verify_responses(responses))
def testEmptyScenario(self):
self._perform_scenario_test(_proto_scenarios.EmptyScenario())
def testBidirectionallyUnaryScenario(self):
self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())
def testBidirectionallyStreamingScenario(self):
self._perform_scenario_test(
_proto_scenarios.BidirectionallyStreamingScenario())
if __name__ == '__main__':
unittest.main()
```
#### File: framework/base/interfaces.py
```python
import abc
import enum
# stream is referenced from specification in this module.
from grpc.framework.foundation import stream # pylint: disable=unused-import
@enum.unique
class Outcome(enum.Enum):
"""Operation outcomes."""
COMPLETED = 'completed'
CANCELLED = 'cancelled'
EXPIRED = 'expired'
RECEPTION_FAILURE = 'reception failure'
TRANSMISSION_FAILURE = 'transmission failure'
SERVICER_FAILURE = 'servicer failure'
SERVICED_FAILURE = 'serviced failure'
class OperationContext(object):
"""Provides operation-related information and action.
Attributes:
trace_id: A uuid.UUID identifying a particular set of related operations.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def is_active(self):
"""Describes whether the operation is active or has terminated."""
raise NotImplementedError()
@abc.abstractmethod
def add_termination_callback(self, callback):
"""Adds a function to be called upon operation termination.
Args:
callback: A callable that will be passed an Outcome value.
"""
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
"""Describes the length of allowed time remaining for the operation.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the operation to complete before it is considered to have
timed out.
"""
raise NotImplementedError()
@abc.abstractmethod
def fail(self, exception):
"""Indicates that the operation has failed.
Args:
exception: An exception germane to the operation failure. May be None.
"""
raise NotImplementedError()
class Servicer(object):
"""Interface for service implementations."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def service(self, name, context, output_consumer):
"""Services an operation.
Args:
name: The name of the operation.
context: A ServicerContext object affording contextual information and
actions.
output_consumer: A stream.Consumer that will accept output values of
the operation.
Returns:
A stream.Consumer that will accept input values for the operation.
Raises:
exceptions.NoSuchMethodError: If this Servicer affords no method with the
given name.
abandonment.Abandoned: If the operation has been aborted and there no
longer is any reason to service the operation.
"""
raise NotImplementedError()
class Operation(object):
"""Representation of an in-progress operation.
Attributes:
consumer: A stream.Consumer into which payloads constituting the operation's
input may be passed.
context: An OperationContext affording information and action about the
operation.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def cancel(self):
"""Cancels this operation."""
raise NotImplementedError()
class ServicedIngestor(object):
"""Responsible for accepting the result of an operation."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def consumer(self, operation_context):
"""Affords a consumer to which operation results will be passed.
Args:
operation_context: An OperationContext object for the current operation.
Returns:
A stream.Consumer to which the results of the current operation will be
passed.
Raises:
abandonment.Abandoned: If the operation has been aborted and there no
longer is any reason to service the operation.
"""
raise NotImplementedError()
class ServicedSubscription(object):
"""A sum type representing a serviced's interest in an operation.
Attributes:
kind: A Kind value.
ingestor: A ServicedIngestor. Must be present if kind is Kind.FULL. Must
be None if kind is Kind.TERMINATION_ONLY or Kind.NONE.
"""
__metaclass__ = abc.ABCMeta
@enum.unique
class Kind(enum.Enum):
"""Kinds of subscription."""
FULL = 'full'
TERMINATION_ONLY = 'termination only'
NONE = 'none'
class End(object):
"""Common type for entry-point objects on both sides of an operation."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def operation_stats(self):
"""Reports the number of terminated operations broken down by outcome.
Returns:
A dictionary from Outcome value to an integer identifying the number
of operations that terminated with that outcome.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_idle_action(self, action):
"""Adds an action to be called when this End has no ongoing operations.
Args:
action: A callable that accepts no arguments.
"""
raise NotImplementedError()
class Front(End):
"""Clientish objects that afford the invocation of operations."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def operate(
self, name, payload, complete, timeout, subscription, trace_id):
"""Commences an operation.
Args:
name: The name of the method invoked for the operation.
payload: An initial payload for the operation. May be None.
complete: A boolean indicating whether or not additional payloads to be
sent to the servicer may be supplied after this call.
timeout: A length of time in seconds to allow for the operation.
subscription: A ServicedSubscription for the operation.
trace_id: A uuid.UUID identifying a set of related operations to which
this operation belongs.
Returns:
An Operation object affording information and action about the operation
in progress.
"""
raise NotImplementedError()
class Back(End):
"""Serverish objects that perform the work of operations."""
__metaclass__ = abc.ABCMeta
``` |
{
"source": "jonyzp/parallelGames",
"score": 2
} |
#### File: aggregatedValue/python/errorPrinter.py
```python
from __future__ import print_function
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
```
#### File: aggregatedValue/python/evaluator.py
```python
import math
# Here we pass an expression string and the evaluator returns its value at x=num
def f(num,s):
funcs = vars(math)
libres = dict(x=num)
a=eval(s, funcs, libres)
return float("%.10f"%a)
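# Example (sketch): f(2.0, "sin(x) + x**2") evaluates sin(2) + 4, i.e. roughly 4.9092974268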
```
#### File: aggregatedValue/python/raicesMultiples.py
```python
from __future__ import print_function
from errorPrinter import eprint
from evaluator import f
import sys
#In this method we calculate if the given point is a root
def raicesMultiples(Xo,tol,nIter):
fx=f(Xo,funcion)
dfx=f(Xo,derivada)
ddfx=f(Xo,segundaderivada)
count=0
error=tol+1
den=(dfx*dfx)-(fx*ddfx)
print "-----------||------------||-------------||-------------||--------------"
print " X f(x) f'(x) f''(x) Error "
print Xo ," | ", fx," | " , dfx," | ", error," | "
    # While the stopping criteria haven't been met, keep iterating towards the root
while fx!=0 and error>tol and den !=0 and count<nIter:
Xi=Xo-(fx*dfx)/den
error=abs(Xi-Xo)
Xo=Xi
fx=f(Xo,funcion)
dfx=f(Xo,derivada)
ddfx=f(Xo,segundaderivada)
den=(dfx *dfx) -(fx * ddfx)
count+=1
        print(Xo, " | ", fx, " | ", dfx, " | ", error, " | ")
    # Here we show the root or the approximation if the given point is not a root
    if (fx == 0):
        print(Xo, " is a root")
    elif (error <= tol):
        print(Xo, " is an approximation with ", tol, " tolerance")
    elif (dfx == 0):
        print("there are possible multiple roots at ", Xo)
    else:
        eprint("failure after N iterations")
if len(sys.argv)==7:
funcion = sys.argv[1]
derivada = sys.argv[2]
segundaderivada = sys.argv[3]
x0 = float (sys.argv[4])
tol = float (sys.argv[5])
itera = int (sys.argv[6])
raicesMultiples(x0,tol,itera)
else:
eprint ("no se pasaron los parametros suficientes para ejecutar el metodo")
```
#### File: parallelGames/methods python/jacobi.py
```python
from pprint import pprint
from numpy import array, zeros, diag, diagflat, dot
def jacobi(A,b,N=25,x=None):
"""Solves the equation Ax=b via the Jacobi iterative method."""
# Create an initial guess if needed
if x is None:
x = zeros(len(A[0]))
# Create a vector of the diagonal elements of A
# and subtract them from A
D = diag(A)
R = A - diagflat(D)
# Iterate for N times
for i in range(N):
x = (b - dot(R,x)) / D
return x
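# Note (added): the Jacobi iteration is guaranteed to converge when A is strictly
# diagonally dominant; for other matrices the loop above may diverge.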
A = array([[2.0,1.0],[5.0,7.0]])
b = array([11.0,13.0])
guess = array([1.0,1.0])
sol = jacobi(A,b,N=25,x=guess)
print("A:")
pprint(A)
print("b:")
pprint(b)
print("x:")
pprint(sol)
``` |
{
"source": "jonzarecki/cookiecutter-test-instance",
"score": 2
} |
#### File: cookiecutter-test-instance/common/logger_config.py
```python
import json
import requests
from loguru import logger
import logging # noqa
logger.add("../out.log", backtrace=True, diagnose=True, rotation="1 week") # Caution, may leak sensitive data in prod
ERRBOT_WEBSERVER_URL = "http://localhost:3141/send_message"
ERRBOT_PROJECT_CHANNEL_ID = "#general"
def _send_to_errbot(msg: str) -> None:
with requests.Session() as s:
s.post(ERRBOT_WEBSERVER_URL, data={"payload": json.dumps({"to": ERRBOT_PROJECT_CHANNEL_ID, "text": msg})})
# logger.add(_send_to_errbot, level="WARNING") # noqa
# can't run PropagateHandler and InterceptHandler together?
class InterceptHandler(logging.Handler):
def emit(self, record): # type: ignore
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
# Find caller from where originated the logged message
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back # type: ignore
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
logging.basicConfig(handlers=[InterceptHandler()], level=0)
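
# Usage sketch (assumption, not part of the original file): with the intercept
# handler installed, standard-library logging is routed through loguru, e.g.:
# logging.getLogger("some.module").warning("this also ends up in out.log")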
```
#### File: cookiecutter_test_instance/tests/test_main.py
```python
from cookiecutter_test_instance.main import main
def test_main_succeeds() -> None:
"""It exits with a status code of zero."""
main()
a = 1
assert a == 1
```
#### File: cookiecutter_test_instance/utils/a_util.py
```python
def util_function() -> int:
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
print("asda")
return 3
```
#### File: jonzarecki/cookiecutter-test-instance/noxfile.py
```python
import os
import shutil
from pathlib import Path
from typing import List
import nox
import toml
from nox import Session
package = "cookiecutter_test_instance"
python_versions = ["3.7"]
nox.needs_version = ">= 2021.6.6"
nox.options.sessions = ("tests", "xdoctest", "docs-build") # , "pre-commit"
pyproject_data = toml.loads(Path("pyproject.toml").read_text())
submodule_paths = []
if os.path.exists(".gitmodules"):
with open(".gitmodules") as f:
lines = [s.strip() for s in f.readlines()]
if "path = common" in lines: # common is not a submodule of a different repo
submodule_paths.append("common")
@nox.session(name="pre-commit", python=python_versions)
def pre_commit(sess: Session) -> None:
"""Run pre-commit on all files."""
sess.install("pre-commit")
sess.run(*"pre-commit install --install-hooks -t pre-commit -t commit-msg -t post-commit -t pre-push".split(" "))
sess.run(*"pre-commit run --all-files".split(" "))
@nox.session(python=False)
def tests(sess: Session) -> None:
"""Run the test suite."""
sess.install("coverage[toml]", "pytest", "pygments")
def add_quotes_and_join(lst: List[str]) -> str:
return ",".join([f"{s}" for s in lst])
omit_paths = ["--omit"] + [
add_quotes_and_join(pyproject_data["tool"]["coverage"]["run"]["omit"] + [f"{p}/**" for p in submodule_paths])
]
run_paths = [p for p in pyproject_data["tool"]["coverage"]["run"]["source"] if p not in submodule_paths]
try:
sess.run("coverage", "run", "--parallel", *omit_paths, "-m", "pytest", *run_paths, *sess.posargs)
finally:
sess.notify("coverage", posargs=[])
@nox.session(python=python_versions)
def coverage(sess: Session) -> None:
"""Produce the coverage report."""
args = sess.posargs or ["report"]
sess.install("coverage[toml]")
if not sess.posargs and any(Path().glob(".cache/.coverage.*")):
# keep .coverage.* files if not interactive (i.e. CI)
sess.run(*(["coverage", "combine"] + (["--keep"] if not sess.interactive else [])))
sess.run("coverage", *args)
@nox.session(python=False)
def xdoctest(sess: Session) -> None:
"""Run examples with xdoctest."""
args = sess.posargs or ["all"]
sess.install("xdoctest[colors]")
sess.run("python", "-m", "xdoctest", package, *args)
@nox.session(name="docs-build", python=python_versions)
def docs_build(sess: Session) -> None:
"""Build the documentation."""
args = sess.posargs or ["docs/source", "docs/_build"]
sess.install("-r", "docs/source/requirements.txt")
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
sess.run("sphinx-build", *args)
@nox.session(python=python_versions)
def docs(sess: Session) -> None:
"""Build and serve the documentation with live reloading on file changes."""
args = sess.posargs or ["--open-browser", "docs/source", "docs/_build"]
sess.install("-r", "docs/source/requirements.txt")
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
sess.run("sphinx-autobuild", *args)
``` |
{
"source": "jonzarecki/coord2vec",
"score": 3
} |
#### File: common/db/postgres.py
```python
import datetime
from typing import List
import geopandas as gpd
import pandas as pd
import pandas.io.sql as sqlio
import psycopg2
import sqlalchemy as sa
from geoalchemy2 import WKTElement, Geography
from geoalchemy2.types import _GISType
from psycopg2._psycopg import connection
from shapely.geometry import Point
from sqlalchemy import create_engine
from coord2vec import config
from coord2vec.common.db.sqlalchemy_utils import insert_into_table, get_temp_table_name
def connect_to_db() -> connection:
"""
Build connection object for the postgres server
"""
conn = psycopg2.connect(host=config.postgis_server_ip, port=config.postgis_port, database='gis', user='renderer')
return conn
def get_sqlalchemy_engine() -> sa.engine.Engine:
return create_engine(
f"postgresql://renderer:@{config.postgis_server_ip}:{config.postgis_port}/gis"
)
# TODO: why 2 get_df. delete one
def get_df(query: str, conn: connection, dispose_conn=False) -> pd.DataFrame:
"""
Executes the query and fetches the results
Args:
query: The sql query
conn: The connection object to the postgres
dispose_conn: Whether to close the connection after the query
Returns:
The results of the query as a DataFrame
"""
res = sqlio.read_sql_query(query, conn)
if dispose_conn:
conn.close()
return res
def save_geo_series_to_tmp_table(geo_series: gpd.GeoSeries, eng: sa.engine.Engine) -> str:
"""
Save a geo series as a table in the db, for better performance
Args:
geo_series: The GeoSeries to be inserted into a db table
eng: SQL Alchemy engine
Returns:
The name of the new table
"""
geo_series = geo_series.rename('geom')
gdf = gpd.GeoDataFrame(geo_series, columns=['geom'], geometry='geom')
gdf['geom'] = gdf.geometry.apply(lambda x: WKTElement(x.wkt, srid=4326))
gdf['geom_id'] = range(len(gdf))
tbl_name = get_temp_table_name()
insert_into_table(eng, gdf, tbl_name, dtypes={'geom': Geography(srid=4326), 'geom_id': sa.INT})
add_postgis_index(eng, tbl_name, 'geom')
return tbl_name
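
# --- Hypothetical usage sketch (not from the original repository) ---
# Persist an in-memory GeoSeries into a temporary PostGIS table and query it back:
#
# eng = get_sqlalchemy_engine()
# tmp_table = save_geo_series_to_tmp_table(gpd.GeoSeries([Point(34.8, 32.1)]), eng)
# df = get_df(f"SELECT geom_id, geom FROM {tmp_table}", connect_to_db(), dispose_conn=True)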
def get_index_str_for_unique(index_columns: List[str], dtypes: dict):
return ",".join([f"ST_GeoHash({col})" if isinstance(dtypes[col], _GISType) else col
for col in index_columns])
def add_postgis_index(eng: sa.engine.Engine, table_name: str, geom_col: str):
with eng.begin() as con:
con.execute(f"create index {table_name}_{geom_col}_idx on {table_name} using gist ({geom_col});")
```
#### File: coord2vec/common/file_util.py
```python
from distutils.dir_util import copy_tree
import os
import random
import tempfile
import shutil
import errno
import pathlib
from typing import List
def shuffle_rows(in_filename, out_filename):
"""
shuffles the rows of $in_filename and puts the output in $out_filename
:param in_filename: file name of the input file
:type in_filename: str
:param out_filename: file name of the output file
:type out_filename: str
:return: None
"""
with open(in_filename, 'r') as source:
data = [(random.random(), line) for line in source]
data.sort()
with open(out_filename, 'w') as target:
for _, line in data:
target.write(line)
def merge_similar_rows(in_filename, out_filename):
"""
    removes duplicate rows from $in_filename and writes the result to $out_filename
:param in_filename: file name of the input file
:type in_filename: str
:param out_filename: file name of the output file
:type out_filename: str
:return: None
"""
lines_seen = set() # holds lines already seen
out_lines = []
with open(in_filename, 'r') as in_file:
for line in in_file:
if line not in lines_seen: # not a duplicate
out_lines.append(line)
lines_seen.add(line)
with open(out_filename, 'w') as out_file:
for line in out_lines:
out_file.write(line)
def create_temp_folder(prefix=None):
"""
creates a new temporary directory and returns it's path
:param prefix: the prefix for the temp folder
:return: full path of the new directory
"""
if prefix is not None:
return tempfile.mkdtemp(prefix=prefix)
else:
return tempfile.mkdtemp()
def copy_folder_contents(src_dir, dst_dir):
# type: (str, str) -> None
"""
copies all files from one directory to another
:param src_dir: path to src directory
:param dst_dir: path to dst directory
:return: None
"""
assert src_dir != dst_dir, "src and dst directories shouldn't be the same, check code"
copy_tree(src_dir, dst_dir)
def delete_folder_with_content(folder_path):
# type: (str) -> None
"""
    Deletes a folder recursively with all its contents (no warnings)
DANGEROUS USE WITH CARE
:param folder_path: The absolute path to folder
:return: None
"""
shutil.rmtree(folder_path)
def makedirs(folder_path, exists_ok=True):
# type: (str, bool) -> None
"""
    Create all folders in the path; doesn't fail if exists_ok is True and the folder already exists
    :param folder_path: the absolute path to the folder
    :param exists_ok: if True, do not raise when the folder already exists
    :return: None
"""
if exists_ok:
try:
os.makedirs(folder_path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
else:
os.makedirs(folder_path)
def list_all_files_in_folder(fold_abspath, file_ext, recursively=True) -> List[str]:
if recursively:
file_list = list(pathlib.Path(fold_abspath).glob('**/*.' + file_ext))
else:
file_list = list(pathlib.Path(fold_abspath).glob('*.' + file_ext))
return [str(p) for p in file_list]
def copy_file(file_path, dst_dir):
shutil.copy(file_path, dst_dir)
def copy_files_while_keeping_structure(files_path_list, orig_dir, dst_dir):
# copy all files to their correct structure in dst_dir
for file_path in files_path_list:
file_dst_parent = os.path.join(dst_dir,
str(pathlib.PosixPath(file_path).relative_to(orig_dir).parent))
makedirs(file_dst_parent)
copy_file(file_path, file_dst_parent)
def readlines(file_path):
with open(file_path, 'r') as f:
file_lines = f.read().splitlines()
return file_lines
```
#### File: coord2vec/common/itertools.py
```python
from typing import List, TypeVar
from future.moves import itertools
_S = TypeVar('_S')
def flatten(l: List[List[_S]]) -> List[_S]:
return list(itertools.chain.from_iterable([(i if isinstance(i, list) else [i]) for i in l]))
```
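A quick, hypothetical usage sketch for the `flatten` helper above; it flattens one level and wraps bare (non-list) items instead of dropping them:
```python
from coord2vec.common.itertools import flatten

# lists and bare items can be mixed freely
assert flatten([[1, 2], 3, [4, 5]]) == [1, 2, 3, 4, 5]
assert flatten([['a'], [], ['b', 'c']]) == ['a', 'b', 'c']
```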
#### File: logging/timing/timer.py
```python
import time
class Timer:
"""
Object used for timing for logs
"""
def __init__(self):
self.start_time = 0
self.running = False
def start(self):
if not self.running:
self.start_time = time.time()
self.running = True
else:
raise Exception("Timer is already running, please stop or reset before starting again")
def stop(self):
if self.running:
self.running = False
seconds_elapsed = time.time() - self.start_time
self.start_time = 0
return seconds_elapsed * 1000
return 0
def reset(self):
self.running = False
self.start_time = 0
```
#### File: mtl/metrics/reconstruction.py
```python
import torch
from scipy.stats import pearsonr
from sklearn.metrics import pairwise_distances
from coord2vec.common.mtl.metrics.mtl_metric import MtlMetric
class DistanceCorrelation(MtlMetric):
"""
    Calculates the Pearson correlation between pairwise distances in the embedding space and in the feature space
- `update` must receive output of the form `(y_pred, y)`.
"""
def reset(self):
self.full_embedding = None
self.full_features = None
def update_mtl(self, data, embedding, loss, multi_losses, y_pred_tensor, y_tensor):
if self.full_embedding is None:
self.full_embedding = embedding
self.full_features = y_tensor
else:
self.full_embedding = torch.cat((self.full_embedding, embedding))
self.full_features = torch.cat((self.full_features, y_tensor))
def compute(self):
x_distance_matrix = pairwise_distances(self.full_embedding.detach().to('cpu'))
y_distance_matrix = pairwise_distances(self.full_features.detach().to('cpu'))
corr_coefficient, p_value = pearsonr(x_distance_matrix.flatten(), y_distance_matrix.flatten())
return corr_coefficient
```
#### File: metrics/tests/test_reconstruction_correlation.py
```python
from unittest import TestCase
import numpy as np
from coord2vec.common.mtl.metrics.reconstruction import DistanceCorrelation
class TestReconstruction_correlation(TestCase):
def test_reconstruction_correlation(self):
self.skipTest("out of date")
embedding = np.random.rand(100, 128)
features = np.random.rand(100, 16)
correlation = DistanceCorrelation().compute(embedding, features)
self.assertGreaterEqual(correlation, -1)
self.assertGreaterEqual(1, correlation)
```
#### File: common/visualizations/photo_slider.py
```python
import os
import pickle as pkl
import numpy as np
from PIL import Image
from auto_tqdm import tqdm
from bokeh.layouts import column
from bokeh.layouts import row
from bokeh.models import CustomJS, ColumnDataSource, Slider
from bokeh.models.glyphs import ImageURL
from bokeh.plotting import figure
from bokeh.plotting import show
from coord2vec.models.data_loading.tile_features_loader import TileFeaturesDataset, SingleTileFeaturesDataset
from tqdm import trange
# from familyGan.load_data import get_files_from_path
def _disable_all_for_pictures(p):
p.toolbar.logo = None
p.toolbar_location = None
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.major_tick_line_color = None # turn off x-axis major ticks
p.xaxis.minor_tick_line_color = None # turn off x-axis minor ticks
p.yaxis.major_tick_line_color = None # turn off y-axis major ticks
p.yaxis.minor_tick_line_color = None # turn off y-axis minor ticks
p.xaxis.major_label_text_font_size = '0pt' # preferred method for removing tick labels
p.yaxis.major_label_text_font_size = '0pt' # preferred method for removing tick labels
return p
def _save_tile_images_to_local_path(dataset: TileFeaturesDataset, max_photos=None) -> (list, list, list):
"""
    :param dataset: the tile dataset whose 3-channel tiles will be saved as separate PNG files
    :param max_photos: maximum number of tiles to save
:return: Returns (a_img_paths, b_img_paths, c_img_paths) with local paths for bokeh
"""
# Save folders in curr folder for bokeh access
os.makedirs("pics/", exist_ok=True)
a_img_paths, b_img_paths, c_img_paths = [], [], []
for i in trange(min(len(dataset), max_photos), desc="loading tiles"):
img_a = dataset[i][0][0]
img_b = dataset[i][0][1]
img_c = dataset[i][0][2]
a_img_p, b_img_p, c_img_p = f'pics/{i}-A.png', f'pics/{i}-B.png', f'pics/{i}-C.png'
def save_to_png(img_channel, p):
            data = (img_channel.numpy() * 255).astype(np.uint8)
# rgbArray[..., 0] = img_channel.numpy() * 255
# rgbArray[..., 2] = img_channel.numpy() * 255
Image.fromarray(data).save(p)
save_to_png(img_a, a_img_p)
save_to_png(img_b, b_img_p)
save_to_png(img_c, c_img_p)
a_img_paths.append(a_img_p)
b_img_paths.append(b_img_p)
c_img_paths.append(c_img_p)
return a_img_paths, b_img_paths, c_img_paths
def multi_channel_tile_slider(dataset: TileFeaturesDataset):
"""
View interactively with bokeh the 3 image tiles
"""
n = 100
a_img_paths, b_img_paths, c_img_paths = _save_tile_images_to_local_path(dataset, n)
# the plotting code
plots = []
sources = []
pathes = [a_img_paths, b_img_paths, c_img_paths]
plot_num = 3
for i in range(plot_num):
p = figure(height=300, width=300)
img_paths = pathes[i]
# print(img_paths)
source = ColumnDataSource(data=dict(url=[img_paths[0]] * n,
url_orig=img_paths,
x=[1] * n, y=[1] * n, w=[1] * n, h=[1] * n))
image = ImageURL(url="url", x="x", y="y", w="w", h="h", anchor="bottom_left")
p.add_glyph(source, glyph=image)
_disable_all_for_pictures(p)
plots.append(p)
sources.append(source)
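    # JS template applied per plot: point every displayed url at the slider-selected example (index f) and re-render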
update_source_str = """
var data = source{i}.data;
url = data['url']
url_orig = data['url_orig']
for (i = 0; i < url_orig.length; i++) {
url[i] = url_orig[f-1]
}
source{i}.change.emit();
"""
# the callback
callback = CustomJS(args=dict(source0=sources[0], source1=sources[1], source2=sources[2]), code=f"""
var f = cb_obj.value;
console.log(f)
{"".join([update_source_str.replace('{i}', str(i)) for i in range(plot_num)])}
""")
slider = Slider(start=1, end=n, value=1, step=1, title="example number")
slider.js_on_change('value', callback)
column_layout = [slider]
curr_row = []
    for i in range(len(plots)):
        if i != 0 and i % 3 == 0:
            column_layout.append(row(*curr_row.copy()))
            curr_row = [plots[i]]  # start the next row with the current plot instead of dropping it
        else:
            curr_row.append(plots[i])
if len(curr_row) != 0:
column_layout.append(row(*curr_row.copy()))
layout = column(*column_layout)
show(layout)
```
#### File: tasks/clstr_search/one_class_baseline.py
```python
from sklearn.base import BaseEstimator
import numpy as np
class BaselineModel(BaseEstimator):
def score_samples(self, features_df):
assert 'number_of_building_0m' in features_df.columns
assert 'area_of_self_0m' in features_df.columns
assert 'building_scores_avg_0m' in features_df.columns
assert 'building_scores_max_0m' in features_df.columns
num_buildings = features_df['number_of_building_0m']
area = features_df['area_of_self_0m']
scores_max = features_df['building_scores_max_0m']
scores_avg = features_df['building_scores_avg_0m']
weights = [1, 1, 1, 1]
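        # hand-tuned heuristic: quadratic penalties centred around ~6 buildings and ~8000 area, plus the max/avg building scores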
        scores = weights[0] * (1 - (((num_buildings - 6) / 6) ** 2)) + \
                 weights[1] * (1 - (((area - 8000) / 8000) ** 2)) + \
                 weights[2] * scores_max + \
                 weights[3] * scores_avg
return scores.values # low is more anomalous
def fit(self, *args):
pass
```
#### File: visualizations/tests/test_bokeh_pr_curve.py
```python
from unittest import TestCase
import numpy as np
from bokeh.models import LayoutDOM
from coord2vec.evaluation.visualizations.bokeh_plots import bokeh_pr_curve_from_y_proba
class TestBokeh_pr_curve(TestCase):
@classmethod
def setUpClass(cls):
cls.y_pred = np.random.choice((0, 1), size=10)
cls.y_true = np.random.choice((0, 1), size=10)
def test_bokeh_pr_curve(self):
fig = bokeh_pr_curve_from_y_proba(self.y_pred, self.y_true, legend='Zarecki is special')
self.assertIsInstance(fig, LayoutDOM)
```
#### File: coord2vec/feature_extraction/feature.py
```python
import hashlib
import time
from abc import ABC, abstractmethod
from typing import Tuple, List, Dict
import geopandas as gpd
import pandas as pd
from geopandas import GeoDataFrame
from shapely.geometry import Point
class Feature(ABC):
def __init__(self, table_filter_dict: Dict[str, Dict[str, str]], radius: float, feature_names: List[str], **kwargs):
# Classes that add apply functions should add them to the dictionary
self.feature_names = feature_names
self.radius = None
self.intersect_tbl_name_dict, self.input_geom_table = None, None
self.default_value = None
self.input_gs = None
self.cache = kwargs.get('cache', None)
self.table_filter_dict = table_filter_dict
assert radius is not None, "Radius is now in feature, update your code"
self.set_radius(radius)
def extract(self, geom_gs: gpd.GeoSeries = None) -> pd.DataFrame:
"""
Applies the feature on the gdf, returns the series after the apply
Args:
            geom_gs: The GeoSeries we want to apply the feature on (its geom column)
Returns:
The return values as a DataFrame with feature_names as columns
"""
geom_gs = geom_gs if geom_gs is not None else self.input_gs
if geom_gs is None:
raise Exception("Must supply a geo-series, either directly or by factory")
calculated_gdf = self._calculate_feature(geom_gs)
calculated_df = calculated_gdf.drop('geom', axis=1, errors='ignore')
return calculated_df
def _calculate_feature(self, input_gs: gpd.GeoSeries):
"""
Calculate a the feature, with the use of a temp table in the db
Args:
input_gs: a gs with geometry column of the geometries to query about
Returns:
a gs with the same geometry column, and the feature columns
"""
raise NotImplementedError()
def extract_single_coord(self, coordinate: Tuple[float, float]) -> pd.DataFrame:
"""
Applies the feature on the gdf, returns the series after the apply
Args:
coordinate: (lat, lon) the coordinate to extract the feature on
Returns:
The return value
"""
return self.extract_coordinates([coordinate])
def extract_coordinates(self, coords: List[Tuple[float, float]]) -> pd.DataFrame:
"""
extract the desired features on desired points
Args:
coords: list of coordinates
Returns:
a pandas dataframe, with columns as features, and rows as the points in gdf
"""
wkt_points = [Point(coord) for coord in coords]
gdf = GeoDataFrame(pd.DataFrame({'geom': wkt_points}), geometry='geom')
return self.extract(gdf)
def set_radius(self, radius: float) -> None:
"""
set the radius of the feature
Args:
radius: the radius in meters
Returns:
None
"""
self.radius = radius
self.feature_names = [f"{name}_{radius}m" for name in self.feature_names]
self.set_default_value(radius)
def set_intersection_table_names(self, tbl_name_to_intersect_tbl_name: Dict[str, str]) -> None:
"""
Set the temporary intersection table name for the feature calculation
Args:
            tbl_name_to_intersect_tbl_name: a mapping from table name to its temporary intersection table name
Returns:
None
"""
self.intersect_tbl_name_dict = tbl_name_to_intersect_tbl_name
def set_input_geom_table(self, table_name: str) -> None:
"""
Set the temporary input geom table name for the feature calculation
Args:
table_name: name of the temporary input geom table
Returns:
None
"""
self.input_geom_table = table_name
def set_input_gs(self, input_gs: gpd.GeoSeries) -> None:
"""
Set the input geo series to be calculated
Args:
            input_gs: geo series with all the geometries to calculate features on
Returns:
None
"""
self.input_gs = input_gs
@abstractmethod
def set_default_value(self, radius) -> float:
"""
Set the default value of the feature, you can use the radius for that
Args:
radius: the radius of the feature
Returns:
The default value to set in the feature
"""
raise NotImplementedError
@abstractmethod
def _build_postgres_query(self) -> str:
raise NotImplementedError
def __str__(self):
return '; '.join(self.feature_names)
```
#### File: features/osm_features/area_of_nearest.py
```python
from shapely.geometry.base import BaseGeometry
import geopandas as gpd
from coord2vec.feature_extraction.osm.base_postgres_feature import BasePostgresFeature
class AreaOfNearest(BasePostgresFeature):
def __init__(self, object_filter: str, table: str, object_name: str = None, **kwargs):
table_filter_dict = {table: {object_name: object_filter}}
feature_name = f"area_of_nearest_{object_name}"
self.object_name = object_name
self.table = table
super().__init__(table_filter_dict=table_filter_dict, feature_names=[feature_name], **kwargs)
def _build_postgres_query(self):
intersection_table = self.intersect_tbl_name_dict[self.table]
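        # for each input geometry, rank the matching objects by distance and keep the area (or length) of the nearest one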
query = f"""
SELECT geom_id, area
FROM (SELECT geom_id,
CASE
when st_geometrytype(t_geom) = 'ST_Polygon' then ST_AREA(t_geom, TRUE)
else ST_Length(t_geom, TRUE)
end area,
row_number() OVER (PARTITION BY geom_id ORDER BY ST_Distance(q_geom, t_geom) ASC, t_geom) r
FROM {intersection_table}
Where {self.object_name} = 1) s
WHERE r = 1
"""
return query
def set_default_value(self, radius):
self.default_value = 0
```
#### File: features/osm_features/heights.py
```python
from shapely.geometry.base import BaseGeometry
import geopandas as gpd
from coord2vec.feature_extraction.osm.base_postgres_feature import BasePostgresFeature
class Heights(BasePostgresFeature):
def __init__(self, object_filter: str, table: str, object_name: str = None, **kwargs):
table_filter_dict = {table: {object_name: object_filter}}
self.agg_functions = ['avg', 'min', 'max', 'stddev']
feature_names = [f"height_of_{object_name}_{agg}" for agg in self.agg_functions]
feature_names += [f"absolute_height_of_{object_name}_{agg}" for agg in self.agg_functions]
self.object_name = object_name
self.table = table
super().__init__(table_filter_dict=table_filter_dict, feature_names=feature_names, **kwargs)
def _build_postgres_query(self):
intersection_table = self.intersect_tbl_name_dict[self.table]
height_agg_sql = ' , '.join(f'{agg}(height) as {agg}' for agg in self.agg_functions)
abs_height_agg_sql = ' , '.join(f'{agg}(absolute_height) as {agg}' for agg in self.agg_functions)
query = f"""
SELECT geom_id, {height_agg_sql}, {abs_height_agg_sql}
FROM {intersection_table} f
where {self.object_name} = 1
GROUP BY geom_id
"""
return query
def set_default_value(self, radius):
self.default_value = 0
```
#### File: features/osm_features/number_of.py
```python
from shapely.geometry.base import BaseGeometry
import geopandas as gpd
from coord2vec.feature_extraction.osm.base_postgres_feature import BasePostgresFeature
class NumberOf(BasePostgresFeature):
def __init__(self, object_filter: str, table: str, object_name: str = None, **kwargs):
table_filter_dict = {table: {object_name: object_filter}}
feature_name = f"number_of_{object_name}"
self.object_name = object_name
self.table = table
super().__init__(table_filter_dict=table_filter_dict, feature_names=[feature_name], **kwargs)
def _build_postgres_query(self):
intersection_table = self.intersect_tbl_name_dict[self.table]
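        # per input geometry, sum the matching objects weighted by their `coverage` value in the intersection table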
query = f"""
SELECT geom_id, SUM({self.object_name}*coverage) AS cnt
FROM {intersection_table} f
where {self.object_name} = 1
GROUP BY geom_id
"""
return query
def set_default_value(self, radius):
self.default_value = {self.feature_names[0]: 0}
```
#### File: ors/tests/test_ors_feature.py
```python
import unittest
import openrouteservice
from coord2vec.feature_extraction.ors.distance_feature import DistanceTimeFeature
from coord2vec.feature_extraction.osm.osm_tag_filters import ROAD, OSM_LINE_TABLE
class TestOrsFeatures(unittest.TestCase):
def test_server_running(self):
coords = ((34.482724, 31.492354), (34.492724, 31.452354))
print("")
feat = DistanceTimeFeature(ROAD, OSM_LINE_TABLE)
feat.extract_coordinates(coords)
# key can be omitted for local host
client = openrouteservice.Client(base_url='http://192.168.127.12:8080/ors')
# Only works if you didn't change the ORS endpoints manually
routes = client.directions(coords, instructions=False, geometry=False)
self.assertIsNotNone(routes)
if __name__ == '__main__':
unittest.main()
```
#### File: image_extraction/tests/test_tile_image.py
```python
import unittest
import numpy as np
from staticmap import StaticMap
from coord2vec.config import IMG_WIDTH, IMG_HEIGHT, tile_server_dns_noport, tile_server_ports
from coord2vec.image_extraction.tile_image import render_single_tile, generate_static_maps, render_multi_channel
from coord2vec.image_extraction.tile_utils import build_tile_extent
class TestTileImage(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.m = StaticMap(IMG_WIDTH, IMG_HEIGHT, url_template=tile_server_dns_noport.replace('{p}',
str(tile_server_ports[0])))
cls.center = [36.1070, 36.7855]
cls.s = generate_static_maps(tile_server_dns_noport, tile_server_ports)
cls.ext = build_tile_extent(cls.center, radius_in_meters=50)
def test_rendering_single_image_works(self):
self.skipTest("Image rendering is obsolete")
image = np.array(render_single_tile(self.m, self.ext))
# self.assertTupleEqual((IMG_HEIGHT, IMG_WIDTH, 3), image.shape)
import matplotlib.pyplot as plt
plt.imshow(image)
# plt.show()
def test_rendering_multi_channel_image_works(self):
self.skipTest("Image rendering is obsolete")
image = render_multi_channel(self.s, self.ext)
self.assertTupleEqual((3, IMG_HEIGHT, IMG_WIDTH), image.shape)
def test_multi_channel_layers_are_just_rgb_converted_to_greyscale(self):
self.skipTest("Image rendering is obsolete")
image_single = render_single_tile(self.m, self.ext)
image_multi = render_multi_channel(self.s, self.ext)
self.assertTrue(np.array_equal(np.array(image_single.convert('L')), image_multi[0, :, :]))
if __name__ == '__main__':
unittest.main()
```
#### File: models/baselines/coord2vec_model.py
```python
import os
import random
from typing import List, Tuple, Callable
import torch
from ignite.contrib.handlers import ProgressBar, LRScheduler
from ignite.handlers import ModelCheckpoint
from sklearn.base import BaseEstimator, TransformerMixin
from torch import nn
from torch import optim
from torch.nn.modules.loss import _Loss, L1Loss
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, MultiStepLR
from torch.utils.data import DataLoader
from ignite.metrics import Metric, RunningAverage
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from coord2vec.common.itertools import flatten
from coord2vec.common.mtl.metrics import EmbeddingData, DistanceCorrelation, RootMeanSquaredError
from coord2vec import config
from coord2vec.config import HALF_TILE_LENGTH, TENSORBOARD_DIR
from coord2vec.feature_extraction.features_builders import FeaturesBuilder
from coord2vec.image_extraction.tile_image import generate_static_maps, render_multi_channel
from coord2vec.image_extraction.tile_utils import build_tile_extent
from coord2vec.models.architectures import dual_fc_head, multihead_model, simple_cnn, simple_head
from coord2vec.models.baselines.tensorboard_utils import TrainExample, \
create_summary_writer, add_metrics_to_tensorboard, add_embedding_visualization, build_example_image_figure
from coord2vec.models.data_loading.tile_features_loader import TileFeaturesDataset
from coord2vec.models.losses import MultiheadLoss
from coord2vec.models.resnet import wide_resnet50_2, resnet18, resnet50, resnet34
class Coord2Vec(BaseEstimator, TransformerMixin):
"""
Wrapper for the coord2vec algorithm
Project's "main"
"""
def __init__(self, feature_builder: FeaturesBuilder, n_channels: int, losses: List[_Loss] = None,
losses_weights: List[float] = None, log_loss: bool = False, exponent_heads: bool = False,
cnn_model: Callable = resnet34, model_save_path: str = None,
embedding_dim: int = 128, multi_gpu: bool = False, cuda_device: int = 0, lr: float = 1e-4,
lr_steps: List[int] = None, lr_gamma: float = 0.1):
"""
Args:
            feature_builder: FeaturesBuilder that defines the features the model learns to predict
n_channels: the number of channels in the input images
losses: a list of losses to use. must be same length of the number of features
losses_weights: weights to give the different losses. if None then equals weights of 1
log_loss: whether to use the log function on the loss before back propagation
embedding_dim: dimension of the embedding to create
multi_gpu: whether to use more than one GPU or not
cuda_device: if multi_gpu==False, choose the GPU to work on
lr: learning rate for the Adam optimizer
            lr_steps: Training steps at which the learning rate is multiplied by lr_gamma
            lr_gamma: The factor by which the learning rate is multiplied at each step in lr_steps
"""
self.model_save_path = model_save_path
self.losses_weights = losses_weights
self.log_loss = log_loss
self.exponent_head = exponent_heads
self.embedding_dim = embedding_dim
self.cnn_model = cnn_model
self.n_channels = n_channels
self.multi_gpu = multi_gpu
if not multi_gpu:
self.device = torch.device(f'cuda:{cuda_device}' if torch.cuda.is_available() else 'cpu')
else:
self.device = torch.device(f'cuda' if torch.cuda.is_available() else 'cpu')
# self.device = 'cpu'
self.feature_names = feature_builder.features_names
self.n_features = len(self.feature_names)
# create L1 losses if not supplied
self.losses = [L1Loss() for _ in range(self.n_features)] if losses is None else losses
assert len(self.losses) == self.n_features, "Number of losses must be equal to number of features"
# create the model
self.model = self._build_model(cnn_model, self.n_channels, self.n_features)
if multi_gpu:
self.model = nn.DataParallel(self.model)
self.model.to(self.device)
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
self.step_scheduler = MultiStepLR(self.optimizer, milestones=lr_steps, gamma=lr_gamma)
def fit(self, train_dataset: TileFeaturesDataset,
val_dataset: TileFeaturesDataset = None,
epochs: int = 10,
batch_size: int = 10,
num_workers: int = 10,
evaluate_every: int = 300,
save_every: int = 1000):
"""
Args:
train_dataset: The dataset object for training data
val_dataset: The dataset object for validation data, optional
epochs: number of epochs to train the network
batch_size: batch size for the network
num_workers: number of workers for the network
evaluate_every: every how many steps to run evaluation
save_every: every how many steps to save the model
Returns:
a trained pytorch model
"""
# create data loader
train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
if val_dataset is not None:
val_data_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
else:
val_data_loader = None
# create the model
criterion = MultiheadLoss(self.losses, use_log=self.log_loss, weights=self.losses_weights).to(self.device)
# create tensorboard
writer = create_summary_writer(self.model, train_data_loader, log_dir=TENSORBOARD_DIR)
def multihead_loss_func(y_pred, y):
return criterion(y_pred[1], torch.split(y, 1, dim=1))[0]
def multihead_output_transform(x, y, y_pred, *args):
embedding, output = y_pred
y_pred_tensor = torch.stack(output).squeeze(2).transpose(0, 1)
y_tensor = y
data = x
with torch.no_grad():
loss, multi_losses = criterion(output, torch.split(y, 1, dim=1))
return data, embedding, loss, multi_losses, y_pred_tensor, y_tensor
eval_metrics = {'rmse': RootMeanSquaredError(), # 'corr': DistanceCorrelation(),
# 'embedding_data': EmbeddingData()
}
train_metrics = {'rmse': RootMeanSquaredError() # , 'corr': DistanceCorrelation()
}
trainer = create_supervised_trainer(self.model, self.optimizer, multihead_loss_func, device=self.device,
output_transform=multihead_output_transform)
for name, metric in train_metrics.items(): # Calculate metrics also on trainer
metric.attach(trainer, name)
evaluator = create_supervised_evaluator(self.model,
metrics=eval_metrics,
device=self.device,
output_transform=multihead_output_transform)
if self.model_save_path is not None:
# do we want to use it ? from Ignite
checkpoint_handler = ModelCheckpoint(self.model_save_path, 'checkpoint',
save_interval=save_every,
n_saved=10, require_empty=False, create_dir=True)
pbar = ProgressBar()
# RunningAverage(output_transform=lambda x: x[2])
pbar.attach(trainer)
scheduler = LRScheduler(self.step_scheduler)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
        if self.model_save_path is not None:
            trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': self.model})
@trainer.on(Events.EPOCH_STARTED)
def init_state_params(engine):
engine.state.plusplus_ex, engine.state.plusminus_ex = [None] * self.n_features, [None] * self.n_features
engine.state.minusminus_ex, engine.state.minusplus_ex = [None] * self.n_features, [None] * self.n_features
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
writer.add_scalar('General/LR', scheduler.get_param(), global_step=engine.state.iteration)
_, embedding, loss, multi_losses, y_pred_tensor, y_tensor = engine.state.output
images_batch, features_batch = engine.state.batch
plusplus_ex, plusminus_ex = engine.state.plusplus_ex, engine.state.plusminus_ex
minusminus_ex, minusplus_ex = engine.state.minusminus_ex, engine.state.minusplus_ex
writer.add_scalar('General/Train Loss', loss, global_step=engine.state.iteration)
feat_diff = (y_pred_tensor - y_tensor) # / y_tensor + 1
feat_sum = y_pred_tensor + y_tensor
for j in range(self.n_features):
writer.add_scalar(f'Multiple Losses/{self.feature_names[j]}', multi_losses[j],
global_step=engine.state.iteration)
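                # track the most extreme train examples per feature (by prediction+target sum and by error) for TensorBoard figures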
for i in range(len(images_batch)):
itm_diff, itm_sum = feat_diff[i][j].item(), feat_sum[i][j].item()
itm_pred, itm_actual = y_pred_tensor[i][j].item(), y_tensor[i][j].item()
ex = TrainExample(images_batch[i], predicted=itm_pred, actual=itm_actual, sum=itm_sum,
diff=itm_diff)
if minusminus_ex[j] is None or minusminus_ex[j].sum > itm_sum:
engine.state.minusminus_ex[j] = ex
elif plusminus_ex[j] is None or plusminus_ex[j].diff < itm_diff:
engine.state.plusminus_ex[j] = ex
elif minusplus_ex[j] is None or minusplus_ex[j].diff > itm_diff:
engine.state.minusplus_ex[j] = ex
elif plusplus_ex[j] is None or plusplus_ex[j].sum < itm_sum:
engine.state.plusplus_ex[j] = ex
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
global_step = engine.state.iteration
metrics = engine.state.metrics # already attached to the trainer engine to save
# can add more metrics here
add_metrics_to_tensorboard(metrics, writer, self.feature_names, global_step, log_str="train")
# plot min-max examples
plusplus_ex, plusminus_ex = engine.state.plusplus_ex, engine.state.plusminus_ex
minusminus_ex, minusplus_ex = engine.state.minusminus_ex, engine.state.minusplus_ex
for j in range(self.n_features):
if plusplus_ex[j] is None:
continue
writer.add_figure(tag=f"{self.feature_names[j]}/plusplus",
figure=build_example_image_figure(plusplus_ex[j]), global_step=global_step)
writer.add_figure(tag=f"{self.feature_names[j]}/plusminus",
figure=build_example_image_figure(plusminus_ex[j]), global_step=global_step)
writer.add_figure(tag=f"{self.feature_names[j]}/minusminus",
figure=build_example_image_figure(minusminus_ex[j]), global_step=global_step)
writer.add_figure(tag=f"{self.feature_names[j]}/minusplus",
figure=build_example_image_figure(minusplus_ex[j]), global_step=global_step)
@trainer.on(Events.ITERATION_COMPLETED)
def log_validation_results(engine):
global_step = engine.state.iteration
if global_step % evaluate_every == 0:
evaluator.run(val_data_loader)
metrics = evaluator.state.metrics
# can add more metrics here
add_metrics_to_tensorboard(metrics, writer, self.feature_names, global_step, log_str="validation")
# add_embedding_visualization(writer, metrics, global_step)
if global_step % save_every == 0:
self.save_trained_model(f"{self.model_save_path}/{global_step}_model.pth")
trainer.run(train_data_loader, max_epochs=epochs)
return self.model
def load_trained_model(self, path: str):
"""
load a trained model
Args:
path: path of the saved torch NN
Returns:
the trained model in 'path'
"""
checkpoint = torch.load(path)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.embedding_dim = checkpoint['embedding_dim']
self.losses = checkpoint['losses']
self.model = self.model.to(self.device)
return self
def _model_to(self):
self.model = self.model.to(self.device)
# from apex import amp
# if self.amp:
# model, optimizer = amp.initialize(model.to('cuda'), optimizer, opt_level="O1")
def save_trained_model(self, path: str):
"""
save a trained model
Args:
path: path of the saved torch NN
"""
self.model = self.model.to('cpu')
os.makedirs(os.path.dirname(path), exist_ok=True)
torch.save({
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'embedding_dim': self.embedding_dim,
'losses': self.losses,
}, path)
self.model = self.model.to(self.device)
def transform(self, coords: List[Tuple[float, float]]) -> torch.tensor:
"""
get the embedding of coordinates
Args:
coords: a list of tuple like (lat, long) to predict on
Returns:
A tensor of shape [n_coords, embedding_dim]
"""
# create tiles using the coords
s = generate_static_maps(config.tile_server_dns_noport, config.tile_server_ports)
images = []
for coord in coords:
ext = build_tile_extent(coord, radius_in_meters=HALF_TILE_LENGTH)
image = render_multi_channel(s, ext)
images.append(image)
images = torch.tensor(images).float().to(self.device)
# predict the embedding
embeddings, output = self.model(images)
return embeddings.to('cpu')
def _build_model(self, cnn_model, n_channels, n_heads):
model = cnn_model(n_channels, self.embedding_dim)
# model = simple_cnn(n_channels, self.embedding_dim)
heads = [simple_head(self.embedding_dim) for _ in range(n_heads)]
model = multihead_model(model, heads)
return model
```
#### File: models/baselines/maml.py
```python
import datetime
import os
from collections import OrderedDict
from typing import List
import torch
from torch import nn
from torch.nn.modules.loss import _Loss, L1Loss
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from coord2vec.config import TENSORBOARD_DIR
from coord2vec.feature_extraction.features_builders import FeaturesBuilder
from coord2vec.models.architectures import dual_fc_head
from coord2vec.models.data_loading.tile_features_loader import TileFeaturesDataset
from coord2vec.models.resnet import wide_resnet50_2
class MAML:
def __init__(self,
feature_builder: FeaturesBuilder,
n_channels: int,
losses: List[_Loss] = None,
embedding_dim: int = 128,
tb_dir: str = 'default'):
self.tb_dir = tb_dir
self.embedding_dim = embedding_dim
self.n_channels = n_channels
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.feature_builder = feature_builder
self.n_features = len(feature_builder.features)
# create L1 losses if not supplied
self.losses = [L1Loss() for i in range(self.n_features)] if losses is None else losses
assert len(self.losses) == self.n_features, "Number of losses must be equal to number of features"
# create the model
self.common_model = wide_resnet50_2(n_channels, self.embedding_dim)
self.head_models = [dual_fc_head(self.embedding_dim) for i in range(self.n_features)]
self.optimizer = torch.optim.SGD(self.common_model.parameters(), lr=1e-3)
def fit(self, dataset: TileFeaturesDataset,
n_epochs: int = 10,
batch_size: int = 64,
num_workers: int = 4,
alpha_lr: float = 1e-5,
beta: float = 1e-5):
# create a DataLoader
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])
train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
val_data_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
# create tensorboard
tb_path = os.path.join(TENSORBOARD_DIR, self.tb_dir) if self.tb_dir == 'test' \
else os.path.join(TENSORBOARD_DIR, self.tb_dir, str(datetime.datetime.now()))
writer = SummaryWriter(tb_path)
for epoch in tqdm(range(n_epochs), desc='Epochs', unit='epoch'):
# create a new model using the meta model
task_gradients = []
for task_ind in range(self.n_features):
fast_weights = OrderedDict(self.common_model.named_parameters())
task_model = nn.Sequential(self.common_model, self.head_models[task_ind])
for image_batch, features_batch in train_data_loader:
# forward pass
output = task_model(image_batch)
loss = self.losses[task_ind](output, features_batch[task_ind:task_ind + 1])
# backward pass
gradient = torch.autograd.grad(loss, task_model.parameters())
# Update weights manually
fast_weights = OrderedDict(
(name, param - alpha_lr * grad)
for ((name, param), grad) in zip(fast_weights.items(), gradient)
)
# accumulate gradients from all the tasks
for image_batch, features_batch in val_data_loader:
output = task_model(image_batch, fast_weights)
loss = self.losses[task_ind](output, features_batch[task_ind:task_ind + 1])
loss.backward(retain_graph=True)
gradients = torch.autograd.grad(loss, fast_weights.values())
named_grads = {name: g for ((name, _), g) in zip(fast_weights.items(), gradients)}
task_gradients.append(named_grads)
# meta step
sum_task_gradients = {k: torch.stack([grad[k] for grad in task_gradients]).mean(dim=0)
for k in task_gradients[0].keys()}
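            # NOTE: the meta-update below references `model`, `optimiser`, `replace_grad`, `loss_fn`,
            # `k_way`, `data_shape`, `device`, `create_nshot_task_label` and `meta_state`, none of which
            # are defined in this file; they appear to come from an external few-shot/MAML helper module.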
hooks = []
for name, param in model.named_parameters():
hooks.append(
param.register_hook(replace_grad(sum_task_gradients, name))
)
model.train()
optimiser.zero_grad()
# Dummy pass in order to create `loss` variable
# Replace dummy gradients with mean task gradients using hooks
logits = model(torch.zeros((k_way,) + data_shape).to(device, dtype=torch.double))
loss = loss_fn(logits, create_nshot_task_label(k_way, 1).to(device))
loss.backward()
optimiser.step()
for h in hooks:
h.remove()
            # perform the meta learning
self.common_model.load_state_dict(meta_state)
```
#### File: data_loading/tests/test_data_creation_and_loading.py
```python
import pickle
import shutil
import unittest
import numpy as np
import torch
from coord2vec.config import TEST_CACHE_DIR, IMG_WIDTH, IMG_HEIGHT, tile_server_ports
from coord2vec.models.data_loading.create_dataset_script import sample_and_save_dataset
from coord2vec.models.data_loading.tile_features_loader import get_files_from_path, TileFeaturesDataset
class TestDataCreation(unittest.TestCase):
def test_script_creates_correct_number_of_samples(self):
self.skipTest("Tiles not relevant at the moment")
self.fail("hangs for a long time")
# sample_and_save_dataset(TEST_CACHE_DIR, sample_num=7, use_existing=False)
# for img_path, feats_paths in get_files_from_path(TEST_CACHE_DIR):
# image_arr = np.load(img_path)
# features_arr = np.load(feats_paths)
#
# self._check_pkl_ok(features_arr, image_arr)
def _check_pkl_ok(self, feats, image):
if isinstance(feats, torch.Tensor):
self.assertFalse(any(torch.isnan(feats)), f"{feats}")
else:
self.assertFalse(np.isnan(feats).any(), f"{feats}")
self.assertTupleEqual((len(tile_server_ports), IMG_WIDTH, IMG_HEIGHT), image.shape)
def test_no_nones_in_dataset(self):
self.skipTest("Tiles not relevant at the moment")
self.fail("hangs for a long time")
# sample_and_save_dataset(TEST_CACHE_DIR, sample_num=3, use_existing=False)
# ds = TileFeaturesDataset(TEST_CACHE_DIR, example_features_builder)
#
# for i in range(len(ds)):
# im, feats = ds[i]
# self._check_pkl_ok(feats, im)
@classmethod
def tearDownClass(cls) -> None:
pass
# shutil.rmtree(TEST_CACHE_DIR)
if __name__ == '__main__':
unittest.main()
```
#### File: data_loading/tests/test_singleTileFeaturesDataset.py
```python
import os
import shutil
from unittest import TestCase
from coord2vec.config import TEST_CACHE_DIR, TENSORBOARD_DIR
# from coord2vec.feature_extraction.features_builders import example_features_builder
from coord2vec.models.data_loading.create_dataset_script import sample_and_save_dataset
from coord2vec.models.data_loading.tile_features_loader import SingleTileFeaturesDataset
class TestSingleTileFeaturesDataset(TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.tb_dir = 'test'
def test___getitem__(self):
self.skipTest("Tiles not relevant at the moment")
# sample_and_save_dataset(TEST_CACHE_DIR, sample_num=7, use_existing=False)
# dataset = SingleTileFeaturesDataset(TEST_CACHE_DIR, feature_index=2)
# self.assertTupleEqual(dataset[0][1].shape, (1,))
@classmethod
def tearDownClass(cls) -> None:
pass
# shutil.rmtree(os.path.join(TENSORBOARD_DIR, cls.tb_dir))
# shutil.rmtree(TEST_CACHE_DIR)
```
#### File: models/geo_convolution/geo_convolution.py
```python
from itertools import product
import numpy as np
import pandas as pd
from shapely.geometry import Point
class GeoConvolution:
def __init__(self):
pass
def _image2tiles(self, image: np.ndarray, tile_size) -> np.ndarray:
"""
Crop a big image into many small tiles
Args:
image: the big image to crop
tile_size: The height and width of the small tiles
Returns:
a 5d numpy array with all the tiles [n_row, n_col, width, height, n_channels]
"""
# better way
cropped_image = image[:image.shape[0] - image.shape[0] % tile_size,
:image.shape[1] - image.shape[1] % tile_size]
n_rows, n_cols = int(cropped_image.shape[0] / tile_size), int(cropped_image.shape[1] / tile_size)
n_channels = image.shape[-1]
        # reshape to (rows, tile, cols, tile, channels) and swap axes so each [i, j] entry is one tile
        tiles = cropped_image.reshape(n_rows, tile_size, n_cols, tile_size, n_channels).transpose(0, 2, 1, 3, 4)
return tiles
def _tiles2image(self, tiles: np.ndarray) -> np.ndarray:
"""
Join many tiles back into one image
Args:
            tiles: a 5d array of tiles with shape [n_row, n_col, tile_size, tile_size, n_channels]
        Returns:
            the re-assembled image with shape [n_row * tile_size, n_col * tile_size, n_channels]
"""
n_row, n_col = tiles.shape[0], tiles.shape[1]
tile_size = tiles.shape[2]
n_channels = tiles.shape[-1]
        # inverse of _image2tiles: move the row-tile axes back next to each other before flattening
        image = tiles.transpose(0, 2, 1, 3, 4).reshape((n_row * tile_size, n_col * tile_size, n_channels))
return image
def image2points(self, image: np.ndarray, bottom_left_point: Point, top_right_point: Point) -> pd.Series:
min_long, min_lat = bottom_left_point.coords[0]
max_long, max_lat = top_right_point.coords[0]
longs = np.linspace(min_long, max_long, image.shape[1])
lats = np.linspace(min_lat, max_lat, image.shape[0])
points = [Point(long, lat) for long, lat in product(longs, lats)]
values = image.flatten()
series = pd.Series(index=points, data=values)
return series
```
#### File: coord2vec/pipelines/calculate_building_scores_cv.py
```python
import os
import random
from datetime import datetime
from itertools import product
import logging
import numpy as np
import pandas as pd
from lagoon.dags import DAG, Stage
from lagoon.executors.local_executor import LocalExecutor
from shapely import wkt
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from xgboost import XGBClassifier
from coord2vec.common.itertools import flatten
from coord2vec.config import BUILDINGS_FEATURES_TABLE, BUILDING_RESULTS_DIR, NEG_RATIO, SCORES_TABLE, \
BUILDING_EXPERIMENT_NAME, TRUE_POSITIVE_RADIUS
from coord2vec.evaluation.tasks.task_handler import TaskHandler
from coord2vec.evaluation.tasks.tasks_utils import save_scores_to_db, hash_geoseries
from coord2vec.feature_extraction.feature_bundles import create_building_features
from coord2vec.feature_extraction.features_builders import FeaturesBuilder
from coord2vec.pipelines.lagoon_utils.expr_saver_task import ExprSaverTask
from coord2vec.pipelines.lagoon_utils.for_wrapper import define_for_dependencies, ForCalcTask, ForInputTask
from coord2vec.pipelines.lagoon_utils.lambda_task import LambdaTask
import geopandas as gpd
from imblearn.ensemble import BalancedRandomForestClassifier
from catboost import CatBoostClassifier
def _create_task():
features = create_building_features()
feature_builder = FeaturesBuilder(features, cache_table=BUILDINGS_FEATURES_TABLE)
models = {
f'CatBoost_depth{depth}_lr{lr}_l2reg{l2reg}': CatBoostClassifier(depth=depth, learning_rate=lr,
l2_leaf_reg=l2reg, iterations=300,
verbose=False, thread_count=8, od_pval=1e-5)
for depth, lr, l2reg in product([4, 7, 10], [0.03, 0.1, 0.15], [1, 4, 9])
# 'SVM C=0.1': SVC(C=0.1, probability=True, gamma='auto'),
# 'SVM C=0.01': SVC(C=0.01, probability=True, gamma='auto'),
# 'SVM C=1': SVC(C=1, probability=True, gamma='auto'),
# 'CatBoost': CatBoostClassifier(loss_function = 'CrossEntropy', iterations=300, depth=3, learning_rate=0.15, l2_leaf_reg=4, verbose=False),
# 'Logistic Regression': LogisticRegression(),
# 'BalancedRF1000': BalancedRandomForestClassifier(n_estimators=1000),
# 'BalancedRF1000_depth4': BalancedRandomForestClassifier(n_estimators=1000, max_depth=4),
# 'BalancedRF100_depth3': BalancedRandomForestClassifier(n_estimators=100),
# 'BalancedRF100_depth5': BalancedRandomForestClassifier(n_estimators=100, max_depth=5),
# 'XGBoost': XGBClassifier(n_estimators=50, early_stopping_round=10)
}
task = TaskHandler(feature_builder, models=models)
return task
def run_experiment_lagoon():
np.random.seed(42)
random.seed(42)
S_program = Stage("program")
# task = _create_task()
get_task = LambdaTask(_create_task, ["task"])
get_dataset = LambdaTask(lambda task: task.get_dataset(), ["geos", "y"])
# geos, y = task.get_dataset()
S_program.add_dependency(get_task, get_dataset)
# convert to buildings
extract_buildings = LambdaTask(lambda task, geos, y:
task.extract_buildings_from_polygons(geos, y, return_source=True),
["building_gs", "buildings_y", "source_indices"])
# building_gs, buildings_y, source_indices = task.extract_buildings_from_polygons(geos, y, neg_ratio=2,
# return_source=True)
S_program.add_dependency([get_task, get_dataset], extract_buildings)
# transform the buildings to features
transform = LambdaTask(lambda task, building_gs: task.transform(building_gs), ["X_df"])
# X_df = task.transform(building_gs)
S_program.add_dependency([get_task, extract_buildings], transform)
def train_predict_on_split(task, X_df, buildings_y, source_indices, source_train_indices, source_test_indices,
geos):
train_indices = np.isin(source_indices, source_train_indices)
test_indices = np.isin(source_indices, source_test_indices)
# fetch train-set and fit
X_train_df = X_df.iloc[train_indices]
y_train = buildings_y[train_indices]
# sample neg_ratio false samples
num_neg_samples = int(NEG_RATIO * y_train.sum())
X_train_neg_df = X_train_df[y_train == 0]
random_indices = np.random.choice(range(len(X_train_neg_df)), num_neg_samples, replace=False)
X_train_df = pd.concat([X_train_df[y_train == 1], X_train_neg_df.iloc[random_indices]])
y_train = np.concatenate([y_train[y_train == 1], y_train[y_train == 0][random_indices]]).astype(int)
        # try larger labels - it didn't work..
y_train_soft = task.get_soft_labels(gpd.GeoSeries(data=X_train_df.index.values), radius=TRUE_POSITIVE_RADIUS)
# y_train = (y_train > 0.5).astype(int)
X_test_df = X_df.iloc[test_indices]
soft_labels_cache = os.path.join(os.path.curdir, "soft_cache", hash_geoseries(gpd.GeoSeries(X_test_df.index)))
y_test_soft = task.get_soft_labels(gpd.GeoSeries(data=X_test_df.index.values), radius=TRUE_POSITIVE_RADIUS,
cache_dir=soft_labels_cache)
# y_test = buildings_y[test_indices].astype(int) # for Catboost
task.fit_all_models(X_train_df, y_train_soft)
# task.fit_all_models(X_train_df, y_train_soft, X_test_df, y_test_soft) # for Catboost
task.save_all_models()
train_probas_df = task.predict_all_models(X_train_df)
test_probas_df = task.predict_all_models(X_test_df)
# score models
model2scores = {}
model2score = task.score_all_models(X_test_df, y_test_soft)
for model, score in model2score.items():
model2scores.setdefault(model, []).append(score)
print(f"Insererting building results to {SCORES_TABLE}")
probas_df = task.predict_all_models(X_df)
train_hash = hash_geoseries(geos[source_train_indices])
save_scores_to_db(probas_df, SCORES_TABLE, BUILDING_EXPERIMENT_NAME, train_hash)
return X_train_df, y_train, X_test_df, y_test_soft, train_probas_df, test_probas_df, task.models_dict, model2scores
def merge_predict_results(X_train_df, X_test_df, y_train, y_test, source_train_indices,
source_test_indices, train_probas_df, test_probas_df, models_dict, model2scores):
# merge_dicts
kfold_results = [(X_train_df[i], X_test_df[i], y_train[i], y_test[i], source_train_indices[i],
source_test_indices[i], train_probas_df[i], test_probas_df[i], models_dict[i],
model2scores[i]) for i in
range(len(X_train_df))]
return (kfold_results,)
for_input_task = ForInputTask(lambda task, geos, y: (task.kfold_split(geos, y, n_splits=4),),
["source_train_indices", "source_test_indices"], 4)
S_program.add_dependency([get_task, get_dataset], for_input_task)
for_params = ["X_train_df", "y_train", "X_test_df", "y_test", "source_train_indices",
"source_test_indices", "train_probas_df", "test_probas_df", "models_dict",
"model2scores", "geos"]
for_train_predict_on_split = ForCalcTask(train_predict_on_split,
for_params, [get_task, get_dataset, extract_buildings, transform])
for_train_predict_on_split_merge = LambdaTask(merge_predict_results, 'model_results')
define_for_dependencies(S_program, for_train_predict_on_split, for_input_task, for_train_predict_on_split_merge)
def print_model_scores(kfold_results):
kfold_scores = [res[-1] for res in kfold_results]
all_models = set(flatten([list(kfold.keys()) for kfold in kfold_scores]))
for model in all_models:
model_mean = np.mean(flatten([kfold[model] for kfold in kfold_scores]))
print(f"{model} AUC: \t {model_mean}")
print_model2scores = LambdaTask(print_model_scores, [])
S_program.add_dependency(for_train_predict_on_split_merge, print_model2scores)
def save_results(geos, kfold_results):
return ([(geos.iloc[source_train_indices], X_train_df, y_train, train_probas_df,
geos.iloc[source_test_indices], X_test_df, y_test, test_probas_df,
models_dict, model2scores) for
X_train_df, X_test_df, y_train, y_test, source_train_indices,
source_test_indices, train_probas_df, test_probas_df, models_dict, model2scores in kfold_results],)
# return X_train_df, y_train, X_test_df, y_test, geos.iloc[source_train_indices], \
# geos.iloc[source_test_indices], train_probas_df, test_probas_df, model2scores
# change for results to objects to be saved
save_params = ["model_results"]
results2save = LambdaTask(save_results, save_params,
override_input_names=save_params)
S_program.add_dependency([get_dataset, for_train_predict_on_split_merge], results2save)
expr_path = f"{BUILDING_RESULTS_DIR}/{datetime.now().isoformat(' ', 'seconds')}"
saver = ExprSaverTask(expr_path, save_params)
S_program.add_dependency(results2save, saver)
main_dag = DAG("main")
main_dag.add(S_program)
main_dag.visualize()
a = LocalExecutor(num_workers=4, log_to=["elastic_prod"]).execute(main_dag)
if __name__ == "__main__":
np.random.seed(42)
random.seed(42)
run_experiment_lagoon()
```
#### File: pipelines/lagoon_utils/auto_stage.py
```python
from typing import Union, List
from lagoon import Stage, Task
from coord2vec.pipelines.lagoon_utils.lambda_task import LambdaTask
class AutoStage(Stage):
def __init__(self, name: str, **kwargs):
super().__init__(name, **kwargs)
self.output_param_to_task = dict()
def update_output_params(self, task):
# TODO: kind-of ugly, uses internal _dict_graph
if isinstance(task, LambdaTask) and task not in self._dict_graph:
for output_param in (task.pass_input_names + task.func_output_names):
self.output_param_to_task[output_param] = task
def add_auto(self, task: LambdaTask):
relevant_connections = set()
for input_param in task.func_input_names:
if input_param in self.output_param_to_task:
relevant_connections.add(self.output_param_to_task[input_param])
else:
pass # can come from pipelines variable
# raise AssertionError(f"input {input_param} not presented before")
if len(relevant_connections) == 0:
self.add(task)
else:
self.add_dependency(list(relevant_connections), task)
def add_dependency(
self, current_task: Union[Task, List[Task]], next_task: Union[Task, List[Task]]
) -> "Stage":
if not isinstance(current_task, list):
current_task = [current_task]
if not isinstance(next_task, list):
next_task = [next_task]
for task in (next_task + current_task):
self.update_output_params(task) # will try for all NEW tasks
return super(AutoStage, self).add_dependency(current_task, next_task)
def add_to_DAG(task: LambdaTask, s: AutoStage):
s.add_auto(task)
```
#### File: pipelines/lagoon_utils/expr_saver_task.py
```python
import os
from typing import Any, List, Union
from lagoon import Task
from lagoon.io.file import lagoon_open
class ExprSaverTask(Task):
def __init__(self, save_path: str, save_params: Union[List[str], str], **kwargs):
super().__init__(**kwargs)
self.save_path = save_path
self.save_params = save_params
def run(self):
# TODO: input names can also come from pipelines variable/constants
with lagoon_open(os.path.join(self.save_path, "results.pickle"), "w") as f:
f.save({name: self.input[name] for name in self.save_params})
``` |
{
"source": "jonzarecki/mapnik-image-render",
"score": 3
} |
#### File: jonzarecki/mapnik-image-render/main.py
```python
from StringIO import StringIO
import mapnik
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
def initialize_map():
m = mapnik.Map(300,300)
m.background = mapnik.Color('white')
s = mapnik.Style()
r = mapnik.Rule()
polygon_symbolizer = mapnik.PolygonSymbolizer()
polygon_symbolizer.fill = mapnik.Color('#000000')
r.symbols.append(polygon_symbolizer)
line_symbolizer = mapnik.LineSymbolizer()
line_symbolizer.stroke = mapnik.Color('rgb(100%,100%,100%)')
line_symbolizer.stroke_width = 0.5
r.symbols.append(line_symbolizer)
s.rules.append(r)
m.append_style('My Style',s)
ds = mapnik.Shapefile(file='ne_110m_admin_0_countries.shp')
layer = mapnik.Layer('world')
layer.datasource = ds
layer.styles.append('My Style')
m.layers.append(layer)
m.zoom_all()
return m
def render_image(map_obj):
im = mapnik.Image(300, 300)
# mapnik.render_to_file(map_obj, '/tmp/world.png', 'png')
map_obj.zoom_to_box(mapnik.Box2d(32.2654333839, 27.5013261988, 35.8363969256, 33.2774264593))
mapnik.render(map_obj, im)
string = im.tostring('tiff')
img = Image.open(StringIO(string))
plt.imshow(np.array(img), cmap="gray")
plt.axis('off')
    plt.show()
m = initialize_map()
render_image(m)
print "rendered image to 'world.png'"
``` |
{
"source": "jonzarecki/show-attend-and-tell-django",
"score": 2
} |
#### File: show-attend-and-tell-django/showattendtell/test.py
```python
import os
import matplotlib.pyplot as plt
import cPickle as pickle
import tensorflow as tf
from showattendtell.core.test_solver import CaptioningSolver
from showattendtell.core.model import CaptionGenerator
from showattendtell.core.utils import load_coco_data
from showattendtell.core.bleu import evaluate
dir_path = os.path.dirname(os.path.realpath(__file__))
# %matplotlib inline
plt.rcParams['figure.figsize'] = (8.0, 6.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
def test_model_on_image(img_path):
with open(os.path.join(dir_path, 'data/train/word_to_idx.pkl'), 'rb') as f:
word_to_idx = pickle.load(f)
model = CaptionGenerator(word_to_idx, dim_feature=[196, 512], dim_embed=512,
dim_hidden=1024, n_time_step=16, prev2out=True,
ctx2out=True, alpha_c=1.0, selector=True, dropout=True)
solver = CaptioningSolver(model, test_model=os.path.join(dir_path, 'model/lstm/model-20'))
return solver.test_live(img_path)
```
#### File: uploads/core/views.py
```python
from django.shortcuts import render, redirect
from uploads.core.forms import ImageForm
def home(request):
return render(request, 'core/home.html')
def model_form_upload(request):
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
inst = form.save()
return redirect(u'../../media/'+inst.img.name)
else:
form = ImageForm()
return render(request, 'core/model_form_upload.html', {
'form': form
})
``` |
{
"source": "jonzarecki/TemporalEdgeProp",
"score": 3
} |
#### File: common/graph_label_prop/temporal_label_prop.py
```python
from datetime import datetime
import networkx as nx
import numpy as np
from sklearn.semi_supervised._label_propagation import BaseLabelPropagation
class TemporalGraphLabelPropagation(BaseLabelPropagation):
"""
Class for the temporal LabelProp alg'
- node_time: the node attribute name containing the time in datetime format
"""
def __init__(self, node_time: str, *args, **kwargs):
super(TemporalGraphLabelPropagation, self).__init__(*args, **kwargs)
self.node_time = node_time
def _build_graph(self):
"""
        here we assume self.X_ already holds our affinity matrix as calculated by networkx
"""
return self.X_
@staticmethod
def decay_function(times_list: np.ndarray) -> np.ndarray:
"""
        Calculates the decayed time score for all times in $times_list in comparison with now
TODO: now is weird
Args:
times_list: list of timestamps in np array
Returns:
Decayed scores for all times in $times_list
"""
alpha = 1.
beta = -0.0005
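        # exponential decay of the weight with the node's age; the time difference is divided by 3600 (seconds -> hours)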
return alpha * np.exp(beta * np.abs(datetime.now() - times_list) / (60**2))
def fit(self, g: nx.Graph, y):
"""
        Uses the laplacian matrix to act as the affinity matrix for the label-prop algorithm
Args:
g: The graph in nx format
            #TODO: y is weird, should be a node attribute
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns:
self
"""
weight_name = 'TGLP_weight'
edge_weight = {}
for n1, n2, attrs in g.edges(data=True):
edge_weight[(n1, n2)] = (self.decay_function(g.nodes[n1][self.node_time]) +
self.decay_function(g.nodes[n2][self.node_time])) / 2.0
nx.set_edge_attributes(g, edge_weight, weight_name)
X = - nx.normalized_laplacian_matrix(g, nodelist=sorted(g.nodes), weight=weight_name).toarray()
X = X - np.diag(np.diagonal(X))
retval = super(TemporalGraphLabelPropagation, self).fit(X, y)
        for _, _, attrs in g.edges(data=True):  # remove the temporary weight attribute
            attrs.pop(weight_name, None)
return retval
```
#### File: graph_label_prop/tests/test_label_prop.py
```python
from unittest import TestCase
import unittest
import numpy as np
import networkx as nx
from datetime import datetime, timedelta
from edge_prop.common.graph_label_prop.label_prop import GraphLabelPropagation
class TestLabelProp(TestCase):
def test_label_prop(self):
"""
Easy test for sanity for normal LabelProp
"""
g = nx.Graph()
g.add_node(0, label=-1)
g.add_edge(0, 1)
g.add_node(2, label=1)
g.add_edge(1, 2)
g.add_edge(2, 4)
g.add_edge(2, 3)
true_labels = np.array([-1, 1, 1, 1, 1])
initial_node_labels = nx.get_node_attributes(g, 'label')
labels_sorted = [initial_node_labels.setdefault(n, 0) for n in sorted(g.nodes)]
# prop_model = GraphLabelPropagation(y_attr='label') #TODO: might be a good idea
prop_model = GraphLabelPropagation()
prop_model.fit(g, labels_sorted)
pred = prop_model.predict()
self.assertTrue(np.array_equal(pred, true_labels))
```
#### File: TemporalEdgeProp/edge_prop/data_loader.py
```python
from os.path import join
import numpy as np
import networkx as nx
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from edge_prop.graph_wrappers import BaseGraph
from edge_prop.constants import LABEL_GT, LABEL_TRAIN, NO_LABEL
class DataLoader:
def __init__(self, path: str, test_size: float = 0.2, dtype_tuples=[(LABEL_GT, int)]):
self.path = path
self.test_size = test_size
self.dtype_tuples = dtype_tuples
def load_data(self, trunc_nodes: int = None):
if 'aminer' in self.path:
graph = self._load_aminer(self.path, trunc_nodes)
        elif 'epinions' in self.path or 'Slashdot' in self.path or 'elec' in self.path or 'ucidata' in self.path:
graph = self._load_konect_dataset(self.path, trunc_nodes)
else:
raise Exception('No such dataset exists')
g = BaseGraph(graph)
train_labels_dict, all_labels_dict = nx.get_edge_attributes(g.graph_nx, LABEL_TRAIN), \
nx.get_edge_attributes(g.graph_nx, LABEL_GT)
test_indices = np.array([i for i, e in enumerate(g.edge_order)
if (train_labels_dict[e] == [NO_LABEL] and all_labels_dict[e] != [NO_LABEL])])
train_indices = np.array([i for i, e in enumerate(g.edge_order)
if (train_labels_dict[e] != [NO_LABEL] and all_labels_dict[e] != [NO_LABEL])])
true_labels = np.array([all_labels_dict[e] for e in g.edge_order]) # all true labels
true_labels_ndarray = MultiLabelBinarizer().fit_transform(true_labels)
for cur_y_test, true_label in zip(true_labels_ndarray, true_labels):
if not (sum(cur_y_test[true_label]) == len(true_label) == sum(cur_y_test)):
raise Exception('Classes got binarized not in the right order')
return g, true_labels_ndarray, test_indices, train_indices
def _load_konect_dataset(self, path, trunc_nodes):
graph = nx.read_edgelist(path, comments='#', data=self.dtype_tuples)
if trunc_nodes is not None:
graph.remove_nodes_from(map(str, range(trunc_nodes, graph.number_of_nodes())))
edge2label = nx.get_edge_attributes(graph, LABEL_GT)
edge2label = {edge: [0 if label < 0 else label] for edge, label in edge2label.items()}
nx.set_edge_attributes(graph, edge2label, LABEL_GT) # override LABEL_GT
train_edges, test_edges = train_test_split(list(edge2label.keys()), test_size=self.test_size)
# jz: this is very hard to read, whoever wrote it.
        edge2label.update({edge: [NO_LABEL] for edge in test_edges})  # train labels are retained, test labels are overridden
nx.set_edge_attributes(graph, edge2label, LABEL_TRAIN) # LABEL_GT still holds all labels
        # test edges now carry NO_LABEL under LABEL_TRAIN
return graph
def _load_aminer(self, path, trunc_nodes):
edges_train, labels_train = DataLoader._get_triples(join(path, 'train.txt'))
edges_val, labels_val = DataLoader._get_triples(join(path, 'valid.txt'))
edges_test, labels_test = DataLoader._get_triples(join(path, 'test.txt'))
if trunc_nodes is not None:
edges_train, labels_train = edges_train[:trunc_nodes], labels_train[:trunc_nodes]
edges_val, labels_val = edges_val[:trunc_nodes], labels_val[:trunc_nodes]
edges_test, labels_test = edges_test[:trunc_nodes], labels_test[:trunc_nodes]
graph = nx.from_edgelist(np.concatenate([edges_train, edges_val, edges_test]))
edge2label = {}
edge2label.update({edge: label for edge, label in zip(edges_train, labels_train)})
edge2label.update({edge: label for edge, label in zip(edges_val, labels_val)})
edge2label.update({edge: label for edge, label in zip(edges_test, labels_test)})
nx.set_edge_attributes(graph, edge2label, LABEL_GT)
edge2label.update({edge: [NO_LABEL] for edge in edges_test})
nx.set_edge_attributes(graph, edge2label, LABEL_TRAIN)
return graph
@staticmethod
def _get_triples(path):
headList = []
tailList = []
relationList = []
headSet = []
tailSet = []
f = open(path, "r")
content = f.readline()
global tripleTotal, entityTotal, tagTotal
tripleTotal, entityTotal, tagTotal = [int(i) for i in content.strip().split()]
for i in range(entityTotal):
headSet.append(set())
tailSet.append(set())
while (True):
content = f.readline()
if content == "":
break
values = content.strip().split()
values = [(int)(i) for i in values]
headList.append(values[0])
tailList.append(values[1])
headSet[values[0]].add(values[1])
tailSet[values[1]].add(values[0])
relationList.append(values[2:])
f.close()
edges = list(zip(headList, tailList))
relationList = [[label] if not isinstance(label, list) else label for label in relationList]
return edges, relationList
```
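A hedged sketch of the masking convention `_load_konect_dataset` relies on: every edge keeps its ground truth under one attribute, while test edges are blanked out with `NO_LABEL` under the training attribute (the constants below are stand-ins for `edge_prop.constants`):
```python
import networkx as nx
from sklearn.model_selection import train_test_split

LABEL_GT, LABEL_TRAIN, NO_LABEL = 'label_gt', 'label_train', -1

g = nx.Graph()
g.add_edge('a', 'b', **{LABEL_GT: [1]})
g.add_edge('b', 'c', **{LABEL_GT: [0]})
g.add_edge('c', 'd', **{LABEL_GT: [1]})
g.add_edge('d', 'a', **{LABEL_GT: [0]})

edge2label = nx.get_edge_attributes(g, LABEL_GT)
train_edges, test_edges = train_test_split(list(edge2label), test_size=0.5, random_state=0)

train_labels = dict(edge2label)
train_labels.update({edge: [NO_LABEL] for edge in test_edges})  # hide test labels
nx.set_edge_attributes(g, train_labels, LABEL_TRAIN)            # LABEL_GT stays untouched

print(nx.get_edge_attributes(g, LABEL_TRAIN))  # test edges now show [-1]
```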
#### File: edge_prop/models/base_model.py
```python
import abc
import io
import logging
import os
import warnings
from abc import ABCMeta
import six
from sklearn.base import BaseEstimator, ClassifierMixin
import numpy as np
from sparse import DOK, COO
from torch.utils.tensorboard import SummaryWriter
from matplotlib.pyplot import cm
from edge_prop.graph_wrappers import BaseGraph
from edge_prop.constants import NO_LABEL, EDGEPROP_BASE_DIR, TENSORBOARD_DIR
from edge_prop.common.metrics import get_all_metrics
from edge_prop.visualization.adj_mat_to_image import graph2image
class BaseModel(six.with_metaclass(ABCMeta), BaseEstimator, ClassifierMixin):
"""
    Edge Propagation
    EXPECTS graphs that are not multigraphs (at most one edge between any pair of nodes)
Parameters
------------
max_iter: integer
Change maximum number of iterations allowed
tol: float
Convergence tolerance: threshold to consider the system at a steady state
"""
_variant = 'propagation'
def __init__(self, max_iter: int = 50, tol: float = 1e-5, alpha: float = 1, tb_exp_name: str = None):
self.alpha = alpha
self.tol = tol
self.max_iter = max_iter
self.tb_exp_name = tb_exp_name
self.verbose = False
if tb_exp_name is not None:
path = os.path.join(TENSORBOARD_DIR, tb_exp_name) # , str(datetime.datetime.now()))
self.writer = SummaryWriter(path)
self.verbose = True
self.sparse = False
def predict(self, indices=None):
"""
Predict labels across all edges
Parameters
----------
Returns
------
y : array_like, shape = [n_edges]
Predictions for entire graph
"""
if indices is None:
indices = range(self.graph.n_edges)
results = []
for i in indices:
u, v = self.graph.edge_order[i]
dist = self.get_edge_distributions(u, v) # label distribution
assert np.allclose(self.get_edge_distributions(u, v),
                               self.get_edge_distributions(v, u)), "the graph is undirected, so this shouldn't happen"
if len(dist[dist == dist.max()]) > 1:
warnings.warn(f"edge {(u, v)} doesn't have a definitive max: {dist}", category=RuntimeWarning)
results.append(self.classes[dist.argmax()]) # returned index and not the class
results = np.array(results, dtype=np.int)
# results = np.ones_like(self.edge_distributions[:, :, 0]) * NO_LABEL
# edge_exists = self.edge_distributions.sum(axis=-1) != 0
# results[edge_exists] = self.edge_distributions.argmax(axis=-1)[edge_exists]
return results
def predict_proba(self, indices=None):
if indices is None:
indices = range(self.graph.n_edges)
results = []
for i in indices:
u, v = self.graph.edge_order[i]
results.append(self.get_edge_distributions(u, v))
results = np.array(results)
return results
def get_edge_distributions(self, u, v):
u_index, v_index = self.graph.node_to_idx[u], self.graph.node_to_idx[v]
if self.sparse:
return self.edge_distributions[u_index, v_index].todense()
else:
return self.edge_distributions[u_index, v_index]
def fit(self, g: BaseGraph, label, val={}):
"""
        Uses the Laplacian matrix as the affinity matrix for the label-propagation algorithm
:param g: The graph
Returns
-------
self : returns a pointer to self
"""
self.graph = g
self.val = val
self.adj_mat = g.adjacency_matrix(sparse=self.sparse)
self.y = self._create_y(g, label)
self.num_classes = self.y.shape[-1]
self.edge_distributions = self._perform_edge_prop_on_graph(self.adj_mat, self.y, max_iter=self.max_iter,
tol=self.tol)
return self
@abc.abstractmethod
def _perform_edge_prop_on_graph(self, adj_mat: np.ndarray, y: np.ndarray, max_iter=100, tol=1e-1) -> COO:
"""
Performs the EdgeProp algorithm on the given graph.
returns the label distribution (|N|, |N|) matrix with scores between -1, 1 stating the calculated label distribution.
"""
pass
def _get_classes(self, g: BaseGraph, label) -> np.ndarray:
edge_labels = g.get_edge_attributes_ordered(label)
classes = np.unique([label for _, y in edge_labels for label in y])
classes = classes[classes != NO_LABEL]
return classes
def _create_y(self, g, label):
classes = self._get_classes(g, label)
self.classes = classes
edge_labels = g.get_edge_attributes_ordered(label)
y = np.zeros((g.n_nodes, g.n_nodes, len(classes)))
for ((u, v), labels) in edge_labels:
edge = g.node_to_idx[u], g.node_to_idx[v]
reverse_edge = tuple(reversed(edge))
for label in labels:
if label != NO_LABEL:
y[edge][label] = 1 / len(labels)
y[reverse_edge][label] = 1 / len(labels)
return y
def write_evaluation_to_tensorboard(self, global_step):
# if self.num_classes > 2:
# graph_image = graph2image(self.edge_distributions.argmax(axis=-1), self.adj_mat, color_map=cm.gist_ncar)
# else:
# graph_image = graph2image(self.edge_distributions[:, :, -1], self.adj_mat, color_map=cm.seismic)
# self.writer.add_image("Graph", graph_image, global_step=global_step)
for val_name, (val_indices, y_val) in self.val.items():
y_pred = self.predict_proba(val_indices)
metrics = get_all_metrics(y_pred, y_val)
for metric_name, metric_value in metrics.items():
self.writer.add_scalar(f'{val_name}/{metric_name}', metric_value, global_step=global_step)
pred_classes = np.argmax(y_pred, axis=-1)
self.writer.add_histogram(f'{val_name}/predicted_class', pred_classes, global_step=global_step)
self.writer.flush()
```
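The `_create_y` step above spreads each edge's labels over a symmetric `(n_nodes, n_nodes, n_classes)` tensor; a toy, self-contained illustration of that encoding (sizes and labels are made up):
```python
import numpy as np

n_nodes, classes = 4, [0, 1, 2]
y = np.zeros((n_nodes, n_nodes, len(classes)))

edge, labels = (0, 2), [1, 2]            # a multi-labelled edge between nodes 0 and 2
for label in labels:
    y[edge][classes.index(label)] = 1 / len(labels)
    y[edge[::-1]][classes.index(label)] = 1 / len(labels)

print(y[0, 2])   # [0.  0.5 0.5] -- label mass split evenly across the edge's labels
print(y[2, 0])   # identical: the tensor is kept symmetric, matching the undirected graph
```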
#### File: edge_prop/models/node2vec_classifier.py
```python
import os
from node2vec import Node2Vec
from sklearn.linear_model import LogisticRegression
import numpy as np
from gensim.models import KeyedVectors
from edge_prop.constants import LABEL_TRAIN, NO_LABEL, NODE2VEC_CACHE
from edge_prop.graph_wrappers import BaseGraph
class Node2VecClassifier:
def __init__(self, n2v_kwargs={}, n2v_fit_kwargs={}, cache_name="default"):
self.n2v_kwargs = n2v_kwargs
self.n2v_fit_kwargs = n2v_fit_kwargs
self.clf = LogisticRegression(multi_class='ovr')
self.save_path = os.path.join(NODE2VEC_CACHE, cache_name + '.emb')
def fit(self, g: BaseGraph, label=LABEL_TRAIN):
graph = g.graph_nx
# fit node2vec
if os.path.exists(self.save_path):
node_vectors_dict = KeyedVectors.load(self.save_path)
else:
self.node2vec = Node2Vec(graph, workers = 1, **self.n2v_kwargs)
self.node2vec = self.node2vec.fit(**self.n2v_fit_kwargs)
node_vectors_dict = self.node2vec.wv
# save the embedding
if not os.path.exists(NODE2VEC_CACHE):
os.mkdir(NODE2VEC_CACHE)
node_vectors_dict.save(self.save_path)
edge_vectors = [np.concatenate([node_vectors_dict[str(u)], node_vectors_dict.wv[str(v)]]) for u, v in g.edge_order]
self.edge_vectors = np.stack(edge_vectors)
# extract the train set
edge_labels = np.array([label[0] for edge, label in g.get_edge_attributes_ordered(label)])
train_mask = edge_labels != NO_LABEL
x_train = self.edge_vectors[train_mask == True]
y_train = edge_labels[train_mask]
# fit the logistic regression
self.clf.fit(x_train, y_train)
def predict_proba(self, indices):
x_test = self.edge_vectors[indices]
y_proba = self.clf.predict_proba(x_test)
return y_proba
```
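The classifier above represents an edge by concatenating the two node embeddings and fits a one-vs-rest logistic regression on the labelled edges. A self-contained sketch of that featurisation, with random vectors standing in for the node2vec output:
```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(42)
node_vectors = {node: rng.normal(size=8) for node in range(5)}   # stand-in for node2vec .wv
edges = [(0, 1), (1, 2), (2, 3), (3, 4), (0, 4)]
edge_labels = np.array([0, 1, 1, 0, 1])

edge_vectors = np.stack([np.concatenate([node_vectors[u], node_vectors[v]]) for u, v in edges])
clf = LogisticRegression(multi_class='ovr').fit(edge_vectors, edge_labels)
print(clf.predict_proba(edge_vectors).shape)   # (5, 2): one probability row per edge
```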
#### File: TemporalEdgeProp/edge_prop/run.py
```python
import random
import time
import logging
from datetime import datetime
from itertools import product
from edge_prop.common.metrics import get_all_metrics
from edge_prop.common.multiproc_utils import parmap
from edge_prop.constants import DATASET2PATH
from edge_prop.data_loader import DataLoader
from edge_prop.models import SparseBaseline, SparseEdgeProp
from edge_prop.constants import LABEL_TRAIN
from edge_prop.models.node2vec_classifier import Node2VecClassifier
import numpy as np
def get_expr_name(alpha, test_size, alg_cls):
return f"data_name={data_name}, test_size={test_size}, alpha={alpha}, model={alg_cls.__name__}"
def run_alg_on_data(alpha, test_size, alg_cls):
if alg_cls in (SparseBaseline, Node2VecClassifier): # no alpha
if alpha != 0.:
return (get_expr_name(alpha, test_size, alg_cls), {})
if 'aminer' in data_name:
if test_size != 0.75:
return (get_expr_name(alpha, test_size, alg_cls), {})
test_size = 1.
expr_name = f"{get_expr_name(alpha, test_size, alg_cls)}-{datetime.now().isoformat(' ', 'seconds')}"
print(expr_name)
# create dataset
path = DATASET2PATH[data_name]
graph, true_labels, test_indices, train_indices = DataLoader(path, test_size=test_size).load_data() # node number doesn't work on aminer
y_test = true_labels[test_indices]
y_train = true_labels[train_indices]
print(f"Calculating {alg_cls.__name__}:")
st = time.time()
if alg_cls == Node2VecClassifier:
model = alg_cls(cache_name=data_name)
model.fit(graph, LABEL_TRAIN)
else:
model = alg_cls(max_iter=300, alpha=alpha, tol=1e-2, tb_exp_name=expr_name)
model.fit(graph, LABEL_TRAIN, val={'train': (train_indices, y_train), 'validation': (test_indices, y_test)})
y_pred = model.predict_proba(test_indices)
print(np.unique(y_pred.argmax(axis=1), return_counts=True))
# breakpoint()
metrics = get_all_metrics(y_pred, y_test)
print(f"took {int(time.time() - st) / 60}. {expr_name}: {metrics}")
return expr_name, metrics
data_name = 'aminer_m'#'slashdot'#'epinions'#'aminer_s'
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logging.info(f"start")
np.random.seed(18)
random.seed(18)
alphas = [0, 0.8, 1]
test_sizes = [0.75]#[0.2,0.75, 0.8]
compared_algs = [SparseEdgeProp, SparseBaseline, Node2VecClassifier] #SparseEdgeProp,
# compared_algs = [SparseEdgeProp, SparseBaseline] #SparseEdgeProp,
results_tpls = [run_alg_on_data(*args) for args in product(alphas, test_sizes, compared_algs)] #TODO no linux
# results_tpls = parmap(lambda args: run_alg_on_data(*args), list(product(alphas, test_sizes, compared_algs)),
# use_tqdm=True, desc="Calculating model results:", nprocs=1)
results = dict(results_tpls)
print(results)
for (alpha, test_size) in product(alphas, test_sizes):
baseline_metrics = results[get_expr_name(alpha, test_size, SparseBaseline)]
our_metrics = results[get_expr_name(alpha, test_size, SparseEdgeProp)]
node2vec_metrics = results[get_expr_name(alpha, test_size, Node2VecClassifier)]
print(f"alpha={alpha}, test_size={test_size}, \t Baseline: {baseline_metrics} \t New Model: {our_metrics}, Node2vec: {node2vec_metrics}")
```
#### File: tests/visualization/test_plot_graph.py
```python
from unittest import TestCase
import sparse
from edge_prop.visualization.adj_mat_to_image import graph2image
import matplotlib.pyplot as plt
class TestPlot_graph(TestCase):
def test_plot_graph(self):
coords, data = [[1,2,0,0],[0,0,1,2]], [1,1,1,1]
adj_mat = sparse.COO(coords, data)
probas = [0.0, 1, 0, 1]
probas_mat = sparse.COO(coords, probas)
image = graph2image(probas_mat, adj_mat)
plt.imshow(image.T)
plt.show()
``` |
{
"source": "JonZeolla/incubator-metron",
"score": 2
} |
#### File: ansible/callback_plugins/0_minimum_ansible_version.py
```python
import sys
from ansible import __version__
from ansible.plugins.callback import CallbackBase
from ansible.utils.display import Display
def display(*args, **kwargs):
display_instance = Display()
display_instance.display(*args, **kwargs)
MINIMUM_ANSIBLE_VERSION = '2.4.0'
def version_requirement(version):
    # Compare versions numerically; a plain string comparison would order
    # '2.10.0' before '2.4.0' and wrongly reject newer Ansible releases.
    from distutils.version import LooseVersion
    return LooseVersion(version) >= LooseVersion(MINIMUM_ANSIBLE_VERSION)
class CallbackModule(CallbackBase):
"""
This enforces a minimum version of ansible
"""
CALLBACK_NAME = 'minimum_ansible_version'
def __init__(self):
super(CallbackModule, self).__init__()
if not version_requirement(__version__):
display('Metron requires Ansible %s or newer, current version is %s' % (MINIMUM_ANSIBLE_VERSION, __version__), color='red')
sys.exit(1)
```
#### File: package/scripts/management_ui_commands.py
```python
import metron_service
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
# Wrap major operations and functionality in this class
class ManagementUICommands:
__params = None
def __init__(self, params):
if params is None:
raise ValueError("params argument is required for initialization")
self.__params = params
def start_management_ui(self):
"""
Starts the Management UI
:param env: Environment
"""
Logger.info('Starting Management UI')
start_cmd = ('service', 'metron-management-ui', 'start')
Execute(start_cmd, sudo=True)
Logger.info('Done starting Management UI')
def stop_management_ui(self):
"""
Stops the Management UI
:param env: Environment
"""
Logger.info('Stopping Management UI')
stop_cmd = ('service', 'metron-management-ui', 'stop')
Execute(stop_cmd, sudo=True)
Logger.info('Done stopping Management UI')
def restart_management_ui(self, env):
"""
Restarts the Management UI
:param env: Environment
"""
Logger.info('Restarting the Management UI')
restart_cmd = ('service', 'metron-management-ui', 'restart')
Execute(restart_cmd, sudo=True)
Logger.info('Done restarting the Management UI')
def status_management_ui(self, env):
"""
Performs a status check for the Management UI
:param env: Environment
"""
Logger.info('Status check the Management UI')
metron_service.check_http(
self.__params.metron_management_ui_host,
self.__params.metron_management_ui_port,
self.__params.metron_user)
def service_check(self, env):
"""
Performs a service check for the Management UI
:param env: Environment
"""
Logger.info('Checking connectivity to Management UI')
metron_service.check_http(
self.__params.metron_management_ui_host,
self.__params.metron_management_ui_port,
self.__params.metron_user)
Logger.info("Management UI service check completed successfully")
``` |
{
"source": "jonzia/Chess",
"score": 4
} |
#### File: jonzia/Chess/state.py
```python
import tensorflow as tf
import numpy as np
import pieces as p
import random as r
import state as s
import time as t
import copy as c
import math
import os
def initialize_pieces(random=False, keep_prob=1.0):
"""Construct list of pieces as objects"""
# Args: (1) random: Whether board is initialized to random initial state
# (2) keep_prob: Probability of retaining piece
# Returns: Python list of pieces
# 1,1 = a1 ... 8,8 = h8
piece_list = [p.Rook('white',1,1), p.Knight('white',2,1), p.Bishop('white',3,1), p.Queen('white'),
p.King('white'), p.Bishop('white',6,1), p.Knight('white',7,1), p.Rook('white',8,1),
p.Pawn('white',1,2), p.Pawn('white',2,2), p.Pawn('white',3,2), p.Pawn('white',4,2),
p.Pawn('white',5,2), p.Pawn('white',6,2), p.Pawn('white',7,2), p.Pawn('white',8,2),
p.Pawn('black',1,7), p.Pawn('black',2,7), p.Pawn('black',3,7), p.Pawn('black',4,7),
p.Pawn('black',5,7), p.Pawn('black',6,7), p.Pawn('black',7,7), p.Pawn('black',8,7),
p.Rook('black',1,8), p.Knight('black',2,8), p.Bishop('black',3,8), p.Queen('black'),
p.King('black'), p.Bishop('black',6,8), p.Knight('black',7,8), p.Rook('black',8,8)]
# If random is True, randomize piece positions and activity
if random:
# For piece in piece list...
for piece in piece_list:
# Toggle activity based on uniform distribution (AND PIECE IS NOT KING)
if r.random() >= keep_prob and piece.name != 'King':
piece.remove()
# If the piece was not removed, randomize file and rank
else:
newfile = r.randint(1,8)
newrank = r.randint(1,8)
# If there is another piece in the target tile, swap places
for other_piece in piece_list:
if other_piece.is_active and other_piece.file == newfile and other_piece.rank == newrank:
# Swap places
other_piece.file = piece.file
other_piece.rank = piece.rank
                # Whether or not a swap occurred, update this piece's file and rank
piece.file = newfile
piece.rank = newrank
piece.move_count += 1
return piece_list
def board_state(piece_list):
"""Configuring inputs for value function network"""
# Args: (1) piece list
# The output contains M planes of dimensions (N X N) where (N X N) is the size of the board.
# There are M planes "stacked" in layers where each layer represents a different "piece group"
# (e.g. white pawns, black rooks, etc.) in one-hot format where 1 represents a piece in those
# coordinates and 0 represents the piece is not in those coordinates.
# Define parameters
N = 8 # N = board dimensions (8 x 8)
M = 12 # M = piece groups (6 per player)
    # Initializing board state with dimensions N x N x M
board = np.zeros((N,N,M))
# The M layers each represent a different piece group. The order of is as follows:
# 0: White Pawns Pieces 8 - 15
# 1: White Knights Pieces 1 and 6
# 2: White Bishops Pieces 2 and 5
# 3: White Rooks Pieces 0 and 7
# 4: White Queen Piece 3
# 5: White King Piece 4
# 6: Black Pawns Pieces 16 - 23
# 7: Black Knights Pieces 25 and 30
# 8: Black Bishops Pieces 26 and 29
# 9: Black Rooks Pieces 24 and 31
# 10: Black Queen Piece 27
# 11: Black King Piece 28
# Note that the number of pieces in each category may change upon piece promotion or removal
# (hence the code below will remain general).
# Fill board state with pieces
for piece in piece_list:
# Place active white pawns in plane 0 and continue to next piece
if piece.is_active and piece.color == 'white' and piece.name == 'Pawn':
board[piece.file-1, piece.rank-1, 0] = 1
# Place active white knights in plane 1 and continue to next piece
elif piece.is_active and piece.color == 'white' and piece.name == 'Knight':
board[piece.file-1, piece.rank-1, 1] = 1
# Place active white bishops in plane 2 and continue to next piece
elif piece.is_active and piece.color == 'white' and piece.name == 'Bishop':
board[piece.file-1, piece.rank-1, 2] = 1
# Place active white rooks in plane 3 and continue to next piece
elif piece.is_active and piece.color == 'white' and piece.name == 'Rook':
board[piece.file-1, piece.rank-1, 3] = 1
# Place active white queen(s) in plane 4 and continue to next piece
elif piece.is_active and piece.color == 'white' and piece.name == 'Queen':
board[piece.file-1, piece.rank-1, 4] = 1
# Place active white king in plane 5 and continue to next piece
elif piece.is_active and piece.color == 'white' and piece.name == 'King':
board[piece.file-1, piece.rank-1, 5] = 1
# Place active black pawns in plane 6 and continue to next piece
elif piece.is_active and piece.color == 'black' and piece.name == 'Pawn':
board[piece.file-1, piece.rank-1, 6] = 1
# Place active black knights in plane 7 and continue to next piece
elif piece.is_active and piece.color == 'black' and piece.name == 'Knight':
board[piece.file-1, piece.rank-1, 7] = 1
# Place active black bishops in plane 8 and continue to next piece
elif piece.is_active and piece.color == 'black' and piece.name == 'Bishop':
board[piece.file-1, piece.rank-1, 8] = 1
# Place active black rooks in plane 9 and continue to next piece
elif piece.is_active and piece.color == 'black' and piece.name == 'Rook':
board[piece.file-1, piece.rank-1, 9] = 1
# Place active black queen(s) in plane 10 and continue to next piece
elif piece.is_active and piece.color == 'black' and piece.name == 'Queen':
board[piece.file-1, piece.rank-1, 10] = 1
# Place active black king in plane 11 and continue to next piece
elif piece.is_active and piece.color == 'black' and piece.name == 'King':
board[piece.file-1, piece.rank-1, 11] = 1
# Return board state
return board
def visualize_state(piece_list):
"""Visualizing board in terminal"""
# Args: (1) piece list
# The output is an 8x8 grid indicating the present locations for each piece
# Initializing empty grid
visualization = np.empty([8,8],dtype=object)
for i in range(0,8):
for j in range(0,8):
visualization[i,j] = ' ';
for piece in piece_list:
# Load active pawns
if piece.is_active and piece.color == 'white' and piece.name == 'Pawn':
visualization[piece.file-1, piece.rank-1] = 'P'
elif piece.is_active and piece.color == 'black' and piece.name == 'Pawn':
visualization[piece.file-1, piece.rank-1] = 'p'
elif piece.is_active and piece.color == 'white' and piece.name == 'Rook':
visualization[piece.file-1, piece.rank-1] = 'R'
elif piece.is_active and piece.color == 'black' and piece.name == 'Rook':
visualization[piece.file-1, piece.rank-1] = 'r'
elif piece.is_active and piece.color == 'white' and piece.name == 'Knight':
visualization[piece.file-1, piece.rank-1] = 'N'
elif piece.is_active and piece.color == 'black' and piece.name == 'Knight':
visualization[piece.file-1, piece.rank-1] = 'n'
elif piece.is_active and piece.color == 'white' and piece.name == 'Bishop':
visualization[piece.file-1, piece.rank-1] = 'B'
elif piece.is_active and piece.color == 'black' and piece.name == 'Bishop':
visualization[piece.file-1, piece.rank-1] = 'b'
elif piece.is_active and piece.color == 'white' and piece.name == 'Queen':
visualization[piece.file-1, piece.rank-1] = 'Q'
elif piece.is_active and piece.color == 'black' and piece.name == 'Queen':
visualization[piece.file-1, piece.rank-1] = 'q'
elif piece.is_active and piece.color == 'white' and piece.name == 'King':
visualization[piece.file-1, piece.rank-1] = 'K'
elif piece.is_active and piece.color == 'black' and piece.name == 'King':
visualization[piece.file-1, piece.rank-1] = 'k'
# Return visualization
return visualization
def action_space(piece_list, player):
"""Determining available moves for evaluation"""
# Args: (1) piece list, (2) player color
# The output is a P x 56 matrix where P is the number of pieces and 56 is the maximum
# possible number of moves for any piece. For pieces which have less than possible
# moves, zeros are appended to the end of the row. A value of 1 indicates that a
# move is available while a value of 0 means that it is not.
# See pieces.py for move glossary
# Initializing action space with dimensions P x 56
action_space = np.zeros((16,56))
# For each piece...
for i in range(0,16):
# If it is white's turn to move...
if player == 'white':
# Obtain vector of possible actions and write to corresponding row
action_space[i,:] = piece_list[i].actions(piece_list)
else:
action_space[i,:] = piece_list[i+16].actions(piece_list)
# Return action space
return action_space
def points(piece_list):
"""Calculating point differential for the given board state"""
# Args: (1) piece list
# Returns: differential (white points - black points)
# The points are calculated via the standard chess value system:
    # Pawn = 1, Knight = 3, Bishop = 3, Rook = 5, Queen = 9
# King = 100 (arbitrarily large)
differential = 0
# For all white pieces...
for i in range(0,16):
# If the piece is active, add its points to the counter
if piece_list[i].is_active:
differential = differential + piece_list[i].value
# For all black pieces...
for i in range(16,32):
# If the piece is active, subtract its points from the counter
if piece_list[i].is_active:
differential = differential - piece_list[i].value
# Return point differential
return differential
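
# Minimal usage sketch of the helpers above: build the standard starting
# position and inspect the encoded state. Expected values assume the default
# (non-random) initialization.
if __name__ == "__main__":
    pieces = initialize_pieces()
    print(board_state(pieces).shape)   # (8, 8, 12): one one-hot plane per piece group
    print(visualize_state(pieces))     # 8x8 grid of piece letters (uppercase = white)
    print(points(pieces))              # 0: material is balanced in the starting position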
``` |
{
"source": "jonziefle/random-names",
"score": 3
} |
#### File: random-names/scripts/process_names.py
```python
import os
import sys
import csv
import json
import argparse
# global variables
letterArray = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
ngramArray = ["1gram", "2gram", "3gram", "4gram", "5gram"]
genderArray = ["male", "female"]
def main(inputFiles, outputFile):
outputFileUnweighted = os.path.splitext(outputFile)[0] + "-unweighted.json"
outputFileWeighted = os.path.splitext(outputFile)[0] + "-weighted.json"
# data objects
data = {}
dataWeighted = {}
# initialize data object for gender and ngram
for gender in genderArray:
data[gender] = {}
dataWeighted[gender] = {}
for ngram in ngramArray:
data[gender][ngram] = {}
dataWeighted[gender][ngram] = {}
# iterates through all files
for inputFile in inputFiles:
# open and process csv file
with open(inputFile, newline='') as f:
print("Processing: " + inputFile)
reader = csv.reader(f)
# add counts for each letter
for row in reader:
name = "_" + row[0].lower() + "_"
if (row[1] == "M"):
gender = "male"
else:
gender = "female"
count = row[2]
# iterate through letters and multiplies by the name count
nameLength = len(name)
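                # e.g. for name "_amy_", the 2-gram pass counts the transitions
                # "_"->"a", "a"->"m", "m"->"y" and "y"->"_"; longer n-grams key on a
                # wider window of preceding characters as the "before" string.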
for i in range(nameLength):
for ngram in ngramArray:
beforeLetter = ""
if (ngram == "1gram" and name[i] != "_"):
beforeLetter = "_"
afterLetter = name[i]
elif (ngram == "2gram"):
gramLength = 2
if (i == 0):
beforeLetter = name[0:i+1]
afterLetter = name[i+1:gramLength]
elif (i >= gramLength):
beforeLetter = name[i-gramLength+1:i]
afterLetter = name[i]
elif (ngram == "3gram" and nameLength >= 3):
gramLength = 3
if (i == 0 or i == 1):
beforeLetter = name[0:i+1]
afterLetter = name[i+1:gramLength]
elif (i >= gramLength):
beforeLetter = name[i-gramLength+1:i]
afterLetter = name[i]
elif (ngram == "4gram" and nameLength >= 4):
gramLength = 4
if (i == 0 or i == 1):
beforeLetter = name[0:i+1]
afterLetter = name[i+1:gramLength]
elif (i >= gramLength):
beforeLetter = name[i-gramLength+1:i]
afterLetter = name[i]
elif (ngram == "5gram" and nameLength >= 5):
gramLength = 5
if (i == 0 or i == 1):
beforeLetter = name[0:i+1]
afterLetter = name[i+1:gramLength]
elif (i >= gramLength):
beforeLetter = name[i-gramLength+1:i]
afterLetter = name[i]
# increment letter count
if (beforeLetter != ""):
if (beforeLetter not in data[gender][ngram]):
data[gender][ngram][beforeLetter] = {}
dataWeighted[gender][ngram][beforeLetter] = {}
if (afterLetter not in data[gender][ngram][beforeLetter]):
data[gender][ngram][beforeLetter][afterLetter] = 0
dataWeighted[gender][ngram][beforeLetter][afterLetter] = 0
data[gender][ngram][beforeLetter][afterLetter] += 1
dataWeighted[gender][ngram][beforeLetter][afterLetter] += int(count)
# divide counts by total sum to get frequency
for gender in genderArray:
for ngram in ngramArray:
for beforeLetter in data[gender][ngram]:
afterLetterCountSum = sum(data[gender][ngram][beforeLetter].values())
for afterLetter, afterLetterCount in data[gender][ngram][beforeLetter].items():
afterLetterFrequency = round(afterLetterCount / afterLetterCountSum * 100, 4)
data[gender][ngram][beforeLetter][afterLetter] = afterLetterFrequency
afterLetterCountSum = sum(dataWeighted[gender][ngram][beforeLetter].values())
for afterLetter, afterLetterCount in dataWeighted[gender][ngram][beforeLetter].items():
afterLetterFrequency = round(afterLetterCount / afterLetterCountSum * 100, 4)
dataWeighted[gender][ngram][beforeLetter][afterLetter] = afterLetterFrequency
# print json data
#print(json.dumps(data, sort_keys=True, indent=2))
#print(json.dumps(dataWeighted, sort_keys=True, indent=2))
# write json data (unweighted)
with open(outputFileUnweighted, 'w') as f:
print("Writing: " + outputFileUnweighted)
json.dump(data, f, sort_keys=True)
# write json data (weighted)
with open(outputFileWeighted, 'w') as f:
print("Writing: " + outputFileWeighted)
json.dump(dataWeighted, f, sort_keys=True)
if __name__ == "__main__":
# parses command line for input file and output path
parser = argparse.ArgumentParser()
parser.add_argument('--input', nargs='+', help='<Required> Input File(s)', required=True)
parser.add_argument('--output', help='<Required> Output File', required=True)
args = parser.parse_args()
print(args)
# execute only if run as a script
main(args.input, args.output)
``` |
{
"source": "Jonzky/hm-diag",
"score": 2
} |
#### File: hm-diag/hw_diag/app.py
```python
import logging
import os
import sentry_sdk
from flask import Flask
from flask_apscheduler import APScheduler
from retry import retry
from hw_diag.cache import cache
from hw_diag.tasks import perform_hw_diagnostics
from hw_diag.views.diagnostics import DIAGNOSTICS
from hm_pyhelper.miner_param import provision_key
from sentry_sdk.integrations.flask import FlaskIntegration
DIAGNOSTICS_VERSION = os.getenv('DIAGNOSTICS_VERSION')
DSN_SENTRY = os.getenv('SENTRY_DIAG')
sentry_sdk.init(
dsn=DSN_SENTRY,
integrations=[FlaskIntegration()],
release=f"diagnostics@{DIAGNOSTICS_VERSION}",
)
DEBUG = bool(os.getenv('DEBUG', '0'))
log = logging.getLogger()
if DEBUG:
# Defaults to INFO if not explicitly set.
log.setLevel(logging.DEBUG)
@retry(ValueError, tries=10, delay=1, backoff=2, logger=log)
def perform_key_provisioning():
if not provision_key():
raise ValueError
def get_app(name):
try:
if os.getenv('BALENA_DEVICE_TYPE', False):
perform_key_provisioning()
except Exception as e:
log.error('Failed to provision key: {}'
.format(e))
app = Flask(name)
cache.init_app(app)
# Configure the backend scheduled tasks
scheduler = APScheduler()
scheduler.api_enabled = True
scheduler.init_app(app)
scheduler.start()
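    # Cron trigger with only `minute` set: the shipping task runs once an hour,
    # on the hour.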
@scheduler.task('cron', id='ship_diagnostics', minute='0')
def run_ship_diagnostics_task():
perform_hw_diagnostics(ship=True)
# Register Blueprints
app.register_blueprint(DIAGNOSTICS)
return app
``` |
{
"source": "Jonzky/hm-pktfwd",
"score": 2
} |
#### File: hm-pktfwd/pktfwd/pktfwd_app.py
```python
import os
from hm_pyhelper.hardware_definitions import variant_definitions
from pktfwd.utils import init_sentry, is_concentrator_sx1302, \
update_global_conf, write_diagnostics, \
await_system_ready, retry_start_concentrator
from hm_pyhelper.logger import get_logger
from hm_pyhelper.miner_param import retry_get_region, await_spi_available
LOGGER = get_logger(__name__)
# Name of envvar that reset_lgw.sh expects to contain the reset_pin value
RESET_LGW_RESET_PIN_ENV_KEY = "CONCENTRATOR_RESET_PIN"
class PktfwdApp:
def __init__(self, variant, region_override, region_filepath,
sx1301_region_configs_dir, sx1302_region_configs_dir,
sentry_key, balena_id, balena_app,
diagnostics_filepath, await_system_sleep_seconds,
reset_lgw_filepath,
util_chip_id_filepath, root_dir,
sx1302_lora_pkt_fwd_filepath, sx1301_lora_pkt_fwd_dir): # noqa
init_sentry(sentry_key, balena_id, balena_app)
self.set_variant_attributes(variant)
self.sx1301_region_configs_dir = sx1301_region_configs_dir
self.sx1302_region_configs_dir = sx1302_region_configs_dir
self.region_override = region_override
self.region_filepath = region_filepath
self.diagnostics_filepath = diagnostics_filepath
self.await_system_sleep_seconds = await_system_sleep_seconds
self.reset_lgw_filepath = reset_lgw_filepath
self.util_chip_id_filepath = util_chip_id_filepath
self.root_dir = root_dir
self.sx1301_lora_pkt_fwd_dir = sx1301_lora_pkt_fwd_dir
self.sx1302_lora_pkt_fwd_filepath = sx1302_lora_pkt_fwd_filepath
def start(self):
LOGGER.debug("STARTING PKTFWD")
self.prepare_to_start()
is_sx1302 = is_concentrator_sx1302(self.util_chip_id_filepath,
self.spi_bus)
update_global_conf(is_sx1302, self.root_dir,
self.sx1301_region_configs_dir,
self.sx1302_region_configs_dir, self.region,
self.spi_bus)
retry_start_concentrator(is_sx1302, self.spi_bus,
self.sx1302_lora_pkt_fwd_filepath,
self.sx1301_lora_pkt_fwd_dir,
self.reset_lgw_filepath,
self.diagnostics_filepath)
# retry_start_concentrator will hang indefinitely while the
# upstream packet_forwarder runs. The lines below will only
# be reached if the concentrator exits unexpectedly.
LOGGER.warning("Shutting down concentrator.")
self.stop()
def prepare_to_start(self):
"""
Performs additional initialization not done in __init__
because it depends on the filesystem being available.
"""
write_diagnostics(self.diagnostics_filepath, False)
await_spi_available(self.spi_bus)
self.region = retry_get_region(self.region_override,
self.region_filepath)
LOGGER.debug("Region set to %s" % self.region)
await_system_ready(self.await_system_sleep_seconds)
LOGGER.debug("Finished preparing pktfwd")
def stop(self):
LOGGER.debug("STOPPING PKTFWD")
write_diagnostics(self.diagnostics_filepath, False)
def set_variant_attributes(self, variant):
self.variant = variant
self.variant_attributes = variant_definitions[self.variant]
self.reset_pin = 23
# reset_lgw.sh is called throughout this app without supplying
# the reset pin as an argument. The script falls back to the
# value in envvar RESET_LGW_RESET_PIN_ENV_KEY.
os.environ[RESET_LGW_RESET_PIN_ENV_KEY] = str(self.reset_pin)
self.spi_bus = "spidev0.0"
LOGGER.debug("Variant %s set with reset_pin %s and spi_bus %s" %
(self.variant, self.reset_pin, self.spi_bus))
``` |
{
"source": "jonzlin95/ies-django-base",
"score": 2
} |
#### File: lib/ies_base/views.py
```python
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from models import TaggableModel
from django.contrib.contenttypes.models import ContentType
from serializers import TagSerializer
# Create your views here.
class AllTagsView(APIView):
"""
Returns a list of all tags in this project
"""
def get(self, request, *args, **kwargs):
out_list = []
for ct in ContentType.objects.all():
model = ct.model_class()
if model and isinstance(model(), TaggableModel):
for item in model.objects.all():
t = TagSerializer(data=item.get_tags())
if t.is_valid():
out_list.append(t.save())
else:
print t.errors
return Response(TagSerializer(out_list, many=True).data)
```
#### File: ies-django-base/ies_base/models.py
```python
from django.db import models
from django.db.models.fields.related import ManyToOneRel, ManyToManyField, RelatedField
from django.db.models.fields import AutoField
from django import VERSION
# Create your models here.
class ManualUpdateModel(models.Model):
class Meta:
abstract = True
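    # Convention used by auto_save(): a subclass may define a truthy "m_<field>"
    # companion attribute to mark <field> as manually maintained; such fields are
    # left out of update_fields, while every other concrete column is auto-saved.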
def auto_save(self, force_insert=False, force_update=False):
updated_fields = []
if VERSION >= (1, 8 ,0, '', 0):
for field in type(self)._meta.get_fields():
if (not field.name.startswith("m_") and
not isinstance(field, ManyToOneRel) and
not isinstance(field, AutoField) and
not isinstance(field, ManyToManyField)):
try:
if not getattr(self, "m_" + field.name):
updated_fields.append(field.name)
except AttributeError:
updated_fields.append(field.name)
else:
for field in type(self)._meta.fields:
if (not field.name.startswith("m_") and
not isinstance(field, models.ManyToManyField) and
not isinstance(field, models.AutoField)):
try:
if not getattr(self, "m_" + field.name):
updated_fields.append(field.name)
except AttributeError as e:
updated_fields.append(field.name)
try:
return self.save(update_fields=updated_fields, force_insert=force_insert, force_update=force_update)
except ValueError as e:
return self.save(force_insert=force_insert, force_update=force_update)
class TaggableModel(models.Model):
taggable = models.BooleanField(default=True)
class Meta:
abstract = True
def get_tags(self):
"""
Takes an object, and returns its tags. MUST be implemented to take advantage of automatic tagging
:return:
{
"name": Name of the tag
"related_tags": Related things to tag
"equivalent_names": Names that are the same (TSM = Team Solo Mid)
}
"""
raise NotImplementedError
class FollowableModel(models.Model):
PLAYER = 0
TEAM = 1
ORGANIZATION = 2
SERIES = 3
TOURNAMENT = 4
GAME_CHARACTER = 5
REGION = 6
NONE = 0
LEAGUE_OF_LEGENDS = 1
DOTA2 = 2
HEARTHSTONE = 3
CSGO = 4
TYPE_CHOICES = (
(PLAYER, "PLAYER"),
(TEAM, "TEAM"),
(ORGANIZATION, "ORGANIZATION"),
(SERIES, "SERIES"),
(TOURNAMENT, "TOURNAMENT"),
(GAME_CHARACTER, "GAME_CHARACTER"),
(REGION, "REGION"),
)
GAME_CHOICES = (
(NONE, "NONE"),
(LEAGUE_OF_LEGENDS, "LEAGUE OF LEGENDS"),
(DOTA2, "DOTA2"),
(HEARTHSTONE, "HEARTHSTONE"),
(CSGO, "CS:GO")
)
followable = models.BooleanField(default=True)
class Meta:
abstract = True
def get_following_information(self):
"""
Takes an object and returns the information for following that item
GAME_ENUM:
NONE = 0
LEAGUE_OF_LEGENDS = 1
DOTA2 = 2
TYPE_ENUM:
PLAYER = 0
TEAM = 1
ORGANIZATION = 2
SERIES = 3
TOURNAMENT = 4
:return:
{
"game": INTEGER based on ENUM above
"type": Integer based on ENUM above
"name": Display name
"object_id": ID of the object on the other end
"thumbnail_url": Absolute URL of the thumbnail for this followable
}
"""
raise NotImplementedError
``` |
{
"source": "jonzxz/project-piscator",
"score": 3
} |
#### File: app/machine_learning/utils.py
```python
import re
import joblib
from sklearn.ensemble import RandomForestClassifier
from typing import List, Tuple
# Cleans up a messed up HTML / tabbed raw content into space delimited content
def clean_up_raw_body(raw_text: str) -> str:
return ' '.join([line.strip() for line in raw_text.strip().splitlines() \
if line.strip()])
# Flattens a list of tuples for (Sender, SenderDomain) into [Sender, SenderDomain]
# By right there SHOULD only be a single pair but kept in list just in case!
# Even indexes are Sender and odd indexs are SenderDomains
def flatten_from_tuples(list_tupl: List[Tuple]) -> List:
return [item for tup in list_tupl for item in tup]
# Retrieves a list of [Sender, SenderDomain] and returns domain names only
# eg. ['Person', '<EMAIL>']
# Returns [Company.com]
# By right there should only be one entry but kept in list just in case
# set list to remove duplicates
def identify_domains(list_of_sender_domain_pairs: List):
if isinstance(list_of_sender_domain_pairs, list):
return list(set([item.split(sep='@')[1] for item \
in list_of_sender_domain_pairs if '@' in item]))
return list_of_sender_domain_pairs.split(sep='@')[-1]
def load_model(MODEL_NAME) -> RandomForestClassifier:
return joblib.load(MODEL_NAME)
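
# Minimal illustration of the helpers above on made-up input; runs only when
# this module is executed directly.
if __name__ == "__main__":
    raw = "<html>\n   Dear user,\n\n\tPlease verify your account\n</html>"
    print(clean_up_raw_body(raw))           # single space-delimited line
    pairs = [("Support Team", "support@example.com")]
    flat = flatten_from_tuples(pairs)        # ['Support Team', 'support@example.com']
    print(identify_domains(flat))            # ['example.com']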
```
#### File: app/models/EmailAddress.py
```python
from app import db, encryption_engine
## Utilities
from datetime import datetime
# Defines model for EmailAddress class
class EmailAddress(db.Model):
__tablename__ = 'email_address'
email_id = db.Column(db.Integer, primary_key=True)
email_address = db.Column(db.String(30), index=True, unique=True\
, nullable=False)
email_password = db.Column(db.String(255), nullable=False)
phishing_mail_detected = db.Column(db.Integer, nullable=True, default=0)
total_mails_checked = db.Column(db.Integer, nullable=True, default=0)
active = db.Column(db.Boolean, nullable=False, default=True)
last_updated = db.Column(db.DateTime, nullable=True, default=None)
created_at = db.Column(db.DateTime, index=True,default=datetime.now)
notification_preference = db.Column(db.Boolean, nullable=False, default=True)
# FK
owner_id = db.Column(db.Integer, db.ForeignKey('user.user_id'))
owner = db.relationship('User', backref='addresses')
phishing_mails = db.relationship('PhishingEmail', backref='owner'\
, lazy='dynamic')
def __repr__(self):
return "Email Address: {} -- Owned by User ID: {}"\
.format(self.email_address, self.owner_id)
def get_email_id(self) -> int:
return self.email_id
def get_email_address(self) -> str:
return self.email_address
def set_email_address(self, email_addr: str) -> None:
self.email_address = email_addr
def get_email_password(self) -> str:
return self.email_password
def set_email_password(self, pw: str) -> None:
self.email_password = encryption_engine.encrypt(pw)
    def get_decrypted_email_password(self) -> str:
return encryption_engine.decrypt(self.email_password)
def set_owner_id(self, user_id: int):
self.owner_id = user_id
def get_owner_id(self) -> int:
return self.owner_id
def get_phishing_mail_detected(self) -> int:
return self.phishing_mail_detected
def set_phishing_mail_detected(self, num_phish_detected: int) -> None:
self.phishing_mail_detected += num_phish_detected
def get_total_mails_checked(self) -> int:
return self.total_mails_checked
def set_total_mails_checked(self, num_mails_checked: int) -> None:
self.total_mails_checked += num_mails_checked
def get_active_status(self) -> bool:
return self.active
def set_active_status(self, boolean: bool) -> None:
self.active = boolean
def set_created_at(self, created_at: datetime) -> None:
self.created_at = created_at
def get_created_at(self) -> datetime:
return self.created_at
def get_notification_pref(self) -> bool:
return self.notification_preference
def set_notification_pref(self, pref: bool) -> None:
self.notification_preference = pref
def set_last_updated(self, last_updated: datetime) -> None:
self.last_updated = last_updated
def get_last_updated(self) -> datetime:
return self.last_updated
def get_prettified_date(self) -> str:
return self.get_last_updated().strftime('%d-%m-%Y %H:%M')
```
#### File: app/models/PhishingEmail.py
```python
from app import db
## Utilities
from datetime import datetime
from sqlalchemy import extract
from sqlalchemy.ext.hybrid import hybrid_property
# Defines model for EmailAddress class
class PhishingEmail(db.Model):
__tablename__ = 'phishing_email'
mail_id = db.Column(db.Integer, primary_key=True)
sender_address = db.Column(db.String(255), index=True, unique=False\
, nullable=False)
subject = db.Column(db.Text, nullable=False, unique=False)
content = db.Column(db.Text, nullable=False, unique=False)
created_at = db.Column(db.DateTime, index=True,default=datetime.now)
# FK
receiver_id = db.Column(db.Integer, db.ForeignKey('email_address.email_id')\
, index=True, unique=False, nullable=False)
def __repr__(self) -> str:
return "From: {}\nSubject: {}".format(self.sender_address, self.subject)
def get_sender_address(self) -> str:
return self.sender_address
def get_subject(self) -> str:
return self.subject
def get_detected_on(self) -> datetime:
return self.created_at
def get_created_month(self) -> int:
return self.created_at.month
"""
    The following hybrid properties retrieve phishing emails detected within a given
    year, month or week, for the monthly overview in the dashboard statistics
"""
@hybrid_property
def created_at_year(self):
return self.created_at.year
@created_at_year.expression
def created_at_year(cls):
return extract('year', cls.created_at)
@hybrid_property
def created_at_month(self):
return self.created_at.month
@created_at_month.expression
def created_at_month(cls):
return extract('month', cls.created_at)
@hybrid_property
def created_at_week(self):
return self.created_at.isocalendar()[1]
@created_at_week.expression
def created_at_week(cls):
return extract('week', cls.created_at)
```
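The paired `hybrid_property`/`.expression` definitions above let the same attribute work on instances and inside queries. A self-contained toy reduction of that pattern on in-memory SQLite (model and values are illustrative, not the application's own):
```python
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, DateTime, extract
from sqlalchemy.orm import declarative_base, Session
from sqlalchemy.ext.hybrid import hybrid_property

Base = declarative_base()

class Detection(Base):
    __tablename__ = 'detection'
    id = Column(Integer, primary_key=True)
    created_at = Column(DateTime, default=datetime.now)

    @hybrid_property
    def created_at_month(self):
        return self.created_at.month

    @created_at_month.expression
    def created_at_month(cls):
        return extract('month', cls.created_at)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Detection(created_at=datetime(2021, 1, 5)),
                     Detection(created_at=datetime(2021, 2, 7))])
    session.commit()
    print(session.query(Detection).filter(Detection.created_at_month == 2).count())  # 1
```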
#### File: app/views/views.py
```python
from app import db, logger
## Flask
from flask_admin import AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_login import current_user
from flask import redirect, url_for
## WTForms
from wtforms import StringField
from wtforms.validators import DataRequired, NumberRange, ValidationError, Email
class GlobalIndexView(AdminIndexView):
def is_accessible(self):
return current_user.is_authenticated and current_user.is_admin
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('login'))
class BaseView(ModelView):
def scaffold_filters(self, name):
filters = super().scaffold_filters(name)
if hasattr(self, 'column_filter_labels') \
and name in self.column_filter_labels:
for f in filters:
f.name = self.column_filter_labels[name]
return filters
class AdminBaseView(BaseView):
def is_accessible(self):
return current_user.is_authenticated and current_user.is_admin
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('login'))
## User table view
class AdminUserView(AdminBaseView):
### Display Rules
    # Page size can be set by the user
# PK displayed, selected columns relabelled and displayed
can_set_page_size = True
column_display_pk = True
column_list = ['user_id', 'username', 'created_at', 'last_logged_in'\
, 'is_active', 'is_admin', 'reset_token']
column_labels = {
'user_id' : 'ID',
'created_at' : 'Created At',
'last_logged_in' : 'Last Logged In',
'is_admin' : 'Administrator',
'is_active' : 'Active',
'reset_token' : 'Reset Token'
}
# Edit columns
column_editable_list = ['is_active']
create_modal = True
edit_modal = True
# column_searchable_list = ['username', 'user_id']
column_filters = ['user_id', 'username']
## Custom View template
list_template = 'admin/admin_base_list.html'
create_template = 'admin/admin_base_create.html'
edit_template = 'admin/admin_base_edit.html'
# Sortable columns
column_sortable_list = ['user_id', 'username', 'created_at', 'last_logged_in',\
'is_active', 'is_admin']
### Create / Edit form rules
# Additional fields not in column_list
# 'password' for creating new users
# 'change_password' for editing existing users
form_extra_fields = {
'password' : StringField('Password', validators=[DataRequired()]),
'change_password': StringField('<PASSWORD>') }
# Changes created_at and last_logged_in to be unmodifiable when editing entity
form_widget_args = {
'created_at' : {
'readonly' : True,
'disabled' : True
},
'last_logged_in' : {
'readonly' : True,
'disabled' : True
},
}
# Rulesets for creating and editing, these columns will appear in
# respective pages (create / edit)
form_create_rules = ['username', 'password', 'is_admin']
form_edit_rules = ['username', 'change_password', 'created_at',\
'last_logged_in', 'is_active', 'is_admin']
column_default_sort = 'user_id'
# Date formatters for created_at and last_logged_in columns
def create_date_format(view, context, model, name):
return model.created_at.strftime('%d-%m-%Y %H:%M:%S')
def last_login_format(view, context, model, name):
return model.last_logged_in.strftime('%d-%m-%Y %H:%M:%S') \
if model.last_logged_in else None
column_formatters = {
'created_at' : create_date_format,
'last_logged_in' : last_login_format
}
# Function on creating a new user or editing password of user
def on_model_change(self, form, model, is_created):
logger.info("User form submitted")
# If new user is created
if is_created:
logger.info("New user created: {}".format(model.get_username()))
model.set_password(form.password.data)
else:
# If change_password field has data.
# If edit and field is empty does not do anything
# hasattr is a check for if edit is done via Modal or inline in list
if hasattr(form, 'change_password') and form.change_password.data:
logger.info("User {}'s password is changed"\
.format(model.get_username()))
# Password hashing is automatically done on Model level
model.set_password(form.change_password.data)
# Function on deleting user - handling of deleting currently logged in user
def on_model_delete(self, model):
if model == current_user:
logger.error("Attempted to delete {} as {}. Throwing error"\
.format(model.get_username(), current_user.get_username()))
raise ValidationError('Cannot delete currently logged in account')
# Function to prefill edit forms - called when edit_view is invoked
# During editing of entries, username is set to readonly so it cannot be modified
def on_form_prefill(self, form, model):
form.username.render_kw = {
'readonly':True
}
class AdminEmailView(AdminBaseView):
### Display Rules
    # Page size can be set by the user
# PK displayed, selected columns relabelled and displayed
can_set_page_size = True
column_display_pk = True
column_list = ['email_id', 'email_address', 'owner_id',
'phishing_mail_detected', 'total_mails_checked', 'created_at', 'last_updated'\
, 'active', 'notification_preference']
column_labels = {
'email_id' : 'ID',
'email_address' : 'Email Address',
'owner_id' : 'Owner ID',
'phishing_mail_detected' : 'Detections',
'total_mails_checked' : 'Mails Checked',
'created_at' : 'Created At',
'last_updated' : 'Last Updated',
'active' : 'Active',
'notification_preference' : 'Notifications'
}
# Edit columns
column_editable_list = ['active', 'notification_preference']
create_modal = True
edit_modal = True
column_filters = ['owner_id', 'email_address', 'email_id', 'active'\
, 'notification_preference']
list_template = 'admin/admin_base_list.html'
create_template = 'admin/admin_base_create.html'
edit_template = 'admin/admin_base_edit.html'
# Sortable columns
columns_sortable_list = ['email_id', 'owner_id', 'phishing_mail_detected',\
'created_at', 'last_updated', 'active']
column_default_sort = 'email_id'
### Create / Edit form rules
# Additional fields not in column_list
# 'email_password' for creating new addresses
# 'change_password' for editing existing addresses
form_extra_fields = {
'email_password' : StringField('Password', validators=[DataRequired()]),
'change_password': StringField('<PASSWORD>')
}
# Read only columns for the following supposedly manually unmodifiable columns
form_widget_args = {
'last_mailbox_size' : {
'readonly' : True,
'disabled' : True
},
'created_at' : {
'readonly' : True,
'disabled' : True
},
'last_updated' : {
'readonly' : True,
'disabled' : True
}
}
# Rulesets for creating and editing, these columns will appear
# in respective pages (create / edit)
form_create_rules = ['email_address', 'email_password', 'owner']
form_edit_rules = ['email_address', 'change_password', 'owner', \
'created_at', 'last_updated', 'active', 'notification_preference' ]
# Date formatters for created_at and last_logged_in columns
def create_date_format(view, context, model, name):
return model.created_at.strftime('%d-%m-%Y %H:%M:%S')
def last_updated_format(view, context, model, name):
return model.last_updated.strftime('%d-%m-%Y %H:%M:%S')\
if model.last_updated else "Never"
column_formatters = {
'created_at' : create_date_format,
'last_updated' : last_updated_format
}
# Function on creating a new address or editing password of an address
def on_model_change(self, form, model, is_created):
logger.info("Email form submitted")
# If new email is created
if is_created:
logger.info("New email created: {}".format(model.get_email_address()))
model.set_email_password(form.email_password.data)
else:
if hasattr(form, 'change_password') and form.change_password.data:
logger.info("Email Addr {}'s password is changed"\
.format(model.get_email_address()))
model.set_email_password(form.change_password.data)
# Function to prefill edit forms - called when edit_view is invoked
# During editing of entries, email is set to readonly so it cannot be modified
def on_form_prefill(self, form, model):
form.email_address.render_kw = {
'readonly':True
}
class AdminPhishingView(AdminBaseView):
### Display Rules
    # Page size can be set by the user
# PK displayed, selected columns relabelled and displayed
can_set_page_size = True
column_display_pk = True
can_create = False
column_list = ['mail_id', 'sender_address', 'subject', 'created_at']
column_labels = {
'mail_id' : 'ID',
'sender_address' : 'Sender',
'subject' : 'Subject',
'created_at' : 'Detected At'
}
create_modal = True
edit_modal = True
column_searchable_list = ['subject']
## Custom View template
list_template = 'admin/admin_base_list.html'
create_template = 'admin/admin_base_create.html'
edit_template = 'admin/admin_base_edit.html'
# Sortable columns
column_sortable_list = ['mail_id', 'sender_address', 'created_at']
form_widget_args = {
'sender_address' : {
'readonly' : True
},
'subject' : {
'readonly' : True
},
'created_at' : {
'readonly' : True
},
'content' : {
'rows' : 12,
'readonly' : True
}
}
column_default_sort = 'mail_id'
def _content_formatter(view, context, model, name):
return model.content[:20] + '...' if len(model.content) > 20 else model.content
def _subject_formatter(view, context, model, name):
return model.subject[:30] + '...' if len(model.subject) > 100 else model.subject
column_formatters = {
'subject' : _subject_formatter,
'content' : _content_formatter
}
```
#### File: migrations/versions/e86dd3bc539c_change_admin_to_boolean.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e86dd3bc539c'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('email_address', sa.Column('active', sa.Boolean(), nullable=False))
op.add_column('email_address', sa.Column('email_password', sa.String(length=255), nullable=False))
op.add_column('email_address', sa.Column('last_mailbox_size', sa.Integer(), nullable=True))
op.add_column('email_address', sa.Column('last_updated', sa.DateTime(), nullable=True))
op.add_column('email_address', sa.Column('phishing_mail_detected', sa.Integer(), nullable=True))
op.add_column('user', sa.Column('is_active', sa.Boolean(), nullable=False))
op.add_column('user', sa.Column('is_admin', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('last_logged_in', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_logged_in')
op.drop_column('user', 'is_admin')
op.drop_column('user', 'is_active')
op.drop_column('email_address', 'phishing_mail_detected')
op.drop_column('email_address', 'last_updated')
op.drop_column('email_address', 'last_mailbox_size')
op.drop_column('email_address', 'email_password')
op.drop_column('email_address', 'active')
# ### end Alembic commands ###
```
#### File: project-piscator/preprocessing/utils.py
```python
import re
import joblib
from sklearn.ensemble import RandomForestClassifier
# Cleans up a messed up HTML / tabbed raw content into space delimited content
def clean_up_raw_body(raw_text):
return ' '.join([line.strip() for line in raw_text.strip().splitlines() \
if line.strip()])
# Flattens a list of tuples for (Sender, SenderDomain) into [Sender, SenderDomain]
# By right there SHOULD only be a single pair but kept in list just in case!
# Even indexes are Sender and odd indexs are SenderDomains
def flatten_from_tuples(list_tupl):
# print("Retrieved: {}".format(list_tupl))
return [item for tup in list_tupl for item in tup]
# Retrieves a list of [Sender, SenderDomain] and returns domain names only
# eg. ['Person', '<EMAIL>']
# Returns [Company.com]
# By right there should only be one entry but kept in list just in case
# set list to remove duplicates
def identify_domains(list_of_sender_domain_pairs):
if isinstance(list_of_sender_domain_pairs, list):
return list(set([item.split(sep='@')[1] for item in list_of_sender_domain_pairs if '@' in item]))
return list_of_sender_domain_pairs.split(sep='@')[-1]
# Reads all .eml files and rewrites it by prepending a tab for non-header starts
# this is so that the .eml files are modified such that the headers are properly parseable
# Example usage: format_all_mails('../../Mailboxes/IndividualTestMails/Phish/ORG/', 1, 45)
# All header values AND LINES CONTAINING +0000 are skipped the prepending.
# if lines with +0000 are tabbed the email body breaks
def format_all_mails(FILE_PATH, start, end):
spl = []
for i in range(start, end+1):
try:
with open ('{}{}.eml'.format(FILE_PATH, i), 'r', encoding='utf-8') as pf:
data = pf.read()
spl = data.split(sep='\n')
for idx, line in enumerate(spl):
result = re.search(r'(^(Received|Authentication-Results|DKIM-Signature|X-Facebook|Date|To|Subject|Reply-to|Return-Path|From|Errors-To|Feedback-ID|Content-|OriginalChecksum|Message-|X-.*:|--))', line)
if not result:
spl[idx] = '\t{}'.format(spl[idx])
# print(result)
with open('{}../{}.eml'.format(FILE_PATH, i), 'w', encoding='utf-8') as nf:
for line in spl:
nf.write(line + "\n")
nf.close()
except FileNotFoundError:
pass
def load_model(MODEL_NAME) -> RandomForestClassifier:
return joblib.load(MODEL_NAME)
```
#### File: tests/api/test_6_admin_edit_user.py
```python
import pytest
from test_2_authentication import login, register, logout
from app.models.User import User
from app import db
from app.utils.DBUtils import get_user_by_name
# Test valid amendment of user account
def test_edit_user(client, db):
# Creates a new user to be disabled via direct database access
TEST_DISABLE_USER = 'disableme'
TEST_DISABLE_PASSWORD = 'password'
new_user = User(username=TEST_DISABLE_USER)
    new_user.set_password(TEST_DISABLE_PASSWORD)
db.session.add(new_user)
db.session.commit()
# Logs in to admin account via API and asserts successful log in
USERNAME = 'admin'
PASSWORD = 'password'
login_response = login(client, USERNAME, PASSWORD)
assert login_response.status_code == 200
assert b'Administrator Dashboard' in login_response.data
# Retrieves newly created user - gets ID, username and assert current status is ACTIVE
user_to_disable = get_user_by_name(TEST_DISABLE_USER)
user_to_disable_id = user_to_disable.get_id()
user_to_disable_name = user_to_disable.get_username()
# Assert user is active
assert user_to_disable.get_active_status()
# Sends POST request to disable user - sets is_active to None
client.post(
'/admin/user/edit/?id={}'.format(user_to_disable_id), data={
'username' : '{}'.format(user_to_disable_name),
# 'is_active' : True
'is_active' : None
},
follow_redirects = True
)
# Assert user is now inactive
assert get_user_by_name(TEST_DISABLE_USER).get_active_status() == False
    # Sends POST request to re-enable user - sets is_active to True
client.post(
'/admin/user/edit/?id={}'.format(user_to_disable_id), data={
'username' : '{}'.format(user_to_disable_name),
'is_active' : True
},
follow_redirects = True
)
    # Assert user is now active again
assert get_user_by_name(TEST_DISABLE_USER).get_active_status() == True
```
#### File: tests/api/test_7_reset_password.py
```python
import pytest
from test_2_authentication import login, logout
from test_3_add_email import add_mail
from app.models.User import User
from app import db
from app.utils.DBUtils import get_user_by_name
from app.utils.FileUtils import get_server_mail_cred
from app.utils.DBUtils import get_email_address_by_address
def request_reset_password(client, db, username, email_address):
return client.post(
'/reset', data={
'username' : username,
'email_address' : email_address
},
follow_redirects=True
)
def update_new_password(client, db, new_password, token):
return client.post(
'/reset/change_password', data={
'token_received' : token,
            'new_password' : new_password
},
follow_redirects=True
)
# Test valid reset password request
def test_request_reset_password(client, db):
# Creates a new user
TEST_RESET_USER = 'resetmyaccount'
TEST_RESET_PASSWORD = 'password'
new_user = User(username=TEST_RESET_USER)
    new_user.set_password(TEST_RESET_PASSWORD)
db.session.add(new_user)
db.session.commit()
# Logs in to user and add an email address and log out
login_response = login(client, TEST_RESET_USER, TEST_RESET_PASSWORD)
assert login_response.status_code == 200
assert b'dashboard' in login_response.data
MAIL_CREDS = get_server_mail_cred()
TEST_EMAIL_ADDRESS = MAIL_CREDS[2]
TEST_EMAIL_PASSWORD = MAIL_CREDS[3]
response = add_mail(client, TEST_EMAIL_ADDRESS, TEST_EMAIL_PASSWORD)
assert response.status_code == 200
assert get_email_address_by_address(TEST_EMAIL_ADDRESS)
assert b'<EMAIL>' in response.data
logout(client)
reset_response = request_reset_password(client, db, TEST_RESET_USER\
, TEST_EMAIL_ADDRESS)
# Assert redirected to update password page
assert b'token' in reset_response.data
# Assert token is generated
assert get_user_by_name(TEST_RESET_USER).get_reset_token()
# Test valid reset password update
def test_update_forgotten_password(client, db):
TEST_RESET_USER = 'resetmyaccount'
NEW_PASSWORD = '<PASSWORD>'
USER_ENTITY = get_user_by_name(TEST_RESET_USER)
TOKEN_VALUE = USER_ENTITY.get_reset_token()
# Creates a session variable for id to be passed in to route
with client.session_transaction() as sess:
sess['reset_user_id'] = USER_ENTITY.get_id()
# Sends a post request to change_password with retrieved token
r = client.post(
'/reset/change_password', data={
'token' : TOKEN_VALUE,
            'new_password' : NEW_PASSWORD
},
follow_redirects=True
)
login_response = login(client, TEST_RESET_USER, NEW_PASSWORD)
# Assert TEST_RESET_USER token is None
assert not get_user_by_name(TEST_RESET_USER).get_reset_token()
# Assert successful login with new password
assert login_response.status_code == 200
assert b'dashboard' in login_response.data
```
#### File: tests/functional/conftest.py
```python
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
from app import db
from app.models.EmailAddress import EmailAddress
@pytest.fixture(scope='session')
def driver():
options = Options()
driver = webdriver.Chrome(options=options)
driver.maximize_window()
driver.get('http://localhost:5000')
yield driver
driver.quit()
""" Run after all tests
mail = db.session.query(EmailAddress).filter(EmailAddress.email_address == '<EMAIL>').first()
db.session.delete(mail)
user = db.session.query(User).filter(User.username == 'testuser123')
db.session.delete(user)
db.session.commit()
"""
```
#### File: tests/functional/test_4_user_settings.py
```python
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from .test_2_authentication import login, logout
from app import db
from time import sleep
from app.models.User import User
# Reusable function to click on "Account Settings" and "Disable Account" tab
def disable_acc_navtab(driver):
# Wait for account settings button to appear
wait_acc_set_btn = WebDriverWait(driver, 3)
wait_acc_set_btn.until(EC.element_to_be_clickable((By.XPATH\
, '//*[@id="user-panel"]/a[4]')))
driver.find_element(By.XPATH, '//*[@id="user-panel"]/a[4]').click()
# Assert redirected to /account page
assert driver.current_url.split(sep='/')[-1] == 'account'
# Wait for "Disable Account" navtab to appear
wait_nav_disable_acc_tab = WebDriverWait(driver, 3)
wait_nav_disable_acc_tab.until(EC.element_to_be_clickable((By.ID\
, 'nav-disable-account')))
driver.find_element(By.ID, 'nav-disable-account').click()
wait_disable_submit = WebDriverWait(driver, 3)
wait_disable_submit.until(EC.visibility_of_element_located((By.ID\
, 'disable_acc_submit')))
# Flows after deactivating email so user is in dashboard still
# Test password change in account setting by clicking into 'Account'
# Test invalid change password with empty passwords
def test_change_password_no_input(driver):
USERNAME = 'testuser123'
MISSING_PASSWORD_ERR = 'Invalid Current Password!'
# Wait for account settings button to appear and click
wait_acc_set_btn = WebDriverWait(driver, 3)
wait_acc_set_btn.until(EC.visibility_of_element_located((By.XPATH\
, '//*[@id="user-panel"]/a[4]')))
driver.find_element(By.XPATH, '//*[@id="user-panel"]/a[4]').click()
# Assert redirected to /account page
assert driver.current_url.split(sep='/')[-1] == 'account'
# Wait for update button to appear (for form to appear)
wait_update_btn = WebDriverWait(driver, 5)
wait_update_btn.until(EC.visibility_of_element_located((By.ID\
, 'update_password_submit')))
assert USERNAME in driver.page_source
driver.find_element_by_id('update_password_submit').click()
assert MISSING_PASSWORD_ERR in driver.page_source
# Test invalid change password with incorrect current password
def test_change_invalid_current_password(driver):
USERNAME = 'testuser123'
CUR_PASS = '<PASSWORD>'
NEW_PASS = '<PASSWORD>'
CONF_NEW_PASS = '<PASSWORD>'
CURRENT_PASSWORD_ERR = 'Invalid Current Password!'
assert USERNAME in driver.page_source
    driver.find_element_by_id('current_password').send_keys(CUR_PASS)
    driver.find_element_by_id('new_password').send_keys(NEW_PASS)
    driver.find_element_by_id('confirm_new_password').send_keys(CONF_NEW_PASS)
driver.find_element_by_id('update_password_submit').click()
assert CURRENT_PASSWORD_ERR in driver.page_source
# Test invalid change password with mismatched new passwords
def test_change_different_new_password(driver):
USERNAME = 'testuser123'
CUR_PASS = 'password'
NEW_PASS = '<PASSWORD>'
CONF_NEW_PASS = '<PASSWORD>'
DIFFERENT_PASSWORD_ERR = 'New Password and Confirm New Password must match!'
assert USERNAME in driver.page_source
    driver.find_element_by_id('current_password').send_keys(CUR_PASS)
    driver.find_element_by_id('new_password').send_keys(NEW_PASS)
driver.find_element_by_id('confirm_new_password').send_keys(CONF_NEW_PASS)
driver.find_element_by_id('update_password_submit').click()
assert DIFFERENT_PASSWORD_ERR in driver.page_source
# Test valid change password and login with new password
def test_change_password(driver):
USERNAME = 'testuser123'
CUR_PASS = 'password'
NEW_PASS = '<PASSWORD>'
CONF_NEW_PASS = '<PASSWORD>'
CHANGE_PASSWORD_SUCCESS = 'Password Successfully Changed!'
    driver.find_element_by_id('current_password').send_keys(CUR_PASS)
driver.find_element_by_id('new_password').send_keys(NEW_PASS)
driver.find_element_by_id('confirm_new_password').send_keys(CONF_NEW_PASS)
driver.find_element_by_id('update_password_submit').click()
assert CHANGE_PASSWORD_SUCCESS in driver.page_source
# Logs out after changing password and reattempts login with new password
# assert log out successful
logout(driver)
assert driver.current_url.split(sep='/')[-1] == 'index'
# Assert login successful with new password
login(driver, USERNAME, NEW_PASS)
assert driver.current_url.split(sep='/')[-1] == 'dashboard'
# Flows after password change
# Test invalid disable account with no slider enabled
def test_without_slider_disable_account(driver):
USERNAME = 'testuser123'
PASSWORD = '<PASSWORD>'
INVALID_DISABLE_ACC_ERR = 'If you intend to disable your account, click the slider!'
disable_acc_navtab(driver)
driver.find_element_by_id('disable_acc_current_password').send_keys(PASSWORD)
driver.find_element_by_id('disable_acc_submit').click()
assert driver.current_url.split(sep='/')[-1] == 'account'
assert INVALID_DISABLE_ACC_ERR in driver.page_source
# Test invalid disable account with empty current password
def test_without_password_disable_account(driver):
USERNAME = 'testuser123'
INVALID_DISABLE_ACC_PASS_ERR = 'Invalid Current Password!'
disable_acc_navtab(driver)
checkbox = driver.find_element_by_css_selector("#disable_acc_switch")
driver.execute_script("arguments[0].click();", checkbox)
sleep(3)
driver.find_element_by_id('disable_acc_submit').click()
assert driver.current_url.split(sep='/')[-1] == 'account'
assert INVALID_DISABLE_ACC_PASS_ERR in driver.page_source
# Test invalid disable account with incorrect current password
def test_wrong_password_disable_account(driver):
USERNAME = 'testuser123'
PASSWORD = '<PASSWORD>'
INVALID_DISABLE_ACC_PASS_ERR = 'Invalid Current Password!'
disable_acc_navtab(driver)
driver.find_element_by_id('disable_acc_current_password').send_keys(PASSWORD)
checkbox = driver.find_element_by_css_selector("#disable_acc_switch")
driver.execute_script("arguments[0].click();", checkbox)
sleep(3)
driver.find_element_by_id('disable_acc_submit').click()
assert driver.current_url.split(sep='/')[-1] == 'account'
assert INVALID_DISABLE_ACC_PASS_ERR in driver.page_source
# Test valid account disable
def test_disable_account(driver):
USERNAME = 'testuser123'
PASSWORD = '<PASSWORD>'
DISABLE_LOGOUT_MSG = 'Account is Disabled! You\'ll be logged out in 5 seconds..'
ACCOUNT_IS_DISABLED = 'Account is disabled, contact support!'
disable_acc_navtab(driver)
# Enters current password
# Slider for disable is actually a checkbox that must be interacted using JS
driver.find_element_by_id('disable_acc_current_password').send_keys(PASSWORD)
checkbox = driver.find_element_by_css_selector("#disable_acc_switch")
driver.execute_script("arguments[0].click();", checkbox)
sleep(3)
driver.find_element_by_id('disable_acc_submit').click()
assert DISABLE_LOGOUT_MSG in driver.page_source
wait_home = WebDriverWait(driver, 10)
wait_home.until(EC.visibility_of_element_located((By.ID, 'home-nav')))
assert driver.current_url.split(sep='/')[-1] == 'index'
# Assert login failed so user does not go into dashboard
login(driver, USERNAME, PASSWORD)
assert driver.current_url.split(sep='/')[-1] != 'dashboard'
assert ACCOUNT_IS_DISABLED in driver.page_source
assert db.session.query(User)\
.filter(User.username == USERNAME)\
.first().get_active_status() == False
```
#### File: tests/functional/test_6_admin_edit_user.py
```python
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from .test_2_authentication import login, logout
from app.models.User import User
from app import db
from time import sleep
# Flows from test_5 admin logout so this starts from homepage
# Creates a new dummy user via direct DB access to be tested on
# Logs in using utility login function, retrieves new user ID, disable active status of user.
# Test valid disabling of user account
def test_admin_disable_user(driver):
TEST_DISABLE_USER = 'disableme'
TEST_DISABLE_PASSWORD = 'password'
# Creation of dummy user
new_user = User(username=TEST_DISABLE_USER)
    new_user.set_password(TEST_DISABLE_PASSWORD)
db.session.add(new_user)
db.session.commit()
# Logs in to admin dashboard
ADMIN_USER = 'admin'
PASSWORD = 'password'
login(driver, ADMIN_USER, PASSWORD)
    # Search for the newly created user for its ID
user_to_disable = db.session.query(User)\
.filter(User.username ==TEST_DISABLE_USER).first()
user_to_disable_id = user_to_disable.get_id()
# Clicks on 'Users' in admin navbar
wait_user_btn = WebDriverWait(driver, 3)
wait_user_btn.until(EC.visibility_of_element_located((By.XPATH\
, '//*[@id="admin-panel"]/a[2]')))
driver.find_element(By.XPATH, '//*[@id="admin-panel"]/a[2]').click()
    # Searches for edit (a pencil in 1st column) by href where its URL is href_link
# and click to enter edit page for the particular user
href_link = '//a[@href="{}"]'\
.format('/admin/user/edit/?id={}&url=%2Fadmin%2Fuser%2F&modal=True'\
.format(user_to_disable_id))
wait_user_entry = WebDriverWait(driver, 5)
wait_user_entry.until(EC.visibility_of_element_located((By.XPATH, href_link)))
driver.find_element(By.XPATH, href_link).click()
# The sleep(2) is required otherwise wait_active_box does not work
sleep(2)
# Unchecks active checkbox
wait_active_box = WebDriverWait(driver, 3)
wait_active_box.until(EC.visibility_of_element_located((By.XPATH\
, '//*[@id="fa_modal_window"]/div/div/form/fieldset/div/div[5]/input')))
checkbox = driver.find_element_by_css_selector(".form-control-lg#is_active")
driver.execute_script("arguments[0].click();", checkbox)
# Clicks "Save" in edit page
# The click here is done by send_keys(Keys.RETURN) simulating enter on the key
# And the sleep(2) is required otherwise the click will NOT work sometimes
sleep(2)
wait_submit = WebDriverWait(driver, 3)
wait_submit.until(EC.visibility_of_element_located((By.XPATH\
, '//*[@id="fa_modal_window"]/div/div/form/fieldset/div[2]/input')))
driver.find_element(By.XPATH\
, '//*[@id="fa_modal_window"]/div/div/form/fieldset/div[2]/input')\
.send_keys(Keys.RETURN)
# Finally redirects to list view again after editing user status
# The database is actually updated at this point
# But Assertion statement fails here for some reason - 20/12/20 <NAME>
# ***Check for this test is done in the next test***
# As mentioned above, assertion have to be done in a separate test
# in order to reflect the correct user active status, no idea why - 25/12/20 <NAME>
# Secondary function to assert previous test
def test_assert_disable_user(driver):
TEST_DISABLE_USER = 'disableme'
updated_user = db.session.query(User)\
.filter(User.username == TEST_DISABLE_USER).first()
assert updated_user.get_active_status() == False
# Test valid enabling of user account
def test_enable_user(driver):
TEST_ENABLE_USER = 'disableme'
    # Search for the newly created user for its ID
user_to_enable = db.session.query(User)\
.filter(User.username == TEST_ENABLE_USER).first()
user_to_enable_id = user_to_enable.get_id()
# Clicks on 'Users' in admin navbar
wait_user_btn = WebDriverWait(driver, 3)
wait_user_btn.until(EC.visibility_of_element_located((By.XPATH\
, '//*[@id="admin-panel"]/a[2]')))
driver.find_element(By.XPATH, '//*[@id="admin-panel"]/a[2]').click()
    # Searches for edit (a pencil in 1st column) by href where its URL is href_link
# and click to enter edit page for the particular user
href_link = '//a[@href="{}"]'\
.format('/admin/user/edit/?id={}&url=%2Fadmin%2Fuser%2F&modal=True'\
.format(user_to_enable_id))
wait_user_entry = WebDriverWait(driver, 5)
wait_user_entry.until(EC.visibility_of_element_located((By.XPATH, href_link)))
driver.find_element(By.XPATH, href_link).click()
# The sleep(2) is required otherwise wait_active_box does not work
sleep(2)
# Checks active checkbox
wait_active_box = WebDriverWait(driver, 3)
wait_active_box.until(EC.visibility_of_element_located((By.XPATH\
, '//*[@id="fa_modal_window"]/div/div/form/fieldset/div/div[5]/input')))
checkbox = driver.find_element_by_css_selector(".form-control-lg#is_active")
driver.execute_script("arguments[0].click();", checkbox)
# Clicks "Save" in edit page
# The click here is done by send_keys(Keys.RETURN) simulating enter on the key
# And the sleep(2) is required otherwise the click will NOT work sometimes
sleep(2)
wait_submit = WebDriverWait(driver, 3)
wait_submit.until(EC.visibility_of_element_located((By.XPATH\
, '//*[@id="fa_modal_window"]/div/div/form/fieldset/div[2]/input')))
driver.find_element(By.XPATH\
, '//*[@id="fa_modal_window"]/div/div/form/fieldset/div[2]/input')\
.send_keys(Keys.RETURN)
# Finally redirects to list view again after editing user status
# The database is actually updated at this point
# But Assertion statement fails here for some reason - 20/12/20 <NAME>
# ***Check for this test is done in the next test***
# As mentioned above, assertion have to be done in a separate test
# in order to reflect the correct user active status, no idea why - 25/12/20 <NAME>
# Secondary function to assert previous test
def test_assert_enable_user(driver):
TEST_ENABLE_USER = 'disableme'
updated_user = db.session.query(User)\
.filter(User.username == TEST_ENABLE_USER).first()
assert updated_user.get_active_status() == True
# Logout back to index
wait_logout = WebDriverWait(driver, 5)
wait_logout.until(EC.visibility_of_element_located((By.XPATH\
, '//*[@id="admin-panel"]/a[5]')))
driver.find_element(By.XPATH, '//*[@id="admin-panel"]/a[5]').click()
# Assert redirected to index
assert driver.current_url.split(sep='/')[-1] == 'index'
``` |
{
"source": "jooaomarcos/Projeto-Agenda-de-Contato-em-Python",
"score": 4
} |
#### File: Projeto-Agenda-de-Contato-em-Python/AgendaContato/lib.py
```python
from banco import con
from time import sleep
import os
# Validação de Valor Inteiro
def leiaint(valor):
while True:
try:
ent = int(input(valor))
except:
print('\033[1;33mDigite um valor inteiro\033[m')
else:
break
return ent
# Validação de String
def leiaTexto(txt):
    while True:
        ent = str(input(txt))
        if ent.isnumeric():
            print('\033[1;33mDigite um texto válido\033[m')
        else:
            break
    return ent
# Cabecalho
def cabecalho(msg):
print('-'*40)
print(msg.center(40).upper())
print('-'*40)
# Menu Principal
def menuprincipal():
print('''
[1] - Inserir Contato
[2] - Listar Contatos
[3] - Consultar Contato
[4] - Editar Contato
[5] - Excluir
[6] - Sair
''')
# Inserir Contato
def insertContato():
cabecalho('NOVO CONTATO')
try:
while True:
regs = leiaTexto('Nº REGISTRO: ').strip()
if len(regs) < 5 or len(regs) > 5:
print('\033[1;33mPor favor, insira um registro válido\033[m')
else:
break
while True:
nome = leiaTexto('NOME: ').strip().title()
if len(nome) == 0 or nome.isnumeric():
print('\033[1;33mPreencha o campo\033[m')
else:
break
while True:
matr = leiaTexto('CHAPA: ').strip().upper()
if len(matr) <= 4 or len(matr) > 5:
print('\033[1;33mPor favor, insira uma matricula válida\033[m')
else:
break
while True:
func = leiaTexto('FUNÇÃO: ').strip().title()
if len(func) == 0 or func.isnumeric():
print('\033[1;33mPreencha o campo\033[m')
else:
break
while True:
period = leiaint('PERÍODO: ')
if period < 1 or period > 2:
print('\033[1;33mPor favor, insira um período corretamente\033[m')
else:
break
while True:
tel = leiaTexto('TELEFONE 1: ').strip()
if len(tel) < 11 or len(tel) > 14:
print('\033[1;33mPor favor, Insira um telefone válido\033[m')
else:
break
while True:
tel_2 = leiaTexto('TELEFONE 2: ').strip()
if len(tel_2) > 14:
print('\033[1;33mTelefone Inválido\033[m')
else:
break
except:
print('\033[1;31mErro na Inserção de dados\033[m')
else:
try:
c = con.cursor()
except ConnectionError:
print('\033[1;31mErro na conexão com o banco de dados\033[m')
else:
try:
ssql = 'SELECT * FROM contato WHERE registro= "'+regs+'"'
c.execute(ssql)
inserir = c.fetchall()
except:
print('\033[1;33mErro na conferência\033[m')
else:
if inserir:
print('\033[1;33mCONTATO JÁ EXISTE\033[m')
else:
try:
sql = 'INSERT INTO contato(registro, nome, matricula, funcao, periodo, telefone, telefone_2) SELECT "'+regs+'", "'+nome+'", "'+matr+'", "'+func+'", "'+str(period)+'", "'+tel+'", "'+tel_2+'" WHERE NOT EXISTS (SELECT 1 FROM contato WHERE registro = "'+regs+'")'
c.execute(sql)
except:
print(f'Erro ao inserir contato')
else:
print('\033[1;32mCONTATO INSERIDO COM SUCESSO!\033[m')
con.commit()
# Listar Contatos
def listarContatos():
cabecalho('LISTAR CONTATOS')
try:
c = con.cursor()
except ConnectionError:
print('\033[1;31mErro na conexão com o banco de dados\033[m')
else:
try:
lsql = 'SELECT * FROM contato ORDER BY registro asc'
c.execute(lsql)
except:
print('\033[1;33mErro ao listar contatos\033[m')
else:
dados = c.fetchall()
contador = 0
limite = 30
for d in dados:
print(f'\033[1;36mNº REGISTRO:\033[m{d[1]} \033[1;36mNOME:\033[m{d[2]:<32} \033[1;36mCHAPA:\033[m{d[3]} \033[1;36mFUNÇÃO:\033[m{d[4]:<10} \033[1;36mPERÍODO:\033[m{d[5]} \033[1;36mTELEFONE:\033[m{d[6]} \033[1;36mTELEFONE 2:\033[m{d[7]}')
print()
contador += 1
if contador > limite:
contador = 0
os.system('pause')
os.system('cls')
con.commit()
while True:
v = leiaint('PRESSIONE 8 PARA VOLTAR AO MENU: ')
        if v != 8:
print('\033[1;33mpressione a tecla correta\033[m')
else:
break
os.system('cls')
# Consultar Contato
def consContato():
cabecalho('CONSULTAR CONTATO')
try:
while True:
regs = leiaTexto('Nº REGISTRO: ').strip()
if len(regs) < 5 or len(regs) > 5:
print('\033[1;33mPor favor, insira um registro válido\033[m')
else:
break
except:
print('\033[1;31mErro na consulta do contato\033[m')
else:
try:
c = con.cursor()
except ConnectionError:
print('\033[1;31mErro na conexão com o banco de dados\033[m')
else:
try:
csql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
c.execute(csql)
mostra = c.fetchall()
except:
print('\033[1;33mErro ao Consultar Contato\033[m')
else:
if mostra:
for m in mostra:
print(f'\033[1;36mNº REGISTRO:\033[m{m[1]} \033[1;36mNOME:\033[m{m[2]} \033[1;36mCHAPA:\033[m{m[3]} \033[1;36mFUNÇÃO:\033[m{m[4]:^<8} \033[1;36mPERÍODO:\033[m{m[5]} \033[1;36mTELEFONE:\033[m{m[6]} \033[1;36mTELEFONE 2:\033[m{m[7]}')
else:
print('\033[1;33mESSE CONTATO NÃO ESTÁ CADASTRADO\033[m')
con.commit()
# Editar Contato
def editContato():
cabecalho('EDITAR CONTATO')
try:
while True:
regs = leiaTexto('Nº REGISTRO: ').strip()
if len(regs) < 5 or len(regs) > 5:
print('\033[1;33mPor favor, digite um registro válido\033[m')
else:
break
except:
print('\033[1;33mErro no contato\033[m')
else:
try:
c = con.cursor()
except:
print('\033[1;31mErro na Conexão com Banco de Dados\033[m')
else:
try:
sql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
c.execute(sql)
mostra = c.fetchall()
except:
print('\033[1;33mErro na busca do contato\033[m')
else:
if mostra:
while True:
period = leiaint('PERÍODO: ')
if period < 1 or period > 2:
print('\033[1;33mPor favor, insira um período corretamente\033[m')
else:
break
while True:
tel = leiaTexto('TELEFONE 1: ').strip()
if len(tel) < 11 or len(tel) > 14:
print('\033[1;33mPor favor, Insira um telefone válido\033[m')
else:
break
while True:
tel_2 = leiaTexto('TELEFONE 2: ').strip()
if len(tel_2) > 14:
print('\033[1;33mTelefone Inválido\033[m')
else:
break
esql = 'UPDATE contato SET periodo="'+str(period)+'", telefone="'+tel+'", telefone_2="'+tel_2+'" WHERE registro= "'+regs+'"'
c.execute(esql)
con.commit()
print('\033[1;32mCONTATO ALTERADO COM SUCESSO!\033[m')
sleep(1)
else:
print('\033[1;33mCONTATO NÃO ESTÁ CADASTRADO\033[m')
# Deletar Contato
def apagaContato():
cabecalho('APAGAR CONTATO')
try:
while True:
regs = leiaTexto('Nº Registro que deseja apagar o contato: ').strip()
if len(regs) < 5 or len(regs) > 5:
print('\033[1;33mPor favor, digite um registro válido\033[m')
else:
break
except:
print('\033[1;33mErro na busca do contato\033[m')
else:
try:
c = con.cursor()
except ConnectionError:
print('\033[1;31mErro na conexão com o banco de dados\033[m')
else:
try:
sql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
c.execute(sql)
mostra = c.fetchall()
except:
print('\033[1;33mErro na busca do contato\033[m')
else:
while True:
resp = leiaTexto('Tem certeza que deseja apagar o registro [S/N] ?: ').strip().upper()[0]
if resp not in 'SN':
print('Responda')
else:
break
if resp in 'S':
if mostra:
try:
dsql = 'DELETE FROM contato WHERE registro = "'+regs+'"'
c.execute(dsql)
except:
print('\033[1;33mErro ao deletar contato\033[m')
else:
print('\033[1;32mCONTATO DELETADO COM SUCESSO!\033[m')
con.commit()
else:
print('\033[1;33mCONTATO NÃO ESTÁ CADASTRADO\033[m')
else:
print('nada deletado')
``` |
{
"source": "joobih/captcha_identify",
"score": 3
} |
#### File: captcha_identify/gen_captcha/gen_sample_base.py
```python
import os
from random import choice, randint, uniform
import json
import time
from PIL.ImageDraw import Draw
"""
sudo pip3 install PIL
"""
from PIL import Image, ImageDraw, ImageFont, ImageFilter
class VerifyCode(object):
"""生成验证码模块"""
# 去掉易混淆字符'i' 'I' 'l' 'L' 'o' 'O' 'z' 'Z'
DEFAULT_CHARS = "0123456789abcdefghjkmnpqrstuvwxyABCDEFGHJKMNPQSTUVWXY"
# 默认字体
DEFAULT_FONTS = ("./fonts/Arial.ttf", "./fonts/DeeDee.ttf")
# 默认字体大小
DEFAULT_FONTS_SIZE = (34, 37, 40, 43, 46)
# 默认长宽
DEFAULT_WIDTH, DEFAULT_HEIGHT = 160, 40
# 默认字符长度
DEFAULT_LENGHT = 6
# 默认间隔距离倍数
DEFAULT_INTERVAL = 0.8
# 默认扰动角度范围
DEFAULT_ROTATE_INTERVAL = (-30, 30)
def __init__(self, length=DEFAULT_LENGHT,
width=DEFAULT_WIDTH,
height=DEFAULT_HEIGHT,
fonts=DEFAULT_FONTS,
fonts_size=DEFAULT_FONTS_SIZE,
characters=DEFAULT_CHARS,
char_color = None,
background_color = None,
interval = DEFAULT_INTERVAL,
is_guss = False,
rotate_interval = DEFAULT_ROTATE_INTERVAL
):
"""
验证码初始化方法
:param length: 验证码长度 默认length=6
:param width: 验证码图片宽度 默认width=160
        :param height: 验证码图片高度 默认height=40
        :param fonts_size: 字体大小 默认fonts_size=(34, 37, 40, 43, 46)
:param char_color: 验证码字体颜色,默认随机
:param interval: 每个字符之间的距离倍数设置,默认为0.8
:param is_guss: 是否进行高斯模糊,默认为否
:param background_color: 背景图,默认随机浅色范围
:param rotate_interval: 扰动范围参数,不扰动(0, 0)
"""
self._verify_code_image = None # PIL图片Image对象
self._length = length # 验证码长度
self._width = width # 图片宽度
self._height = height # 图片高度
self._fonts = fonts
self._font_size = fonts_size # 字体大小
self._characters = characters
self._char_color = char_color
self._interval = interval
self._is_guss = is_guss
self._background_color = background_color
self._rotate_interval = rotate_interval
def get_random_code(self):
"""
随机生成验证码字符
"""
code = '' # 生成的验证码
for _ in range(self._length): # 循环随机取一个字符
code += choice(self._characters)
return code
@staticmethod
def random_color(s=0, e=255):
"""
随机生成RGB颜色
:param s: 开始范围
:param e: 结束范围
:return: Tuple (r, g, b)
"""
s = s if 0 <= s <= 255 else 0 # 限定范围 0 - 255
e = e if 0 <= e <= 255 else 255 # 限定范围 0 - 255
s, e = (s, e) if s < e else (e, s) # 限定大小 s 必须小于 e
return randint(s, e), randint(s, e), randint(s, e)
def set_image(self, random_code):
"""
生成验证码图片
:return: None
"""
# 创建一个Image对象, 全黑的画布
if not self._background_color:
self._background_color = self.random_color()
image = Image.new('RGB', (self._width, self._height), self._background_color)
# 创建一个字体对象
# table = []
# for i in range(256):
# table.append(i * 1.97)
# font = ImageFont.truetype('fonts/Arial.ttf', self._font_size)
# 创建一个画图对象
draw = ImageDraw.Draw(image)
# for循环随机生成噪点
for x in range(self._width):
for y in range(self._height):
temp = x + y + randint(0, 10)
if temp % 5 == 0:
draw.point((x, y), fill=self.random_color(100, 200))
# for循环将字符添加到图中
for t in range(self._length):
dev_x = randint(0, 4) # 随机左右浮动
dev_y = randint(0, 2) # 睡觉上下浮动
# print(self._font_size)
rand_font_size = choice(list(self._font_size))
# print(rand_font_size, type(rand_font_size))
x, y = rand_font_size * self._interval * t + dev_x, dev_y
# print(x, y, rand_font_size)
# 将字符通过随机颜色画到图片中
rand_font_size = choice(list(self._font_size))
rand_font = choice(self._fonts)
font = ImageFont.truetype(rand_font, rand_font_size)
if not self._char_color:
char_color = self.random_color()
else:
char_color = self._char_color
# im = Image.new('RGBA', (0,0,0,0))
# Draw(im).text((x, y), random_code[t],
# font=font, fill=char_color)
# imdraw =
# mask = im.convert('L').point(table)
# image.paste(im, (0, int((self._height - h) / 2)), mask)
w, h = draw.textsize("2", font=font)
dx = randint(0, 4)
dy = randint(0, 3)
im = Image.new('RGBA', (w + dx, h + dy))
Draw(im).text((dx, dy), random_code[t], font=font, fill=char_color)
im = im.rotate(uniform(self._rotate_interval[0], self._rotate_interval[1]), Image.BILINEAR, expand=1)
r, g, b, a = im.split()
image.paste(im, (int(x), y), mask=a)
# draw.text((x, y), random_code[t],
# font=font, fill=char_color)
# im = im.rotate(uniform(-30, 30), Image.BILINEAR, expand=1)
# for x in range(self._width):
# for y in range(self._height):
# temp = x + y + randint(0, 10)
# if temp % 5 == 0:
# draw.point((x, y), fill=self.random_color(100, 200))
# 进行高斯模糊
if self._is_guss:
image = image.filter(ImageFilter.GaussianBlur)
# image = image.filter(ImageFilter.SMOOTH)
# 将图片对象赋值给当前对象的verify_code_image属性
return image
def test_img():
import random
    s_width, s_height = 120, 40
image = Image.new("RGB", (120, 40), (255,255,255))
draw = Draw(image)
# image.show()
# im = Image.new("RGB", )
fontpath = "./fonts/Arial.ttf"
font = ImageFont.truetype(fontpath, 35)
draw.text((0,0),"1", font=font, fill=(0, 0, 0))
w, h = draw.textsize("2", font=font)
dx = random.randint(0, 4)
dy = random.randint(0, 6)
im = Image.new('RGBA', (w + dx, h + dy))
Draw(im).text((dx, dy), "2", font=font, fill=(56, 90, 0))
im = im.rotate(random.uniform(-90, 90), Image.BILINEAR, expand=1)
r, g, b, a = im.split()
image.paste(im, (10, int((s_height - h) / 2)), mask=a)
# im.show()
image.show()
# def _draw_character(c):
# font = "./fonts/Arial.ttf"
# fonts = ImageFont.truetype(font, 35)
# w, h = draw.textsize(c, font=fonts)
#
# dx = random.randint(0, 4)
# dy = random.randint(0, 6)
# im = Image.new('RGBA', (w + dx, h + dy))
# color = (0,0,0,10)
# Draw(im).text((dx, dy), c, font=font, fill=color)
#
# # rotate
# im = im.crop(im.getbbox())
# im = im.rotate(random.uniform(-30, 30), Image.BILINEAR, expand=1)
#
# # warp
# dx = w * random.uniform(0.1, 0.3)
# dy = h * random.uniform(0.2, 0.3)
# x1 = int(random.uniform(-dx, dx))
# y1 = int(random.uniform(-dy, dy))
# x2 = int(random.uniform(-dx, dx))
# y2 = int(random.uniform(-dy, dy))
# w2 = w + abs(x1) + abs(x2)
# h2 = h + abs(y1) + abs(y2)
# data = (
# x1, y1,
# -x1, h2 - y2,
# w2 + x2, h2 + y2,
# w2 - x2, -y1,
# )
# im = im.resize((w2, h2))
# im = im.transform((w, h), Image.QUAD, data)
# return im
#
# images = []
# chars = "1234"
# for c in chars:
# if random.random() > 0.5:
# images.append(_draw_character(" "))
# images.append(_draw_character(c))
#
# text_width = sum([im.size[0] for im in images])
#
#
# width = max(text_width, s_width)
# image = image.resize((width, s_height))
#
# average = int(text_width / len(chars))
# rand = int(0.25 * average)
# offset = int(average * 0.1)
# table = []
# for i in range(256):
# table.append(i * 1.97)
# for im in images:
# w, h = im.size
# mask = im.convert('L').point(table)
# image.paste(im, (offset, int((s_height - h) / 2)), mask)
# offset = offset + w + random.randint(-rand, 0)
#
# if width > s_width:
# image = image.resize((s_width, s_height))
# image.show()
def get_one():
with open("../conf/captcha_config.json", "r") as f:
config = json.load(f)
# 配置参数
root_dir = "./test_img/"
if not os.path.exists(root_dir):
os.makedirs(root_dir)
image_suffix = config["image_suffix"] # 图片储存后缀
characters = config["characters"] # 图片上显示的字符集 # characters = "0123456789abcdefghijklmnopqrstuvwxyz"
count = 1 # 生成多少张样本
char_count = config["char_count"] # 图片上的字符数量
# 设置图片高度和宽度
width = config["width"]
height = config["height"]
rand_color = VerifyCode.random_color(100,150)
rand_back = VerifyCode.random_color(150, 200)
vcode = VerifyCode(length=char_count, width=width, height=height,
characters=characters,
fonts=("fonts/Arial.ttf",),
fonts_size=(36,),
# char_color=(1,3,200),
background_color=rand_back,
char_color=rand_color,
rotate_interval=(-60, 60))
for i in range(count):
text = vcode.get_random_code()
img = vcode.set_image(text)
timec = str(time.time()).replace(".", "")
p = os.path.join(root_dir, f"{text}_{timec}.{image_suffix}")
img.save(p)
print("Generate captcha image",text,i)
def main():
with open("../conf/captcha_config.json", "r") as f:
config = json.load(f)
# 配置参数
root_dir = config["root_dir"] # 图片储存路径
if not os.path.exists(root_dir):
os.makedirs(root_dir)
image_suffix = config["image_suffix"] # 图片储存后缀
characters = config["characters"] # 图片上显示的字符集 # characters = "0123456789abcdefghijklmnopqrstuvwxyz"
count = config["count"] # 生成多少张样本
char_count = config["char_count"] # 图片上的字符数量
# 设置图片高度和宽度
width = config["width"]
height = config["height"]
for i in range(count):
rand_color = VerifyCode.random_color(100, 150)
rand_back = VerifyCode.random_color(150, 200)
vcode = VerifyCode(length=char_count, width=width, height=height,
characters=characters,
fonts=("fonts/Arial.ttf",),
fonts_size=(36,),
# char_color=(1,3,200),
background_color=rand_back,
char_color=rand_color,
rotate_interval=(-60, 60))
# for i in range(count):
text = vcode.get_random_code()
img = vcode.set_image(text)
timec = str(time.time()).replace(".", "")
p = os.path.join(root_dir, f"{text}_{timec}.{image_suffix}")
img.save(p)
print("Generate captcha image",text,i)
def get_px_color():
import cv2
imgpath = "../sample/huaxi_captcha/0023_157855692296774.jpg"
r,g,b = 0, 0, 0
rm,gm,bm = 255, 255, 255
img = cv2.imread(imgpath)
for x in range(img.shape[0]):
for y in range(img.shape[1]):
px = img[x, y]
print(px)
if px[0]>r:
r=px[0]
if px[1]>g:
g= px[1]
if px[2]>b:
b = px[2]
if px[0]<rm:
rm=px[0]
if px[1]<gm:
gm= px[1]
if px[2]<bm:
bm = px[2]
print(r,g,b)
print(rm,gm,bm)
if __name__ == '__main__':
# vcode = VerifyCode()
# str_code = vcode.verify_code
# image_code = vcode.verify_image
# image_code.save("rand2.jpg")
main()
# for i in range(0, 10):
# get_one()
# test_img()
# get_px_color()
``` |
{
"source": "joocer/cronicl",
"score": 4
} |
#### File: cronicl/models/queue.py
```python
import queue
import logging
import re
__queues = {}
def get_queue(topic):
"""
Call this to get an instance of the queue list
"""
topic = re.sub("[^0-9a-zA-Z]+", "_", topic).lower().rstrip("_").lstrip("_")
if topic not in __queues:
new_queue = queue.SimpleQueue()
__queues[topic] = new_queue
logging.debug(f"Created new queue: {topic}")
return __queues.get(topic)
def queue_sizes():
"""
Return the sizes of all of the queues
"""
response = {}
for q in __queues:
response[q] = __queues[q].qsize()
return response
def queues_empty():
"""
Returns True when all queues are empty
"""
return all(__queues[q].empty() for q in __queues)
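# Minimal usage sketch (illustrative topic names; topics are normalised, so both
# calls below resolve to the same underlying queue instance):
if __name__ == "__main__":  # pragma: no cover
    q1 = get_queue("My Topic!")
    q2 = get_queue("my_topic")
    q1.put("hello")
    print(q1 is q2, queue_sizes(), queues_empty())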
```
#### File: providers/google/write_to_biq_query_operation.py
```python
from ..models.baseoperation import BaseOperation
import warnings
try:
from google.cloud import bigquery
except ImportError:
pass
class WriteToBiqQueryOperation(BaseOperation):
"""
Writes an entry to a GCS BigQuery Dataset
"""
__attribute_override_warning = True
def __init__(self, project=None, dataset=None, table=None):
self.gcp_project = project
self.bq_dataset = dataset
self.bq_table = table
# call the base initializer
        super().__init__()
def execute(self, message):
payload = message.payload
project = message.attributes.get("project", self.gcp_project)
dataset = message.attributes.get("dataset", self.bq_dataset)
table = message.attributes.get("table", self.bq_table)
inited_table = ".".join([self.gcp_project, self.bq_dataset, self.bq_table])
my_table = ".".join([project, dataset, table])
if self.__attribute_override_warning and (inited_table != my_table):
self.__attribute_override_warning = False
warnings.warn(
"BigQuerySink is using project/dataset/table attributes from the message to override initialized values"
)
client = bigquery.Client()
table = client.get_table(my_table)
row_to_insert = [payload]
errors = client.insert_rows(table, row_to_insert)
if len(errors) > 0:
for error in errors:
print(error)
return [message]
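# Minimal usage sketch (hypothetical project/dataset/table names; assumes a
# message object exposing .payload and .attributes as used in execute() above):
# sink = WriteToBiqQueryOperation(project="my-project", dataset="logs", table="events")
# sink.execute(message)  # inserts message.payload as a single row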
```
#### File: cronicl/tracers/base_tracer.py
```python
import abc
class BaseTracer(abc.ABC):
"""
Base Class for Tracer
"""
@abc.abstractmethod
def emit(
self,
msg_id,
execution_start,
execution_duration,
operation,
version,
child,
initializer,
record,
):
raise NotImplementedError("This method must be overriden.")
def close(self):
pass # placeholder
class __tracer(object):
"""
Handles writing trace logs out.
Implemented as a Singleton.
"""
_instance = None
tracer = None
def set_handler(self, tracer):
if not issubclass(tracer.__class__, BaseTracer):
raise TypeError("Tracers must inherit from BaseTracer.")
self.tracer = tracer
def emit(
self,
msg_id,
execution_start,
execution_duration,
operation,
version,
child,
initializer,
record,
):
if self.tracer:
self.tracer.emit(
msg_id,
execution_start,
execution_duration,
operation,
version,
child,
initializer,
record,
)
def close(self):
if self.tracer:
self.tracer.close()
def get_tracer():
"""
Call this to get an instance of the tracer
"""
if __tracer._instance is None:
__tracer._instance = __tracer()
return __tracer._instance
```
#### File: cronicl/triggers/file_watch_trigger.py
```python
from .base_trigger import BasePollingTrigger
import datetime
import pathlib
from ..exceptions import MissingInformationError
class FileWatchTrigger(BasePollingTrigger):
"""
Watch for the presence of a file; the filename can contain
markers for date formatting.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if "filename" not in kwargs:
raise MissingInformationError(
"FileWatchTrigger requires 'filename' parameter"
)
self.filename = kwargs["filename"]
self.last_filename = None
if self.label:
self.label = self.label + " - " + self.filename
else:
self.label = self.filename
def nudge(self):
# build filename,
filename = datetime.datetime.today().strftime(self.filename)
# does the filename exist
path = pathlib.Path(filename)
if path.is_file() and path != self.last_filename:
self.on_event(filename)
self.last_filename = path
return True
return False
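# Minimal usage sketch (hypothetical arguments; assumes the base class accepts a
# label and that on_event is provided or overridden in a subclass):
# trigger = FileWatchTrigger(filename="/data/extract_%Y-%m-%d.csv", label="daily extract")
# trigger.nudge()  # returns True and fires on_event(...) once the dated file appears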
```
#### File: cronicl/datasets/io.py
```python
json_parser = None
try:
import orjson as json
json_parser = "orjson"
except ImportError:
pass
if not json_parser:
try:
import ujson
json_parser = "ujson"
except ImportError:
import json
import csv
from pathlib import Path
from ._datasets import select_fields
def to_csv(dataset, filename, columns=["first_row"]):
    """
    Saves a dataset as a CSV
    """
    import csv
    with open(filename, "w", encoding="utf8", newline="") as file:
        # get the first record
        row = next(dataset)
        # get the columns from the record
        if columns == ["first_row"]:
            columns = list(row.keys())
        # write the headers
        csv_file = csv.DictWriter(file, fieldnames=columns)
        csv_file.writeheader()
        # write each remaining record, stopping cleanly when the generator is exhausted
        while row:
            csv_file.writerow(select_fields(row, columns))
            try:
                row = next(dataset)
            except StopIteration:
                row = None
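# Minimal usage sketch (hypothetical generator and output path):
# rows = iter([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
# to_csv(rows, "out.csv")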
def read_jsonl(filename, limit=-1, chunk_size=1024 * 1024, delimiter="\n"):
""""""
file_reader = read_file(filename, chunk_size=chunk_size, delimiter=delimiter)
line = next(file_reader)
while line:
yield json.loads(line)
limit -= 1
if limit == 0:
return
try:
line = next(file_reader)
except StopIteration:
return
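# Illustrative usage (hypothetical file name):
# for record in read_jsonl("events.jsonl", limit=10):
#     print(record)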
def read_file(filename, chunk_size=1024 * 1024, delimiter="\n"):
"""
Reads an arbitrarily long file, line by line
"""
with open(filename, "r", encoding="utf8") as f:
carry_forward = ""
chunk = "INITIALIZED"
while len(chunk) > 0:
chunk = f.read(chunk_size)
augmented_chunk = carry_forward + chunk
lines = augmented_chunk.split(delimiter)
carry_forward = lines.pop()
yield from lines
if carry_forward:
yield carry_forward
def read_csv_lines(filename):
with open(filename, "r", encoding="utf-8") as csvfile:
datareader = csv.reader(csvfile)
headers = next(datareader)
row = next(datareader)
while row:
yield dict(zip(headers, row))
try:
row = next(datareader)
except StopIteration:
row = None
def write_jsonl(filename, data):
with open(filename, "w", encoding="utf-8") as jsonfile:
for r in data:
try:
jsonfile.write(json.dumps(r) + "\n")
except ValueError:
jsonfile.write("*****" + "\n")
# https://gist.github.com/nwjlyons/621fabfc0d4c1119b2ad338f615ce4ef#file-chunks-py
def generator_chunker(generator, chunk_size):
"""Yield successive chunks from a generator"""
chunk = []
for item in generator:
if len(chunk) >= chunk_size:
yield chunk
chunk = [item]
else:
chunk.append(item)
if chunk:
yield chunk
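# Illustrative example:
# list(generator_chunker(range(5), 2)) -> [[0, 1], [2, 3], [4]]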
def clear_screen():
print(chr(27) + "[2j")
print("\033c")
print("\x1bc")
``` |
{
"source": "joocer/data-expectations",
"score": 3
} |
#### File: data-expectations/tests/test_expect_column_to_exist.py
```python
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import data_expectations as de
from rich import traceback
traceback.install()
# fmt:off
DATA = [
{ "number": 7, "string": "d" }, # pass
{ "number": 5, "string": "c" }, # pass
{ "number": 3, "string": None }, # the column exists but is None - pass
{ "number": 1 }, # fail
]
# fmt:on
def test_expect_column_to_exist():
test_func = de.Expectations([]).expect_column_to_exist
assert not test_func(row='{"number":1}', column="number")
for i, row in enumerate(DATA):
assert test_func(row=row, column="number"), row
if i in (3,):
assert not test_func(row=row, column="string"), row
else:
assert test_func(row=row, column="string"), row
if __name__ == "__main__": # pragma: no cover
test_expect_column_to_exist()
print("test manually run")
```
#### File: data-expectations/tests/test_expect_column_values_to_be_between.py
```python
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import data_expectations as de
from rich import traceback
traceback.install()
def test_expect_column_values_to_be_between():
test_func = de.Expectations([]).expect_column_values_to_be_between
assert test_func(row={"key": "b"}, column="key", minimum="a", maximum="c")
assert test_func(row={"key": "b"}, column="key", minimum="a", maximum="b")
assert test_func(row={"key": "b"}, column="key", minimum="b", maximum="c")
assert not test_func(row={"key": "g"}, column="key", minimum="a", maximum="c")
assert test_func(row={"key": 2}, column="key", minimum=1, maximum=3)
assert test_func(row={"key": 2}, column="key", minimum=1, maximum=2)
assert test_func(row={"key": 2}, column="key", minimum=2, maximum=3)
assert not test_func(row={"key": 10}, column="key", minimum=1, maximum=3)
assert test_func(row={"key": None}, column="key", minimum="a", maximum="c")
if __name__ == "__main__": # pragma: no cover
test_expect_column_values_to_be_between()
print("test manually run")
```
#### File: data-expectations/tests/test_expect_column_values_to_be_increasing.py
```python
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import data_expectations as de
from rich import traceback
traceback.install()
# fmt:off
VALID_DATA = [
{ "number": 1, "string": "a" },
{ "number": 3, "string": "b" },
{ "number": 5, "string": "c" },
{ "number": 7, "string": "d" },
]
VALID_DATA_SPARSE = [
{ "number": 1, "string": "a" },
{ "number": None, "string": None },
{ "number": 5, "string": "c" },
{ "number": 7, "string": "d" },
]
INVALID_DATA = [
{ "number": 7, "string": "d" },
{ "number": 5, "string": "c" },
{ "number": 3, "string": "b" },
{ "number": 1, "string": "a" },
]
INVALID_DATA_SPARSE = [
{ "number": 7, "string": "d" },
{ "number": 5, "string": "c" },
{ "number": None, "string": None },
{ "number": 1, "string": "a" },
]
# fmt:on
def test_expect_column_values_to_be_increasing_valid():
test_func = de.Expectations([]).expect_column_values_to_be_increasing
# valid data is always valid
for row in VALID_DATA:
assert test_func(row=row, column="number")
assert test_func(row=row, column="string")
def test_expect_column_values_to_be_increasing_valid_with_nulls():
test_func = de.Expectations([]).expect_column_values_to_be_increasing
# valid data is always valid
for row in VALID_DATA_SPARSE:
assert test_func(row=row, column="number")
assert test_func(row=row, column="string")
def test_expect_column_values_to_be_increasing_valid_with_nulls_which_arent_ignored():
test_func = de.Expectations([]).expect_column_values_to_be_increasing
for i, row in enumerate(VALID_DATA_SPARSE):
if i in (1,):
assert not test_func(row=row, column="number", ignore_nulls=False)
assert not test_func(row=row, column="string", ignore_nulls=False)
else:
assert test_func(row=row, column="number", ignore_nulls=False)
assert test_func(row=row, column="string", ignore_nulls=False)
def test_expect_column_values_to_be_increasing_invalid():
test_func = de.Expectations([]).expect_column_values_to_be_increasing
# invalid data is valid the first cycle
for i, row in enumerate(INVALID_DATA):
if i == 0:
assert test_func(row=row, column="number")
assert test_func(row=row, column="string")
else:
assert not test_func(row=row, column="number")
assert not test_func(row=row, column="string")
def test_expect_column_values_to_be_increasing_invalid_with_nulls():
test_func = de.Expectations([]).expect_column_values_to_be_increasing
# invalid data is valid the first cycle
for i, row in enumerate(INVALID_DATA_SPARSE):
if i in (0, 2):
assert test_func(row=row, column="number")
assert test_func(row=row, column="string")
else:
assert not test_func(row=row, column="number")
assert not test_func(row=row, column="string")
if __name__ == "__main__": # pragma: no cover
test_expect_column_values_to_be_increasing_valid()
test_expect_column_values_to_be_increasing_valid_with_nulls()
test_expect_column_values_to_be_increasing_valid_with_nulls_which_arent_ignored()
test_expect_column_values_to_be_increasing_invalid()
test_expect_column_values_to_be_increasing_invalid_with_nulls()
print("test manually run")
```
#### File: data-expectations/tests/test_expect_column_values_to_be_in_set.py
```python
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import data_expectations as de
from rich import traceback
traceback.install()
def test_expect_column_values_to_be_in_set():
test_func = de.Expectations([]).expect_column_values_to_be_in_set
assert test_func(row={"key": "a"}, column="key", symbols=("a", "b", "c"))
assert test_func(row={"key": None}, column="key", symbols=("a", "b", "c"))
assert not test_func(row={"key": "g"}, column="key", symbols=("a", "b", "c"))
assert test_func(row={"key": 1}, column="key", symbols=(1, 2, 3))
assert test_func(row={"key": None}, column="key", symbols=(1, 2, 3))
assert not test_func(row={"key": 8}, column="key", symbols=(1, 2, 3))
if __name__ == "__main__": # pragma: no cover
test_expect_column_values_to_be_in_set()
print("test manually run")
```
#### File: data-expectations/tests/test_expect_column_values_to_not_be_null.py
```python
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import data_expectations as de
from rich import traceback
traceback.install()
def test_expect_column_values_to_not_be_null():
test_func = de.Expectations([]).expect_column_values_to_not_be_null
assert test_func(row={"key": "value"}, column="key")
assert not test_func(row={"key": None}, column="key")
assert not test_func(row={"key": "value"}, column="field")
if __name__ == "__main__": # pragma: no cover
test_expect_column_values_to_not_be_null()
print("test manually run")
``` |
{
"source": "joocer/mitre-tools",
"score": 2
} |
#### File: source/data/03 build_graph.py
```python
import networkx as nx
import pandas as pd
import re
import time
capec_attack_pattern_filepath = r'../../data/intermediate/capec-attack_pattern.csv'
capec_relationship_filepath = r'../../data/intermediate/capec-relationship.csv'
capec_course_of_action_filepath = r'../../data/intermediate/capec-course_of_action.csv'
capec_consequences_filepath = r'../../data/intermediate/capec-consequences.csv'
capec_prerequisite_filepath = r'../../data/intermediate/capec-prerequisite.csv'
asvs_filepath = r'../../data/raw/OWASP Application Security Verification Standard 4.0-en.csv'
cwe_filepath = r'../../data/raw/2000-cwe.csv'
## currently aren't loading these into the graph
nvd_filepath = r'../../data/intermediate/nvd-cve.csv'
exploitdb_filepath = r'../../data/raw/mitre-exploitdb.csv'
attack_malware_filepath = r'../../data/intermediate/attack-malware.csv'
attack_intrusion_set_filepath = r'../../data/intermediate/attack-intrusion_set.csv'
attack_tool_filepath = r'../../data/intermediate/attack-tool.csv'
attack_relationship_filepath = r'../../data/intermediate/attack-relationship.csv'
attack_x_mitre_tactic_filepath = r'../../data/intermediate/attack-x_mitre_tactic.csv'
attack_course_of_action_filepath = r'../../data/intermediate/attack-course_of_action.csv'
attack_attack_pattern_filepath = r'../../data/intermediate/attack-attack_pattern.csv'
def find_relationships(string):
tokens = re.findall(r"(?i)ChildOf\:CWE ID\:\d{1,5}", string)
result = []
for token in tokens:
result.append('CWE-' + token[15:])
return result
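# Illustrative example:
# find_relationships("ChildOf:CWE ID:79") -> ['CWE-79']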
def find_cves(string):
tokens = re.findall(r"(?i)CVE.\d{4}-\d{4,7}", string)
result = []
for token in tokens:
token = token.upper().strip()
token = token[:3] + '-' + token[4:] # snort rules list cves as CVE,2009-0001
result.append(token)
return result
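# Illustrative example (hypothetical rule text):
# find_cves("reference:cve,2009-0001;") -> ['CVE-2009-0001']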
# new graph
graph = nx.DiGraph()
###########################
capecs = {}
coa = {}
###########################
print('capec_attack_pattern_filepath')
data = pd.read_csv(capec_attack_pattern_filepath)
# add nodes
for i, row in data.iterrows():
graph.add_node(row['capec'],
label=row['capec'],
kind='capec',
description=row['name'],
likelihood_of_attack=row['likelihood_of_attack'],
typical_severity=row['typical_severity']
)
capecs[row['id']] = row['capec']
print (len(graph.nodes), len(graph.edges))
###########################
print('capec_course_of_action_filepath')
data = pd.read_csv(capec_course_of_action_filepath)
# add nodes
for i, row in data.iterrows():
graph.add_node(row['name'],
label=row['name'],
description=row['description'],
kind='course of action')
coa[row['id']] = row['name']
print (len(graph.nodes), len(graph.edges))
###########################
print('capec_consequences_filepath')
data = pd.read_csv(capec_consequences_filepath)
# add nodes
for i, row in data.iterrows():
graph.add_node(row['id'], label=row['consequence'], kind='consequence', group=row['group'])
print (len(graph.nodes), len(graph.edges))
###########################
print('capec_prerequisite_filepath')
data = pd.read_csv(capec_prerequisite_filepath)
# add nodes
for i, row in data.iterrows():
graph.add_node(row['id'], label=row['id'], kind='prerequisite')
print (len(graph.nodes), len(graph.edges))
###########################
print('cwe_filepath')
data = pd.read_csv(cwe_filepath, index_col=False)
data.fillna('', inplace=True)
# add nodes
for i, row in data.iterrows():
graph.add_node('CWE-' + str(row['CWE-ID']),
label='CWE-' + str(row['CWE-ID']),
kind='cwe',
name=row['Name'])
# add edges
for i, row in data.iterrows():
for rel in find_relationships(row['Related Weaknesses']):
graph.add_edge(rel, 'CWE-' + str(row['CWE-ID']), relationship='ChildOf')
graph.add_edge('CWE-' + str(row['CWE-ID']), rel, relationship='ParentOf')
print (len(graph.nodes), len(graph.edges))
###########################
print('asvs_filepath')
data = pd.read_csv(asvs_filepath, index_col=False)
data.fillna('', inplace=True)
# add nodes
for i, row in data.iterrows():
graph.add_node('ASVS-' + str(row['Item']),
label='ASVS-' + str(row['Item']),
kind='asvs',
description=row['Description'],
section_id=row['Section'],
section_name=row['Name'],
level_1=(row['L1']=='X'),
level_2=(row['L2']=='X'),
level_3=(row['L3']=='X'))
# add edges
for i, row in data.iterrows():
CWE = row['CWE']
if CWE != '':
CWE = 'CWE-' + str(int(CWE))
graph.add_edge('ASVS-' + str(row['Item']), CWE, relationship='Prevents')
graph.add_edge(CWE, 'ASVS-' + str(row['Item']), relationship='Inverse-Prevents')
print (len(graph.nodes), len(graph.edges))
###########################
print('capec_relationship_filepath')
data = pd.read_csv(capec_relationship_filepath)
# add edges
for i, row in data.iterrows():
relationship = row['relationship']
if relationship == 'mitigates':
graph.add_edge(coa.get(row['source']), capecs.get(row['target']), relationship='Mitigates')
graph.add_edge(capecs.get(row['target']), coa.get(row['source']), relationship='Inverse-Mitigates')
elif relationship == 'ResultsIn':
graph.add_edge(capecs.get(row['source']), row['target'], relationship=relationship)
graph.add_edge(row['target'], capecs.get(row['source']), relationship='Inverse-' + relationship)
else:
graph.add_edge(row['source'], row['target'], relationship=relationship)
graph.add_edge(row['target'], row['source'], relationship='Inverse-' + relationship)
print (len(graph.nodes), len(graph.edges))
###########################
def remove_orphans(graph):
orphan_nodes = []
g = graph.copy()
for node_id in g.nodes():
node = g.nodes()[node_id]
if node.get('kind') in [None]:
orphan_nodes.append(node_id)
for node_id in orphan_nodes:
g.remove_node(node_id)
return g
graph = remove_orphans(graph)
###########################
print('saving to graphml')
nx.write_graphml(graph, r'../../data/processed/mitre-data.graphml')
print ('done')
``` |
{
"source": "joocer/movingrange",
"score": 3
} |
#### File: movingrange/timeseries/general.py
```python
import math
# replaces nan items from a series with a given value
def fillna(series, filler=0):
    result = []
    for i in series:
        if isinstance(i, (int, float)) and not math.isnan(i):
            result.append(i)
        else:
            result.append(filler)
    return result
# average of the series
def mean(series):
s = [s for s in series if s is not None and not math.isnan(s)]
if len(s) == 0:
return None
return sum(s) / float(len(s))
# standard deviateion of the series
def standard_deviation(series):
var = variance(series)
if var is None:
return None
return variance(series) ** (0.5)
# statistal variance of the series
def variance(series):
s = [s for s in series if s is not None and not math.isnan(s)]
    if len(s) < 2:
return None
series_mean = mean(s)
return sum((x - series_mean) ** 2.0 for x in s) / (len(s) - 1)
# executes a rule against each item in a series and returns the indexes of matches; rule should be a lambda
# matches(data, lambda x: x > 2)
# list comprehensions are slower than loops for short lists but faster for long lists
def matches(series, rule):
return [i for i, item in enumerate(series) if rule(item)]
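# Illustrative example: indexes of the items greater than 2
# matches([1, 3, 2, 5], lambda x: x > 2) -> [1, 3]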
# applies a function to a series of values, formula should be a lambda
# f_x(data, lambda x: 3*x + 2)
def f_x(series, formula):
return [formula(item) for item in series]
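# Illustrative example:
# f_x([1, 2, 3], lambda x: 3*x + 2) -> [5, 8, 11]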
def series_diff(series_a, series_b, adjustment=None):
    if len(series_a) != len(series_b):
        raise Exception('series_diff: two series must be the same length')
    if adjustment is None:
adjustment = mean(series_a)
series = []
for i in range(len(series_a)):
series.append(series_a[i] - series_b[i] + adjustment)
return series
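# Illustrative example (adjustment defaults to mean(series_a), here 2.0):
# series_diff([1, 2, 3], [1, 1, 1]) -> [2.0, 3.0, 4.0]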
``` |
{
"source": "joocer/newidydd",
"score": 3
} |
#### File: newidydd/datasets/_datasets.py
```python
import warnings
import logging
"""
select_fields
select_rows
column_to_list
concatenate
set_column
set_field
filter_dataset
fill_none
replace_values
merge_datasets
Deduplicate (buffer_size)
Reorder (buffer_size)
Summarize -> min, max, mean, mode, median, std, iqr, variance
Copy (make another generator)
"""
"""
dataset = a data set (list of dictionaries)
row = a record within a dataset
field = a labelled piece of data in a row
column = a field across multiple rows
value = a piece of data
item = field/value pair
"""
INNER_JOIN = 1
LEFT_JOIN = 2
def _select_all(dummy):
"""
Always returns True.
"""
return True
def select_fields(dic, fields):
"""
    Selects items from a row; if a field doesn't exist, None is used.
"""
return {field: dic.get(field, None) for field in fields}
def select_columns(dataset, columns):
""""""
for row in dataset:
yield select_fields(row, columns)
def column_to_list(dataset, column):
""""""
for row in dataset:
yield row.get(column)
def concatenate(datasets):
""""""
for dataset in datasets:
for row in dataset:
yield row
def set_column(dataset, column, setter):
""""""
for row in dataset:
yield set_field(row, column, setter)
def set_field(row, column, setter):
""""""
if type(setter).__name__ == "function":
row[column] = setter(row)
else:
row[column] = setter
return row
def filter_dataset(dataset, columns=["*"], condition=_select_all):
""""""
for row in dataset:
if condition(row):
if columns != ["*"]:
row = select_fields(row, columns)
yield row
def fill_none(dataset, filler=""):
"""
Replaces 'None' values in a dataset with a default
"""
return replace_values(dataset, None, filler)
def replace_values(dataset, oldvalue, newvalue):
"""
Replace all instances of a value.
"""
for row in dataset:
yield {
field: (newvalue if value is oldvalue else value)
for field, value in row.items()
}
def merge_datasets(left, right, column, join_type=INNER_JOIN):
right_index = {}
for row in right:
index_value = row[column]
right_index[index_value] = row
for row in left:
value = row.get(column)
if right_index.get(value):
yield {**row, **right_index[value]}
elif join_type == LEFT_JOIN:
yield row
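# --- Hedged usage sketch (not part of the original module): demonstrates the
# generator-based helpers above with two small in-memory datasets.
if __name__ == "__main__":
    people = [{"id": 1, "name": "alice"}, {"id": 2, "name": "bob"}]
    scores = [{"id": 1, "score": 10}]
    # inner join keeps only rows present in both datasets
    print(list(merge_datasets(people, scores, "id")))
    # left join keeps every row from the left dataset
    print(list(merge_datasets(people, scores, "id", join_type=LEFT_JOIN)))
    # select a subset of fields from each row
    print(list(select_columns(people, ["name"])))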
```
#### File: newidydd/operations/end_operator.py
```python
from .base_operator import BaseOperator
class EndOperator(BaseOperator):
def execute(self, data={}, context={}):
return None
```
#### File: joocer/newidydd/sample_flow.py
```python
from newidydd import BaseOperator, runner
import time
import random
class ThisOperator(BaseOperator):
def execute(self, data={}, context={}):
for i in range(10000):
n = i * i
return data, context
class ThatOperator(BaseOperator):
def execute(self, data={}, context={}):
return data, context
class TheOtherOperator(BaseOperator):
def __init__(self, count, **kwargs):
super().__init__(**kwargs)
self.count = count
def execute(self, data={}, context={}):
for i in range(self.count):
yield data, context
this = ThisOperator()
that = ThatOperator()
other = TheOtherOperator(count=5)
flow = other > that > this
data_reader = ['A', 'B', 'C', 'D']
for data in data_reader:
runner.go(flow=flow, data=data, context={"trace": random.choice([True, False])}) #nosec
print(this)
``` |
{
"source": "joocer/orwell",
"score": 3
} |
#### File: orwell/orwell/dictset.py
```python
from typing import Iterator, Any, List, Union, Callable
import json
json_parser: Callable = json.loads
json_dumper: Callable = json.dumps
try:
import orjson
json_parser = orjson.loads
json_dumper = orjson.dumps
except ImportError:
pass
try:
import ujson
json_parser = ujson.loads
except ImportError:
pass
class JOINS(object):
INNER_JOIN = 'INNER'
LEFT_JOIN = 'LEFT'
def select_all(dummy: Any) -> bool:
"""
Returns True
"""
return True
def select_record_fields(
record: dict,
fields: List[str]) -> dict:
"""
Selects a subset of fields from a dictionary
"""
return {k: record.get(k, None) for k in fields}
def order(record: Union[dict, list]) -> Union[dict, list]:
if isinstance(record, dict):
return dict(sorted(record.items()))
if isinstance(record, list):
return sorted((order(x) for x in record), key=lambda item: '' if not item else item)
return record
def join_dictsets(
left: Iterator[dict],
right: Iterator[dict],
column: str,
join_type=JOINS.INNER_JOIN) -> Iterator[dict]:
"""
    Iterates over the left table, matching records from the right table.
    INNER_JOIN, the default, will discard records unless they appear in both
    tables, LEFT_JOIN will keep all the records from the left table and add
records for the right table if a match is found.
It is recommended that the left table be the larger of the two tables as
the right table is loaded into memory to perform the matching and look ups.
NOTES:
- where columns are in both tables - I don't know what happens.
- resultant records may have inconsistent columns (same as
source lists)
Approximate SQL:
SELECT * FROM left JOIN right ON left.column = right.column
"""
index = create_index(right, column)
for record in left:
        value = record.get(column)
if index.get(value):
yield {**record, **index[value]}
elif join_type == JOINS.LEFT_JOIN:
yield record
def union_dictsets(
dictset_1: Iterator[dict],
dictset_2: List[dict]) -> Iterator[dict]:
"""
Append the records from a set of lists together, doesn't ensure columns
align.
Approximate SQL:
SELECT * FROM dictset_1
UNION
SELECT * FROM dictset_2
"""
for record in dictset_1:
yield record
for record in dictset_2:
yield record
def create_index(
dictset: Iterator[dict],
index_column: str) -> dict:
"""
Create an index of a file to speed up look-ups.
"""
index = {}
for record in dictset:
index_value = record[index_column]
index[index_value] = record
return index
def select_from_dictset(
dictset: Iterator[dict],
columns: List[str] = ['*'],
condition: Callable = select_all) -> Iterator[dict]:
"""
Scan a dictset, filtering rows and selecting columns.
Basic implementation of SQL SELECT statement for a single table
Approximate SQL:
SELECT columns FROM dictset WHERE condition
"""
for record in dictset:
if condition(record):
if columns != ['*']:
record = select_record_fields(record, columns)
yield record
def set_column(
dictset: Iterator[dict],
column_name: str,
setter: Callable) -> Iterator[dict]:
"""
Performs set_value on each row in a set
"""
for record in dictset:
yield set_value(record, column_name, setter)
def set_value(
record: dict,
column_name: str,
setter: Callable) -> dict:
"""
Sets the value of a column to either a fixed value or as the
    result of a function which receives the row as a parameter
"""
if callable(setter):
record[column_name] = setter(record)
else:
record[column_name] = setter
return record
def distinct(
dictset: Iterator[dict],
columns: List[str] = ['*']):
"""
Removes duplicate records from a dictset
"""
def _noop(x):
return x
def _filter(x):
return {k: x.get(k, '') for k in columns}
seen_hashes: dict = {}
selector = _noop
if columns != ['*']:
selector = _filter
for record in dictset:
entry = selector(record)
entry = json_dumper(entry)
_hash = hash(entry)
if seen_hashes.get(_hash):
continue
seen_hashes[_hash] = 1
yield record
def limit(
dictset: Iterator[dict],
limit: int):
"""
Returns up to 'limit' number of records
"""
counter = limit
for record in dictset:
if counter == 0:
return None
counter -= 1
yield record
def dictsets_match(
dictset_1: Iterator[dict],
dictset_2: Iterator[dict]):
"""
Tests if two sets match - this terminates generators
"""
def _hash_set(dictset: Iterator[dict]):
xor = 0
for record in dictset:
entry = order(record)
entry = json_dumper(entry)
_hash = hash(entry)
xor = xor ^ _hash
return xor
return _hash_set(dictset_1) == _hash_set(dictset_2)
def generator_chunker(
generator: Iterator,
chunk_size: int) -> Iterator:
chunk: list = []
for item in generator:
if len(chunk) >= chunk_size:
yield chunk
chunk = [item]
else:
chunk.append(item)
if chunk:
yield chunk
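# --- Hedged usage sketch (not part of the original module): the records below
# are illustrative and only exercise the pure-Python helpers defined above.
if __name__ == "__main__":
    records = [{"id": i, "flag": i % 2 == 0} for i in range(10)]
    evens = select_from_dictset(records, columns=["id"], condition=lambda r: r["flag"])
    print(list(limit(evens, 3)))              # [{'id': 0}, {'id': 2}, {'id': 4}]
    for page in generator_chunker(iter(records), chunk_size=4):
        print(len(page))                      # 4, 4, 2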
```
#### File: orwell/helpers/blob_paths.py
```python
import datetime
class BlobPaths(object):
@staticmethod
def split_filename(filename: str):
""" see test cases for all handled edge cases """
if not filename:
return '', ''
ext = ''
name = ''
parts = filename.split('.')
if len(parts) == 1:
return filename, ''
if parts[0] == '':
parts.pop(0)
parts[0] = '.' + parts[0]
if len(parts) > 1:
ext = '.' + parts.pop()
if ext.find('/') > 0:
ext = ext.lstrip('.')
parts.append(ext)
ext = ''
name = '.'.join(parts)
if ext == '.':
name = ''
return name, ext
@staticmethod
def get_parts(path_string: str):
if not path_string:
raise ValueError('get_parts: path_string must have a value')
parts = str(path_string).split('/')
bucket = parts.pop(0)
name, ext = BlobPaths.split_filename(parts.pop())
path = '/'.join(parts) + '/'
return bucket, path, name, ext
@staticmethod
def build_path(path: str, date: datetime.date = None):
if not date:
date = datetime.datetime.now()
if not path:
raise ValueError('build_path: path must have a value')
if not path[0] == '/':
path_string = path.lstrip('/')
else:
path_string = path
path_string = path_string.replace('%date', '%Y-%m-%d')
path_string = path_string.replace('%time', '%H%M%S')
path_string = date.strftime(path_string)
return path_string
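# --- Hedged usage sketch (not part of the original module): shows how the
# %date placeholder expands; the bucket and path values are illustrative only.
if __name__ == "__main__":
    print(BlobPaths.get_parts("my-bucket/raw/%date/file.jsonl"))
    # -> ('my-bucket', 'raw/%date/', 'file', '.jsonl')
    print(BlobPaths.build_path("raw/%date/", date=datetime.date(2021, 1, 31)))
    # -> 'raw/2021-01-31/'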
```
#### File: orwell/readers/blob_reader.py
```python
try:
from google.cloud import storage # type:ignore
except ImportError:
pass
import lzma
import datetime
from ..helpers.blob_paths import BlobPaths
from typing import Tuple, Union, Optional
import gva.logging # type:ignore
def blob_reader(
path: str,
project: str,
date_range: Tuple[Optional[datetime.date], Optional[datetime.date]] = (None, None),
chunk_size=16*1024*1024,
**kwargs):
"""
Blob reader, will iterate over as set of blobs in a path.
"""
# validate request
if not project:
raise ValueError('Blob Reader requires Project to be set')
if not path:
raise ValueError('Blob Reader requires Path to be set')
# if dates aren't provided, use today
start_date, end_date = date_range
if not end_date:
end_date = datetime.date.today()
if not start_date:
start_date = datetime.date.today()
bucket, blob_path, name, extention = BlobPaths.get_parts(path)
# cycle through the days, loading each days' file
for cycle in range(int((end_date - start_date).days) + 1):
cycle_date = start_date + datetime.timedelta(cycle)
cycle_path = BlobPaths.build_path(path=blob_path, date=cycle_date)
blobs_at_path = find_blobs_at_path(project=project, bucket=bucket, path=cycle_path, extention=extention)
blobs_at_path = list(blobs_at_path)
for blob in blobs_at_path:
reader = _inner_blob_reader(blob_name=blob.name, project=project, bucket=bucket, chunk_size=chunk_size)
yield from reader
def find_blobs_at_path(
project: str,
bucket: str,
path: str,
extention: str):
client = storage.Client(project=project)
gcs_bucket = client.get_bucket(bucket)
blobs = client.list_blobs(bucket_or_name=gcs_bucket, prefix=path)
if extention:
blobs = [blob for blob in blobs if extention in blob.name]
yield from blobs
def _inner_blob_reader(
project: str,
bucket: str,
blob_name: str,
chunk_size: int = 16*1024*1024,
delimiter: str = '\n'):
"""
Reads lines from an arbitrarily long blob, line by line.
Automatically detecting if the blob is compressed.
"""
blob = get_blob(project=project, bucket=bucket, blob_name=blob_name)
if blob:
blob_size = blob.size
else:
blob_size = 0
carry_forward = ''
cursor = 0
while (cursor < blob_size):
chunk = _download_chunk(blob=blob, start=cursor, end=min(blob_size, cursor+chunk_size-1))
cursor += chunk_size # was previously +len(chunk)
chunk = chunk.decode('utf-8')
# add the last line from the previous cycle
chunk += carry_forward
lines = chunk.split(delimiter)
        # the last line is likely to be incomplete, save it to carry forward
carry_forward = lines.pop()
yield from lines
if len(carry_forward) > 0:
yield carry_forward
def _download_chunk(
blob: storage.blob,
start: int,
end: int):
"""
Detects if a chunk is compressed by looking for a magic string
"""
chunk = blob.download_as_string(start=start, end=end)
if blob.name.endswith('.lzma'):
try:
return lzma.decompress(chunk)
except lzma.LZMAError:
# if we fail maybe we're not compressed
pass
return chunk
def get_blob(
project: str,
bucket: str,
blob_name: str):
client = storage.Client(project=project)
gcs_bucket = client.get_bucket(bucket)
blob = gcs_bucket.get_blob(blob_name)
return blob
```
#### File: orwell/writers/writer.py
```python
import lzma
import time
import os
import threading
import tempfile
import datetime
from .blob_writer import blob_writer
from typing import Callable, Optional, Any, Union
from gva.data.validator import Schema # type:ignore
try:
import ujson as json
except ImportError:
import json # type:ignore
class Writer():
def __init__(
self,
writer: Callable = blob_writer,
to_path: str = 'year_%Y/month_%m/day_%d',
partition_size: int = 8*1024*1024,
schema: Schema = None,
commit_on_write: bool = False,
compress: bool = False,
use_worker_thread: bool = True,
idle_timeout_seconds: int = 60,
date: Optional[datetime.date] = None,
**kwargs):
"""
DataWriter
Parameters:
- path: the path to save records to, this is a folder name
- partition_size: the number of records per partition (-1) is unbounded
- commit_on_write: commit rather than cache writes - is slower but less
chance of loss of data
- schema: Schema object - if set records are validated before being
written
- use_worker_thread: creates a thread which performs regular checks
and corrections
- idle_timeout_seconds: the time with no new writes to a partition before
closing it and creating a new partition regardless of the records
- compress: compress the completed file using LZMA
"""
self.to_path = to_path
self.partition_size = partition_size
self.bytes_left_to_write_in_partition = partition_size
self.schema = schema
self.commit_on_write = commit_on_write
self.file_writer: Optional[_PartFileWriter] = None
self.last_write = time.time_ns()
self.idle_timeout_seconds = idle_timeout_seconds
self.use_worker_thread = use_worker_thread
self.writer = writer
self.kwargs = kwargs
self.compress = compress
self.file_name: Optional[str] = None
self.date = date
if use_worker_thread:
self.thread = threading.Thread(target=_worker_thread, args=(self,))
self.thread.daemon = True
self.thread.start()
def _get_temp_file_name(self):
file = tempfile.NamedTemporaryFile(prefix='gva-', delete=True)
file_name = file.name
file.close()
try:
os.remove(file_name)
except OSError:
pass
return file_name
def append(self, record: dict = {}):
"""
Saves new entries to the partition; creating a new partition
if one isn't active.
"""
# this is a killer - check the new record conforms to the
# schema before bothering with anything else
if self.schema and not self.schema.validate(subject=record, raise_exception=True):
print(F'Validation Failed ({self.schema.last_error}):', record)
return False
self.last_write = time.time_ns()
# serialize the record
serialized = json.dumps(record) + '\n'
len_serial = len(serialized)
with threading.Lock():
# if this write would exceed the partition
self.bytes_left_to_write_in_partition -= len_serial
if self.bytes_left_to_write_in_partition <= 0:
if len_serial > self.partition_size:
raise ValueError('Record size is larger than partition.')
self.on_partition_closed()
# if we don't have a current file to write to, create one
if not self.file_writer:
self.file_name = self._get_temp_file_name()
self.file_writer = _PartFileWriter(
file_name=self.file_name, # type:ignore
commit_on_write=self.commit_on_write,
compress=self.compress)
self.bytes_left_to_write_in_partition = self.partition_size
# write the record to the file
self.file_writer.append(serialized)
return True
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.on_partition_closed()
def on_partition_closed(self):
# finalize the writer
if self.file_writer:
self.file_writer.finalize()
# save the file to it's destination
if self.file_name:
self.writer(
source_file_name=self.file_name,
target_path=self.to_path,
add_extention='.lzma' if self.compress else '',
date=self.date,
**self.kwargs)
try:
os.remove(self.file_name)
except (OSError, TypeError):
pass
self.file_writer = None
self.file_name = None
def __del__(self):
self.on_partition_closed()
self.use_worker_thread = False
def finalize(self):
if self.file_writer:
self.on_partition_closed()
class _PartFileWriter():
""" simple wrapper for file writing to a temp file """
def __init__(
self,
file_name: str, # type:ignore
commit_on_write: bool = False,
compress: bool = False):
self.file: Any = open(file_name, mode='wb')
if compress:
self.file = lzma.open(self.file, mode='wb')
self.commit_on_write = commit_on_write
def append(self, record: str = ""):
self.file.write(record.encode())
if self.commit_on_write:
try:
self.file.flush()
except ValueError:
pass
def finalize(self):
try:
self.file.flush()
self.file.close()
except Exception: # nosec - ignore errors
pass
def __del__(self):
self.finalize()
def _worker_thread(data_writer: Writer):
"""
Method to run an a separate thread performing the following tasks
- when the day changes, it closes the existing partition so a new one is
opened with today's date
    - close partitions when new records haven't been received for a period of
time (default 300 seconds)
- attempt to flush writes to disk regularly
These are done in a separate thread so the 'append' method doesn't need to
perform these checks every write - it can just assume they are being
handled and focus on writes
"""
while data_writer.use_worker_thread:
if (time.time_ns() - data_writer.last_write) > (data_writer.idle_timeout_seconds * 1e9):
with threading.Lock():
data_writer.on_partition_closed()
# if not data_writer.formatted_path == datetime.datetime.today().strftime(data_writer.path):
# change_partition = True
# try flushing writes
try:
if data_writer.file_writer:
data_writer.file_writer.file.flush()
except ValueError: # nosec - if it fails, it doesn't /really/ matter
pass
time.sleep(1)
``` |
{
"source": "joocer/seren",
"score": 4
} |
#### File: juon/dictset/dictset.py
```python
from typing import Iterator, Any, List, Callable
from .group_by import Groups
from .records import select_record_fields, set_value, order
from .. import json
INNER_JOIN = "INNER"
LEFT_JOIN = "LEFT"
def join(
left: Iterator[dict], right: Iterator[dict], column: str, join_type=INNER_JOIN
) -> Iterator[dict]:
"""
    Iterates over the left dictset, matching records from the right dictset.
    Dictsets provided to this method are expected to be bounded.
    INNER_JOIN, the default, will discard records unless they appear in both
    tables, LEFT_JOIN will keep all the records from the left table and add
records for the right table if a match is found.
It is recommended that the left table be the larger of the two tables as
the right table is loaded into memory to perform the matching and look ups.
Parameters:
left: iterable of dictionaries
The 'left' dictset
right: iterable of dictionaries
The 'right' dictset
column: string
The name of column shared by both dictsets to join on
join_type: string (optional, default INNER_JOIN)
The type of join, 'INNER_JOIN' or 'LEFT'
Yields:
dictionary
"""
index = create_index(right, column)
for record in left:
value = record.get(column)
if index.get(value):
yield {**record, **index[value]}
elif join_type == LEFT_JOIN:
yield record
def union(*args) -> Iterator[dict]:
"""
Append the records from a set of lists together, doesn't ensure columns
align.
Parameters:
args: list of iterables of dictionaries
The lists of dictionaries to concatenate
Yields:
dictionary
"""
for dictset in args:
yield from dictset
def create_index(dictset: Iterator[dict], index_column: str) -> dict:
"""
Create an index of a file to speed up look-ups, it is expected that the
value in the index_column is unique but this is not enforced.
Parameters:
dictset: iterable of dictionaries
The dictset to process
index_column: string
the column in the dictset to index on
Returns:
dictionary
"""
index = {}
for record in dictset:
index_value = record[index_column]
index[index_value] = record
return index
def select_from(
dictset: Iterator[dict], columns: List[str] = ["*"], where: Callable = None
) -> Iterator[dict]:
"""
Scan a dictset, filtering rows and selecting columns.
Parameters:
dictset: iterable of dictionaries
The dictset to process
columns: list of strings
The list of column names to return
where: callable (optional)
The function to apply to filter records, we return the rows that
evaluate to True, default returns all records
Yields:
dictionary
"""
def _select_columns(dictset, columns):
for record in dictset:
record = select_record_fields(record, columns)
yield record
if where is not None:
dictset = filter(where, dictset)
if columns != ["*"]:
dictset = _select_columns(dictset, columns)
yield from dictset
def set_column(
dictset: Iterator[dict], column_name: str, setter: Callable
) -> Iterator[dict]:
"""
Performs set_value on each row in a set.
Parameters:
dictset: iterable of dictionaries
The dictset to process
column_name: string
The column to create or update
setter: callable or any
A function or constant to update the column with
Yields:
dictionary
"""
for record in dictset:
yield set_value(record, column_name, setter)
def drop_duplicates(dictset: Iterator[dict], cache_size: int = 10000):
"""
NOTE:
THIS MAY NOT DO WHAT YOU EXPECT IT TO.
    Removes duplicate records from a dictset. As it is able to run against
    an unbounded (infinite) set, it may not fully deduplicate a set. The
    larger the cache_size the more likely the deduplication will be correct;
    however, this is at the expense of speed and memory.
Parameters:
dictset: iterable of dictionaries:
The dictset to process
cache_size: integer (optional):
the number of records to cache, default 10,000
Yields:
dictionary
"""
from ..utils.lru_index import LruIndex
lru = LruIndex(size=cache_size)
for record in dictset:
entry = json.serialize(record)
if lru.test(entry):
continue
yield record
def limit(dictset: Iterator[dict], limit: int):
"""
Returns up to 'limit' number of records.
Parameters:
dictset: iterable of dictionaries:
The dictset to process
limit: integer:
the maximum number of records to return
Yields:
dictionary
"""
for counter, record in enumerate(dictset):
if counter == limit:
break
yield record
def dictsets_match(dictset_1: Iterator[dict], dictset_2: Iterator[dict]):
"""
    Tests if two dictsets match regardless of the order of the records in the
    dictsets. Returns True if the sets match.
Note that this will exhaust a generator.
Parameters:
dictset_1: iterable of dictionaries:
The first dictset to match
dictset_2: iterable of dictionaries:
The second dictset to match
Returns:
boolean
"""
def _hash_set(dictset: Iterator[dict]):
xor = 0
for record in dictset:
entry = json.serialize(record) # type:ignore
_hash = hash(entry)
xor = xor ^ _hash
return xor
return _hash_set(dictset_1) == _hash_set(dictset_2)
def page_dictset(dictset: Iterator[dict], page_size: int) -> Iterator:
"""
Enables paging through a dictset by returning a page of records at a time.
Parameters:
dictset: iterable of dictionaries:
The dictset to process
page_size: integer:
The number of records per page
Yields:
dictionary
"""
chunk: list = []
for record in dictset:
if len(chunk) >= page_size:
yield chunk
chunk = [record]
else:
chunk.append(record)
if chunk:
yield chunk
def sort(
dictset: Iterator[dict], column: str, cache_size: int, descending: bool = True
):
"""
NOTE:
THIS DOES NOT SORT THE ENTIRE DICTSET.
    Sorts a dictset by a column. As it is able to run on an unbounded dataset,
    it may not correctly order a set completely. The larger the cache_size the
more likely the set will be ordered correctly, at the cost of memory.
This method works best with partially sorted data, for randomized data
and a small cache, the effect of sorting is poor; for partially sorted
data, and/or a large cache, the effect is better.
Note that if this method is placed in a pipeline, it will need to process
cache_size number of records before it will emit any records.
Parameters:
dictset: iterable of dictionaries:
The dictset to process
column: string:
The field to order by
cache_size: integer:
The number of records to cache
descending: boolean (optional):
Order greatest first, default True
Yields:
dictionary
"""
def _sort_key(key):
"""
called like this: _sort_key(key)(row)
"""
k = key
def _inner_sort_key(row):
return row.get(k)
return _inner_sort_key
# cache_size is the high water mark, 3/4 is the low water mark. We fill the cache
# to the high water mark, sort it and yield the top 1/4 before filling again.
# This reduces the number of times we execute the sorted function which is the
# slowest part of this method.
    # A cache_size of 1000 has a negligible impact on performance, a cache_size of
# 50000 introduces a performance hit of about 15%.
quarter_cache = max(cache_size // 4, 1)
cache = []
for record in dictset:
cache.append(record)
if len(cache) > cache_size:
cache = sorted(cache, key=_sort_key(column), reverse=descending)
if descending:
yield from reversed(cache[:quarter_cache])
else:
yield from cache[:quarter_cache] # pragma: no cover
del cache[:quarter_cache]
cache = sorted(cache, key=_sort_key(column), reverse=descending)
yield from cache
def to_pandas(dictset: Iterator[dict]):
"""
Load an iterable of dictionaries into a pandas dataframe.
Parameters:
dictset: iterable of dictionaries:
The dictset to load
Returns:
pandas dataframe
"""
import pandas # type:ignore
return pandas.DataFrame(dictset)
def extract_column(dictset: Iterator[dict], column: str) -> list:
"""
Extract the values from column into a list
Parameters:
dictset: iterable of dictionaries:
The dictset to extract values from
column: string:
The name of the column to extract the values of
Returns:
list
"""
return [record.get(column) for record in dictset]
def group_by(dictset: Iterator[dict], column: str, dedupe: bool = False) -> Groups:
"""
Create a Groups object
Parameters:
dictset: iterable of dictionaries:
The dictset to group
column: string:
The name of the column to group by
Returns:
mabel.formats.Groups
"""
return Groups(dictset, column, dedupe)
def jsonify(list_of_json_strings: Iterator[str]):
"""
Convert a list of strings to a list of dictionaries
Parameters:
list_of_json_strings: iterable of strings:
The JSON formatted strings to parse to dictionaries
Yields:
dictionary
"""
return map(json.parse, list_of_json_strings) # type:ignore
def pass_thru_counter(dictset: Iterator[dict]):
"""
Count the number of records in a dictset that passes through this function.
The result is returned as an exception when the set is exhausted.
Parameters:
dictset: iterable of dictionaries:
The dictset to count
Raises:
Exception with the number of records in the message
"""
counter = -1
for counter, record in enumerate(dictset):
yield record
raise Exception(counter + 1)
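# --- Hedged usage sketch (not part of the original module): because of the
# relative imports above this is expected to be run as a module, e.g.
# `python -m juon.dictset.dictset`; the records are illustrative only.
if __name__ == "__main__":
    left = [{"key": 1, "colour": "green"}, {"key": 2, "colour": "blue"}]
    right = [{"key": 1, "size": "large"}]
    print(list(join(left, right, "key")))                       # inner join
    print(list(join(left, right, "key", join_type=LEFT_JOIN)))  # left join
    print(extract_column(left, "colour"))                       # ['green', 'blue']
    print(list(limit(union(left, right), 2)))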
```
#### File: seren/tests/test_index_lru.py
```python
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
from juon.utils.lru_index import LruIndex
from rich import traceback
traceback.install()
item_1 = "one"
item_2 = "two"
item_3 = "three"
item_4 = "four"
item_5 = "five"
def test_lru():
lru = LruIndex(size=3)
assert not lru(item_1), item_1 # first item shouldn't be on there
assert not lru(item_2), item_2 # different second item, not there
assert lru(item_2), item_2 # repeat of second item, is there
assert not lru(item_3), item_3 # new third item, not there
assert lru(item_1), item_1 # the first item should be there
assert not lru(item_4), item_4 # new forth item, not there, item_2 ejected
assert lru(item_1), item_1 # test the expected items are present
assert lru(item_3), item_3
assert lru(item_4), item_4
assert not lru(item_5), item_5 # add a new item, eject item_1
assert lru(item_5), item_5 # test the expected items are present
assert lru(item_3), item_3
assert lru(item_4), item_4
assert not lru(item_1), item_1 # confirm item_1 was ejected above
if __name__ == "__main__": # pragma: no cover
test_lru()
print("okay")
``` |
{
"source": "Joocheol/manim-master",
"score": 3
} |
#### File: MyFiles/Derivatives/Time_Value_of_Money.py
```python
from manimlib.imports import *
class Time_value_of_money(Scene):
def construct(self):
text_01 = TextMobject(*["Time ", "Value ", "of ", "Money"]).scale(2)
self.play(Write(text_01))
self.wait()
class intro_03(Scene):
def construct(self):
nline = NumberLine(x_min=0, x_max=4.1, unit_size=2, number_at_center=2, include_numbers=True)
d = Dot()
c_arrow_01 = CurvedArrow(np.array([-4, -0.6, 0]), np.array([-2, -0.6, 0]))
c_arrow_02 = CurvedArrow(np.array([-2, -0.6, 0]), np.array([0, -0.6, 0]))
c_arrow_03 = CurvedArrow(np.array([0, -0.6, 0]), np.array([2, -0.6, 0]))
c_arrow_04 = CurvedArrow(np.array([2, -0.6, 0]), np.array([4, -0.6, 0]))
self.play(ShowCreation(d.move_to([-4, 0, 0])))
self.play(ShowCreation(nline))
self.play(ShowCreation(c_arrow_01))
self.play(ShowCreation(c_arrow_02))
self.play(ShowCreation(c_arrow_03))
self.play(ShowCreation(c_arrow_04))
self.wait()
class intro_02(Scene):
def construct(self):
text_01 = TextMobject("1. Simple interest")
text_02 = TextMobject("2. Compound interest")
g_01 = VGroup(text_01, text_02).arrange(direction=DOWN, aligned_edge=LEFT)
self.play(Write(g_01))
self.wait()
class Interest_simple(Scene):
def construct(self):
formula_01 = TexMobject(*['10', r"\cdot", '(1+r)', '=', '11'])
formula_02 = TexMobject(*['10', '=', r"{{11}", r"\over", r"{1 + r}}"])
formula_03 = TexMobject(*['10', r"\cdot", '(1+r)', '=', '11'])
self.play(
Write(formula_01)
)
self.wait()
self.play(
ReplacementTransform(formula_01[0], formula_02[0]),
ReplacementTransform(formula_01[1], formula_02[3]),
ReplacementTransform(formula_01[2], formula_02[4]),
ReplacementTransform(formula_01[3], formula_02[1]),
ReplacementTransform(formula_01[4], formula_02[2]),
)
self.wait()
self.play(
ReplacementTransform(formula_02[0], formula_03[0]),
ReplacementTransform(formula_02[3], formula_03[1]),
ReplacementTransform(formula_02[4], formula_03[2]),
ReplacementTransform(formula_02[1], formula_03[3]),
ReplacementTransform(formula_02[2], formula_03[4]),
)
self.wait()
class Apple(Scene):
def construct(self):
apple_01 = TexMobject(*[r"\textrm{Apple }", r"(", "S_0", ")"]).move_to(LEFT * 4 + UP * 0)
apple_02 = TexMobject(*[r"\textrm{Apple }", r"(", "S_T", ")"]).move_to(RIGHT * 4 + UP * 0)
line = DashedLine(start=TOP, end=BOTTOM)
text_01 = TextMobject(r"Present").move_to(LEFT * 4 + UP * 3)
text_02 = TextMobject(r"Future").move_to(RIGHT * 4 + UP * 3)
c_arrow_01 = CurvedArrow(LEFT * 4 + 0.5 * DOWN, RIGHT * 4 + 0.5 * DOWN)
c_arrow_02 = CurvedArrow(RIGHT * 4 + 0.5 * UP, LEFT * 4 + 0.5 * UP)
self.add(*[line, text_01, text_02])
self.wait()
self.play(
Write(apple_01),
Write(apple_02),
)
self.wait()
self.play(
ShowCreation(c_arrow_01),
ShowCreation(c_arrow_02),
)
self.wait()
self.play(
Uncreate(c_arrow_01),
Uncreate(c_arrow_02),
run_time=2
)
self.wait()
graph = FunctionGraph(
lambda x: np.exp(-0.5/0.1 * (x - 4) ** 2) - 2,
x_min=3,
x_max=5,
color=YELLOW,
stroke_width=3,
)
self.play(ShowCreation(graph))
scale_factor = 3
self.play(
ScaleInPlace(apple_01[2], scale_factor)
)
self.wait(0.2)
self.play(
ScaleInPlace(apple_01[2], 1/scale_factor)
)
self.wait()
self.play(
ScaleInPlace(apple_02[2], scale_factor)
)
self.wait(0.2)
self.play(
ScaleInPlace(apple_02[2], 1/scale_factor)
)
self.wait()
class Money(Scene):
def construct(self):
formula_01 = TexMobject(*[r"10", r"\neq", r"11"])
formula_02 = TexMobject(*[r"\$ 10", r"\neq", r"\$ 11"])
path_01 = ArcBetweenPoints(formula_02[0].get_center(), LEFT * 4)
path_02 = ArcBetweenPoints(formula_02[2].get_center(), RIGHT * 4)
line = DashedLine(start=TOP, end=BOTTOM)
text_01 = TextMobject(r"Present")
text_02 = TextMobject(r"Future")
c_arrow_01 = CurvedArrow(LEFT * 4 + 0.5 * DOWN, RIGHT * 4 + 0.5 * DOWN)
c_arrow_02 = CurvedArrow(RIGHT * 4 + 0.5 * UP, LEFT * 4 + 0.5 * UP)
self.play(
Write(formula_01[0]),
)
self.wait()
self.play(
Write(formula_01[2]),
)
self.wait()
self.play(
Write(formula_01[1]),
)
self.wait()
self.play(
ReplacementTransform(formula_01[0], formula_02[0]),
ReplacementTransform(formula_01[2], formula_02[2]),
)
self.wait()
self.play(
FadeOut(formula_01[1]),
ShowCreation(line),
MoveAlongPath(formula_02[0], path_01),
MoveAlongPath(formula_02[2], path_02),
Write(text_01.move_to(LEFT * 4 + UP * 3)),
Write(text_02.move_to(RIGHT * 4 + UP * 3)),
)
self.wait()
self.play(
ShowCreation(c_arrow_01),
)
self.wait()
self.play(
ShowCreation(c_arrow_02),
)
self.wait()
class intro_old(Scene):
CONFIG = {
"n_of_steps": 20,
"width": 7,
"height": 5,
"radius": 0.1,
"origin": np.array([-4, 0, 0])
}
def construct(self):
nodes = [
[Circle(radius=self.radius) for j in range(i + 1)] for i in range(self.n_of_steps + 1)
]
for i in range(self.n_of_steps + 1):
for j in range(i + 1):
location = self.origin \
+ np.array([self.width / self.n_of_steps * i, 0, 0]) \
+ np.array([0, self.height / self.n_of_steps * (j - i / 2), 0])
nodes[i][j].move_to(location)
center = TextMobject("In this world, we have").scale(0.7)
self.play(Write(center.shift(LEFT * 0.5)))
nodes_g = VGroup(*[nodes[0][0]])
self.play(Write(nodes_g), run_time=1)
text_1 = TextMobject("Present").scale(0.7)
self.play(Write(text_1.next_to(nodes[0][0], LEFT)))
nodes_g = VGroup(*nodes[self.n_of_steps])
self.play(Write(nodes_g), run_time=1)
text_2 = TextMobject("Future").scale(0.7)
brace = Brace(nodes_g, RIGHT)
self.play(Write(brace))
self.play(Write(text_2.next_to(brace, RIGHT)))
center_1 = TextMobject("They are connected").scale(0.7)
self.play(Transform(center, center_1.shift(LEFT * 0.5)))
self.wait(2)
self.play(FadeOut(center))
nodes_g = VGroup(*[nodes[i][j] for i in range(1, self.n_of_steps) for j in range(i + 1)])
self.play(ShowCreation(nodes_g), run_time=2)
self.wait()
self.play(FadeOut(nodes_g))
text_3 = TextMobject("This is the binomial world").scale(0.7)
self.play(Write(text_3.shift(LEFT * 0.5)))
self.wait()
self.play(ShowCreationThenFadeOut(nodes_g))
self.wait()
class steps(Scene):
CONFIG = {
"n_of_steps": 20,
"width": 7,
"height": 5,
"radius": 0.1,
"origin": np.array([-4, 0, 0])
}
def construct(self):
for i in [1, 2, 5, 10, 20]:
self.my_tree(i)
self.wait()
def my_tree(self, steps):
nodes = [
[Circle(radius=self.radius) for j in range(i + 1)] for i in range(self.n_of_steps + 1)
]
for i in range(steps + 1):
for j in range(i + 1):
location = self.origin \
+ np.array([self.width / steps * i, 0, 0]) \
+ np.array([0, self.height / steps * (j - i / 2), 0])
nodes[i][j].move_to(location)
nodes_g = VGroup(*[nodes[i][j] for i in range(steps + 1) for j in range(i + 1)])
self.play(ShowCreation(nodes_g))
self.wait()
self.play(Uncreate(nodes_g))
# def construct(self):
# nodes = [
# [Circle(radius = self.radius) for j in range(i+1)] for i in range(self.n_of_steps+1)
# ]
# for i in range(self.n_of_steps+1):
# for j in range(i+1):
# location = self.origin \
# + np.array([self.width/self.n_of_steps * i, 0, 0]) \
# + np.array([0, self.height/self.n_of_steps * (j-i/2), 0])
# nodes[i][j].move_to(location)
# center = TextMobject("It starts from").scale(0.7)
# self.play(Write(center.shift(LEFT*0.5)))
# nodes_g = VGroup(*[nodes[0][0]])
# self.play(Write(nodes_g), run_time=1)
# text_1 = TextMobject("Present").scale(0.7)
# self.play(Write(text_1.next_to(nodes[0][0], LEFT)))
# nodes_g = VGroup(*nodes[self.n_of_steps])
# self.play(Write(nodes_g), run_time=1)
# text_2 = TextMobject("Future").scale(0.7)
# brace = Brace(nodes_g, RIGHT)
# self.play(Write(brace))
# self.play(Write(text_2.next_to(brace, RIGHT)))
#
```
#### File: MyFiles/MyLib/MyModules.py
```python
from manimlib.imports import *
# computer code printing.
# This is really sensitive to word "verbatim"
def code(self, text):
run_time = len(text) / 30
tmp = TextMobject("\\begin{verbatim} " + text + "\\end{verbatim}").set_color(GRAY)
self.play(ShowCreation(tmp.scale(0.6).to_edge(UL, buff=1)), run_time=run_time)
# wrap a mobject's left and right sides in brackets
def add_brackets(mobj):
bracket_pair = TexMobject("\\big[", "\\big]")
bracket_pair.scale(2)
bracket_pair.stretch_to_fit_height(
mobj.get_height() + 2 * 0.1
)
l_bracket, r_bracket = bracket_pair.split()
l_bracket.next_to(mobj, LEFT, .2)
r_bracket.next_to(mobj, RIGHT, .2)
return VGroup(l_bracket, mobj, r_bracket)
# fill the screen with small rectangles
rect_size = 0.25
def fill_rect(m=1, n=1, h=rect_size, w=rect_size, column=True):
rect = Rectangle(height=h, width=w).set_fill(YELLOW, opacity=0.8).set_stroke(width=0)
if column:
col = VGroup(*[rect.copy() for i in range(m)]).arrange(DOWN)
result = VGroup(*[col.copy() for i in range(n)]).arrange(RIGHT)
else:
row = VGroup(*[rect.copy() for i in range(n)]).arrange(RIGHT)
result = VGroup(*[row.copy() for i in range(m)]).arrange(DOWN)
return result
def col_rect(m, n):
h = rect_size * (2 * m - 1)
return fill_rect(m=1, n=n, h=h, w=rect_size, column=False)
def row_rect(m, n):
w = rect_size * (2 * n - 1)
return fill_rect(m=m, n=1, h=rect_size, w=w, column=True)
```
#### File: MyFiles/TensorFlow/Plotting_a_line.py
```python
from manimlib.imports import *
class Plotting_a_line(GraphScene):
def construct(self):
self.setup_axes()
self.wait()
dot_01 = Dot().move_to(self.coords_to_point(2, 3))
dot_02 = Dot().move_to(self.coords_to_point(5, 7))
self.add(*[dot_01, dot_02])
self.wait()
tracker_01 = ValueTracker(-1)
tracker_02 = ValueTracker(5)
def func_01():
m = tracker_01.get_value()
b = tracker_02.get_value()
graph = self.get_graph(lambda x: m * x + b)
graph.set_color(color=YELLOW)
return graph
graph_01 = always_redraw(func_01)
self.play(ShowCreation(graph_01))
self.wait()
self.play(tracker_01.set_value, 4/3, tracker_02.set_value, 3-(8/3), rate_func=smooth, run_time=5)
self.wait()
formula_01 = TexMobject(r"y = m x + b").move_to(RIGHT*3)
self.play(Write(formula_01))
self.wait()
``` |
{
"source": "joochlee/iotivity-lite",
"score": 2
} |
#### File: python/obt_gui/ui.py
```python
from ctypes import sizeof
import time
import queue
import signal
import logging
import os
import sys
import json
from json.decoder import JSONDecodeError
import tkinter as tk
from tkinter.constants import END
from tkinter.scrolledtext import ScrolledText
from tkinter import ttk, VERTICAL, HORIZONTAL, N, S, E, W
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import iotivity
logger = logging.getLogger(__name__)
app = None
my_iotivity = iotivity.Iotivity()
def show_window_with_text(window_name, my_text):
""" call back for the IDD file request
Args:
client (class): mqtt client
userdata (not used): not used
message (class): received mqtt message
udn (string): udn, the responder udn
"""
window = tk.Toplevel()
window.title(window_name)
text_area = ScrolledText(window, wrap=tk.WORD, width=80, height=50)
text_area.grid(column=0, pady=10, padx=10)
text_area.insert(tk.INSERT, my_text)
text_area.configure(state='disabled')
class QueueHandler(logging.Handler):
"""Class to send logging records to a queue
It can be used from different threads
The ConsoleUi class polls this queue to display records in a ScrolledText widget
"""
# Example from <NAME>: https://gist.github.com/moshekaplan/c425f861de7bbf28ef06
# (https://stackoverflow.com/questions/13318742/python-logging-to-tkinter-text-widget) is not thread safe!
# See https://stackoverflow.com/questions/43909849/tkinter-python-crashes-on-new-thread-trying-to-log-on-main-thread
def __init__(self, log_queue):
super().__init__()
self.log_queue = log_queue
def emit(self, record):
self.log_queue.put(record)
class ConsoleUi:
"""Poll messages from a logging queue and display them in a scrolled text widget"""
def __init__(self, frame):
self.frame = frame
        # Create a ScrolledText widget
self.scrolled_text = ScrolledText(frame, state='disabled', height=40)
self.scrolled_text.grid(row=0, column=0, sticky=(N, S, W, E))
self.scrolled_text.configure(font='TkFixedFont')
self.scrolled_text.tag_config('INFO', foreground='black')
self.scrolled_text.tag_config('DEBUG', foreground='gray')
self.scrolled_text.tag_config('WARNING', foreground='orange')
self.scrolled_text.tag_config('ERROR', foreground='red')
self.scrolled_text.tag_config('CRITICAL', foreground='red', underline=1)
# Create a logging handler using a queue
self.log_queue = queue.Queue()
self.queue_handler = QueueHandler(self.log_queue)
formatter = logging.Formatter('%(asctime)s: %(message)s')
self.queue_handler.setFormatter(formatter)
logger.addHandler(self.queue_handler)
# Start polling messages from the queue
self.frame.after(100, self.poll_log_queue)
def display(self, record):
msg = self.queue_handler.format(record)
self.scrolled_text.configure(state='normal')
self.scrolled_text.insert(tk.END, msg + '\n', record.levelname)
self.scrolled_text.configure(state='disabled')
# Autoscroll to the bottom
self.scrolled_text.yview(tk.END)
def poll_log_queue(self):
# Check every 100ms if there is a new message in the queue to display
while True:
try:
record = self.log_queue.get(block=False)
except queue.Empty:
break
else:
self.display(record)
self.frame.after(100, self.poll_log_queue)
class FormUi:
def __init__(self, frame):
self.frame = frame
# Create a combobbox to select the request type
values = ['GET', 'POST']
self.request_type = tk.StringVar()
ttk.Label(self.frame, text='Request Type:').grid(
column=0, row=0, sticky=W)
self.combobox = ttk.Combobox(
self.frame,
textvariable=self.request_type,
width=25,
state='readonly',
values=values
)
self.combobox.current(0)
self.combobox.grid(column=1, row=0, sticky=W)
my_width = 60
# Create a text field to enter the request URL
self.URL = tk.StringVar()
ttk.Label(self.frame, text='Request URL:').grid(column=0, row=2, sticky=W)
ttk.Entry(self.frame, textvariable=self.URL, width=my_width).grid(
column=1, row=2, sticky=(W, E))
self.URL.set('oic/d')
# Create a text field to enter POST query
self.query = tk.StringVar()
ttk.Label(self.frame, text='Request Query:').grid(column=0, row=3, sticky=W)
ttk.Entry(self.frame, textvariable=self.query, width=my_width).grid(
column=1, row=3, sticky=(W, E))
self.query.set('')
# Create a text field to enter Payload as json
self.payload_json = tk.StringVar()
ttk.Label(self.frame, text='Request Payload:').grid(column=0, row=6, sticky=W)
ttk.Entry(self.frame, textvariable=self.payload_json, width=my_width).grid(
column=1, row=6, sticky=(W, E))
self.payload_json.set('{"property1": new_value1, "property2": new_value2}')
row_index = 10
row_index += 1
# Add a button to publish the message as cbor
tk.Label(self.frame, text=' ').grid(column=0, row=row_index, sticky=W)
self.button = ttk.Button(
self.frame, text='Send Request', command=self.send_request)
self.button.grid(column=1, row=row_index, sticky=W)
row_index += 1
ttk.Label(self.frame, text=' ').grid(column=0, row=row_index, sticky=W)
row_index += 1
# Add a button to do discovery
tk.Label(self.frame, text='Device Discovery:').grid(column=0, row=row_index, sticky=W)
self.button = ttk.Button(
self.frame, text='Discover', command=self.discover_devices)
self.button.grid(column=1, row=row_index, sticky=W)
row_index += 1
# list box section
tk.Label(self.frame, text='Discovered:').grid(column=0, row=row_index, sticky=W)
# len_max = len(random_string())
self.l1 = tk.Listbox(self.frame, height=3, width=my_width, exportselection=False)
self.l1.grid(column=1, row=row_index, sticky=(W, E))
row_index += 3
# Add a button to publish the message as cbor
self.button_clear = ttk.Button(self.frame, text='Clear', command=self.submit_clear)
self.button_clear.grid(column=0, row=row_index, sticky=W)
def update_display(self):
time.sleep(0.1)
app.root.update()
def discover_devices(self):
logger.log(logging.INFO, f"Doing device discovery")
self.update_display()
my_iotivity.discover_all()
nr_unowned = my_iotivity.get_nr_unowned_devices()
logger.log(logging.INFO, f"{nr_unowned} devices discovered: ")
self.update_display()
for i in range(nr_unowned):
unowned_uuid = my_iotivity.get_unowned_uuid(i)
unowned_name = my_iotivity.get_device_name(unowned_uuid)
logger.log(logging.INFO, f"Unowned No.{i}: {unowned_uuid} - {unowned_name}")
self.update_display()
logger.log(logging.INFO, f"Onboarding all devices")
self.update_display()
my_iotivity.onboard_all_unowned()
my_iotivity.list_owned_devices()
nr_owned = my_iotivity.get_nr_owned_devices()
logger.log(logging.INFO, f"{nr_owned}/{nr_unowned} devices onboarded")
self.update_display()
obt_uuid = my_iotivity.get_obt_uuid()
for i in range(0, my_iotivity.get_nr_owned_devices()):
device_uuid = my_iotivity.get_owned_uuid(i)
device_name = my_iotivity.get_device_name(device_uuid)
device_info = f"{device_uuid} - {device_name}"
logger.log(logging.INFO, f"Provisioning device No.{i}: {device_info}")
self.update_display()
discovered_devices = app.form.l1.get(0, END)
if device_info not in discovered_devices:
app.form.l1.insert(END, device_info)
my_iotivity.provision_id_cert(device_uuid)
my_iotivity.provision_ace_chili(device_uuid, obt_uuid)
def send_request(self):
if self.l1.curselection() == ():
print("No device selected!")
return
device_index = int(self.l1.curselection()[0])
device_uuid = my_iotivity.get_owned_uuid(device_index)
if self.request_type.get() == 'GET':
request_url = self.URL.get()
result, response_payload = my_iotivity.general_get(device_uuid, request_url)
if result:
logger.log(logging.INFO, f"GET {request_url} succeeded")
self.update_display()
show_window_with_text(f"{self.request_type.get()} {request_url} response payload", response_payload)
else:
logger.log(logging.INFO, f"GET {request_url} failed")
self.update_display()
elif self.request_type.get() == 'POST':
request_query = self.query.get()
request_url = self.URL.get()
payload_json_str = self.payload_json.get()
payload_property_list = payload_value_list = payload_type_list = []
if payload_json_str:
json_data = json.loads(payload_json_str)
payload_property_list = list(json_data.keys())
payload_value_list = list(json_data.values())
payload_type_list = []
for i in range(len(payload_value_list)):
# Determine payload type
if isinstance(payload_value_list[i], bool):
payload_value_list[i] = "1" if payload_value_list[i] else "0"
payload_type_list.append("bool")
elif isinstance(payload_value_list[i], int):
payload_value_list[i] = str(payload_value_list[i])
payload_type_list.append("int")
elif isinstance(payload_value_list[i], float):
payload_value_list[i] = str(payload_value_list[i])
payload_type_list.append("float")
elif isinstance(payload_value_list[i], str):
payload_type_list.append("str")
else:
logger.log(logging.INFO, f"Unrecognised payload type! ")
self.update_display()
return
result, response_payload = my_iotivity.general_post(device_uuid, request_query, request_url, payload_property_list, payload_value_list, payload_type_list)
if result:
logger.log(logging.INFO, f"POST {request_url} succeeded")
self.update_display()
show_window_with_text(f"POST {request_url} response payload", response_payload)
else:
logger.log(logging.INFO, f"POST {request_url} failed")
self.update_display()
def submit_clear(self):
""" clear the discovered device list
"""
print("Clear - delete all devices")
logger.log(logging.INFO, "Clear - offboard all devices")
self.update_display()
self.l1.delete(0, END)
my_iotivity.offboard_all_owned()
class App:
def __init__(self, root):
""" create the application, having 3 panes.
"""
self.root = root
root.title('OBT GUI')
menubar = tk.Menu(root)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Config", command=donothing)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.quit)
helpmenu = tk.Menu(menubar, tearoff=0)
helpmenu.add_command(label="About...", command=donothing)
root.config(menu=menubar)
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
# Create the panes and frames
vertical_pane = ttk.PanedWindow(self.root, orient=VERTICAL)
vertical_pane.grid(row=0, column=0, sticky="nsew")
# vertical_pane.grid(row=1, column=1, sticky="nsew")
horizontal_pane = ttk.PanedWindow(vertical_pane, orient=HORIZONTAL)
vertical_pane.add(horizontal_pane)
form_frame = ttk.Labelframe(
horizontal_pane, text="Publish Information")
form_frame.columnconfigure(1, weight=1)
horizontal_pane.add(form_frame, weight=1)
console_frame = ttk.Labelframe(horizontal_pane, text="Console")
console_frame.columnconfigure(0, weight=1)
console_frame.rowconfigure(0, weight=1)
horizontal_pane.add(console_frame, weight=1)
# Initialize all frames
self.form = FormUi(form_frame)
self.form.app = self
self.console = ConsoleUi(console_frame)
self.console.app = self
self.root.protocol('WM_DELETE_WINDOW', self.quit)
self.root.bind('<Control-q>', self.quit)
signal.signal(signal.SIGINT, self.quit)
def quit(self, *args):
""" quit function for the app
"""
my_iotivity.offboard_all_owned()
self.root.destroy()
def donothing():
filewin = tk.Toplevel(app.root)
button = tk.Button(filewin, text="Do nothing button")
button.pack()
def main():
# initalize the GUI application
global app
root = tk.Tk()
app = App(root)
logging.basicConfig(level=logging.DEBUG)
logger.log(logging.INFO, "Onboarding tool started with UUID: " + my_iotivity.get_obt_uuid())
# app.root.config(menu=menubar)
app.root.mainloop()
my_iotivity.quit()
if __name__ == '__main__':
main()
``` |
{
"source": "jooddang/terra-py",
"score": 3
} |
#### File: msg/bank/msgmultisend.py
```python
from typing import List
from terra.msg.inout import InOut
from terra.utils.jsonserializable import JsonSerializable
class MsgMultiSend(JsonSerializable):
def __init__(self, inputs: List[InOut], outputs: List[InOut]) -> None:
"""Represent the top level of a MsgMultiSend message."""
self.type = "bank/MsgMultiSend"
self.value = MsgMultiSendValue(inputs, outputs)
class MsgMultiSendValue(JsonSerializable):
def __init__(self, inputs: List[InOut], outputs: List[InOut]) -> None:
"""Values of a MsgMultiSend message."""
self.inputs = inputs
self.outputs = outputs
```
#### File: msg/distribution/msgsetwithdrawaddress.py
```python
from terra.utils.jsonserializable import JsonSerializable
class MsgSetWithdrawAddress(JsonSerializable):
def __init__(self, delegator_address: str, withdraw_address: str) -> None:
"""Represent the top level of a MsgSetWithdrawAddress message."""
self.type = "distribution/MsgSetWithdrawAddress"
self.value = MsgSetWithdrawAddressValue(
delegator_address, withdraw_address
)
class MsgSetWithdrawAddressValue(JsonSerializable):
def __init__(self, delegator_address: str, withdraw_address: str) -> None:
"""Values of a MsgSetWithdrawAddress message."""
self.delegator_address = delegator_address
self.withdraw_address = withdraw_address
```
#### File: msg/staking/msgdelegate.py
```python
from terra.msg.coin import Coin
from terra.utils.jsonserializable import JsonSerializable
class MsgDelegate(JsonSerializable):
def __init__(
self, delegator_address: str, validator_address: str, amount: Coin
) -> None:
"""Represent the top level of a MsgDelegate message."""
self.type = "staking/MsgDelegate"
self.value = MsgDelegateValue(
delegator_address, validator_address, amount
)
class MsgDelegateValue(JsonSerializable):
def __init__(
self, delegator_address: str, validator_address: str, amount: Coin
) -> None:
"""Values of a MsgDelegate message."""
self.delegator_address = delegator_address
self.validator_address = validator_address
self.amount = amount
```
#### File: terra/utils/crypto.py
```python
import hashlib
import uuid
from ecdsa import curves, SECP256k1, SigningKey
from ecdsa.util import sigencode_string, sigencode_string_canonize
def generate_salt() -> str:
"""Generate a 4 bytes salt."""
return uuid.uuid4().hex[:4]
def sha256_and_sign(
payload: str,
private_key: str,
curve: curves.Curve = SECP256k1,
canonize: bool = True,
) -> bytes:
"""Sign a payload.
Uses ecdsa curves, SECP256k1 by default.
"""
sk = SigningKey.from_string(bytes.fromhex(private_key), curve=curve)
sigencode = sigencode_string_canonize if canonize else sigencode_string
return sk.sign_deterministic(
payload.encode(), hashfunc=hashlib.sha256, sigencode=sigencode
)
def sha256(payload: str) -> bytes:
"""Hash a payload with sha256."""
return hashlib.sha256(payload.encode()).digest()
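# --- Hedged usage sketch (not part of the original module): the private key is
# a throwaway illustrative value (32 bytes of 0x11), not a real credential.
if __name__ == "__main__":
    demo_key = "11" * 32
    print(generate_salt())                         # e.g. '1f3a'
    print(sha256("hello").hex())
    print(sha256_and_sign("hello", demo_key).hex())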
```
#### File: terra-py/tests/test_account.py
```python
from terra import Account
ACCOUNT = {
"account_address": "terra1ganslgkvaen5gcqfpxu2fvqa08hxpfzn0ayw2s",
"mnemonic": "bread genuine element reopen cliff power mean quiz mutual "
"six machine planet dry detect edit slim clap firm jelly "
"success narrow orange echo tomorrow",
"operator_address": "terravaloper1ganslgkvaen5gcqfpxu2fvqa08hxpfzn0jgn6r",
"private_key": "861c3746d1bf6bc83acac4c9e72dbe7cdcf944031823b1c7e1248d163"
"c2b9c01",
"public_key": "<KEY>"
"d4e2cbc6",
}
def test_account():
a = Account(ACCOUNT["mnemonic"])
assert a.account_address == ACCOUNT["account_address"]
assert a.mnemonic == ACCOUNT["mnemonic"]
assert a.operator_address == ACCOUNT["operator_address"]
assert a.private_key == ACCOUNT["private_key"]
assert a.public_key == ACCOUNT["public_key"]
def test_account_generate():
a = Account.generate()
assert len(a.mnemonic.split()) == 24
assert a.mnemonic != Account.generate().mnemonic
```
#### File: terra-py/tests/test_terra.py
```python
import toml
from terra import __version__
def test_version():
# tests are started from root folder so path is relative to there
with open("pyproject.toml") as f:
assert __version__ == toml.load(f)["tool"]["poetry"]["version"]
``` |
{
"source": "joodicator/mcchat",
"score": 3
} |
#### File: joodicator/mcchat/Session.py
```python
from McClient.networking.Exceptions import *
from McClient.networking.Session import *
import urllib
import urllib2
class BaseSession(object):
game_version = None
username = None
sessionID = None
UID = None
online = False
def connect(self, username, password):
raise NotImplementedError()
def joinserver(self, serverID):
raise NotImplementedError()
class Session(BaseSession):
"""Session object for connecting to online server."""
__LOGIN_URL = "https://login.minecraft.net"
__LOGIN_HEADER = {"Content-Type": "application/x-www-form-urlencoded"}
__JOIN_URL = "http://session.minecraft.net/game/joinserver.jsp"
VERSION = 13
def connect(self, username, password):
"""Connects minecraft.net and gets a session id."""
data = urllib.urlencode({"user": username,
"password": password,
"version": self.VERSION})
req = urllib2.Request(self.__LOGIN_URL, data, self.__LOGIN_HEADER)
opener = urllib2.build_opener()
try:
response = opener.open(req, None, 10).read()
except urllib2.URLError:
raise SessionError("Unable to connect to login server.")
if response.lower() == "bad login":
raise SessionBadLogin("Wrong username/password combination.")
if response.lower() in ("old version", "bad response"):
raise SessionVersionError("Client version deprecated.")
if response.lower() == "account migrated, use e-mail":
raise SessionBadLogin("Account migrated, use e-mail")
if len(response.split(':')) < 5:
raise SessionError(response)
response = response.split(":")
self.online = True
self.game_version = response[0]
# field #1 is deprecated, always!
self.username = response[2]
self.sessionID = response[3]
self.UID = response[4]
def joinserver(self, serverID):
url = self.__JOIN_URL + "?user=%s&sessionId=%s&serverId=%s" \
% (self.username, self.sessionID, serverID)
response = urllib2.urlopen(url).read()
if response != "OK":
raise SessionError("Authenticating with Minecraft.net failed, " +
"response was: %s" % response)
return True
class OfflineSession(BaseSession):
"""Session object for connecting to offline servers."""
def connect(self, username, password):
"""Since this is offline mode, we don't need the password."""
self.username = username
self.sessionID = "-"
def joinserver(self, serverID):
"""Since this is offline mode, we don't need the serverID."""
return True
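# --- Hedged usage sketch (not part of the original module): OfflineSession
# needs no credentials or network access, so it can be exercised directly.
if __name__ == "__main__":
    session = OfflineSession()
    session.connect("Player", None)
    print session.username, session.sessionID
    print session.joinserver("-")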
``` |
{
"source": "joodicator/youtube-sync-2",
"score": 3
} |
#### File: joodicator/youtube-sync-2/ytdata.py
```python
import sys
import os.path
import apiclient.discovery
API_KEY_FILE = os.path.join(os.path.dirname(__file__), 'ytdata-api-key')
def build_service(api_key=None):
if api_key is None and os.path.exists(API_KEY_FILE):
with open(API_KEY_FILE) as file:
api_key = file.read().strip()
elif api_key is None:
print('Warning: %s does not exist, but it should contain a YouTube'
' Data API key (see https://developers.google.com/youtube/'
'registering_an_application). Proceeding with no API key.\n'
% API_KEY_FILE, file=sys.stderr)
return apiclient.discovery.build(
serviceName = 'youtube',
version = 'v3',
developerKey = api_key)
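# --- Hedged usage sketch (not part of the original module): 'VIDEO_ID_HERE' is
# a placeholder; a valid key in ytdata-api-key is needed for the call to succeed.
if __name__ == '__main__':
    service = build_service()
    request = service.videos().list(part='snippet', id='VIDEO_ID_HERE')
    print(request.execute())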
``` |
{
"source": "joodo/jazz-or-not",
"score": 3
} |
#### File: jazz-or-not/train/resnet.py
```python
from paddle import fluid as fluid
def conv_bn(input, ch_out, filter_size, stride, padding, act='relu',
bias_attr=False):
conv_layer = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr,
)
return fluid.layers.batch_norm(input=conv_layer, act=act)
def shortcut(input, ch_in, ch_out, stride):
if ch_in != ch_out:
return conv_bn(input, ch_out, 1, stride, 0, None) # change channel by conv 1x1
else:
return input
def basic_block(input, ch_in, ch_out, stride):
conv1 = conv_bn(input, ch_out, 3, stride, 1)
conv2 = conv_bn(conv1, ch_out, 3, 1, 1, act=None, bias_attr=True)
short = shortcut(input, ch_in, ch_out, stride)
return fluid.layers.elementwise_add(x=conv2, y=short, act='relu')
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
layer = block_func(input, ch_in, ch_out, stride)
for i in range(1, count):
layer = block_func(layer, ch_out, ch_out, 1)
return layer
def resnet(input, depth=32):
assert (depth - 2) % 6 == 0
    n = (depth - 2) // 6
conv1 = conv_bn(input, ch_out=16, filter_size=3, stride=1, padding=1)
res1 = layer_warp(basic_block, conv1, 16, 16, n, 1)
res2 = layer_warp(basic_block, res1, 16, 32, n, 2)
res3 = layer_warp(basic_block, res2, 32, 64, n, 2)
#drop = fluid.layers.dropout(x=res3, dropout_prob=0.5)
pool = fluid.layers.pool2d(
input=res3,
pool_size=8,
pool_type='avg',
pool_stride=1,
)
predict = fluid.layers.fc(input=pool, size=1, act='sigmoid')
return predict
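# --- Hedged usage sketch (not part of the original module): builds the graph on
# a CIFAR-sized placeholder; the layer name and shape are illustrative, and
# depth must satisfy (depth - 2) % 6 == 0.
if __name__ == '__main__':
    image = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')
    prediction = resnet(image, depth=32)
    print(prediction.shape)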
``` |
{
"source": "joodo/loser-fan",
"score": 2
} |
#### File: management/commands/callapi.py
```python
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils.encoding import smart_str
import sys, urllib, re
from fanfouapi import oauth, auth, api
from urllib2 import Request, urlopen
from urlparse import urljoin
import getpass
def get_api(oauth_token=settings.PUBLISHER_OAUTH_TOKEN,
oauth_secret=settings.PUBLISHER_OAUTH_SECRET):
handler = auth.OAuthHandler(settings.FF_API_KEY, settings.FF_API_SECRET)
handler.set_access_token(oauth_token, oauth_secret)
return api.API(handler)
class Command(BaseCommand):
help = "Call API"
def handle(self, action, *args, **options):
a = get_api()
if action == 'statuses/update':
if len(args) > 0:
print a.update_status(status=args[0])
else:
print >>sys.stderr, 'statuses/update <status>'
elif action == 'statuses/repost':
if len(args) == 0:
print >>sys.stderr, 'statuses/repost <statusid> <status>'
else:
repost_status_id = args[0]
repost_status = a.get_status(repost_status_id)
print repost_status.get_text()
if len(args) > 1:
status = args[1]
else:
status = '\xe8\xbd\xac@' + smart_str(repost_status.user.screen_name + ' ' + repost_status.get_text())
#status = prefix + msg.sendFromRealName
a.update_status(status=status, repost_status_id=repost_status_id)
elif action == 'friendships/exists':
if len(args) >= 2:
v = a.exists_friendship(user_a=args[0], user_b=args[1])
print v.value
else:
print >>sys.stderr, 'friendships/exists <user_a> <user_b>'
elif action == 'photos/upload':
if len(args) > 1:
print a.upload(args[0], args[1])
else:
print >>sys.stderr, 'photos/upload <imgfile> <status>'
elif action in ('statuses/friends_timeline',
'statuses/home_timeline'):
print a.friends_timeline()
elif action == 'statuses/public_timeline':
print a.public_timeline()
elif action in ('statuses/user_timeline',):
if len(args) > 0:
print a.user_timeline(user_id=args[0])
else:
print >>sys.stderr, 'statuses/user_timeline <user_id>'
else:
print >>sys.stderr, 'Unknown action', action
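# --- Hedged usage sketch (not part of the original command): typical invocations
# from a Django project root; the arguments are illustrative only.
#   python manage.py callapi statuses/update "hello fanfou"
#   python manage.py callapi statuses/repost 12345 "look at this"
#   python manage.py callapi friendships/exists user_a user_b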
``` |