| content (string, lengths 0 to 894k) | type (string, 2 classes) |
| --- | --- |
#py_gui.py
"gui basics"
from tkinter import *
class Application(Frame):
pass
root = Tk()
app = Application(master=root)
app.mainloop()
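# A minimal extension of the empty Application above (an assumed sketch, not part
# of the original): widgets are usually created in __init__ and packed into the frame.
class GreetingApp(Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.pack()
        self.label = Label(self, text="Hello, Tkinter!")
        self.label.pack()
        self.quit_button = Button(self, text="Quit", command=self.quit)
        self.quit_button.pack()
# usage mirrors the pattern above:
#   root = Tk(); GreetingApp(master=root); root.mainloop()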
|
python
|
'''
There are a total of numCourses courses you have to take, labeled from 0 to numCourses-1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
**Example 1**
`Input: numCourses = 2, prerequisites = [[1,0]]`
`Output: true`
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0. So it is possible.
**Example 2**
`Input: numCourses = 2, prerequisites = [[1,0],[0,1]]`
`Output: false`
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0, and to take course 0 you should
also have finished course 1. So it is impossible.
**Note**
You may assume that there are no duplicate edges in the input prerequisites.
'''
from collections import defaultdict
class Solution(object):
def __init__(self):
self.eligibleCourses = []
self.visited = []
    def seedEligibleCourses(self, g):
        # a course is eligible when no unfinished course still depends on it
        for course, dependents in g.items():
            if len(dependents) == 0 and course not in self.visited:
                self.eligibleCourses.append(course)
    def dfs(self, node, g):
        if node in self.visited:
            return
        self.visited.append(node)
        # detach this course: drop every edge that still points to it
        for _, dependents in g.items():
            if node in dependents:
                dependents.remove(node)
        # queue up any unvisited successors and recurse into them
        for successor in g[node]:
            if successor not in self.visited:
                self.eligibleCourses.append(successor)
                self.dfs(successor, g)
def canFinish(self, numCourses, prerequisites):
if not prerequisites:
return True
graph = defaultdict(list)
for relation in prerequisites:
currentCourse, prerequisite = relation[0], relation[1]
            graph[prerequisite].append(currentCourse)  # edge: prerequisite -> course that depends on it
if currentCourse not in graph:
graph[currentCourse] = []
self.seedEligibleCourses(graph)
while self.eligibleCourses:
current = self.eligibleCourses.pop(0)
self.dfs(current, graph)
self.seedEligibleCourses(graph)
for _, n in graph.items():
if len(n) > 0:
return False
return True
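# Quick check against the two examples from the problem statement above
# (a fresh Solution instance per call, since it keeps state in self.visited):
print(Solution().canFinish(2, [[1, 0]]))          # True  (Example 1)
print(Solution().canFinish(2, [[1, 0], [0, 1]]))  # False (Example 2)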
|
python
|
"""This acts as a kind of middleware - which has now been whittled down to only providing
logging information"""
import logging
import platform
from .constants import NameSpace
from .config import CLIInputs, ParsedArgs
from .utils import read_sdk_version
logger = logging.getLogger("validate-cli-args")
class ValidateCliArgs:
"""Called via ArgParser.validate_cli_args"""
def __init__(self, cli_inputs: CLIInputs):
self.cli_inputs = cli_inputs
# ----- MAIN ARGUMENT HANDLERS ----- #
def handle_top_level_args(self, parsed_args: ParsedArgs) -> None:
        if self.cli_inputs.namespace != NameSpace.TOP_LEVEL:
return
if parsed_args.version:
logger.info("ElectrumSV Software Development Kit")
logger.info(f"Python version {platform.python_version()}-{platform.architecture()[0]}")
logger.info(f"SDK version {read_sdk_version()}")
def handle_install_args(self, parsed_args: ParsedArgs) -> None:
        if self.cli_inputs.namespace != NameSpace.INSTALL:
return
# logging
if parsed_args.id != "":
logger.debug(f"id flag={parsed_args.id}")
if parsed_args.repo != "":
logger.debug(f"repo flag={parsed_args.repo}")
if parsed_args.branch != "":
logger.debug(f"branch flag={parsed_args.branch}")
def handle_start_args(self, parsed_args: ParsedArgs) -> None:
        if self.cli_inputs.namespace != NameSpace.START:
return
# logging
if parsed_args.new:
logger.debug("new flag=set")
if parsed_args.gui:
logger.debug("gui flag=set")
if parsed_args.id != "":
logger.debug(f"id flag={parsed_args.id}")
if parsed_args.repo != "":
logger.debug(f"repo flag={parsed_args.repo}")
if parsed_args.branch != "":
logger.debug(f"branch flag={parsed_args.branch}")
def handle_stop_args(self, parsed_args: ParsedArgs) -> None:
"""takes no arguments"""
if not self.cli_inputs.namespace == NameSpace.STOP:
return
# logging
if parsed_args.id != "":
logger.debug(f"id flag={parsed_args.id}")
def handle_reset_args(self, parsed_args: ParsedArgs) -> None:
"""takes no arguments"""
if not self.cli_inputs.namespace == NameSpace.RESET:
return
# logging
if parsed_args.id != "":
logger.debug(f"id flag={parsed_args.id}")
if parsed_args.repo != "":
logger.debug(f"repo flag={parsed_args.repo}")
def handle_status_args(self, _parsed_args: ParsedArgs) -> None:
return
def handle_config_args(self, parsed_args: ParsedArgs) -> None:
return
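# Minimal sketch (an assumption, not part of this module): the logger.info/debug
# calls above only become visible once the root logger has a handler, e.g.:
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG,
                        format="%(name)s - %(levelname)s - %(message)s")
    logger.debug("handlers in ValidateCliArgs will now emit to stderr")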
|
python
|
# unet.py
#
from __future__ import division
import torch.nn as nn
import torch.nn.functional as F
import torch
from numpy.linalg import svd
from numpy.random import normal
from math import sqrt
class UNet(nn.Module):
def __init__(self, colordim = 1):
super(UNet, self).__init__()
self.conv1_1 = nn.Conv2d(colordim, 64, 3, padding = 1)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding = 1)
self.bn1_1 = nn.BatchNorm2d(64)
self.bn1_2 = nn.BatchNorm2d(64)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding = 1)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding = 1)
self.bn2_1 = nn.BatchNorm2d(128)
self.bn2_2 = nn.BatchNorm2d(128)
self.conv4_1 = nn.Conv2d(128, 256, 3, padding = 1)
self.conv4_2 = nn.Conv2d(256, 256, 3, padding = 1)
self.upconv4 = nn.Conv2d(256, 128, 1)
self.bn4 = nn.BatchNorm2d(128)
self.bn4_1 = nn.BatchNorm2d(256)
self.bn4_2 = nn.BatchNorm2d(256)
self.bn4_out = nn.BatchNorm2d(256)
self.conv7_1 = nn.Conv2d(256, 128, 3, padding = 1)
self.conv7_2 = nn.Conv2d(128, 128, 3, padding = 1)
self.upconv7 = nn.Conv2d(128, 64, 1)
self.bn7 = nn.BatchNorm2d(64)
self.bn7_1 = nn.BatchNorm2d(128)
self.bn7_2 = nn.BatchNorm2d(128)
self.bn7_out = nn.BatchNorm2d(128)
self.conv9_1 = nn.Conv2d(128, 64, 3, padding = 1)
self.conv9_2 = nn.Conv2d(64, 64, 3, padding = 1)
self.bn9_1 = nn.BatchNorm2d(64)
self.bn9_2 = nn.BatchNorm2d(64)
self.conv9_3 = nn.Conv2d(64, colordim, 1)
self.bn9_3 = nn.BatchNorm2d(colordim)
self.bn9 = nn.BatchNorm2d(colordim)
self.maxpool = nn.MaxPool2d(2, stride = 2, return_indices = False, ceil_mode = False)
self.upsample = nn.UpsamplingBilinear2d(scale_factor = 2)
self._initialize_weights()
def forward(self, x1):
x1 = F.relu(self.bn1_2(self.conv1_2(F.relu(self.bn1_1(self.conv1_1(x1))))))
x2 = F.relu(self.bn2_2(self.conv2_2(F.relu(self.bn2_1(self.conv2_1(self.maxpool(x1)))))))
xup = F.relu(self.bn4_2(self.conv4_2(F.relu(self.bn4_1(self.conv4_1(self.maxpool(x2)))))))
xup = self.bn4(self.upconv4(self.upsample(xup)))
xup = self.bn4_out(torch.cat((x2, xup), 1))
xup = F.relu(self.bn7_2(self.conv7_2(F.relu(self.bn7_1(self.conv7_1(xup))))))
xup = self.bn7(self.upconv7(self.upsample(xup)))
xup = self.bn7_out(torch.cat((x1, xup), 1))
xup = F.relu(self.bn9_3(self.conv9_3(F.relu(self.bn9_2(self.conv9_2(F.relu(self.bn9_1(self.conv9_1(xup)))))))))
return F.softsign(self.bn9(xup))
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
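# Smoke test (assumed usage, not part of the original file): the two pooling /
# upsampling stages need spatial dims divisible by 4 for the skip concatenations
# to line up, e.g. a 1x1x64x64 input maps back to 1x1x64x64.
if __name__ == '__main__':
    net = UNet(colordim=1)
    out = net(torch.randn(1, 1, 64, 64))
    print(out.shape)  # torch.Size([1, 1, 64, 64])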
|
python
|
# First, we import a tool to allow text to pop up on a plot when the cursor
# hovers over it. Also, we import a data structure used to store arguments
# of what to plot in Bokeh. Finally, we will use numpy for this section as well!
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure, output_file, show
import numpy as np
# Let's plot a simple 5x5 grid of squares, alternating in color as red and blue.
plot_values = [1,2,3,4,5]
plot_colors = ["red", "blue"]
# How do we tell Bokeh to plot each point in a grid? Let's use a function that
# finds each combination of values from 1-5.
from itertools import product
grid = list(product(plot_values, plot_values))
print(grid)
# The first value is the x coordinate, and the second value is the y coordinate.
# Let's store these in separate lists.
xs, ys = zip(*grid)
print(xs)
print(ys)
# Now we will make a list of colors, alternating between red and blue.
colors = [plot_colors[i%2] for i in range(len(grid))]
print(colors)
# Finally, let's determine the strength of transparency (alpha) for each point,
# where 0 is completely transparent.
alphas = np.linspace(0, 1, len(grid))
# Bokeh likes each of these to be stored in a special dataframe, called
# ColumnDataSource. Let's store our coordinates, colors, and alpha values.
source = ColumnDataSource(
data={
"x": xs,
"y": ys,
"colors": colors,
"alphas": alphas,
}
)
# We are ready to make our interactive Bokeh plot!
output_file("Basic_Example.html", title="Basic Example")
fig = figure(tools="resize, hover, save")
fig.rect("x", "y", 0.9, 0.9, source=source, color="colors",alpha="alphas")
hover = fig.select(dict(type=HoverTool))
hover.tooltips = {
"Value": "@x, @y",
}
show(fig)
|
python
|
from typing import Literal, Optional, Union
class Trust_Region_Options:
border_abstol: float = 1e-10
tol_step: float = 1.0e-10
tol_grad: float = 1.0e-6
abstol_fval: Optional[float] = None
max_stall_iter: Optional[int] = None
init_delta: float = 1.0
max_iter: int
check_rel: float = 1.0e-2
check_abs: Optional[float] = None
    check_iter: Optional[int] = None  # 0 = run the gradient check once before optimization starts; -1 = disable the check entirely; the default None = always check
shaking: Union[Literal["x.shape[0]"], int] = "x.shape[0]"
display: bool = True
def __init__(self, *, max_iter: int) -> None:
self.max_iter = max_iter
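# Example construction (assumed usage): max_iter is the only required, keyword-only
# argument; the remaining class-level defaults can be overridden on the instance.
opts = Trust_Region_Options(max_iter=200)
opts.tol_grad = 1.0e-8
opts.check_iter = 0  # run the gradient check only once, before optimization starts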
|
python
|
import sqlite3
import os
import MLBProjections.MLBProjections.DB.MLB as MLB
import MLBProjections.MLBProjections.Environ as ENV
from pprint import pprint
################################################################################
################################################################################
pitchContactCmd = """
SELECT pitches.game_id,
pitcher_id,
batter_id,
pitch_type_id,
pitch_result_id,
ab_type_id,
pitch_num,
box,
turn,
sequence,
pitch_velocity,
balls,
strikes,
outs,
(CASE WHEN pitcher.throws = batter.bats THEN 1 ELSE 0 END) AS side,
(CASE WHEN base_runners.first_base != -10 THEN 1 ELSE 0 END) AS first_base,
(CASE WHEN base_runners.second_base != -10 THEN 1 ELSE 0 END) AS second_base,
(CASE WHEN base_runners.third_base != -10 THEN 1 ELSE 0 END) AS third_base,
hit_style,
hit_hardness,
hit_angle,
hit_distance
FROM pitches
LEFT JOIN ab_results
ON pitches.pitch_id = ab_results.pitch_id
INNER JOIN pro_players AS pitcher
ON pitches.pitcher_id = pitcher.player_id
INNER JOIN pro_players AS batter
ON pitches.batter_id = batter.player_id
INNER JOIN pitch_locations
ON pitches.pitch_location_id = pitch_locations.pitch_location_id
INNER JOIN pitch_counts
ON pitches.pitch_count_id = pitch_counts.pitch_count_id
INNER JOIN base_runners
ON pitches.base_runners_id = base_runners.base_runners_id
WHERE {0[playerType]}_id = ?
"""
playerNameCmd = "SELECT first_name, last_name FROM pro_players WHERE player_id = ?"
leagueCmd = "SELECT league FROM pro_teams WHERE team_id = ?"
lineupCmd = """
SELECT lineups.player_id, first_name, last_name, batt_order, lineups.pos
FROM lineups
INNER JOIN pro_players
ON lineups.player_id = pro_players.player_id
INNER JOIN (SELECT team_id, MAX(game_id) AS game_id
FROM lineups
WHERE team_id = ?) AS max_id
ON lineups.game_id = max_id.game_id AND lineups.team_id = max_id.team_id
WHERE sub_order = 1 AND lineups.pos != 'P'
ORDER BY batt_order
"""
similarPitcherCmd = """
SELECT pp.player_id,
team_id
FROM pro_players AS pp
INNER JOIN (SELECT pos, throws FROM pro_players WHERE player_id =?) AS a
ON pp.pos = a.pos AND pp.throws = a.throws
INNER JOIN (SELECT pitcher_id, COUNT(pitcher_id) AS pitch_count FROM pitches GROUP BY pitcher_id) AS b
ON pp.player_id = b.pitcher_id
INNER JOIN (SELECT MAX(game_id), player_id, team_id FROM lineups GROUP BY player_id) AS c
ON pp.player_id = c.player_id
WHERE pitch_count >= 100
ORDER BY pp.player_id DESC
"""
similarBatterCmd = """
SELECT pp.player_id,
team_id
FROM pro_players AS pp
INNER JOIN (SELECT pos, bats FROM pro_players WHERE player_id =?) AS a
ON pp.pos = a.pos AND pp.bats = a.bats
INNER JOIN (SELECT batter_id, COUNT(batter_id) AS pitch_count FROM pitches GROUP BY batter_id) AS b
ON pp.player_id = b.batter_id
INNER JOIN (SELECT MAX(game_id), player_id, team_id FROM lineups GROUP BY player_id) AS c
ON pp.player_id = c.player_id
WHERE pitch_count >= 100
ORDER BY pp.player_id DESC
"""
################################################################################
################################################################################
class DatabaseManager:
def __init__(self, db):
self.mlbDB = db
self.mlbDB.openDB()
self.gameDBs = {}
    def __del__(self):
        self.mlbDB.closeDB()
def findPlayer(self, playerId):
return self.mlbDB.fetchOne("SELECT player_id FROM pro_players WHERE player_id = ?", (playerId,))
def addPlayerToDB(self, info):
print("new Player")
self.mlbDB.insert(MLB.proPlayersTable, info=info )
self.mlbDB.commit()
def getLeague(self, teamId):
return self.mlbDB.fetchOne(leagueCmd, (teamId,))[0]
def update(self):
self.mlbDB.update()
def gameDBExists(self, index):
return index in self.gameDBs.keys()
def getRecentLineup(self, teamId):
return self.mlbDB.fetchAll(lineupCmd, (teamId,))
def cloneDB(self, matchup):
gameDB = MLB.MLBGame(matchup.getGameId())
if not os.path.exists(ENV.getPath("game", fileName=matchup.getGameId())):
gameDB.openDB()
info = matchup.getInfo()
self.setMetaData(gameDB, info)
self.setTeams(gameDB, info)
self.setBullpens(gameDB, info)
self.setLineups(gameDB, info)
self.setContacts(gameDB, info)
self.setGames(gameDB)
gameDB.commit()
gameDB.closeDB()
self.gameDBs[matchup.getGameId()] = gameDB
return gameDB
def setGames(self, gameDB):
for gameId in gameDB.fetchAll("SELECT DISTINCT game_id FROM pitch_contacts"):
gameDB.insert(MLB.gamesTable, values=self.mlbDB.fetchOne("SELECT * FROM games WHERE game_id = ?",(gameId[0],)))
def setMetaData(self, gameDB, info):
gameId = info["gameId"]
homeId = info["teams"]["home"]["info"]["team_id"]
awayId = info["teams"]["away"]["info"]["team_id"]
stadiumId = info["teams"]["home"]["info"]["stadium_id"]
gameDB.insert(MLB.metaTable, values=(gameId, homeId, awayId, stadiumId))
stadiumInfo = self.mlbDB.fetchOne("SELECT * FROM stadiums WHERE stadium_id = ?",(stadiumId,))
gameDB.insert(MLB.stadiumsTable, values=stadiumInfo)
def setContacts(self, gameDB, info):
homeId = info["teams"]["home"]["info"]["team_id"]
awayId = info["teams"]["away"]["info"]["team_id"]
for teamId in (homeId, awayId):
for data in self.mlbDB.fetchAll("SELECT hit_style, hit_hardness, hit_angle, hit_distance, ab_type_id FROM pitches INNER JOIN ab_results ON pitches.pitch_id = ab_results.pitch_id INNER JOIN games ON pitches.game_id = games.game_id WHERE (home_id = ? OR away_id = ?) AND hit_style != -1", (teamId, teamId)):
cabId = gameDB.nextKey(MLB.contactAtBatsTable)
try:
gameDB.insert(MLB.contactAtBatsTable, values=[cabId,teamId]+list(data))
except sqlite3.IntegrityError:
pass
def setBullpens(self, gameDB, info):
for key in ("home", "away"):
team = info["teams"][key]
teamId = team["teamId"]
starterId = team["starter"]["playerId"]
self.newPitcher(teamId, starterId, gameDB, True)
for pitcher in team["roster"]["pitchers"]:
self.newPitcher(teamId, pitcher["playerId"], gameDB)
def setLineups(self, gameDB, info):
for key in ("home", "away"):
team = info["teams"][key]
teamId = team["teamId"]
for batterId in [batter["playerId"] for batter in team["roster"]["batters"]]:
self.newBatter(teamId, batterId, gameDB)
if info["league"] == "NL":
self.newBatter(teamId, team["starter"]["playerId"], gameDB)
for batter in team["lineup"]:
lId = gameDB.nextKey(MLB.lineupsTable)
gameDB.insert(MLB.lineupsTable, values=(lId, info["gameId"], teamId, batter[0], batter[3], 1, batter[-1]))
def setTeams(self, gameDB, info):
homeId = info["teams"]["home"]["teamId"]
awayId = info["teams"]["away"]["teamId"]
for teamId in (homeId, awayId):
teamInfo = self.mlbDB.fetchOne("SELECT * FROM pro_teams WHERE team_id = ?", (teamId,))
gameDB.insert(MLB.proTeamsTable, values=teamInfo)
def addPlayer(self, gameDB, playerId):
playerInfo = self.mlbDB.curs.execute("SELECT * FROM pro_players WHERE player_id = ?",(playerId,)).fetchone()
pprint(playerInfo)
gameDB.insert(MLB.proPlayersTable, values=playerInfo)
def newBatter(self, teamId, batterId, gameDB):
if not gameDB.curs.execute("SELECT player_id FROM pro_players WHERE player_id = ?",(batterId,)).fetchone():
self.addPlayer(gameDB, batterId)
pitchCount = self.mlbDB.fetchOne("SELECT COUNT(batter_id) FROM pitches INNER JOIN pro_players ON pitches.batter_id = pro_players.player_id WHERE batter_id = ?", (batterId,))[0]
checkId = batterId
checkTeamId = teamId
if pitchCount < 100:
checkId, checkTeamId = self.mlbDB.fetchOne(similarBatterCmd, (batterId,))
pitchContacts = self.mlbDB.fetchAll(pitchContactCmd.format({"playerType":"batter"}), (checkId,))
for contact in pitchContacts:
pitchContactId = gameDB.nextKey(MLB.pitchContactsTable)
try:
gameDB.insert(MLB.pitchContactsTable, values=[pitchContactId, *contact[:2], batterId, *contact[3:]])
except sqlite3.IntegrityError:
pass
def newPitcher(self, teamId, pitcherId, gameDB, starter=0):
if not gameDB.curs.execute("SELECT player_id FROM pro_players WHERE player_id = ?",(pitcherId,)).fetchone():
self.addPlayer(gameDB, pitcherId)
bpId = gameDB.nextKey(MLB.bullpensTable)
gameDB.insert(MLB.bullpensTable, values=(bpId, teamId, pitcherId, starter))
pitchCount = self.mlbDB.fetchOne("SELECT COUNT(pitcher_id) FROM pitches INNER JOIN pro_players ON pitches.pitcher_id = pro_players.player_id WHERE pitcher_id = ?", (pitcherId,))[0]
checkId = pitcherId
checkTeamId = teamId
try:
if pitchCount < 100:
checkId, checkTeamId = self.mlbDB.fetchOne(similarPitcherCmd, (pitcherId,))
except TypeError:
pass
pitches = self.mlbDB.fetchAll(pitchContactCmd.format({"playerType":"pitcher"}), (checkId,))
for contact in pitches:
pitchContactId = gameDB.nextKey(MLB.pitchContactsTable)
try:
gameDB.insert(MLB.pitchContactsTable, values=[pitchContactId, contact[0], pitcherId, *contact[2:]])
except sqlite3.IntegrityError:
pass
for replace in self.mlbDB.fetchAll("SELECT * FROM pitcher_replace WHERE (remove_id = ? OR replace_id = ?)",(checkId, checkId)):
try:
gameDB.insert(MLB.pitchReplaceTable, values=replace)
except sqlite3.IntegrityError:
pass
################################################################################
################################################################################
|
python
|
#
# Functional Python: The Lambda Lambada (Recursion)
# Python Techdegree
#
# Created by Dulio Denis on 3/22/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------
# Recursion Challenge
# ------------------------------------------------
# Challenge Task 1 of 1
# Finish the prereqs function so that it recursively
# finds all of the prerequisite course titles in courses
# (like "Object-Oriented Python" is a prerequisite for
# "Django Basics").
# You should add() the title of the prerequisite to the pres
# set and then call prereqs again with the child courses.
# In the end, return the prereqs set.
courses = {'count': 2,
'title': 'Django Basics',
'prereqs': [{'count': 3,
'title': 'Object-Oriented Python',
'prereqs': [{'count': 1,
'title': 'Python Collections',
'prereqs': [{'count':0,
'title': 'Python Basics',
'prereqs': []}]},
{'count': 0,
'title': 'Python Basics',
'prereqs': []},
{'count': 0,
'title': 'Setting Up a Local Python Environment',
'prereqs': []}]},
{'count': 0,
'title': 'Flask Basics',
'prereqs': []}]}
def prereqs(data, pres=None):
pres = pres or set()
    # for each prereq in this course's prereqs
for prereq in data['prereqs']:
# add title of this prereq course
pres.add(prereq['title'])
# use recursive call to drill into any further prereqs
prereqs(prereq, pres)
return pres
print(prereqs(courses))
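# For the courses structure above this prints (set order may vary):
# {'Object-Oriented Python', 'Python Collections', 'Python Basics',
#  'Setting Up a Local Python Environment', 'Flask Basics'}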
|
python
|
#!/usr/bin/env python
import os
import random
import requests
import subprocess
import argparse
import datetime
import time
import sys
"""Based off https://github.com/fogleman/primitive/blob/master/bot/main.py
"""
with open(os.path.expanduser('~/.flickr_api_key'), 'r') as key_file:
    FLICKR_API_KEY = key_file.readline().rstrip()
# MODE_NAMES is referenced below but was missing; these names are an assumption
# that mirrors primitive's -m shape modes (0=combo ... 8=polygon)
MODE_NAMES = ['shapes', 'triangles', 'rectangles', 'ellipses', 'circles',
              'rotated rectangles', 'curves', 'rotated ellipses', 'polygons']
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class Config(AttrDict):
def randomize(self):
self.m = random.choice([1, 5, 7])
self.n = random.randint(15, 50) * 10
self.rep = 0
self.a = 128
self.r = 256
def parse(self, text):
text = (text or '').lower()
tokens = text.split()
for i, name in enumerate(MODE_NAMES):
if name in text:
self.m = i
for token in tokens:
try:
self.n = int(token)
except Exception:
pass
def validate(self):
self.m = clamp(self.m, 0, 8)
if self.m == 6:
self.n = random.randint(1400, 2000)
@property
def description(self):
total = self.n + self.n * self.rep
return '%d %s' % (total, MODE_NAMES[self.m])
def clamp(x, lo, hi):
if x < lo:
x = lo
if x > hi:
x = hi
return x
def random_date(max_days_ago=1000):
today = datetime.date.today()
days = random.randint(1, max_days_ago)
d = today - datetime.timedelta(days=days)
return d.strftime('%Y-%m-%d')
def interesting(date=None):
url = 'https://api.flickr.com/services/rest/'
params = dict(
api_key=FLICKR_API_KEY,
format='json',
nojsoncallback=1,
method='flickr.interestingness.getList',
)
if date:
params['date'] = date
r = requests.get(url, params=params)
return r.json()['photos']['photo']
def get_aspect_ratio(p):
url = 'https://api.flickr.com/services/rest/'
params = dict(
api_key=FLICKR_API_KEY,
format='json',
nojsoncallback=1,
method='flickr.photos.getSizes',
photo_id=p['id']
)
r = requests.get(url, params=params)
sizes = r.json()['sizes']['size']
thumbnail = filter(lambda x: x['label']=='Thumbnail', sizes)
return float(thumbnail[0]['width']) / float(thumbnail[0]['height'])
def photo_url(p, size=None):
# See: https://www.flickr.com/services/api/misc.urls.html
if size:
url = 'https://farm%s.staticflickr.com/%s/%s_%s_%s.jpg'
return url % (p['farm'], p['server'], p['id'], p['secret'], size)
else:
url = 'https://farm%s.staticflickr.com/%s/%s_%s.jpg'
return url % (p['farm'], p['server'], p['id'], p['secret'])
def download_photo(url, path):
r = requests.get(url)
with open(path, 'wb') as fp:
fp.write(r.content)
def primitive(primitive_path, **kwargs):
args = []
for k, v in kwargs.items():
if v is None:
continue
args.append('-%s' % k)
args.append(str(v))
args = ' '.join(args)
cmd = '{0} {1}'.format(primitive_path, args)
subprocess.call(cmd, shell=True)
def create_wallpaper(args):
download_path = None
try:
print 'Finding interesting photo...'
photos = interesting(date=random_date())
photo = random.choice(photos)
aspect_ratio = get_aspect_ratio(photo)
print 'Downloading photo...'
url = photo_url(photo, 'z')
download_path = os.path.join('/tmp', photo['id'] + '.png')
download_photo(url, download_path)
output_path = os.path.expanduser(args.output)
output_path = os.path.join(output_path, 'landscape' if aspect_ratio > 1 else 'portrait')
if not os.path.exists(output_path):
os.makedirs(output_path)
config = Config()
config.randomize()
config.validate()
print 'Generating wallpaper with parameters {0}'.format(config)
primitive(args.primitive_path,
i=download_path,
s=args.size,
o='\'{0}\''.format(os.path.join(output_path, photo['id'] + '.png')),
**config)
print 'Done!'
except Exception as e:
print e
finally:
if download_path is not None and os.path.exists(download_path):
os.remove(download_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', help="path to output directory", required=True)
parser.add_argument('-s', '--size', type=int, help="width of output image", required=True)
parser.add_argument('--primitive_path', help="path to primitive executable", default='/usr/local/bin/primitive')
parser.add_argument('-n', '--num', type=int, help="number of wallpapers to generate", default=1)
args = parser.parse_args()
# check network status
max_retries = 10
attempt = 0
response = None
while attempt < max_retries:
attempt += 1
try:
print 'Checking network...'
response = interesting()
break
except:
print 'No network, retrying...'
time.sleep(5)
if response is None:
print 'No network connection'
sys.exit(1)
for n in xrange(args.num):
create_wallpaper(args)
|
python
|
import multiprocessing as mp
import time
def foo_pool(taskQ, x):
print(x)
taskQ.put(x)
return x*x
result_list = []
def log_result(result):
# This is called whenever foo_pool(i) returns a result.
# result_list is modified only by the main process, not the pool workers.
result_list.append(result)
def apply_async_with_callback():
    pool = mp.Pool()
    # a plain mp.Queue() cannot be pickled into pool workers (the tasks fail silently
    # and the callback never fires); use a Manager queue, unbounded so puts never block
    taskQ = mp.Manager().Queue()
for i in range(10):
pool.apply_async(foo_pool, args = (taskQ, i, ), callback = log_result)
pool.close()
pool.join()
print(result_list)
if __name__ == '__main__':
apply_async_with_callback()
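# A debugging note (assumed, not in the original): apply_async swallows worker
# exceptions unless you keep the AsyncResult or pass an error_callback, e.g.
#   pool.apply_async(foo_pool, args=(taskQ, i), callback=log_result,
#                    error_callback=print)
# which is typically what reveals the pickling RuntimeError when a plain
# mp.Queue(4) is handed to the workers.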
|
python
|
# Create a Python script that reads a person's name and shows a welcome message based on what was typed
msg = 'Olá Mundo!'
print(msg)
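# A sketch of the exercise described above (the prompt wording is an assumption):
name = input('What is your name? ')
print(f'Welcome, {name}!')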
|
python
|
import ldap
import json
import socket
from urllib.parse import urlparse
def create_from_env():
import os
auth = LdapAuth(os.environ.get('LDAP_ADDRESS'))
auth.base_dn = os.environ.get('LDAP_BASE_DN')
auth.bind_dn = os.environ.get('LDAP_BIND_DN')
auth.bind_pass = os.environ.get('LDAP_BIND_PASS')
return auth
class LdapAuthException(Exception):
pass
class LdapAuth(object):
def __init__(self, address=None):
self.address = address
self.base_dn = None
self.bind_dn = None
self.bind_pass = None
self.search_template = 'uid=%(username)s'
self.ldap_timeout = 2
self.conn_timeout = 2
def assert_configs(self):
print(json.dumps({
'address': self.address,
'base_dn': self.base_dn,
'bind_dn': self.bind_dn,
'bind_pass': '***' if self.bind_pass else None,
'search_template': self.search_template,
}))
assert self.address is not None
assert self.base_dn is not None
assert self.bind_dn is not None
assert self.bind_pass is not None
assert self.search_template is not None
def check_credentials(self, username, password):
# -> (str msg, bool authorized)
try:
self.whoami(username, password)
return ("OK: "+username, True)
except ldap.LDAPError as e:
return (e.__class__.__name__, False)
except Exception as e:
return (str(e), False)
def check_connection(self):
address = urlparse(self.address)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.conn_timeout)
try:
s.connect((address.hostname, int(address.port)))
s.shutdown(2)
return True
except:
return False
def check_binding(self):
# initialize
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
l = ldap.initialize(self.address)
l.timeout = self.ldap_timeout
try:
l.simple_bind_s(self.bind_dn, self.bind_pass)
whoami = l.whoami_s()
except:
whoami = None
finally:
l.unbind_s()
return whoami is not None and len(whoami) > 0
def whoami(self, username, password):
# initialize
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
l = ldap.initialize(self.address)
l.timeout = self.ldap_timeout
try:
l.simple_bind_s(self.bind_dn, self.bind_pass)
# search user
search_filter = self.search_template % {'username': username}
users = l.search_s(self.base_dn, ldap.SCOPE_SUBTREE, search_filter)
if len(users) == 0:
msg = "User with username '%s' not found on %s" % (username, self.base_dn)
raise LdapAuthException(msg)
if len(users) > 1:
raise LdapAuthException("Multiple users found")
# try to verify user password
user_dn, _ = users[0]
l.simple_bind_s(user_dn, password)
whoami = l.whoami_s()
except:
whoami = None
finally:
l.unbind_s()
if whoami is None or len(whoami) == 0:
raise LdapAuthException("Invalid username/password")
return whoami
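# Example wiring (assumed usage): configuration comes from the LDAP_* environment
# variables read by create_from_env(); 'alice'/'secret' are hypothetical credentials.
if __name__ == '__main__':
    auth = create_from_env()
    auth.assert_configs()
    print('server reachable:', auth.check_connection())
    print('service bind ok:', auth.check_binding())
    print(auth.check_credentials('alice', 'secret'))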
|
python
|
# %% coding=utf-8
import pandas as pd
from atm import ATM
from sklearn.model_selection import train_test_split
beauty_data = pd.read_csv('/data/face/df_input.csv')
select_cols = ['Image', 'label', '0_10_x', '0_10_y', '0_11_x', '0_11_y', '0_12_x', '0_12_y', '0_13_x', '0_13_y',
'0_14_x', '0_14_y', '0_15_x', '0_15_y', '0_16_x', '0_16_y', '0_17_x', '0_17_y', '0_18_x', '0_18_y',
'0_19_x', '0_19_y', '0_1_x', '0_1_y', '0_20_x', '0_20_y', '0_21_x', '0_21_y', '0_22_x', '0_22_y',
'0_23_x', '0_23_y', '0_24_x', '0_24_y', '0_25_x', '0_25_y', '0_26_x', '0_26_y', '0_27_x', '0_27_y',
'0_28_x', '0_28_y', '0_29_x', '0_29_y', '0_2_x', '0_2_y', '0_30_x', '0_30_y', '0_31_x', '0_31_y',
'0_32_x', '0_32_y', '0_33_x', '0_33_y', '0_34_x', '0_34_y', '0_35_x', '0_35_y', '0_36_x', '0_36_y',
'0_37_x', '0_37_y', '0_38_x', '0_38_y', '0_39_x', '0_39_y', '0_3_x', '0_3_y', '0_40_x', '0_40_y',
'0_41_x', '0_41_y', '0_42_x', '0_42_y', '0_43_x', '0_43_y', '0_44_x', '0_44_y', '0_45_x', '0_45_y',
'0_46_x', '0_46_y', '0_47_x', '0_47_y', '0_48_x', '0_48_y', '0_49_x', '0_49_y', '0_4_x', '0_4_y',
'0_50_x', '0_50_y', '0_51_x', '0_51_y', '0_52_x', '0_52_y', '0_53_x', '0_53_y', '0_54_x', '0_54_y',
'0_55_x', '0_55_y', '0_56_x', '0_56_y', '0_57_x', '0_57_y', '0_58_x', '0_58_y', '0_59_x', '0_59_y',
'0_5_x', '0_5_y', '0_60_x', '0_60_y', '0_61_x', '0_61_y', '0_62_x', '0_62_y', '0_63_x', '0_63_y',
'0_64_x', '0_64_y', '0_65_x', '0_65_y', '0_66_x', '0_66_y', '0_67_x', '0_67_y', '0_6_x', '0_6_y',
'0_7_x', '0_7_y', '0_8_x', '0_8_y', '0_9_x', '0_9_y', '10_11_x', '10_11_y', '10_12_x', '10_12_y',
'10_13_x', '10_13_y', '10_14_x', '10_14_y', '10_15_x', '10_15_y', '10_16_x', '10_16_y', '10_17_x',
'10_17_y', '10_18_x', '10_18_y', '10_19_x', '10_19_y', '10_20_x', '10_20_y', '10_21_x', '10_21_y',
'10_22_x', '10_22_y', '10_23_x', '10_23_y', '10_24_x', '10_24_y', '10_25_x', '10_25_y', '10_26_x',
'10_26_y', '10_27_x', '10_27_y', '10_28_x', '10_28_y', '10_29_x', '10_29_y', '10_30_x', '10_30_y',
'10_31_x', '10_31_y', '10_32_x', '10_32_y', '10_33_x', '10_33_y', '10_34_x', '10_34_y', '10_35_x',
'10_35_y', '10_36_x', '10_36_y', '10_37_x', '10_37_y', '10_38_x', '10_38_y', '10_39_x', '10_39_y',
'10_40_x', '10_40_y', '10_41_x', '10_41_y', '10_42_x', '10_42_y', '10_43_x', '10_43_y', '10_44_x',
'10_44_y', '10_45_x', '10_45_y', '10_46_x', '10_46_y', '10_47_x', '10_47_y', '10_48_x', '10_48_y',
'10_49_x', '10_49_y', '10_50_x', '10_50_y', '10_51_x', '10_51_y', '10_52_x', '10_52_y', '10_53_x',
'10_53_y', '10_54_x', '10_54_y', '10_55_x', '10_55_y', '10_56_x', '10_56_y', '10_57_x', '10_57_y',
'10_58_x', '10_58_y', '10_59_x', '10_59_y', '10_60_x', '10_60_y', '10_61_x', '10_61_y', '10_62_x',
'10_62_y', '10_63_x', '10_63_y', '10_64_x', '10_64_y', '10_65_x', '10_65_y', '10_66_x', '10_66_y',
'10_67_x', '10_67_y', '11_12_x', '11_12_y', '11_13_x', '11_13_y', '11_14_x', '11_14_y', '11_15_x',
'11_15_y', '11_16_x', '11_16_y', '11_17_x', '11_17_y', '11_18_x', '11_18_y', '11_19_x', '11_19_y',
'11_20_x', '11_20_y', '11_21_x', '11_21_y', '11_22_x', '11_22_y', '11_23_x', '11_23_y', '11_24_x',
'11_24_y', '11_25_x', '11_25_y', '11_26_x', '11_26_y', '11_27_x', '11_27_y', '11_28_x', '11_28_y',
'11_29_x', '11_29_y', '11_30_x', '11_30_y', '11_31_x', '11_31_y', '11_32_x', '11_32_y', '11_33_x',
'11_33_y', '11_34_x', '11_34_y', '11_35_x', '11_35_y', '11_36_x', '11_36_y', '11_37_x', '11_37_y',
'11_38_x', '11_38_y', '11_39_x', '11_39_y', '11_40_x', '11_40_y', '11_41_x', '11_41_y', '11_42_x',
'11_42_y', '11_43_x', '11_43_y', '11_44_x', '11_44_y', '11_45_x', '11_45_y', '11_46_x', '11_46_y',
'11_47_x', '11_47_y', '11_48_x', '11_48_y', '11_49_x', '11_49_y', '11_50_x', '11_50_y', '11_51_x',
'11_51_y', '11_52_x', '11_52_y', '11_53_x', '11_53_y', '11_54_x', '11_54_y', '11_55_x', '11_55_y',
'11_56_x', '11_56_y', '11_57_x', '11_57_y', '11_58_x', '11_58_y', '11_59_x', '11_59_y', '11_60_x',
'11_60_y', '11_61_x', '11_61_y', '11_62_x', '11_62_y', '11_63_x', '11_63_y', '11_64_x', '11_64_y',
'11_65_x', '11_65_y', '11_66_x', '11_66_y', '11_67_x', '11_67_y', '12_13_x', '12_13_y', '12_14_x',
'12_14_y', '12_15_x', '12_15_y', '12_16_x', '12_16_y', '12_17_x', '12_17_y', '12_18_x', '12_18_y',
'12_19_x', '12_19_y', '12_20_x', '12_20_y', '12_21_x', '12_21_y', '12_22_x', '12_22_y', '12_23_x',
'12_23_y', '12_24_x', '12_24_y', '12_25_x', '12_25_y', '12_26_x', '12_26_y', '12_27_x', '12_27_y',
'12_28_x', '12_28_y', '12_29_x', '12_29_y', '12_30_x', '12_30_y', '12_31_x', '12_31_y', '12_32_x',
'12_32_y', '12_33_x', '12_33_y', '12_34_x', '12_34_y', '12_35_x', '12_35_y', '12_36_x', '12_36_y',
'12_37_x', '12_37_y', '12_38_x', '12_38_y', '12_39_x', '12_39_y', '12_40_x', '12_40_y', '12_41_x',
'12_41_y', '12_42_x', '12_42_y', '12_43_x', '12_43_y', '12_44_x', '12_44_y', '12_45_x', '12_45_y',
'12_46_x', '12_46_y', '12_47_x', '12_47_y', '12_48_x', '12_48_y', '12_49_x', '12_49_y', '12_50_x',
'12_50_y', '12_51_x', '12_51_y', '12_52_x', '12_52_y', '12_53_x', '12_53_y', '12_54_x', '12_54_y',
'12_55_x', '12_55_y', '12_56_x', '12_56_y', '12_57_x', '12_57_y', '12_58_x', '12_58_y', '12_59_x',
'12_59_y', '12_60_x', '12_60_y', '12_61_x', '12_61_y', '12_62_x', '12_62_y', '12_63_x', '12_63_y',
'12_64_x', '12_64_y', '12_65_x', '12_65_y', '12_66_x', '12_66_y', '12_67_x', '12_67_y', '13_14_x',
'13_14_y', '13_15_x', '13_15_y', '13_16_x', '13_16_y', '13_17_x', '13_17_y', '13_18_x', '13_18_y',
'13_19_x', '13_19_y', '13_20_x', '13_20_y', '13_21_x', '13_21_y', '13_22_x', '13_22_y', '13_23_x',
'13_23_y', '13_24_x', '13_24_y', '13_25_x', '13_25_y', '13_26_x', '13_26_y', '13_27_x', '13_27_y',
'13_28_x', '13_28_y', '13_29_x', '13_29_y', '13_30_x', '13_30_y', '13_31_x', '13_31_y', '13_32_x',
'13_32_y', '13_33_x', '13_33_y', '13_34_x', '13_34_y', '13_35_x', '13_35_y', '13_36_x', '13_36_y',
'13_37_x', '13_37_y', '13_38_x', '13_38_y', '13_39_x', '13_39_y', '13_40_x', '13_40_y', '13_41_x',
'13_41_y', '13_42_x', '13_42_y', '13_43_x', '13_43_y', '13_44_x', '13_44_y', '13_45_x', '13_45_y',
'13_46_x', '13_46_y', '13_47_x', '13_47_y', '13_48_x', '13_48_y', '13_49_x', '13_49_y', '13_50_x',
'13_50_y', '13_51_x', '13_51_y', '13_52_x', '13_52_y', '13_53_x', '13_53_y', '13_54_x', '13_54_y',
'13_55_x', '13_55_y', '13_56_x', '13_56_y', '13_57_x', '13_57_y', '13_58_x', '13_58_y', '13_59_x',
'13_59_y', '13_60_x', '13_60_y', '13_61_x', '13_61_y', '13_62_x', '13_62_y', '13_63_x', '13_63_y',
'13_64_x', '13_64_y', '13_65_x', '13_65_y', '13_66_x', '13_66_y', '13_67_x', '13_67_y', '14_15_x',
'14_15_y', '14_16_x', '14_16_y', '14_17_x', '14_17_y', '14_18_x', '14_18_y', '14_19_x', '14_19_y',
'14_20_x', '14_20_y', '14_21_x', '14_21_y', '14_22_x', '14_22_y', '14_23_x', '14_23_y', '14_24_x',
'14_24_y', '14_25_x', '14_25_y', '14_26_x', '14_26_y', '14_27_x', '14_27_y', '14_28_x', '14_28_y',
'14_29_x', '14_29_y', '14_30_x', '14_30_y', '14_31_x', '14_31_y', '14_32_x', '14_32_y', '14_33_x',
'14_33_y', '14_34_x', '14_34_y', '14_35_x', '14_35_y', '14_36_x', '14_36_y', '14_37_x', '14_37_y',
'14_38_x', '14_38_y', '14_39_x', '14_39_y', '14_40_x', '14_40_y', '14_41_x', '14_41_y', '14_42_x',
'14_42_y', '14_43_x', '14_43_y', '14_44_x', '14_44_y', '14_45_x', '14_45_y', '14_46_x', '14_46_y',
'14_47_x', '14_47_y', '14_48_x', '14_48_y', '14_49_x', '14_49_y', '14_50_x', '14_50_y', '14_51_x',
'14_51_y', '14_52_x', '14_52_y', '14_53_x', '14_53_y', '14_54_x', '14_54_y', '14_55_x', '14_55_y',
'14_56_x', '14_56_y', '14_57_x', '14_57_y', '14_58_x', '14_58_y', '14_59_x', '14_59_y', '14_60_x',
'14_60_y', '14_61_x', '14_61_y', '14_62_x', '14_62_y', '14_63_x', '14_63_y', '14_64_x', '14_64_y',
'14_65_x', '14_65_y', '14_66_x', '14_66_y', '14_67_x', '14_67_y', '15_16_x', '15_16_y', '15_17_x',
'15_17_y', '15_18_x', '15_18_y', '15_19_x', '15_19_y', '15_20_x', '15_20_y', '15_21_x', '15_21_y',
'15_22_x', '15_22_y', '15_23_x', '15_23_y', '15_24_x', '15_24_y', '15_25_x', '15_25_y', '15_26_x',
'15_26_y', '15_27_x', '15_27_y', '15_28_x', '15_28_y', '15_29_x', '15_29_y', '15_30_x', '15_30_y',
'15_31_x', '15_31_y', '15_32_x', '15_32_y', '15_33_x', '15_33_y', '15_34_x', '15_34_y', '15_35_x',
'15_35_y', '15_36_x', '15_36_y', '15_37_x', '15_37_y', '15_38_x', '15_38_y', '15_39_x', '15_39_y',
'15_40_x', '15_40_y', '15_41_x', '15_41_y', '15_42_x', '15_42_y', '15_43_x', '15_43_y', '15_44_x',
'15_44_y', '15_45_x', '15_45_y', '15_46_x', '15_46_y', '15_47_x', '15_47_y', '15_48_x', '15_48_y',
'15_49_x', '15_49_y', '15_50_x', '15_50_y', '15_51_x', '15_51_y', '15_52_x', '15_52_y', '15_53_x',
'15_53_y', '15_54_x', '15_54_y', '15_55_x', '15_55_y', '15_56_x', '15_56_y', '15_57_x', '15_57_y',
'15_58_x', '15_58_y', '15_59_x', '15_59_y', '15_60_x', '15_60_y', '15_61_x', '15_61_y', '15_62_x',
'15_62_y', '15_63_x', '15_63_y', '15_64_x', '15_64_y', '15_65_x', '15_65_y', '15_66_x', '15_66_y',
'15_67_x', '15_67_y', '16_17_x', '16_17_y', '16_18_x', '16_18_y', '16_19_x', '16_19_y', '16_20_x',
'16_20_y', '16_21_x', '16_21_y', '16_22_x', '16_22_y', '16_23_x', '16_23_y', '16_24_x', '16_24_y',
'16_25_x', '16_25_y', '16_26_x', '16_26_y', '16_27_x', '16_27_y', '16_28_x', '16_28_y', '16_29_x',
'16_29_y', '16_30_x', '16_30_y', '16_31_x', '16_31_y', '16_32_x', '16_32_y', '16_33_x', '16_33_y',
'16_34_x', '16_34_y', '16_35_x', '16_35_y', '16_36_x', '16_36_y', '16_37_x', '16_37_y', '16_38_x',
'16_38_y', '16_39_x', '16_39_y', '16_40_x', '16_40_y', '16_41_x', '16_41_y', '16_42_x', '16_42_y',
'16_43_x', '16_43_y', '16_44_x', '16_44_y', '16_45_x', '16_45_y', '16_46_x', '16_46_y', '16_47_x',
'16_47_y', '16_48_x', '16_48_y', '16_49_x', '16_49_y', '16_50_x', '16_50_y', '16_51_x', '16_51_y',
'16_52_x', '16_52_y', '16_53_x', '16_53_y', '16_54_x', '16_54_y', '16_55_x', '16_55_y', '16_56_x',
'16_56_y', '16_57_x', '16_57_y', '16_58_x', '16_58_y', '16_59_x', '16_59_y', '16_60_x', '16_60_y',
'16_61_x', '16_61_y', '16_62_x', '16_62_y', '16_63_x', '16_63_y', '16_64_x', '16_64_y', '16_65_x',
'16_65_y', '16_66_x', '16_66_y', '16_67_x', '16_67_y', '17_18_x', '17_18_y', '17_19_x', '17_19_y',
'17_20_x', '17_20_y', '17_21_x', '17_21_y', '17_22_x', '17_22_y', '17_23_x', '17_23_y', '17_24_x',
'17_24_y', '17_25_x', '17_25_y', '17_26_x', '17_26_y', '17_27_x', '17_27_y', '17_28_x', '17_28_y',
'17_29_x', '17_29_y', '17_30_x', '17_30_y', '17_31_x', '17_31_y', '17_32_x', '17_32_y', '17_33_x',
'17_33_y', '17_34_x', '17_34_y', '17_35_x', '17_35_y', '17_36_x', '17_36_y', '17_37_x', '17_37_y',
'17_38_x', '17_38_y', '17_39_x', '17_39_y', '17_40_x', '17_40_y', '17_41_x', '17_41_y', '17_42_x',
'17_42_y', '17_43_x', '17_43_y', '17_44_x', '17_44_y', '17_45_x', '17_45_y', '17_46_x', '17_46_y',
'17_47_x', '17_47_y', '17_48_x', '17_48_y', '17_49_x', '17_49_y', '17_50_x', '17_50_y', '17_51_x',
'17_51_y', '17_52_x', '17_52_y', '17_53_x', '17_53_y', '17_54_x', '17_54_y', '17_55_x', '17_55_y',
'17_56_x', '17_56_y', '17_57_x', '17_57_y', '17_58_x', '17_58_y', '17_59_x', '17_59_y', '17_60_x',
'17_60_y', '17_61_x', '17_61_y', '17_62_x', '17_62_y', '17_63_x', '17_63_y', '17_64_x', '17_64_y',
'17_65_x', '17_65_y', '17_66_x', '17_66_y', '17_67_x', '17_67_y', '18_19_x', '18_19_y', '18_20_x',
'18_20_y', '18_21_x', '18_21_y', '18_22_x', '18_22_y', '18_23_x', '18_23_y', '18_24_x', '18_24_y',
'18_25_x', '18_25_y', '18_26_x', '18_26_y', '18_27_x', '18_27_y', '18_28_x', '18_28_y', '18_29_x',
'18_29_y', '18_30_x', '18_30_y', '18_31_x', '18_31_y', '18_32_x', '18_32_y', '18_33_x', '18_33_y',
'18_34_x', '18_34_y', '18_35_x', '18_35_y', '18_36_x', '18_36_y', '18_37_x', '18_37_y', '18_38_x',
'18_38_y', '18_39_x', '18_39_y', '18_40_x', '18_40_y', '18_41_x', '18_41_y', '18_42_x', '18_42_y',
'18_43_x', '18_43_y', '18_44_x', '18_44_y', '18_45_x', '18_45_y', '18_46_x', '18_46_y', '18_47_x',
'18_47_y', '18_48_x', '18_48_y', '18_49_x', '18_49_y', '18_50_x', '18_50_y', '18_51_x', '18_51_y',
'18_52_x', '18_52_y', '18_53_x', '18_53_y', '18_54_x', '18_54_y', '18_55_x', '18_55_y', '18_56_x',
'18_56_y', '18_57_x', '18_57_y', '18_58_x', '18_58_y', '18_59_x', '18_59_y', '18_60_x', '18_60_y',
'18_61_x', '18_61_y', '18_62_x', '18_62_y', '18_63_x', '18_63_y', '18_64_x', '18_64_y', '18_65_x',
'18_65_y', '18_66_x', '18_66_y', '18_67_x', '18_67_y', '19_20_x', '19_20_y', '19_21_x', '19_21_y',
'19_22_x', '19_22_y', '19_23_x', '19_23_y', '19_24_x', '19_24_y', '19_25_x', '19_25_y', '19_26_x',
'19_26_y', '19_27_x', '19_27_y', '19_28_x', '19_28_y', '19_29_x', '19_29_y', '19_30_x', '19_30_y',
'19_31_x', '19_31_y', '19_32_x', '19_32_y', '19_33_x', '19_33_y', '19_34_x', '19_34_y', '19_35_x',
'19_35_y', '19_36_x', '19_36_y', '19_37_x', '19_37_y', '19_38_x', '19_38_y', '19_39_x', '19_39_y',
'19_40_x', '19_40_y', '19_41_x', '19_41_y', '19_42_x', '19_42_y', '19_43_x', '19_43_y', '19_44_x',
'19_44_y', '19_45_x', '19_45_y', '19_46_x', '19_46_y', '19_47_x', '19_47_y', '19_48_x', '19_48_y',
'19_49_x', '19_49_y', '19_50_x', '19_50_y', '19_51_x', '19_51_y', '19_52_x', '19_52_y', '19_53_x',
'19_53_y', '19_54_x', '19_54_y', '19_55_x', '19_55_y', '19_56_x', '19_56_y', '19_57_x', '19_57_y',
'19_58_x', '19_58_y', '19_59_x', '19_59_y', '19_60_x', '19_60_y', '19_61_x', '19_61_y', '19_62_x',
'19_62_y', '19_63_x', '19_63_y', '19_64_x', '19_64_y', '19_65_x', '19_65_y', '19_66_x', '19_66_y',
'19_67_x', '19_67_y', '1_10_x', '1_10_y', '1_11_x', '1_11_y', '1_12_x', '1_12_y', '1_13_x', '1_13_y',
'1_14_x', '1_14_y', '1_15_x', '1_15_y', '1_16_x', '1_16_y', '1_17_x', '1_17_y', '1_18_x', '1_18_y',
'1_19_x', '1_19_y', '1_20_x', '1_20_y', '1_21_x', '1_21_y', '1_22_x', '1_22_y', '1_23_x', '1_23_y',
'1_24_x', '1_24_y', '1_25_x', '1_25_y', '1_26_x', '1_26_y', '1_27_x', '1_27_y', '1_28_x', '1_28_y',
'1_29_x', '1_29_y', '1_2_x', '1_2_y', '1_30_x', '1_30_y', '1_31_x', '1_31_y', '1_32_x', '1_32_y',
'1_33_x', '1_33_y', '1_34_x', '1_34_y', '1_35_x', '1_35_y', '1_36_x', '1_36_y', '1_37_x', '1_37_y',
'1_38_x', '1_38_y', '1_39_x', '1_39_y', '1_3_x', '1_3_y', '1_40_x', '1_40_y', '1_41_x', '1_41_y',
'1_42_x', '1_42_y', '1_43_x', '1_43_y', '1_44_x', '1_44_y', '1_45_x', '1_45_y', '1_46_x', '1_46_y',
'1_47_x', '1_47_y', '1_48_x', '1_48_y', '1_49_x', '1_49_y', '1_4_x', '1_4_y', '1_50_x', '1_50_y',
'1_51_x', '1_51_y', '1_52_x', '1_52_y', '1_53_x', '1_53_y', '1_54_x', '1_54_y', '1_55_x', '1_55_y',
'1_56_x', '1_56_y', '1_57_x', '1_57_y', '1_58_x', '1_58_y', '1_59_x', '1_59_y', '1_5_x', '1_5_y',
'1_60_x', '1_60_y', '1_61_x', '1_61_y', '1_62_x', '1_62_y', '1_63_x', '1_63_y', '1_64_x', '1_64_y',
'1_65_x', '1_65_y', '1_66_x', '1_66_y', '1_67_x', '1_67_y', '1_6_x', '1_6_y', '1_7_x', '1_7_y', '1_8_x',
'1_8_y', '1_9_x', '1_9_y', '20_21_x', '20_21_y', '20_22_x', '20_22_y', '20_23_x', '20_23_y', '20_24_x',
'20_24_y', '20_25_x', '20_25_y', '20_26_x', '20_26_y', '20_27_x', '20_27_y', '20_28_x', '20_28_y',
'20_29_x', '20_29_y', '20_30_x', '20_30_y', '20_31_x', '20_31_y', '20_32_x', '20_32_y', '20_33_x',
'20_33_y', '20_34_x', '20_34_y', '20_35_x', '20_35_y', '20_36_x', '20_36_y', '20_37_x', '20_37_y',
'20_38_x', '20_38_y', '20_39_x', '20_39_y', '20_40_x', '20_40_y', '20_41_x', '20_41_y', '20_42_x',
'20_42_y', '20_43_x', '20_43_y', '20_44_x', '20_44_y', '20_45_x', '20_45_y', '20_46_x', '20_46_y',
'20_47_x', '20_47_y', '20_48_x', '20_48_y', '20_49_x', '20_49_y', '20_50_x', '20_50_y', '20_51_x',
'20_51_y', '20_52_x', '20_52_y', '20_53_x', '20_53_y', '20_54_x', '20_54_y', '20_55_x', '20_55_y',
'20_56_x', '20_56_y', '20_57_x', '20_57_y', '20_58_x', '20_58_y', '20_59_x', '20_59_y', '20_60_x',
'20_60_y', '20_61_x', '20_61_y', '20_62_x', '20_62_y', '20_63_x', '20_63_y', '20_64_x', '20_64_y',
'20_65_x', '20_65_y', '20_66_x', '20_66_y', '20_67_x', '20_67_y', '21_22_x', '21_22_y', '21_23_x',
'21_23_y', '21_24_x', '21_24_y', '21_25_x', '21_25_y', '21_26_x', '21_26_y', '21_27_x', '21_27_y',
'21_28_x', '21_28_y', '21_29_x', '21_29_y', '21_30_x', '21_30_y', '21_31_x', '21_31_y', '21_32_x',
'21_32_y', '21_33_x', '21_33_y', '21_34_x', '21_34_y', '21_35_x', '21_35_y', '21_36_x', '21_36_y',
'21_37_x', '21_37_y', '21_38_x', '21_38_y', '21_39_x', '21_39_y', '21_40_x', '21_40_y', '21_41_x',
'21_41_y', '21_42_x', '21_42_y', '21_43_x', '21_43_y', '21_44_x', '21_44_y', '21_45_x', '21_45_y',
'21_46_x', '21_46_y', '21_47_x', '21_47_y', '21_48_x', '21_48_y', '21_49_x', '21_49_y', '21_50_x',
'21_50_y', '21_51_x', '21_51_y', '21_52_x', '21_52_y', '21_53_x', '21_53_y', '21_54_x', '21_54_y',
'21_55_x', '21_55_y', '21_56_x', '21_56_y', '21_57_x', '21_57_y', '21_58_x', '21_58_y', '21_59_x',
'21_59_y', '21_60_x', '21_60_y', '21_61_x', '21_61_y', '21_62_x', '21_62_y', '21_63_x', '21_63_y',
'21_64_x', '21_64_y', '21_65_x', '21_65_y', '21_66_x', '21_66_y', '21_67_x', '21_67_y', '22_23_x',
'22_23_y', '22_24_x', '22_24_y', '22_25_x', '22_25_y', '22_26_x', '22_26_y', '22_27_x', '22_27_y',
'22_28_x', '22_28_y', '22_29_x', '22_29_y', '22_30_x', '22_30_y', '22_31_x', '22_31_y', '22_32_x',
'22_32_y', '22_33_x', '22_33_y', '22_34_x', '22_34_y', '22_35_x', '22_35_y', '22_36_x', '22_36_y',
'22_37_x', '22_37_y', '22_38_x', '22_38_y', '22_39_x', '22_39_y', '22_40_x', '22_40_y', '22_41_x',
'22_41_y', '22_42_x', '22_42_y', '22_43_x', '22_43_y', '22_44_x', '22_44_y', '22_45_x', '22_45_y',
'22_46_x', '22_46_y', '22_47_x', '22_47_y', '22_48_x', '22_48_y', '22_49_x', '22_49_y', '22_50_x',
'22_50_y', '22_51_x', '22_51_y', '22_52_x', '22_52_y', '22_53_x', '22_53_y', '22_54_x', '22_54_y',
'22_55_x', '22_55_y', '22_56_x', '22_56_y', '22_57_x', '22_57_y', '22_58_x', '22_58_y', '22_59_x',
'22_59_y', '22_60_x', '22_60_y', '22_61_x', '22_61_y', '22_62_x', '22_62_y', '22_63_x', '22_63_y',
'22_64_x', '22_64_y', '22_65_x', '22_65_y', '22_66_x', '22_66_y', '22_67_x', '22_67_y', '23_24_x',
'23_24_y', '23_25_x', '23_25_y', '23_26_x', '23_26_y', '23_27_x', '23_27_y', '23_28_x', '23_28_y',
'23_29_x', '23_29_y', '23_30_x', '23_30_y', '23_31_x', '23_31_y', '23_32_x', '23_32_y', '23_33_x',
'23_33_y', '23_34_x', '23_34_y', '23_35_x', '23_35_y', '23_36_x', '23_36_y', '23_37_x', '23_37_y',
'23_38_x', '23_38_y', '23_39_x', '23_39_y', '23_40_x', '23_40_y', '23_41_x', '23_41_y', '23_42_x',
'23_42_y', '23_43_x', '23_43_y', '23_44_x', '23_44_y', '23_45_x', '23_45_y', '23_46_x', '23_46_y',
'23_47_x', '23_47_y', '23_48_x', '23_48_y', '23_49_x', '23_49_y', '23_50_x', '23_50_y', '23_51_x',
'23_51_y', '23_52_x', '23_52_y', '23_53_x', '23_53_y', '23_54_x', '23_54_y', '23_55_x', '23_55_y',
'23_56_x', '23_56_y', '23_57_x', '23_57_y', '23_58_x', '23_58_y', '23_59_x', '23_59_y', '23_60_x',
'23_60_y', '23_61_x', '23_61_y', '23_62_x', '23_62_y', '23_63_x', '23_63_y', '23_64_x', '23_64_y',
'23_65_x', '23_65_y', '23_66_x', '23_66_y', '23_67_x', '23_67_y', '24_25_x', '24_25_y', '24_26_x',
'24_26_y', '24_27_x', '24_27_y', '24_28_x', '24_28_y', '24_29_x', '24_29_y', '24_30_x', '24_30_y',
'24_31_x', '24_31_y', '24_32_x', '24_32_y', '24_33_x', '24_33_y', '24_34_x', '24_34_y', '24_35_x',
'24_35_y', '24_36_x', '24_36_y', '24_37_x', '24_37_y', '24_38_x', '24_38_y', '24_39_x', '24_39_y',
'24_40_x', '24_40_y', '24_41_x', '24_41_y', '24_42_x', '24_42_y', '24_43_x', '24_43_y', '24_44_x',
'24_44_y', '24_45_x', '24_45_y', '24_46_x', '24_46_y', '24_47_x', '24_47_y', '24_48_x', '24_48_y',
'24_49_x', '24_49_y', '24_50_x', '24_50_y', '24_51_x', '24_51_y', '24_52_x', '24_52_y', '24_53_x',
'24_53_y', '24_54_x', '24_54_y', '24_55_x', '24_55_y', '24_56_x', '24_56_y', '24_57_x', '24_57_y',
'24_58_x', '24_58_y', '24_59_x', '24_59_y', '24_60_x', '24_60_y', '24_61_x', '24_61_y', '24_62_x',
'24_62_y', '24_63_x', '24_63_y', '24_64_x', '24_64_y', '24_65_x', '24_65_y', '24_66_x', '24_66_y',
'24_67_x', '24_67_y', '25_26_x', '25_26_y', '25_27_x', '25_27_y', '25_28_x', '25_28_y', '25_29_x',
'25_29_y', '25_30_x', '25_30_y', '25_31_x', '25_31_y', '25_32_x', '25_32_y', '25_33_x', '25_33_y',
'25_34_x', '25_34_y', '25_35_x', '25_35_y', '25_36_x', '25_36_y', '25_37_x', '25_37_y', '25_38_x',
'25_38_y', '25_39_x', '25_39_y', '25_40_x', '25_40_y', '25_41_x', '25_41_y', '25_42_x', '25_42_y',
'25_43_x', '25_43_y', '25_44_x', '25_44_y', '25_45_x', '25_45_y', '25_46_x', '25_46_y', '25_47_x',
'25_47_y', '25_48_x', '25_48_y', '25_49_x', '25_49_y', '25_50_x', '25_50_y', '25_51_x', '25_51_y',
'25_52_x', '25_52_y', '25_53_x', '25_53_y', '25_54_x', '25_54_y', '25_55_x', '25_55_y', '25_56_x',
'25_56_y', '25_57_x', '25_57_y', '25_58_x', '25_58_y', '25_59_x', '25_59_y', '25_60_x', '25_60_y',
'25_61_x', '25_61_y', '25_62_x', '25_62_y', '25_63_x', '25_63_y', '25_64_x', '25_64_y', '25_65_x',
'25_65_y', '25_66_x', '25_66_y', '25_67_x', '25_67_y', '26_27_x', '26_27_y', '26_28_x', '26_28_y',
'26_29_x', '26_29_y', '26_30_x', '26_30_y', '26_31_x', '26_31_y', '26_32_x', '26_32_y', '26_33_x',
'26_33_y', '26_34_x', '26_34_y', '26_35_x', '26_35_y', '26_36_x', '26_36_y', '26_37_x', '26_37_y',
'26_38_x', '26_38_y', '26_39_x', '26_39_y', '26_40_x', '26_40_y', '26_41_x', '26_41_y', '26_42_x',
'26_42_y', '26_43_x', '26_43_y', '26_44_x', '26_44_y', '26_45_x', '26_45_y', '26_46_x', '26_46_y',
'26_47_x', '26_47_y', '26_48_x', '26_48_y', '26_49_x', '26_49_y', '26_50_x', '26_50_y', '26_51_x',
'26_51_y', '26_52_x', '26_52_y', '26_53_x', '26_53_y', '26_54_x', '26_54_y', '26_55_x', '26_55_y',
'26_56_x', '26_56_y', '26_57_x', '26_57_y', '26_58_x', '26_58_y', '26_59_x', '26_59_y', '26_60_x',
'26_60_y', '26_61_x', '26_61_y', '26_62_x', '26_62_y', '26_63_x', '26_63_y', '26_64_x', '26_64_y',
'26_65_x', '26_65_y', '26_66_x', '26_66_y', '26_67_x', '26_67_y', '27_28_x', '27_28_y', '27_29_x',
'27_29_y', '27_30_x', '27_30_y', '27_31_x', '27_31_y', '27_32_x', '27_32_y', '27_33_x', '27_33_y',
'27_34_x', '27_34_y', '27_35_x', '27_35_y', '27_36_x', '27_36_y', '27_37_x', '27_37_y', '27_38_x',
'27_38_y', '27_39_x', '27_39_y', '27_40_x', '27_40_y', '27_41_x', '27_41_y', '27_42_x', '27_42_y',
'27_43_x', '27_43_y', '27_44_x', '27_44_y', '27_45_x', '27_45_y', '27_46_x', '27_46_y', '27_47_x',
'27_47_y', '27_48_x', '27_48_y', '27_49_x', '27_49_y', '27_50_x', '27_50_y', '27_51_x', '27_51_y',
'27_52_x', '27_52_y', '27_53_x', '27_53_y', '27_54_x', '27_54_y', '27_55_x', '27_55_y', '27_56_x',
'27_56_y', '27_57_x', '27_57_y', '27_58_x', '27_58_y', '27_59_x', '27_59_y', '27_60_x', '27_60_y',
'27_61_x', '27_61_y', '27_62_x', '27_62_y', '27_63_x', '27_63_y', '27_64_x', '27_64_y', '27_65_x',
'27_65_y', '27_66_x', '27_66_y', '27_67_x', '27_67_y', '28_29_x', '28_29_y', '28_30_x', '28_30_y',
'28_31_x', '28_31_y', '28_32_x', '28_32_y', '28_33_x', '28_33_y', '28_34_x', '28_34_y', '28_35_x',
'28_35_y', '28_36_x', '28_36_y', '28_37_x', '28_37_y', '28_38_x', '28_38_y', '28_39_x', '28_39_y',
'28_40_x', '28_40_y', '28_41_x', '28_41_y', '28_42_x', '28_42_y', '28_43_x', '28_43_y', '28_44_x',
'28_44_y', '28_45_x', '28_45_y', '28_46_x', '28_46_y', '28_47_x', '28_47_y', '28_48_x', '28_48_y',
'28_49_x', '28_49_y', '28_50_x', '28_50_y', '28_51_x', '28_51_y', '28_52_x', '28_52_y', '28_53_x',
'28_53_y', '28_54_x', '28_54_y', '28_55_x', '28_55_y', '28_56_x', '28_56_y', '28_57_x', '28_57_y',
'28_58_x', '28_58_y', '28_59_x', '28_59_y', '28_60_x', '28_60_y', '28_61_x', '28_61_y', '28_62_x',
'28_62_y', '28_63_x', '28_63_y', '28_64_x', '28_64_y', '28_65_x', '28_65_y', '28_66_x', '28_66_y',
'28_67_x', '28_67_y', '29_30_x', '29_30_y', '29_31_x', '29_31_y', '29_32_x', '29_32_y', '29_33_x',
'29_33_y', '29_34_x', '29_34_y', '29_35_x', '29_35_y', '29_36_x', '29_36_y', '29_37_x', '29_37_y',
'29_38_x', '29_38_y', '29_39_x', '29_39_y', '29_40_x', '29_40_y', '29_41_x', '29_41_y', '29_42_x',
'29_42_y', '29_43_x', '29_43_y', '29_44_x', '29_44_y', '29_45_x', '29_45_y', '29_46_x', '29_46_y',
'29_47_x', '29_47_y', '29_48_x', '29_48_y', '29_49_x', '29_49_y', '29_50_x', '29_50_y', '29_51_x',
'29_51_y', '29_52_x', '29_52_y', '29_53_x', '29_53_y', '29_54_x', '29_54_y', '29_55_x', '29_55_y',
'29_56_x', '29_56_y', '29_57_x', '29_57_y', '29_58_x', '29_58_y', '29_59_x', '29_59_y', '29_60_x',
'29_60_y', '29_61_x', '29_61_y', '29_62_x', '29_62_y', '29_63_x', '29_63_y', '29_64_x', '29_64_y',
'29_65_x', '29_65_y', '29_66_x', '29_66_y', '29_67_x', '29_67_y', '2_10_x', '2_10_y', '2_11_x', '2_11_y',
'2_12_x', '2_12_y', '2_13_x', '2_13_y', '2_14_x', '2_14_y', '2_15_x', '2_15_y', '2_16_x', '2_16_y',
'2_17_x', '2_17_y', '2_18_x', '2_18_y', '2_19_x', '2_19_y', '2_20_x', '2_20_y', '2_21_x', '2_21_y',
'2_22_x', '2_22_y', '2_23_x', '2_23_y', '2_24_x', '2_24_y', '2_25_x', '2_25_y', '2_26_x', '2_26_y',
'2_27_x', '2_27_y', '2_28_x', '2_28_y', '2_29_x', '2_29_y', '2_30_x', '2_30_y', '2_31_x', '2_31_y',
'2_32_x', '2_32_y', '2_33_x', '2_33_y', '2_34_x', '2_34_y', '2_35_x', '2_35_y', '2_36_x', '2_36_y',
'2_37_x', '2_37_y', '2_38_x', '2_38_y', '2_39_x', '2_39_y', '2_3_x', '2_3_y', '2_40_x', '2_40_y',
'2_41_x', '2_41_y', '2_42_x', '2_42_y', '2_43_x', '2_43_y', '2_44_x', '2_44_y', '2_45_x', '2_45_y',
'2_46_x', '2_46_y', '2_47_x', '2_47_y', '2_48_x', '2_48_y', '2_49_x', '2_49_y', '2_4_x', '2_4_y',
'2_50_x', '2_50_y', '2_51_x', '2_51_y', '2_52_x', '2_52_y', '2_53_x', '2_53_y', '2_54_x', '2_54_y',
'2_55_x', '2_55_y', '2_56_x', '2_56_y', '2_57_x', '2_57_y', '2_58_x', '2_58_y', '2_59_x', '2_59_y',
'2_5_x', '2_5_y', '2_60_x', '2_60_y', '2_61_x', '2_61_y', '2_62_x', '2_62_y', '2_63_x', '2_63_y',
'2_64_x', '2_64_y', '2_65_x', '2_65_y', '2_66_x', '2_66_y', '2_67_x', '2_67_y', '2_6_x', '2_6_y',
'2_7_x', '2_7_y', '2_8_x', '2_8_y', '2_9_x', '2_9_y', '30_31_x', '30_31_y', '30_32_x', '30_32_y',
'30_33_x', '30_33_y', '30_34_x', '30_34_y', '30_35_x', '30_35_y', '30_36_x', '30_36_y', '30_37_x',
'30_37_y', '30_38_x', '30_38_y', '30_39_x', '30_39_y', '30_40_x', '30_40_y', '30_41_x', '30_41_y',
'30_42_x', '30_42_y', '30_43_x', '30_43_y', '30_44_x', '30_44_y', '30_45_x', '30_45_y', '30_46_x',
'30_46_y', '30_47_x', '30_47_y', '30_48_x', '30_48_y', '30_49_x', '30_49_y', '30_50_x', '30_50_y',
'30_51_x', '30_51_y', '30_52_x', '30_52_y', '30_53_x', '30_53_y', '30_54_x', '30_54_y', '30_55_x',
'30_55_y', '30_56_x', '30_56_y', '30_57_x', '30_57_y', '30_58_x', '30_58_y', '30_59_x', '30_59_y',
'30_60_x', '30_60_y', '30_61_x', '30_61_y', '30_62_x', '30_62_y', '30_63_x', '30_63_y', '30_64_x',
'30_64_y', '30_65_x', '30_65_y', '30_66_x', '30_66_y', '30_67_x', '30_67_y', '31_32_x', '31_32_y',
'31_33_x', '31_33_y', '31_34_x', '31_34_y', '31_35_x', '31_35_y', '31_36_x', '31_36_y', '31_37_x',
'31_37_y', '31_38_x', '31_38_y', '31_39_x', '31_39_y', '31_40_x', '31_40_y', '31_41_x', '31_41_y',
'31_42_x', '31_42_y', '31_43_x', '31_43_y', '31_44_x', '31_44_y', '31_45_x', '31_45_y', '31_46_x',
'31_46_y', '31_47_x', '31_47_y', '31_48_x', '31_48_y', '31_49_x', '31_49_y', '31_50_x', '31_50_y',
'31_51_x', '31_51_y', '31_52_x', '31_52_y', '31_53_x', '31_53_y', '31_54_x', '31_54_y', '31_55_x',
'31_55_y', '31_56_x', '31_56_y', '31_57_x', '31_57_y', '31_58_x', '31_58_y', '31_59_x', '31_59_y',
'31_60_x', '31_60_y', '31_61_x', '31_61_y', '31_62_x', '31_62_y', '31_63_x', '31_63_y', '31_64_x',
'31_64_y', '31_65_x', '31_65_y', '31_66_x', '31_66_y', '31_67_x', '31_67_y', '32_33_x', '32_33_y',
'32_34_x', '32_34_y', '32_35_x', '32_35_y', '32_36_x', '32_36_y', '32_37_x', '32_37_y', '32_38_x',
'32_38_y', '32_39_x', '32_39_y', '32_40_x', '32_40_y', '32_41_x', '32_41_y', '32_42_x', '32_42_y',
'32_43_x', '32_43_y', '32_44_x', '32_44_y', '32_45_x', '32_45_y', '32_46_x', '32_46_y', '32_47_x',
'32_47_y', '32_48_x', '32_48_y', '32_49_x', '32_49_y', '32_50_x', '32_50_y', '32_51_x', '32_51_y',
'32_52_x', '32_52_y', '32_53_x', '32_53_y', '32_54_x', '32_54_y', '32_55_x', '32_55_y', '32_56_x',
'32_56_y', '32_57_x', '32_57_y', '32_58_x', '32_58_y', '32_59_x', '32_59_y', '32_60_x', '32_60_y',
'32_61_x', '32_61_y', '32_62_x', '32_62_y', '32_63_x', '32_63_y', '32_64_x', '32_64_y', '32_65_x',
'32_65_y', '32_66_x', '32_66_y', '32_67_x', '32_67_y', '33_34_x', '33_34_y', '33_35_x', '33_35_y',
'33_36_x', '33_36_y', '33_37_x', '33_37_y', '33_38_x', '33_38_y', '33_39_x', '33_39_y', '33_40_x',
'33_40_y', '33_41_x', '33_41_y', '33_42_x', '33_42_y', '33_43_x', '33_43_y', '33_44_x', '33_44_y',
'33_45_x', '33_45_y', '33_46_x', '33_46_y', '33_47_x', '33_47_y', '33_48_x', '33_48_y', '33_49_x',
'33_49_y', '33_50_x', '33_50_y', '33_51_x', '33_51_y', '33_52_x', '33_52_y', '33_53_x', '33_53_y',
'33_54_x', '33_54_y', '33_55_x', '33_55_y', '33_56_x', '33_56_y', '33_57_x', '33_57_y', '33_58_x',
'33_58_y', '33_59_x', '33_59_y', '33_60_x', '33_60_y', '33_61_x', '33_61_y', '33_62_x', '33_62_y',
'33_63_x', '33_63_y', '33_64_x', '33_64_y', '33_65_x', '33_65_y', '33_66_x', '33_66_y', '33_67_x',
'33_67_y', '34_35_x', '34_35_y', '34_36_x', '34_36_y', '34_37_x', '34_37_y', '34_38_x', '34_38_y',
'34_39_x', '34_39_y', '34_40_x', '34_40_y', '34_41_x', '34_41_y', '34_42_x', '34_42_y', '34_43_x',
'34_43_y', '34_44_x', '34_44_y', '34_45_x', '34_45_y', '34_46_x', '34_46_y', '34_47_x', '34_47_y',
'34_48_x', '34_48_y', '34_49_x', '34_49_y', '34_50_x', '34_50_y', '34_51_x', '34_51_y', '34_52_x',
'34_52_y', '34_53_x', '34_53_y', '34_54_x', '34_54_y', '34_55_x', '34_55_y', '34_56_x', '34_56_y',
'34_57_x', '34_57_y', '34_58_x', '34_58_y', '34_59_x', '34_59_y', '34_60_x', '34_60_y', '34_61_x',
'34_61_y', '34_62_x', '34_62_y', '34_63_x', '34_63_y', '34_64_x', '34_64_y', '34_65_x', '34_65_y',
'34_66_x', '34_66_y', '34_67_x', '34_67_y', '35_36_x', '35_36_y', '35_37_x', '35_37_y', '35_38_x',
'35_38_y', '35_39_x', '35_39_y', '35_40_x', '35_40_y', '35_41_x', '35_41_y', '35_42_x', '35_42_y',
'35_43_x', '35_43_y', '35_44_x', '35_44_y', '35_45_x', '35_45_y', '35_46_x', '35_46_y', '35_47_x',
'35_47_y', '35_48_x', '35_48_y', '35_49_x', '35_49_y', '35_50_x', '35_50_y', '35_51_x', '35_51_y',
'35_52_x', '35_52_y', '35_53_x', '35_53_y', '35_54_x', '35_54_y', '35_55_x', '35_55_y', '35_56_x',
'35_56_y', '35_57_x', '35_57_y', '35_58_x', '35_58_y', '35_59_x', '35_59_y', '35_60_x', '35_60_y',
'35_61_x', '35_61_y', '35_62_x', '35_62_y', '35_63_x', '35_63_y', '35_64_x', '35_64_y', '35_65_x',
'35_65_y', '35_66_x', '35_66_y', '35_67_x', '35_67_y', '36_37_x', '36_37_y', '36_38_x', '36_38_y',
'36_39_x', '36_39_y', '36_40_x', '36_40_y', '36_41_x', '36_41_y', '36_42_x', '36_42_y', '36_43_x',
'36_43_y', '36_44_x', '36_44_y', '36_45_x', '36_45_y', '36_46_x', '36_46_y', '36_47_x', '36_47_y',
'36_48_x', '36_48_y', '36_49_x', '36_49_y', '36_50_x', '36_50_y', '36_51_x', '36_51_y', '36_52_x',
'36_52_y', '36_53_x', '36_53_y', '36_54_x', '36_54_y', '36_55_x', '36_55_y', '36_56_x', '36_56_y',
'36_57_x', '36_57_y', '36_58_x', '36_58_y', '36_59_x', '36_59_y', '36_60_x', '36_60_y', '36_61_x',
'36_61_y', '36_62_x', '36_62_y', '36_63_x', '36_63_y', '36_64_x', '36_64_y', '36_65_x', '36_65_y',
'36_66_x', '36_66_y', '36_67_x', '36_67_y', '37_38_x', '37_38_y', '37_39_x', '37_39_y', '37_40_x',
'37_40_y', '37_41_x', '37_41_y', '37_42_x', '37_42_y', '37_43_x', '37_43_y', '37_44_x', '37_44_y',
'37_45_x', '37_45_y', '37_46_x', '37_46_y', '37_47_x', '37_47_y', '37_48_x', '37_48_y', '37_49_x',
'37_49_y', '37_50_x', '37_50_y', '37_51_x', '37_51_y', '37_52_x', '37_52_y', '37_53_x', '37_53_y',
'37_54_x', '37_54_y', '37_55_x', '37_55_y', '37_56_x', '37_56_y', '37_57_x', '37_57_y', '37_58_x',
'37_58_y', '37_59_x', '37_59_y', '37_60_x', '37_60_y', '37_61_x', '37_61_y', '37_62_x', '37_62_y',
'37_63_x', '37_63_y', '37_64_x', '37_64_y', '37_65_x', '37_65_y', '37_66_x', '37_66_y', '37_67_x',
'37_67_y', '38_39_x', '38_39_y', '38_40_x', '38_40_y', '38_41_x', '38_41_y', '38_42_x', '38_42_y',
'38_43_x', '38_43_y', '38_44_x', '38_44_y', '38_45_x', '38_45_y', '38_46_x', '38_46_y', '38_47_x',
'38_47_y', '38_48_x', '38_48_y', '38_49_x', '38_49_y', '38_50_x', '38_50_y', '38_51_x', '38_51_y',
'38_52_x', '38_52_y', '38_53_x', '38_53_y', '38_54_x', '38_54_y', '38_55_x', '38_55_y', '38_56_x',
'38_56_y', '38_57_x', '38_57_y', '38_58_x', '38_58_y', '38_59_x', '38_59_y', '38_60_x', '38_60_y',
'38_61_x', '38_61_y', '38_62_x', '38_62_y', '38_63_x', '38_63_y', '38_64_x', '38_64_y', '38_65_x',
'38_65_y', '38_66_x', '38_66_y', '38_67_x', '38_67_y', '39_40_x', '39_40_y', '39_41_x', '39_41_y',
'39_42_x', '39_42_y', '39_43_x', '39_43_y', '39_44_x', '39_44_y', '39_45_x', '39_45_y', '39_46_x',
'39_46_y', '39_47_x', '39_47_y', '39_48_x', '39_48_y', '39_49_x', '39_49_y', '39_50_x', '39_50_y',
'39_51_x', '39_51_y', '39_52_x', '39_52_y', '39_53_x', '39_53_y', '39_54_x', '39_54_y', '39_55_x',
'39_55_y', '39_56_x', '39_56_y', '39_57_x', '39_57_y', '39_58_x', '39_58_y', '39_59_x', '39_59_y',
'39_60_x', '39_60_y', '39_61_x', '39_61_y', '39_62_x', '39_62_y', '39_63_x', '39_63_y', '39_64_x',
'39_64_y', '39_65_x', '39_65_y', '39_66_x', '39_66_y', '39_67_x', '39_67_y', '3_10_x', '3_10_y',
'3_11_x', '3_11_y', '3_12_x', '3_12_y', '3_13_x', '3_13_y', '3_14_x', '3_14_y', '3_15_x', '3_15_y',
'3_16_x', '3_16_y', '3_17_x', '3_17_y', '3_18_x', '3_18_y', '3_19_x', '3_19_y', '3_20_x', '3_20_y',
'3_21_x', '3_21_y', '3_22_x', '3_22_y', '3_23_x', '3_23_y', '3_24_x', '3_24_y', '3_25_x', '3_25_y',
'3_26_x', '3_26_y', '3_27_x', '3_27_y', '3_28_x', '3_28_y', '3_29_x', '3_29_y', '3_30_x', '3_30_y',
'3_31_x', '3_31_y', '3_32_x', '3_32_y', '3_33_x', '3_33_y', '3_34_x', '3_34_y', '3_35_x', '3_35_y',
'3_36_x', '3_36_y', '3_37_x', '3_37_y', '3_38_x', '3_38_y', '3_39_x', '3_39_y', '3_40_x', '3_40_y',
'3_41_x', '3_41_y', '3_42_x', '3_42_y', '3_43_x', '3_43_y', '3_44_x', '3_44_y', '3_45_x', '3_45_y',
'3_46_x', '3_46_y', '3_47_x', '3_47_y', '3_48_x', '3_48_y', '3_49_x', '3_49_y', '3_4_x', '3_4_y',
'3_50_x', '3_50_y', '3_51_x', '3_51_y', '3_52_x', '3_52_y', '3_53_x', '3_53_y', '3_54_x', '3_54_y',
'3_55_x', '3_55_y', '3_56_x', '3_56_y', '3_57_x', '3_57_y', '3_58_x', '3_58_y', '3_59_x', '3_59_y',
'3_5_x', '3_5_y', '3_60_x', '3_60_y', '3_61_x', '3_61_y', '3_62_x', '3_62_y', '3_63_x', '3_63_y',
'3_64_x', '3_64_y', '3_65_x', '3_65_y', '3_66_x', '3_66_y', '3_67_x', '3_67_y', '3_6_x', '3_6_y',
'3_7_x', '3_7_y', '3_8_x', '3_8_y', '3_9_x', '3_9_y', '40_41_x', '40_41_y', '40_42_x', '40_42_y',
'40_43_x', '40_43_y', '40_44_x', '40_44_y', '40_45_x', '40_45_y', '40_46_x', '40_46_y', '40_47_x',
'40_47_y', '40_48_x', '40_48_y', '40_49_x', '40_49_y', '40_50_x', '40_50_y', '40_51_x', '40_51_y',
'40_52_x', '40_52_y', '40_53_x', '40_53_y', '40_54_x', '40_54_y', '40_55_x', '40_55_y', '40_56_x',
'40_56_y', '40_57_x', '40_57_y', '40_58_x', '40_58_y', '40_59_x', '40_59_y', '40_60_x', '40_60_y',
'40_61_x', '40_61_y', '40_62_x', '40_62_y', '40_63_x', '40_63_y', '40_64_x', '40_64_y', '40_65_x',
'40_65_y', '40_66_x', '40_66_y', '40_67_x', '40_67_y', '41_42_x', '41_42_y', '41_43_x', '41_43_y',
'41_44_x', '41_44_y', '41_45_x', '41_45_y', '41_46_x', '41_46_y', '41_47_x', '41_47_y', '41_48_x',
'41_48_y', '41_49_x', '41_49_y', '41_50_x', '41_50_y', '41_51_x', '41_51_y', '41_52_x', '41_52_y',
'41_53_x', '41_53_y', '41_54_x', '41_54_y', '41_55_x', '41_55_y', '41_56_x', '41_56_y', '41_57_x',
'41_57_y', '41_58_x', '41_58_y', '41_59_x', '41_59_y', '41_60_x', '41_60_y', '41_61_x', '41_61_y',
'41_62_x', '41_62_y', '41_63_x', '41_63_y', '41_64_x', '41_64_y', '41_65_x', '41_65_y', '41_66_x',
'41_66_y', '41_67_x', '41_67_y', '42_43_x', '42_43_y', '42_44_x', '42_44_y', '42_45_x', '42_45_y',
'42_46_x', '42_46_y', '42_47_x', '42_47_y', '42_48_x', '42_48_y', '42_49_x', '42_49_y', '42_50_x',
'42_50_y', '42_51_x', '42_51_y', '42_52_x', '42_52_y', '42_53_x', '42_53_y', '42_54_x', '42_54_y',
'42_55_x', '42_55_y', '42_56_x', '42_56_y', '42_57_x', '42_57_y', '42_58_x', '42_58_y', '42_59_x',
'42_59_y', '42_60_x', '42_60_y', '42_61_x', '42_61_y', '42_62_x', '42_62_y', '42_63_x', '42_63_y',
'42_64_x', '42_64_y', '42_65_x', '42_65_y', '42_66_x', '42_66_y', '42_67_x', '42_67_y', '43_44_x',
'43_44_y', '43_45_x', '43_45_y', '43_46_x', '43_46_y', '43_47_x', '43_47_y', '43_48_x', '43_48_y',
'43_49_x', '43_49_y', '43_50_x', '43_50_y', '43_51_x', '43_51_y', '43_52_x', '43_52_y', '43_53_x',
'43_53_y', '43_54_x', '43_54_y', '43_55_x', '43_55_y', '43_56_x', '43_56_y', '43_57_x', '43_57_y',
'43_58_x', '43_58_y', '43_59_x', '43_59_y', '43_60_x', '43_60_y', '43_61_x', '43_61_y', '43_62_x',
'43_62_y', '43_63_x', '43_63_y', '43_64_x', '43_64_y', '43_65_x', '43_65_y', '43_66_x', '43_66_y',
'43_67_x', '43_67_y', '44_45_x', '44_45_y', '44_46_x', '44_46_y', '44_47_x', '44_47_y', '44_48_x',
'44_48_y', '44_49_x', '44_49_y', '44_50_x', '44_50_y', '44_51_x', '44_51_y', '44_52_x', '44_52_y',
'44_53_x', '44_53_y', '44_54_x', '44_54_y', '44_55_x', '44_55_y', '44_56_x', '44_56_y', '44_57_x',
'44_57_y', '44_58_x', '44_58_y', '44_59_x', '44_59_y', '44_60_x', '44_60_y', '44_61_x', '44_61_y',
'44_62_x', '44_62_y', '44_63_x', '44_63_y', '44_64_x', '44_64_y', '44_65_x', '44_65_y', '44_66_x',
'44_66_y', '44_67_x', '44_67_y', '45_46_x', '45_46_y', '45_47_x', '45_47_y', '45_48_x', '45_48_y',
'45_49_x', '45_49_y', '45_50_x', '45_50_y', '45_51_x', '45_51_y', '45_52_x', '45_52_y', '45_53_x',
'45_53_y', '45_54_x', '45_54_y', '45_55_x', '45_55_y', '45_56_x', '45_56_y', '45_57_x', '45_57_y',
'45_58_x', '45_58_y', '45_59_x', '45_59_y', '45_60_x', '45_60_y', '45_61_x', '45_61_y', '45_62_x',
'45_62_y', '45_63_x', '45_63_y', '45_64_x', '45_64_y', '45_65_x', '45_65_y', '45_66_x', '45_66_y',
'45_67_x', '45_67_y', '46_47_x', '46_47_y', '46_48_x', '46_48_y', '46_49_x', '46_49_y', '46_50_x',
'46_50_y', '46_51_x', '46_51_y', '46_52_x', '46_52_y', '46_53_x', '46_53_y', '46_54_x', '46_54_y',
'46_55_x', '46_55_y', '46_56_x', '46_56_y', '46_57_x', '46_57_y', '46_58_x', '46_58_y', '46_59_x',
'46_59_y', '46_60_x', '46_60_y', '46_61_x', '46_61_y', '46_62_x', '46_62_y', '46_63_x', '46_63_y',
'46_64_x', '46_64_y', '46_65_x', '46_65_y', '46_66_x', '46_66_y', '46_67_x', '46_67_y', '47_48_x',
'47_48_y', '47_49_x', '47_49_y', '47_50_x', '47_50_y', '47_51_x', '47_51_y', '47_52_x', '47_52_y',
'47_53_x', '47_53_y', '47_54_x', '47_54_y', '47_55_x', '47_55_y', '47_56_x', '47_56_y', '47_57_x',
'47_57_y', '47_58_x', '47_58_y', '47_59_x', '47_59_y', '47_60_x', '47_60_y', '47_61_x', '47_61_y',
'47_62_x', '47_62_y', '47_63_x', '47_63_y', '47_64_x', '47_64_y', '47_65_x', '47_65_y', '47_66_x',
'47_66_y', '47_67_x', '47_67_y', '48_49_x', '48_49_y', '48_50_x', '48_50_y', '48_51_x', '48_51_y',
'48_52_x', '48_52_y', '48_53_x', '48_53_y', '48_54_x', '48_54_y', '48_55_x', '48_55_y', '48_56_x',
'48_56_y', '48_57_x', '48_57_y', '48_58_x', '48_58_y', '48_59_x', '48_59_y', '48_60_x', '48_60_y',
'48_61_x', '48_61_y', '48_62_x', '48_62_y', '48_63_x', '48_63_y', '48_64_x', '48_64_y', '48_65_x',
'48_65_y', '48_66_x', '48_66_y', '48_67_x', '48_67_y', '49_50_x', '49_50_y', '49_51_x', '49_51_y',
'49_52_x', '49_52_y', '49_53_x', '49_53_y', '49_54_x', '49_54_y', '49_55_x', '49_55_y', '49_56_x',
'49_56_y', '49_57_x', '49_57_y', '49_58_x', '49_58_y', '49_59_x', '49_59_y', '49_60_x', '49_60_y',
'49_61_x', '49_61_y', '49_62_x', '49_62_y', '49_63_x', '49_63_y', '49_64_x', '49_64_y', '49_65_x',
'49_65_y', '49_66_x', '49_66_y', '49_67_x', '49_67_y', '4_10_x', '4_10_y', '4_11_x', '4_11_y', '4_12_x',
'4_12_y', '4_13_x', '4_13_y', '4_14_x', '4_14_y', '4_15_x', '4_15_y', '4_16_x', '4_16_y', '4_17_x',
'4_17_y', '4_18_x', '4_18_y', '4_19_x', '4_19_y', '4_20_x', '4_20_y', '4_21_x', '4_21_y', '4_22_x',
'4_22_y', '4_23_x', '4_23_y', '4_24_x', '4_24_y', '4_25_x', '4_25_y', '4_26_x', '4_26_y', '4_27_x',
'4_27_y', '4_28_x', '4_28_y', '4_29_x', '4_29_y', '4_30_x', '4_30_y', '4_31_x', '4_31_y', '4_32_x',
'4_32_y', '4_33_x', '4_33_y', '4_34_x', '4_34_y', '4_35_x', '4_35_y', '4_36_x', '4_36_y', '4_37_x',
'4_37_y', '4_38_x', '4_38_y', '4_39_x', '4_39_y', '4_40_x', '4_40_y', '4_41_x', '4_41_y', '4_42_x',
'4_42_y', '4_43_x', '4_43_y', '4_44_x', '4_44_y', '4_45_x', '4_45_y', '4_46_x', '4_46_y', '4_47_x',
'4_47_y', '4_48_x', '4_48_y', '4_49_x', '4_49_y', '4_50_x', '4_50_y', '4_51_x', '4_51_y', '4_52_x',
'4_52_y', '4_53_x', '4_53_y', '4_54_x', '4_54_y', '4_55_x', '4_55_y', '4_56_x', '4_56_y', '4_57_x',
'4_57_y', '4_58_x', '4_58_y', '4_59_x', '4_59_y', '4_5_x', '4_5_y', '4_60_x', '4_60_y', '4_61_x',
'4_61_y', '4_62_x', '4_62_y', '4_63_x', '4_63_y', '4_64_x', '4_64_y', '4_65_x', '4_65_y', '4_66_x',
'4_66_y', '4_67_x', '4_67_y', '4_6_x', '4_6_y', '4_7_x', '4_7_y', '4_8_x', '4_8_y', '4_9_x', '4_9_y',
'50_51_x', '50_51_y', '50_52_x', '50_52_y', '50_53_x', '50_53_y', '50_54_x', '50_54_y', '50_55_x',
'50_55_y', '50_56_x', '50_56_y', '50_57_x', '50_57_y', '50_58_x', '50_58_y', '50_59_x', '50_59_y',
'50_60_x', '50_60_y', '50_61_x', '50_61_y', '50_62_x', '50_62_y', '50_63_x', '50_63_y', '50_64_x',
'50_64_y', '50_65_x', '50_65_y', '50_66_x', '50_66_y', '50_67_x', '50_67_y', '51_52_x', '51_52_y',
'51_53_x', '51_53_y', '51_54_x', '51_54_y', '51_55_x', '51_55_y', '51_56_x', '51_56_y', '51_57_x',
'51_57_y', '51_58_x', '51_58_y', '51_59_x', '51_59_y', '51_60_x', '51_60_y', '51_61_x', '51_61_y',
'51_62_x', '51_62_y', '51_63_x', '51_63_y', '51_64_x', '51_64_y', '51_65_x', '51_65_y', '51_66_x',
'51_66_y', '51_67_x', '51_67_y', '52_53_x', '52_53_y', '52_54_x', '52_54_y', '52_55_x', '52_55_y',
'52_56_x', '52_56_y', '52_57_x', '52_57_y', '52_58_x', '52_58_y', '52_59_x', '52_59_y', '52_60_x',
'52_60_y', '52_61_x', '52_61_y', '52_62_x', '52_62_y', '52_63_x', '52_63_y', '52_64_x', '52_64_y',
'52_65_x', '52_65_y', '52_66_x', '52_66_y', '52_67_x', '52_67_y', '53_54_x', '53_54_y', '53_55_x',
'53_55_y', '53_56_x', '53_56_y', '53_57_x', '53_57_y', '53_58_x', '53_58_y', '53_59_x', '53_59_y',
'53_60_x', '53_60_y', '53_61_x', '53_61_y', '53_62_x', '53_62_y', '53_63_x', '53_63_y', '53_64_x',
'53_64_y', '53_65_x', '53_65_y', '53_66_x', '53_66_y', '53_67_x', '53_67_y', '54_55_x', '54_55_y',
'54_56_x', '54_56_y', '54_57_x', '54_57_y', '54_58_x', '54_58_y', '54_59_x', '54_59_y', '54_60_x',
'54_60_y', '54_61_x', '54_61_y', '54_62_x', '54_62_y', '54_63_x', '54_63_y', '54_64_x', '54_64_y',
'54_65_x', '54_65_y', '54_66_x', '54_66_y', '54_67_x', '54_67_y', '55_56_x', '55_56_y', '55_57_x',
'55_57_y', '55_58_x', '55_58_y', '55_59_x', '55_59_y', '55_60_x', '55_60_y', '55_61_x', '55_61_y',
'55_62_x', '55_62_y', '55_63_x', '55_63_y', '55_64_x', '55_64_y', '55_65_x', '55_65_y', '55_66_x',
'55_66_y', '55_67_x', '55_67_y', '56_57_x', '56_57_y', '56_58_x', '56_58_y', '56_59_x', '56_59_y',
'56_60_x', '56_60_y', '56_61_x', '56_61_y', '56_62_x', '56_62_y', '56_63_x', '56_63_y', '56_64_x',
'56_64_y', '56_65_x', '56_65_y', '56_66_x', '56_66_y', '56_67_x', '56_67_y', '57_58_x', '57_58_y',
'57_59_x', '57_59_y', '57_60_x', '57_60_y', '57_61_x', '57_61_y', '57_62_x', '57_62_y', '57_63_x',
'57_63_y', '57_64_x', '57_64_y', '57_65_x', '57_65_y', '57_66_x', '57_66_y', '57_67_x', '57_67_y',
'58_59_x', '58_59_y', '58_60_x', '58_60_y', '58_61_x', '58_61_y', '58_62_x', '58_62_y', '58_63_x',
'58_63_y', '58_64_x', '58_64_y', '58_65_x', '58_65_y', '58_66_x', '58_66_y', '58_67_x', '58_67_y',
'59_60_x', '59_60_y', '59_61_x', '59_61_y', '59_62_x', '59_62_y', '59_63_x', '59_63_y', '59_64_x',
'59_64_y', '59_65_x', '59_65_y', '59_66_x', '59_66_y', '59_67_x', '59_67_y', '5_10_x', '5_10_y',
'5_11_x', '5_11_y', '5_12_x', '5_12_y', '5_13_x', '5_13_y', '5_14_x', '5_14_y', '5_15_x', '5_15_y',
'5_16_x', '5_16_y', '5_17_x', '5_17_y', '5_18_x', '5_18_y', '5_19_x', '5_19_y', '5_20_x', '5_20_y',
'5_21_x', '5_21_y', '5_22_x', '5_22_y', '5_23_x', '5_23_y', '5_24_x', '5_24_y', '5_25_x', '5_25_y',
'5_26_x', '5_26_y', '5_27_x', '5_27_y', '5_28_x', '5_28_y', '5_29_x', '5_29_y', '5_30_x', '5_30_y',
'5_31_x', '5_31_y', '5_32_x', '5_32_y', '5_33_x', '5_33_y', '5_34_x', '5_34_y', '5_35_x', '5_35_y',
'5_36_x', '5_36_y', '5_37_x', '5_37_y', '5_38_x', '5_38_y', '5_39_x', '5_39_y', '5_40_x', '5_40_y',
'5_41_x', '5_41_y', '5_42_x', '5_42_y', '5_43_x', '5_43_y', '5_44_x', '5_44_y', '5_45_x', '5_45_y',
'5_46_x', '5_46_y', '5_47_x', '5_47_y', '5_48_x', '5_48_y', '5_49_x', '5_49_y', '5_50_x', '5_50_y',
'5_51_x', '5_51_y', '5_52_x', '5_52_y', '5_53_x', '5_53_y', '5_54_x', '5_54_y', '5_55_x', '5_55_y',
'5_56_x', '5_56_y', '5_57_x', '5_57_y', '5_58_x', '5_58_y', '5_59_x', '5_59_y', '5_60_x', '5_60_y',
'5_61_x', '5_61_y', '5_62_x', '5_62_y', '5_63_x', '5_63_y', '5_64_x', '5_64_y', '5_65_x', '5_65_y',
'5_66_x', '5_66_y', '5_67_x', '5_67_y', '5_6_x', '5_6_y', '5_7_x', '5_7_y', '5_8_x', '5_8_y', '5_9_x',
'5_9_y', '60_61_x', '60_61_y', '60_62_x', '60_62_y', '60_63_x', '60_63_y', '60_64_x', '60_64_y',
'60_65_x', '60_65_y', '60_66_x', '60_66_y', '60_67_x', '60_67_y', '61_62_x', '61_62_y', '61_63_x',
'61_63_y', '61_64_x', '61_64_y', '61_65_x', '61_65_y', '61_66_x', '61_66_y', '61_67_x', '61_67_y',
'62_63_x', '62_63_y', '62_64_x', '62_64_y', '62_65_x', '62_65_y', '62_66_x', '62_66_y', '62_67_x',
'62_67_y', '63_64_x', '63_64_y', '63_65_x', '63_65_y', '63_66_x', '63_66_y', '63_67_x', '63_67_y',
'64_65_x', '64_65_y', '64_66_x', '64_66_y', '64_67_x', '64_67_y', '65_66_x', '65_66_y', '65_67_x',
'65_67_y', '66_67_x', '66_67_y', '6_10_x', '6_10_y', '6_11_x', '6_11_y', '6_12_x', '6_12_y', '6_13_x',
'6_13_y', '6_14_x', '6_14_y', '6_15_x', '6_15_y', '6_16_x', '6_16_y', '6_17_x', '6_17_y', '6_18_x',
'6_18_y', '6_19_x', '6_19_y', '6_20_x', '6_20_y', '6_21_x', '6_21_y', '6_22_x', '6_22_y', '6_23_x',
'6_23_y', '6_24_x', '6_24_y', '6_25_x', '6_25_y', '6_26_x', '6_26_y', '6_27_x', '6_27_y', '6_28_x',
'6_28_y', '6_29_x', '6_29_y', '6_30_x', '6_30_y', '6_31_x', '6_31_y', '6_32_x', '6_32_y', '6_33_x',
'6_33_y', '6_34_x', '6_34_y', '6_35_x', '6_35_y', '6_36_x', '6_36_y', '6_37_x', '6_37_y', '6_38_x',
'6_38_y', '6_39_x', '6_39_y', '6_40_x', '6_40_y', '6_41_x', '6_41_y', '6_42_x', '6_42_y', '6_43_x',
'6_43_y', '6_44_x', '6_44_y', '6_45_x', '6_45_y', '6_46_x', '6_46_y', '6_47_x', '6_47_y', '6_48_x',
'6_48_y', '6_49_x', '6_49_y', '6_50_x', '6_50_y', '6_51_x', '6_51_y', '6_52_x', '6_52_y', '6_53_x',
'6_53_y', '6_54_x', '6_54_y', '6_55_x', '6_55_y', '6_56_x', '6_56_y', '6_57_x', '6_57_y', '6_58_x',
'6_58_y', '6_59_x', '6_59_y', '6_60_x', '6_60_y', '6_61_x', '6_61_y', '6_62_x', '6_62_y', '6_63_x',
'6_63_y', '6_64_x', '6_64_y', '6_65_x', '6_65_y', '6_66_x', '6_66_y', '6_67_x', '6_67_y', '6_7_x',
'6_7_y', '6_8_x', '6_8_y', '6_9_x', '6_9_y', '7_10_x', '7_10_y', '7_11_x', '7_11_y', '7_12_x', '7_12_y',
'7_13_x', '7_13_y', '7_14_x', '7_14_y', '7_15_x', '7_15_y', '7_16_x', '7_16_y', '7_17_x', '7_17_y',
'7_18_x', '7_18_y', '7_19_x', '7_19_y', '7_20_x', '7_20_y', '7_21_x', '7_21_y', '7_22_x', '7_22_y',
'7_23_x', '7_23_y', '7_24_x', '7_24_y', '7_25_x', '7_25_y', '7_26_x', '7_26_y', '7_27_x', '7_27_y',
'7_28_x', '7_28_y', '7_29_x', '7_29_y', '7_30_x', '7_30_y', '7_31_x', '7_31_y', '7_32_x', '7_32_y',
'7_33_x', '7_33_y', '7_34_x', '7_34_y', '7_35_x', '7_35_y', '7_36_x', '7_36_y', '7_37_x', '7_37_y',
'7_38_x', '7_38_y', '7_39_x', '7_39_y', '7_40_x', '7_40_y', '7_41_x', '7_41_y', '7_42_x', '7_42_y',
'7_43_x', '7_43_y', '7_44_x', '7_44_y', '7_45_x', '7_45_y', '7_46_x', '7_46_y', '7_47_x', '7_47_y',
'7_48_x', '7_48_y', '7_49_x', '7_49_y', '7_50_x', '7_50_y', '7_51_x', '7_51_y', '7_52_x', '7_52_y',
'7_53_x', '7_53_y', '7_54_x', '7_54_y', '7_55_x', '7_55_y', '7_56_x', '7_56_y', '7_57_x', '7_57_y',
'7_58_x', '7_58_y', '7_59_x', '7_59_y', '7_60_x', '7_60_y', '7_61_x', '7_61_y', '7_62_x', '7_62_y',
'7_63_x', '7_63_y', '7_64_x', '7_64_y', '7_65_x', '7_65_y', '7_66_x', '7_66_y', '7_67_x', '7_67_y',
'7_8_x', '7_8_y', '7_9_x', '7_9_y', '8_10_x', '8_10_y', '8_11_x', '8_11_y', '8_12_x', '8_12_y', '8_13_x',
'8_13_y', '8_14_x', '8_14_y', '8_15_x', '8_15_y', '8_16_x', '8_16_y', '8_17_x', '8_17_y', '8_18_x',
'8_18_y', '8_19_x', '8_19_y', '8_20_x', '8_20_y', '8_21_x', '8_21_y', '8_22_x', '8_22_y', '8_23_x',
'8_23_y', '8_24_x', '8_24_y', '8_25_x', '8_25_y', '8_26_x', '8_26_y', '8_27_x', '8_27_y', '8_28_x',
'8_28_y', '8_29_x', '8_29_y', '8_30_x', '8_30_y', '8_31_x', '8_31_y', '8_32_x', '8_32_y', '8_33_x',
'8_33_y', '8_34_x', '8_34_y', '8_35_x', '8_35_y', '8_36_x', '8_36_y', '8_37_x', '8_37_y', '8_38_x',
'8_38_y', '8_39_x', '8_39_y', '8_40_x', '8_40_y', '8_41_x', '8_41_y', '8_42_x', '8_42_y', '8_43_x',
'8_43_y', '8_44_x', '8_44_y', '8_45_x', '8_45_y', '8_46_x', '8_46_y', '8_47_x', '8_47_y', '8_48_x',
'8_48_y', '8_49_x', '8_49_y', '8_50_x', '8_50_y', '8_51_x', '8_51_y', '8_52_x', '8_52_y', '8_53_x',
'8_53_y', '8_54_x', '8_54_y', '8_55_x', '8_55_y', '8_56_x', '8_56_y', '8_57_x', '8_57_y', '8_58_x',
'8_58_y', '8_59_x', '8_59_y', '8_60_x', '8_60_y', '8_61_x', '8_61_y', '8_62_x', '8_62_y', '8_63_x',
'8_63_y', '8_64_x', '8_64_y', '8_65_x', '8_65_y', '8_66_x', '8_66_y', '8_67_x', '8_67_y', '8_9_x',
'8_9_y', '9_10_x', '9_10_y', '9_11_x', '9_11_y', '9_12_x', '9_12_y', '9_13_x', '9_13_y', '9_14_x',
'9_14_y', '9_15_x', '9_15_y', '9_16_x', '9_16_y', '9_17_x', '9_17_y', '9_18_x', '9_18_y', '9_19_x',
'9_19_y', '9_20_x', '9_20_y', '9_21_x', '9_21_y', '9_22_x', '9_22_y', '9_23_x', '9_23_y', '9_24_x',
'9_24_y', '9_25_x', '9_25_y', '9_26_x', '9_26_y', '9_27_x', '9_27_y', '9_28_x', '9_28_y', '9_29_x',
'9_29_y', '9_30_x', '9_30_y', '9_31_x', '9_31_y', '9_32_x', '9_32_y', '9_33_x', '9_33_y', '9_34_x',
'9_34_y', '9_35_x', '9_35_y', '9_36_x', '9_36_y', '9_37_x', '9_37_y', '9_38_x', '9_38_y', '9_39_x',
'9_39_y', '9_40_x', '9_40_y', '9_41_x', '9_41_y', '9_42_x', '9_42_y', '9_43_x', '9_43_y', '9_44_x',
'9_44_y', '9_45_x', '9_45_y', '9_46_x', '9_46_y', '9_47_x', '9_47_y', '9_48_x', '9_48_y', '9_49_x',
'9_49_y', '9_50_x', '9_50_y', '9_51_x', '9_51_y', '9_52_x', '9_52_y', '9_53_x', '9_53_y', '9_54_x',
'9_54_y', '9_55_x', '9_55_y', '9_56_x', '9_56_y', '9_57_x', '9_57_y', '9_58_x', '9_58_y', '9_59_x',
'9_59_y', '9_60_x', '9_60_y', '9_61_x', '9_61_y', '9_62_x', '9_62_y', '9_63_x', '9_63_y', '9_64_x',
'9_64_y', '9_65_x', '9_65_y', '9_66_x', '9_66_y', '9_67_x', '9_67_y', 'skin_0', 'skin_1', 'skin_10',
'skin_11', 'skin_12', 'skin_13', 'skin_14', 'skin_15', 'skin_16', 'skin_17', 'skin_18', 'skin_19',
'skin_2', 'skin_20', 'skin_21', 'skin_22', 'skin_23', 'skin_24', 'skin_25', 'skin_3', 'skin_4', 'skin_5',
'skin_6', 'skin_7', 'skin_8', 'skin_9']
beauty_data = beauty_data[select_cols]
print(beauty_data.shape)
beauty_data.drop(['Image'], axis=1, inplace=True)
X_train, X_test, y_train, y_test = train_test_split(beauty_data.drop('label', axis=1), beauty_data['label'],
train_size=0.75, test_size=0.25)
# tpot = TPOTRegressor(scoring='r2', n_jobs=-1, early_stop=5, verbosity=2)
# tpot.fit(X_train, y_train)
# print(tpot.score(X_test, y_test))
# tpot.export('../model/tpot_beauty_pipeline.py')
atm = ATM()
atm.run()
|
python
|
from .descriptor import DescriptorType
from .object_type import ObjectDict
__all__ = ["ObjectDict", "DescriptorType"]
|
python
|
from typing import List
class Solution:
# 141, Evaluate Reverse Polish Notation, Medium
def evalRPN(self, tokens: List[str]) -> int:
stack = []
for token in tokens:
if token == "+":
oprand2 = stack.pop()
oprand1 = stack.pop()
stack.append(oprand1 + oprand2)
elif token == "*":
oprand2 = stack.pop()
oprand1 = stack.pop()
stack.append(oprand1 * oprand2)
elif token == "/":
oprand2 = stack.pop()
oprand1 = stack.pop()
stack.append(int(oprand1 / oprand2))
elif token == "-":
oprand2 = stack.pop()
oprand1 = stack.pop()
stack.append(oprand1 - oprand2)
else:
stack.append(int(token))
# print(stack)
return stack[0]
def main():
s = Solution()
print(s.evalRPN(tokens = ["10","6","9","3","+","-11","*","/","*","17","+","5","+"]))
if __name__ == "__main__":
main()
|
python
|
# we are here to get weighted-5-node-subgraph,
# given edge-count/bi-edge-count/strong-tie-count/weak-tie-count only to distinguish
# to leverage weighted 4-node subgraphs (edge_count, biedge_count, strong_count, weak_count, subNo)
# next to share not edge
import re,sys,random, os
def subg(edges,s4,lf,sf,sfv,ror):
# subgraph counts, range from 1 to 13 for 3-node subgraph
#subgraph = {}
distinct = {}
ec = 0
#ec_ij = 0 # edge count within subgraph so as to cluster subgraph
bc = 0
#bc_ij = 0 # double directed edges count
sc = 0
wc = 0
unit = {} # key as subgraph, and value as features of subgraph
fcount = {} # vertex count in the front seat
#fcount_ij = {}
bcount = {} # vertex count in the back seat
#bcount_ij = {}
bi_boolean = False # for bi-edge check
pair = []
temp = []
third = 0
key = ''
#rpair = ''
seq = [] # for every subgraph to store all connections
#seq_ij = [] # for i,j related connections only
# sort to begin
edges.sort()
unit_seq = {}
unit_seq_sorted = {}
# motif detection take place in edges and e_copy
pair = []
key = ''
temp = []
nodes = []
features = []
new = ''
sub_can = []
for sub in s4: # strong edge count, weak edge count, and sub-no, giving #13 at most
#lf.write('')
lf.write('We are considering 4 node subgraph: <'+sub+'>\n')
for c in edges:
nodes = re.split(',',re.split(':',sub)[0])
features = re.split(',',re.split(':',sub)[1])
sub_can = re.split('\'',re.split(':',sub)[2][1:-1])
lf.write('\n')
lf.write('============= New Inner Loop ==============\n')
lf.write('Inner connection is <'+c+'>\n')
key = ''
temp = re.split(',',c)
if temp[0] in nodes and temp[1] in nodes or \
temp[0] not in nodes and temp[1] not in nodes:
continue
if temp[0] not in nodes:
new = temp[0]
nodes.append(new)
else:
new = temp[1]
nodes.append(new)
nodes.sort()
key = nodes[0]+','+nodes[1]+','+nodes[2]+','+nodes[3]+','+nodes[4]
if key in unit.keys():
continue
# to see new to other's connection
ec = int(features[0])
bc = int(features[1])
sc = int(features[2])
wc = int(features[3])
#fcount = {}
#bcount = {}
bi_boolean = False
seq = []
### Get edges from this 4-node-subgraph ###
for x in sub_can:
if len(x)<4:
continue
seq.append(x)
##### new subgraph found #####
lf.write('We found a new 5_node_subgraph: <'+key+'>\n')
lf.write('@@@@@seq for now we have:@@@@@\n')
for s in seq:
lf.write(s+'\n')
# we got a subgraph now, they are e and c
# next, get detail relationships across e and c
k = ''
for n in nodes:
if n!=new:
bi_boolean = False
k = n+','+new+',20'
if k in edges:
ec += 1
sc += 1
bi_boolean = True
seq.append(k)
#
# if p in fcount.keys():
# fcount[p] = fcount[p] + 1
# else:
# fcount[p] = 1
# if third in bcount.keys():
# bcount[third] = bcount[third] + 1
# else:
# bcount[third] = 1
k = n+','+new+',10'
if k in edges:
ec = ec + 1
wc = wc + 1
bi_boolean = True
seq.append(k)
k = new+','+n+',20'
if k in edges:
seq.append(k)
ec = ec + 1
sc = sc + 1
if bi_boolean == True:
bc = bc + 1
bi_boolean = False
k = new+','+n+',10'
if k in edges:
seq.append(k)
ec = ec + 1
# ',10' marks a weak tie, so the weak count is incremented here
wc = wc + 1
if bi_boolean == True:
bc = bc + 1
bi_boolean = False
#lf.write('@@@@@@@@@@Here comes sequences of connections\n')
#for s in seq:
# lf.write(s+'\n')
#lf.write('\n')
# check if ec overflow
if ec>20:
print('ERROR: ec overflowed!!!'+str(ec))
lf.write('ERROR: ec overflowed!!!'+str(ec)+'\n')
# subgraph is about to complete
# value of unit:
unit[key]=str(ec)+','+str(bc)+','+str(sc)+','+str(wc)
unit_seq[key]=seq
# in order to only maintain a single edge, we put it into (smaller node no, larger node no) form
temp = []
for s in seq:
ss = s[:-3] # get rid of ',20' or ',10'
first = re.split(',',ss)[0]
second = re.split(',',ss)[1]
new = second+','+first
if first>second and new not in temp:# in alphabet order, not int order
temp.append(new)
elif ss not in temp:
temp.append(ss)
unit_seq_sorted[key]=temp
temp = []
lf.write('We append the following binary direction-free edges to unit_seq_sorted:\n')
lf.write(str(unit_seq_sorted[key])+'\n')
#double loop ended
#print unit
lf.write('\n')
lf.write('*********Here comes subgraphs************\n')
c = 0
for k,v in unit.items():
lf.write(k+':'+v+'\n')
sfv.write(k+':'+v+':')
sfv.write(str(unit_seq[k])+'\n')
c = c+1
lf.write('\n' + str(c) + ' subgraph count has been written into file.\n')
# Here we are about to count the distinct number of every single subgraph
pp = []
ing_weighted = {} # put ec,bc,sc,wc,sub_no as distinct feature
ing = {} #only put sub_no as distinct feature
# compute weighted_subgraphs
# IT IS the true weighted_subgraphs
weighted_subgraph = {}
binary_subgraph = {}
distinct_weighted = {}
for k, v in unit.items():
pp = re.split(',',k)
lf.write('We consider key: '+str(k)+'\n')
if v not in weighted_subgraph.keys():
weighted_subgraph[v] = 1
else:
weighted_subgraph[v] = weighted_subgraph[v] + 1
if v not in ing_weighted.keys():
distinct_weighted[v] = 1
ing_weighted[v]=[]
for edge in unit_seq[k]:
ing_weighted[v].append(edge)
else:
temp_temp = []
for edge in unit_seq[k]:
if edge not in ing_weighted[v]:
temp_temp.append(edge)
if len(temp_temp)==len(unit_seq[k]):
distinct_weighted[v] += 1
for edge in temp_temp:
ing_weighted[v].append(edge)
vv = int(re.split(',',v)[-1])
# vv is the exact binary 3 node subgraph no
if vv not in binary_subgraph.keys():
binary_subgraph[vv] = 1
else:
binary_subgraph[vv] = binary_subgraph[vv] + 1
if vv not in ing.keys():
distinct[vv] = 1
ing[vv]=[]
for edge in unit_seq_sorted[k]:
ing[vv].append(edge)
else:
temp_temp = []
for edge in unit_seq_sorted[k]:
if edge not in ing[vv]:
temp_temp.append(edge)
if len(temp_temp)==len(unit_seq_sorted[k]):
distinct[vv] += 1
for edge in unit_seq_sorted[k]:
ing[vv].append(edge)
lf.write('Newly appended a distinct subgraph: \n')
# to count the number of distinct subgraphs
# it may not be that precise because <sc,wc,13_sub> is short for weighted_3_subgraph
#lf.write('Here comes the distinct version of subgraphs.\n')
#for k,v in ing.iteritems():
# lf.write(str(k)+':'+str(len(v)/4)+'\n')
# distinct[k] = len(v)/4
#print subgraph
#sf.write('\n')
sf.write('Subgraph to be: \n')
if ror==0:# it is real network
# write subgraph.txt with distinct no in
for k,v in weighted_subgraph.items():
vv = int(re.split(',',k)[-1])
#print vv
# weighted subgraph: weighted appearance, distinct count on binary subgraph (1~13): raw appearance, distinct count on weighted subgraph
sf.write(str(k)+':'+str(v)+','+str(distinct_weighted[k])+':'+str(binary_subgraph[vv])+','+str(distinct[vv])+'\n')
else:
for k,v in weighted_subgraph.items():
sf.write(str(k)+':'+str(v)+'\n')
def main(argv):
# to store edges from file
edges = []
subgraph_of_4_node = []
subgraph_file_verbose =''
pair = []
rf = ''
f = ''
log_file = ''
#log_file_random = ''
subgraph_file = ''
line = ''
sub4 = ''
for parents,dirnames,filenames in os.walk(argv[1]):
for fn in filenames:
if '_edges.txt' in fn:
print('------Begin Processing '+fn+'--------')
edges = []
subgraph_of_4_node = []
line = ''
rf = open(argv[1]+'/'+fn,'r')
sub4 = open(argv[1]+'/'+fn[:fn.index('_')]+'_weighted_4_node_subgraph_verbose.txt','r')
f = open(argv[1]+'/'+fn[:fn.index('_')]+'_weighted_5_node_simple.txt','w+')
log_file = open(argv[1]+'/'+fn[:fn.index('_')]+'_weighted_5_node_log.txt','w')
#log_file_random = open(argv[1]+'/'+fn[:str(fn).index('_')]+'_weighted_4_node_random_log.txt','w')
subgraph_file = open(argv[1]+'/'+fn[:str(fn).index('_')]+'_weighted_5_node_subgraph.txt','w+')
subgraph_file_verbose = open(argv[1]+'/'+fn[:str(fn).index('_')]+'_weighted_5_node_subgraph_verbose.txt','w+')
#get 4_node_subgraphs
for line in sub4:
subgraph_of_4_node.append(line.strip())
for line in rf:
if('Source' in line):
continue
line = line[:line.index(',D')]+line[line.index('d,')+1:] # weight is considered here
f.write(line)
#into set
f.seek(0)
for line in f:
pair = re.split(',',line[:-1])
key = pair[0]+','+pair[1]+','+pair[2]
edges.append(key)
edges.sort()
# call to find subgraphs
# for original network
subg(edges,subgraph_of_4_node, log_file,subgraph_file,subgraph_file_verbose,0)
# for randomized network
#for m in range(M):
# randomize(edges,single,double,log_file_random,subgraph_file,len(edges))# motif detection
print('======End Processing '+fn+'======')
if __name__ == '__main__':
main(sys.argv)
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(name="Eng Phys Office Space Tools",
description="A set of scripts to work with the Eng Phys office space committee",
version="0.1dev",
author="Tim van Boxtel",
author_email="[email protected]",
py_modules=['parse-grad-students'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT",
])
|
python
|
import tweepy #https://github.com/tweepy/tweepy
import csv
import pandas as pd
# Used for progress bar
import time
import sys
#Twitter API credentials
consumer_key = "NBNgPGCBeGv80PcsYU3QWU94d"
consumer_secret = "lvAaoSInlF9mPonoMMldFq5ZE96oAAl30TLh6ynVwK2tauvOQC"
access_key = "1311728151265832961-AaFHXfZtozEgfoVZoFnNqzqRZEWQAr"
access_secret = "Az8Ezlg77NtkZ8coTqXGsXW2wVh7Wbpm3JTsAkoPsrq7z"
OAUTH_KEYS = {'consumer_key':consumer_key, 'consumer_secret':consumer_secret,
'access_token_key':access_key, 'access_token_secret':access_secret}
auth = tweepy.OAuthHandler(OAUTH_KEYS['consumer_key'], OAUTH_KEYS['consumer_secret'])
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
search = tweepy.Cursor(api.search, q='#Trump').items(4000)
# Create lists for each field desired from the tweets.
sn = []
text = []
timestamp =[]
for tweet in search:
#print tweet.user.screen_name, tweet.created_at, tweet.text
timestamp.append(tweet.created_at)
sn.append(tweet.user.screen_name)
text.append(tweet.text)
# Convert lists to dataframe
df = pd.DataFrame()
df['timestamp'] = timestamp
df['sn'] = sn
df['text'] = text
# Prepare for date filtering. Add an EST time column since the chat is hosted by people in that time zone.
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['EST'] = df['timestamp'] - pd.Timedelta(hours=5) #Convert to EST
df['EST'] = pd.to_datetime(df['EST'])
# Subset for the dates required. Can select a specific date or time to examine.
df = df[(pd.to_datetime("2015-12-14 20:00:00", format='%Y-%m-%d %H:%M:%S') < df['EST']) & (df['EST'] < pd.to_datetime("2015-12-14 21:00:00", format='%Y-%m-%d %H:%M:%S'))]
# Write out Tweets in case they are needed later.
df.to_csv('edtechtweets.csv',index = False,encoding='utf-8')
# Create a list of the unique usernames in order to see which users we need to retrieve friends for.
allNames = list(df['sn'].unique())
# Initialize dataframe of users that will hold the edge relationships
dfUsers = pd.DataFrame()
dfUsers['userFromName'] =[]
dfUsers['userFromId'] =[]
dfUsers['userToId'] = []
count = 0
nameCount = len(allNames)
# The choice to retrieve friends (who the user is following) rather than followers is intentional.
# Either would work. However, many Twitter users follow fewer users than are following them, especially the most popular accounts.
# This reduces the number of very large calls to Twitter API, which seemed to cause problems.
for name in allNames:
# Build list of friends
currentFriends = []
for page in tweepy.Cursor(api.friends_ids, screen_name=name).pages():
currentFriends.extend(page)
currentId = api.get_user(screen_name=name).id
currentId = [currentId] * len(currentFriends)
currentName = [name] * len(currentFriends)
dfTemp = pd.DataFrame()
dfTemp['userFromName'] = currentName
dfTemp['userFromId'] = currentId
dfTemp['userToId'] = currentFriends
dfUsers = pd.concat([dfUsers,dfTemp])
time.sleep(70) # avoids hitting Twitter rate limit
# Progress bar to track approximate progress
count +=1
per = round(count*100.0/nameCount,1)
sys.stdout.write("\rTwitter call %s%% complete." % per)
sys.stdout.flush()
# Again, to limit the number of calls to Twitter API, just do lookups on followers that connect to those in our user group.
# We are not interested in "friends" that are not part of this community.
fromId = dfUsers['userFromId'].unique()
dfChat = dfUsers[dfUsers['userToId'].apply(lambda x: x in fromId)]
# No more Twitter API lookups are necessary. Create a lookup table that we will use to verify the userToName
dfLookup = dfChat[['userFromName','userFromId']]
dfLookup = dfLookup.drop_duplicates()
dfLookup.columns = ['userToName','userToId']
dfCommunity = dfUsers.merge(dfLookup, on='userToId')
dfCommunity.to_csv('dfCommunity.csv',index = False,encoding='utf-8')
|
python
|
#Pygments Tk Text from http://code.google.com/p/pygments-tk-text/
#Original Developer: Jonathon Eunice: [email protected]
__author__ = 'Robert Cope'
__original__author__ = 'Jonathan Eunice'
|
python
|
import random
def generatePassword(pwlength):
alphabet = "abcdefghijklmnopqrstuvwxyz"
passwords = []
for i in pwlength:
password = ""
for j in range(i):
next_letter_index = random.randrange(len(alphabet))
password = password + alphabet[next_letter_index]
password = replaceWithNumber(password)
password = replaceWithUppercaseLetter(password)
passwords.append(password)
return passwords
def replaceWithNumber(pword):
for i in range(random.randrange(1,3)):
replace_index = random.randrange(len(pword)//2)
pword = pword[0:replace_index] + str(random.randrange(10)) + pword[replace_index+1:]
return pword
def replaceWithUppercaseLetter(pword):
for i in range(random.randrange(1,3)):
replace_index = random.randrange(len(pword)//2,len(pword))
pword = pword[0:replace_index] + pword[replace_index].upper() + pword[replace_index+1:]
return pword
def main():
numPasswords = int(input("How many passwords do you want to generate? "))
print("Generating " +str(numPasswords)+" passwords")
passwordLengths = []
print("Minimum length of password should be 3")
for i in range(numPasswords):
length = int(input("Enter the length of Password #" + str(i+1) + " "))
if length<3:
length = 3
passwordLengths.append(length)
Password = generatePassword(passwordLengths)
for i in range(numPasswords):
print ("Password #"+str(i+1)+" = " + Password[i])
main()
#This program is created by Harsh Sharma
|
python
|
GAME_SIZE = 4
SCORE_TO_WIN1 = 512
SCORE_TO_WIN2 = 1024
SCORE_TO_WIN3 = 2048
SCORE_TO_WIN0 = 256
from game2048.game import Game
from game2048.agents import ExpectiMaxAgent
# save the dataset
f1 = open("dataset_256_3.txt", "w")
f2 = open("dataset_512_3.txt", "w")
f3 = open("dataset_1024_3.txt", "w")
# for i in range(100):
# print("i = ", i)
# game = Game(size=GAME_SIZE,score_to_win=SCORE_TO_WIN1)
# agent = ExpectiMaxAgent(game=game)
# while True:
# direction = agent.step()
# if (game.end != 0):
# break
# # print (game.board)
# # print ("direction: ", direction)
# for i in range(4):
# for j in range(4):
# #f.write(game.board[i,j])
# print(game.board[i, j], file = f1)
# print(direction, file = f1)
# #f.write(direction)
# game.move(direction)
# #f.write('\n')
# for i in range(100):
# print("i = ", i)
# game = Game(size=GAME_SIZE,score_to_win=SCORE_TO_WIN2)
# agent = ExpectiMaxAgent(game=game)
# while True:
# direction = agent.step()
# if (game.end != 0):
# break
# # print (game.board)
# # print ("direction: ", direction)
# for i in range(4):
# for j in range(4):
# #f.write(game.board[i,j])
# print(game.board[i, j], file = f2)
# print(direction, file = f2)
# #f.write(direction)
# game.move(direction)
# #f.write('\n')
for i in range(300):
print("i = ", i)
game = Game(size=GAME_SIZE,score_to_win=SCORE_TO_WIN0)
agent = ExpectiMaxAgent(game=game)
while True:
direction = agent.step()
if (game.end != 0):
break
# print (game.board)
# print ("direction: ", direction)
if game.board.max() <256:
for i in range(4):
for j in range(4):
#f.write(game.board[i,j])
print(game.board[i, j], file = f1)
print(direction, file = f1)
elif game.board.max() <512:
for i in range(4):
for j in range(4):
#f.write(game.board[i,j])
print(game.board[i, j], file = f2)
print(direction, file = f2)
else:
for i in range(4):
for j in range(4):
#f.write(game.board[i,j])
print(game.board[i, j], file = f3)
print(direction, file = f3)
#f.write(direction)
game.move(direction)
#f.write('\n')
|
python
|
"""
####################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : embedding.py
# Abstract : torch.nn.Embedding function encapsulation.
# Current Version: 1.0.0
# Date : 2021-05-20
######################################################################################################
"""
from torch import nn
from mmcv.runner import load_checkpoint
from davarocr.davar_common.models.builder import EMBEDDING
from davarocr.davar_common.utils import get_root_logger
@EMBEDDING.register_module()
class Embedding(nn.Module):
""" Embedding layer. Raw implementation: nn.Embedding(vocab_size, embedding_dim)"""
def __init__(self,
vocab_size,
embedding_dim,
drop_out=0.):
"""
Args:
vocab_size (int): size of vocabulary.
embedding_dim (int): dim of input features
drop_out (float): drop_out ratio if required.
"""
super().__init__()
self.drop_out = drop_out
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.drop_out_layer = nn.Dropout(self.drop_out)
def init_weights(self, pretrained=None):
""" Weight initialization
Args:
pretrained (str, optional): Path to pre-trained weights. Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
logger.info("Embedding:")
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
return
else:
raise TypeError('pretrained must be a str or None')
def forward(self, input_feature):
""" Forward computation
Args:
input_feature (Tensor): in shape of [B x N x L]
Returns:
Tensor: in shape of [B x N x L x D], where D is the embedding_dim.
"""
embed_vector = self.embedding(input_feature)
embed_vector = self.drop_out_layer(embed_vector)
return embed_vector
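# --- Hedged usage sketch (not part of the original file) ---
# A minimal, illustrative way to exercise the layer above; the sizes below are
# made up and the snippet assumes torch is importable:
#     import torch
#     embed = Embedding(vocab_size=100, embedding_dim=16, drop_out=0.1)
#     tokens = torch.randint(0, 100, (2, 3, 5))   # B x N x L integer ids
#     print(embed(tokens).shape)                  # torch.Size([2, 3, 5, 16])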
|
python
|
from tweepy import Stream
from stream_tweets import StockListener
import get_old_tweets
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def getTweets(stock_name):
twitter_stream = Stream(get_old_tweets.auth, StockListener(stock_name))
twitter_stream.filter(track=[("$"+stock_name)])
logging.info("Stream for", stock_name, "is active...")
get_old_tweets.get_past_tweets(stock_name)
logging.info("Past tweets of", stock_name, "added to db...")
|
python
|
from painter.config import NAME, PATH, SHELVES
from templates import app
# Painter base
PAINTER = app.App(NAME, PATH, SHELVES)
|
python
|
"""Various lower-level functions to support the computation of steady states"""
import warnings
import numpy as np
import scipy.optimize as opt
from numbers import Real
from functools import partial
from ...utilities import misc, solvers
def instantiate_steady_state_mutable_kwargs(dissolve, block_kwargs, solver_kwargs, constrained_kwargs):
"""Instantiate mutable types from `None` default values in the steady_state function"""
if dissolve is None:
dissolve = []
if block_kwargs is None:
block_kwargs = {}
if solver_kwargs is None:
solver_kwargs = {}
if constrained_kwargs is None:
constrained_kwargs = {}
return dissolve, block_kwargs, solver_kwargs, constrained_kwargs
def provide_solver_default(unknowns):
if len(unknowns) == 1:
bounds = list(unknowns.values())[0]
if not isinstance(bounds, tuple) or bounds[0] > bounds[1]:
raise ValueError("Unable to find a compatible one-dimensional solver with provided `unknowns`.\n"
" Please provide valid lower/upper bounds, e.g. unknowns = {`a`: (0, 1)}")
else:
return "brentq"
elif len(unknowns) > 1:
init_values = list(unknowns.values())
if not np.all([isinstance(v, Real) for v in init_values]):
raise ValueError("Unable to find a compatible multi-dimensional solver with provided `unknowns`.\n"
" Please provide valid initial values, e.g. unknowns = {`a`: 1, `b`: 2}")
else:
return "broyden_custom"
else:
raise ValueError("`unknowns` is empty! Please provide a dict of keys/values equal to the number of unknowns"
" that need to be solved for.")
def run_consistency_check(cresid, ctol=1e-9, fragile=False):
if cresid > ctol:
if fragile:
raise RuntimeError(f"The target values evaluated for the proposed set of unknowns produce a "
f"maximum residual value of {cresid}, which is greater than the ctol {ctol}.\n"
f" If used, check if HelperBlocks are indeed compatible with the DAG.\n"
f" If this is not an issue, adjust ctol accordingly.")
else:
warnings.warn(f"The target values evaluated for the proposed set of unknowns produce a "
f"maximum residual value of {cresid}, which is greater than the ctol {ctol}.\n"
f" If used, check if HelperBlocks are indeed compatible with the DAG.\n"
f" If this is not an issue, adjust ctol accordingly.")
# Allow targets to be specified in the following formats
# 1) target = {"asset_mkt": 0} or ["asset_mkt"] (the standard case, where the target = 0)
# 2) target = {"r": 0.01} (allowing for the target to be non-zero)
# 3) target = {"K": "A"} (allowing the target to be another variable in potential_args)
def compute_target_values(targets, potential_args):
"""
For a given set of target specifications and potential arguments available, compute the targets.
Called as the return value for the residual function when utilizing the numerical solver.
targets: Refer to `steady_state` function docstring
potential_args: Refer to the `steady_state` function docstring for the "calibration" variable
return: A `float` (if computing a univariate target) or an `np.ndarray` (if using a multivariate target)
"""
target_values = np.empty(len(targets))
for (i, t) in enumerate(targets):
v = targets[t] if isinstance(targets, dict) else 0
if type(v) == str:
target_values[i] = potential_args[t] - potential_args[v]
else:
target_values[i] = potential_args[t] - v
# Univariate solvers require float return values (and not lists)
if len(targets) == 1:
return target_values[0]
else:
return target_values
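# Worked example (illustrative numbers) of the three target formats documented above,
# where `potential_args` stands in for the calibration dict passed by steady_state:
#     potential_args = {"asset_mkt": 0.002, "r": 0.011, "K": 3.1, "A": 3.0}
#     compute_target_values(["asset_mkt"], potential_args)   # -> 0.002  (target defaults to 0)
#     compute_target_values({"r": 0.01}, potential_args)     # -> ~0.001 (non-zero target)
#     compute_target_values({"K": "A"}, potential_args)      # -> ~0.1   (target is another variable)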
def compare_steady_states(ss_ref, ss_comp, tol=1e-8, name_map=None, internal=True, check_same_keys=True, verbose=False):
"""Check if two steady state dicts (can be flat dicts or SteadyStateDict objects) are the same up to a tolerance"""
if name_map is None:
name_map = {}
valid = True
# Compare the steady state values present in both ss_ref and ss_comp
if internal:
if not hasattr(ss_ref, "internal") or not hasattr(ss_comp, "internal"):
warnings.warn("The provided steady state dicts do not both have .internal attrs. Will only compare"
" top-level values")
ds_to_check = [(ss_ref, ss_comp, "toplevel")]
else:
ds_to_check = [(ss_ref, ss_comp, "toplevel")] + [(ss_ref.internal[i], ss_comp.internal[i], i + "_internal") for i in ss_ref.internal]
else:
ds_to_check = [(ss_ref, ss_comp, "toplevel")]
for ds in ds_to_check:
d_ref, d_comp, level = ds
for key_ref in d_ref.keys():
if key_ref in d_comp.keys():
key_comp = key_ref
elif key_ref in name_map:
key_comp = name_map[key_ref]
else:
continue
if np.isscalar(d_ref[key_ref]):
resid = abs(d_ref[key_ref] - d_comp[key_comp])
else:
resid = np.linalg.norm(d_ref[key_ref].ravel() - d_comp[key_comp].ravel(), np.inf)
if verbose:
print(f"{key_ref} resid: {resid}")
if not np.all(np.isclose(resid, 0., atol=tol)):
valid = False
# Show the steady state values present in only one of d_ref or d_comp, i.e. if there are missing keys
if check_same_keys:
d_ref_incl_mapped = set(d_ref.keys()) - set(name_map.keys())
d_comp_incl_mapped = set(d_comp.keys()) - set(name_map.values())
diff_keys = d_ref_incl_mapped.symmetric_difference(d_comp_incl_mapped)
if diff_keys:
if verbose:
print(f"At level '{level}', the keys present only one of the two steady state dicts are {diff_keys}")
valid = False
return valid
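# Small illustrative check (plain dicts, internal=False so no .internal attrs are needed):
#     a = {"K": 3.0, "r": 0.01}
#     b = {"K": 3.0, "r": 0.01 + 1e-12}
#     compare_steady_states(a, b, tol=1e-8, internal=False)   # -> True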
def solve_for_unknowns(residual, unknowns, solver, solver_kwargs, residual_kwargs=None,
constrained_method="linear_continuation", constrained_kwargs=None,
tol=2e-12, verbose=False):
"""Given a residual function (constructed within steady_state) and a set of bounds or initial values for
the set of unknowns, solve for the root.
residual: `function`
A function to be supplied to a numerical solver that takes unknown values as arguments
and returns computed targets.
unknowns: `dict`
Refer to the `steady_state` function docstring for the "unknowns" variable
targets: `dict`
Refer to the `steady_state` function docstring for the "targets" variable
tol: `float`
The absolute convergence tolerance of the computed target to the desired target value in the numerical solver
solver: `str`
Refer to the `steady_state` function docstring for the "solver" variable
solver_kwargs:
Refer to the `steady_state` function docstring for the "solver_kwargs" variable
return: The root[s] of the residual function as either a scalar (float) or a list of floats
"""
if residual_kwargs is None:
residual_kwargs = {}
scipy_optimize_uni_solvers = ["bisect", "brentq", "brenth", "ridder", "toms748", "newton", "secant", "halley"]
scipy_optimize_multi_solvers = ["hybr", "lm", "broyden1", "broyden2", "anderson", "linearmixing", "diagbroyden",
"excitingmixing", "krylov", "df-sane"]
# Wrap kwargs into the residual function
residual_f = partial(residual, **residual_kwargs)
if solver is None:
raise RuntimeError("Must provide a numerical solver from the following set: brentq, broyden, solved")
elif solver in scipy_optimize_uni_solvers:
initial_values_or_bounds = extract_univariate_initial_values_or_bounds(unknowns)
result = opt.root_scalar(residual_f, method=solver, xtol=tol,
**initial_values_or_bounds, **solver_kwargs)
if not result.converged:
raise ValueError(f"Steady-state solver, {solver}, did not converge.")
unknown_solutions = result.root
elif solver in scipy_optimize_multi_solvers:
initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
# If no bounds were provided
if not bounds:
result = opt.root(residual_f, initial_values,
method=solver, tol=tol, **solver_kwargs)
else:
constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
method=constrained_method,
**constrained_kwargs)
result = opt.root(constrained_residual, initial_values,
method=solver, tol=tol, **solver_kwargs)
if not result.success:
raise ValueError(f"Steady-state solver, {solver}, did not converge."
f" The termination status is {result.status}.")
unknown_solutions = list(result.x)
# TODO: Implement a more general interface for custom solvers, so we don't need to add new elifs at this level
# everytime a new custom solver is implemented.
elif solver == "broyden_custom":
initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
# If no bounds were provided
if not bounds:
unknown_solutions, _ = solvers.broyden_solver(residual_f, initial_values,
tol=tol, verbose=verbose, **solver_kwargs)
else:
constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
method=constrained_method,
**constrained_kwargs)
unknown_solutions, _ = solvers.broyden_solver(constrained_residual, initial_values,
verbose=verbose, tol=tol, **solver_kwargs)
unknown_solutions = list(unknown_solutions)
elif solver == "newton_custom":
initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
# If no bounds were provided
if not bounds:
unknown_solutions, _ = solvers.newton_solver(residual_f, initial_values,
tol=tol, verbose=verbose, **solver_kwargs)
else:
constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
method=constrained_method,
**constrained_kwargs)
unknown_solutions, _ = solvers.newton_solver(constrained_residual, initial_values,
tol=tol, verbose=verbose, **solver_kwargs)
unknown_solutions = list(unknown_solutions)
elif solver == "solved":
# If the model either doesn't require a numerical solution or is being evaluated at a candidate solution
# simply call residual_f once to populate the `ss_values` dict
residual_f(unknowns.values())
unknown_solutions = unknowns.values()
else:
raise RuntimeError(f"steady_state is not yet compatible with {solver}.")
return dict(misc.smart_zip(unknowns.keys(), unknown_solutions))
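# Hedged end-to-end sketch (hypothetical residual) for the univariate scipy branch above:
#     solve_for_unknowns(lambda r: r - 0.01, {"r": (0.0, 0.05)}, "brentq", {})
#     # -> {"r": ~0.01}, assuming misc.smart_zip pairs the single key with the scalar root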
def extract_univariate_initial_values_or_bounds(unknowns):
val = next(iter(unknowns.values()))
if np.isscalar(val):
return {"x0": val}
else:
return {"bracket": (val[0], val[1])}
def extract_multivariate_initial_values_and_bounds(unknowns, fragile=False):
"""Provided a dict mapping names of unknowns to initial values/bounds, return separate dicts of
the initial values and bounds.
Note: For one-sided bounds, simply put np.inf/-np.inf as the other side of the bounds, so there is
no ambiguity about which is the unconstrained side.
"""
initial_values = []
multi_bounds = {}
for k, v in unknowns.items():
if np.isscalar(v):
initial_values.append(v)
elif len(v) == 2:
if fragile:
raise ValueError(f"{len(v)} is an invalid size for the value of an unknown."
f" the values of `unknowns` must either be a scalar, pertaining to a"
f" single initial value for the root solver to begin from,"
f" a length 2 tuple, pertaining to a lower bound and an upper bound,"
f" or a length 3 tuple, pertaining to a lower bound, initial value, and upper bound.")
else:
warnings.warn("Interpreting values of `unknowns` from length 2 tuple as lower and upper bounds"
" and averaging them to get a scalar initial value to provide to the solver.")
initial_values.append((v[0] + v[1])/2)
elif len(v) == 3:
lb, iv, ub = v
assert lb < iv < ub
initial_values.append(iv)
multi_bounds[k] = (lb, ub)
else:
raise ValueError(f"{len(v)} is an invalid size for the value of an unknown."
f" the values of `unknowns` must either be a scalar, pertaining to a"
f" single initial value for the root solver to begin from,"
f" a length 2 tuple, pertaining to a lower bound and an upper bound,"
f" or a length 3 tuple, pertaining to a lower bound, initial value, and upper bound.")
return np.asarray(initial_values), multi_bounds
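# Illustrative examples (hypothetical unknown "beta") of the accepted value formats above:
#     extract_multivariate_initial_values_and_bounds({"beta": 0.98})
#         # -> (array([0.98]), {})                        scalar: initial value only
#     extract_multivariate_initial_values_and_bounds({"beta": (0.95, 0.99)})
#         # -> (array([0.97]), {})                        2-tuple: bounds averaged, with a warning
#     extract_multivariate_initial_values_and_bounds({"beta": (0.95, 0.98, 0.99)})
#         # -> (array([0.98]), {"beta": (0.95, 0.99)})    3-tuple: lb, initial value, ub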
def residual_with_linear_continuation(residual, bounds, eval_at_boundary=False,
boundary_epsilon=1e-4, penalty_scale=1e1,
verbose=False):
"""Modify a residual function to implement bounds by an additive penalty for exceeding the boundaries
provided, scaled by the amount the guess exceeds the boundary.
e.g. For residual function f(x), desiring x in (0, 1) (so assuming eval_at_boundary = False)
If the guess for x is 1.1 then we will censor to x_censored = 1 - boundary_epsilon, and return
f(x_censored) + penalty (where the penalty does not require re-evaluating f() which may be costly)
residual: `function`
The function whose roots we want to solve for
bounds: `dict`
A dict mapping the names of the unknowns (`str`) to length two tuples corresponding to the lower and upper
bounds.
eval_at_boundary: `bool`
Whether to allow the residual function to be evaluated at exactly the boundary values or not.
Think of it as whether the solver will treat the bounds as creating a closed or open set for the search space.
boundary_epsilon: `float`
The amount to adjust the proposed guess, x, by to calculate the censored value of the residual function,
when the proposed guess exceeds the boundaries.
penalty_scale: `float`
The linear scaling factor for adjusting the penalty for the proposed unknown values exceeding the boundary.
verbose: `bool`
Whether to print out additional information for how the constrained residual function is behaving during
optimization. Useful for tuning the solver.
"""
lbs = np.asarray([v[0] for v in bounds.values()])
ubs = np.asarray([v[1] for v in bounds.values()])
def constr_residual(x, residual_cache=[]):
"""Implements a constrained residual function, where any attempts to evaluate x outside of the
bounds provided will result in a linear penalty function scaled by `penalty_scale`.
Note: We are purposefully using residual_cache as a mutable default argument to cache the most recent
valid evaluation (maintain state between function calls) of the residual function to induce solvers
to backstep if they encounter a region of the search space that returns nan values.
See Hitchhiker's Guide to Python post on Mutable Default Arguments: "When the Gotcha Isn't a Gotcha"
"""
if eval_at_boundary:
x_censored = np.where(x < lbs, lbs, x)
x_censored = np.where(x > ubs, ubs, x_censored)
else:
x_censored = np.where(x < lbs, lbs + boundary_epsilon, x)
x_censored = np.where(x > ubs, ubs - boundary_epsilon, x_censored)
residual_censored = residual(x_censored)
if verbose:
print(f"Attempted x is {x}")
print(f"Censored x is {x_censored}")
print(f"The residual_censored is {residual_censored}")
if np.any(np.isnan(residual_censored)):
# Provide a scaled penalty to the solver when trying to evaluate residual() in an undefined region
residual_censored = residual_cache[0] * penalty_scale
if verbose:
print(f"The new residual_censored is {residual_censored}")
else:
if not residual_cache:
residual_cache.append(residual_censored)
else:
residual_cache[0] = residual_censored
if verbose:
print(f"The residual_cache is {residual_cache[0]}")
# Provide an additive, scaled penalty to the solver when trying to evaluate residual() outside of the boundary
residual_with_boundary_penalty = residual_censored + \
(x - x_censored) * penalty_scale * residual_censored
return residual_with_boundary_penalty
return constr_residual
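# Hedged usage sketch (illustrative residual and bounds): wrap a residual so that a solver
# stepping outside (0, 1) gets a censored evaluation plus a linear penalty instead of a
# possibly undefined value:
#     f = lambda x: x ** 2 - 0.25                        # root at x = 0.5
#     g = residual_with_linear_continuation(f, {"x": (0., 1.)})
#     g(np.array([0.5]))   # inside the bounds, so f is evaluated directly
#     g(np.array([1.2]))   # evaluated at 1 - boundary_epsilon, plus a scaled penalty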
def constrained_multivariate_residual(residual, bounds, method="linear_continuation", verbose=False,
**constrained_kwargs):
"""Return a constrained version of the residual function, which accounts for bounds, using the specified method.
See the docstring of the specific method of interest for further details."""
if method == "linear_continuation":
return residual_with_linear_continuation(residual, bounds, verbose=verbose, **constrained_kwargs)
# TODO: Implement logistic transform as another option for constrained multivariate residual
else:
raise ValueError(f"Method {method} for constrained multivariate root-finding has not yet been implemented.")
|
python
|
from django.db import models
from .book import Book
from .language import Language
class BookLanguage(models.Model):
id = models.AutoField(
primary_key=True,
editable=False)
book = models.ForeignKey(
Book,
db_column='book_id',
blank=False, null=False,
on_delete=models.PROTECT)
language = models.ForeignKey(
Language,
db_column='language_id',
blank=False, null=False,
on_delete=models.PROTECT
)
name = models.CharField(
max_length=100,
blank=False, null=False)
abreviation = models.CharField(
max_length=100,
blank=True, null=True)
class Meta:
verbose_name = 'Book language'
verbose_name_plural = 'Book language'
db_table = 'believe_book_lang'
def __str__(self):
return self.name
|
python
|
from __future__ import absolute_import, unicode_literals
from appearance.classes import Icon
icon_acl_list = Icon(driver_name='fontawesome', symbol='lock')
|
python
|
#!/usr/bin/env python3
import os
import sys
import typing
THIS_SCRIPT_DIR = sys.path[0]
INCLUDE_DIR = THIS_SCRIPT_DIR + "/../src/include/cleantype/"
ALL_IN_ONE_FILE = THIS_SCRIPT_DIR + "/include/cleantype/cleantype.hpp"
MAIN_TITLE = """
// CleanType : amalgamated version
//
// This file is part of CleanType: Clean Types for C++
// Copyright Pascal Thomet - 2018
// Distributed under the Boost Software License, Version 1.0. (see LICENSE.md)
"""
TITLE = """
//////////////////////////////////////////
//// Header: HEADER_FILE
//////////////////////////////////////////
"""
FileName = typing.NewType("FileName", str)
FileContent = typing.NewType("FileContent", str)
CodeLine = typing.NewType("CodeLine", str)
def read_file(file: FileName) -> FileContent:
with open(file, "r") as f:
r = f.read()
return r
def get_included_file(codeline: CodeLine) -> typing.Optional[FileName]:
search = "#include <cleantype/"
if search in codeline:
included_file = codeline.replace(search, "")
included_file = included_file[:-1]
return included_file
else:
return None
def process_header_file(header_file: FileName, already_processed_headers: typing.List[FileName] = []) -> FileContent:
if header_file in already_processed_headers:
return ""
already_processed_headers.append(header_file)
dst_file_content = TITLE.replace("HEADER_FILE", header_file)
original_file_content = read_file(INCLUDE_DIR + header_file)
for line in original_file_content.split("\n"):
included_file = get_included_file(line)
if included_file is not None:
dst_file_content = dst_file_content + \
process_header_file(included_file, already_processed_headers)
else:
if "#pragma once" not in line:
dst_file_content = dst_file_content + line + "\n"
return dst_file_content
def make_all_in_one() -> str:
c = process_header_file("cleantype.hpp")
c = "#pragma once\n" + MAIN_TITLE + c
return c
if __name__ == "__main__":
content = make_all_in_one()
# print(make_all_in_one())
with open(ALL_IN_ONE_FILE, "w") as f:
f.write(content)
print("{} was created".format(ALL_IN_ONE_FILE))
|
python
|
import datetime
from bs4 import BeautifulSoup
from kik_unofficial.utilities.cryptographic_utilities import CryptographicUtils
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement, XMPPResponse
class GetMyProfileRequest(XMPPElement):
def __init__(self):
super().__init__()
def serialize(self) -> bytes:
data = ('<iq type="get" id="{}">'
'<query xmlns="kik:iq:user-profile" />'
'</iq>').format(self.message_id)
return data.encode()
class GetMyProfileResponse(XMPPResponse):
def __init__(self, data: BeautifulSoup):
super().__init__(data)
self.first_name = get_text_safe(data, "first")
self.last_name = get_text_safe(data, "last")
self.username = get_text_safe(data, "username")
# Birthday set upon registration using date format yyyy-MM-dd
# Server seems to default to 2000-01-01 if a birthday wasn't set during sign up
self.birthday = get_text_safe(data, "birthday")
# Token that is used to start the OAuth flow for Kik Live API requests
self.session_token = get_text_safe(data, "session-token")
# Token expiration date in ISO 8601 format
# When the token expires, requesting your profile information again
# should return the new session token.
self.session_token_expiration = get_text_safe(data, "session-token-expiration")
self.notify_new_people = True if get_text_safe(data, "notify-new-people") == "true" else False
self.verified = True if data.verified else False
if data.find("email"):
self.email = data.find("email").text
self.email_is_confirmed = "true" == data.find("email").get("confirmed")
else:
self.email = None
self.email_is_confirmed = False
if data.find("pic"):
# append /orig.jpg for the full resolution
# append /thumb.jpg for a smaller resolution
self.pic_url = data.find("pic").text
else:
self.pic_url = None
# Once the session token is expired, call get_my_profile again to get the new token
def is_valid_token(self):
if self.session_token is None or self.session_token_expiration is None:
return False
now = datetime.datetime.now()
try:
expire_time = datetime.datetime.strptime(self.session_token_expiration, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return False
return now < expire_time
def __str__(self):
return f'Username: {self.username}' \
f'\nDisplay name: {self.first_name} {self.last_name}' \
f'\nBirthday: {self.birthday}' \
f'\nEmail: {self.email} (confirmed: {self.email_is_confirmed})' \
f'\nPic: {self.pic_url + "/orig.jpg" if self.pic_url else "none"}'
def __repr__(self):
return "GetMyProfileResponse(first_name={}, last_name={}, username={}, birthday={}, " \
"session_token={}, session_token_expiration={}, notify_new_people={}, " \
"verified={}, email={}, email_is_confirmed={}, pic_url={})".format(self.first_name, self.last_name,
self.username, self.birthday,
self.session_token,
self.session_token_expiration,
self.notify_new_people, self.verified,
self.email, self.email_is_confirmed,
self.pic_url)
def get_text_safe(data: BeautifulSoup, tag: str):
return data.find(tag).text if data.find(tag) else None
class ChangeNameRequest(XMPPElement):
def __init__(self, first_name, last_name):
super().__init__()
self.first_name = first_name
self.last_name = last_name
def serialize(self) -> bytes:
data = ('<iq type="set" id="{}">'
'<query xmlns="kik:iq:user-profile">'
'<first>{}</first>'
'<last>{}</last>'
'</query>'
'</iq>').format(self.message_id, self.first_name, self.last_name)
return data.encode()
class ChangePasswordRequest(XMPPElement):
def __init__(self, old_password, new_password, email, username):
super().__init__()
self.old_password = old_password
self.new_password = new_password
self.email = email
self.username = username
def serialize(self):
passkey_e = CryptographicUtils.key_from_password(self.email, self.old_password)
passkey_u = CryptographicUtils.key_from_password(self.username, self.new_password)
data = ('<iq type="set" id="{}">'
'<query xmlns="kik:iq:user-profile">'
'<passkey-e>{}</passkey-e>'
'<passkey-u>{}</passkey-u>'
'</query>'
'</iq>').format(self.message_id, passkey_e, passkey_u)
return data.encode()
class ChangeEmailRequest(XMPPElement):
def __init__(self, password, old_email, new_email):
super().__init__()
self.password = password
self.old_email = old_email
self.new_email = new_email
def serialize(self):
passkey_e = CryptographicUtils.key_from_password(self.old_email, self.password)
data = ('<iq type="set" id="{}">'
'<query xmlns="kik:iq:user-profile">'
'<email>{}</email>'
'<passkey-e>{}</passkey-e>'
'</query>'
'</iq>').format(self.message_id, self.new_email, passkey_e)
return data.encode()
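# Hedged usage sketch (illustrative only): build a request and inspect the raw
# stanza it would send; the message id is assumed to be generated by the
# XMPPElement base class.
if __name__ == "__main__":
    print(ChangeNameRequest("John", "Doe").serialize().decode())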
|
python
|
import sys
sys.path.append('../../../')
import unittest
import numpy as np
from spiketag.base import ProbeFactory
class TestProbe(unittest.TestCase):
def test_linear_probe(self):
linear_probe = ProbeFactory.genLinearProbe(25e3, 32)
expected_type = 'linear'
expected_n_group = 32
expected_n_ch = 32
expected_len_group = 3
self.assertEqual(expected_type, linear_probe.type)
self.assertEqual(expected_n_group, linear_probe.n_group)
self.assertEqual(expected_n_ch, linear_probe.n_ch)
self.assertEqual(expected_len_group, linear_probe.len_group)
expected_near_ch_1 = [4, 5, 6]
near_ch_1 = linear_probe.get_group_ch(5)
self.assertListEqual(list(near_ch_1), expected_near_ch_1)
expected_near_ch_2 = [-1, 0, 1]
near_ch_2 = linear_probe.get_group_ch(0)
self.assertListEqual(list(near_ch_2), expected_near_ch_2)
expected_near_ch_3 = [30, 31, -1]
near_ch_3 = linear_probe.get_group_ch(31)
self.assertListEqual(list(near_ch_3), expected_near_ch_3)
def test_tetrode_probe(self):
tetrode_probe = ProbeFactory.genTetrodeProbe(25e3, 100)
expected_type = 'tetrode'
expected_n_group = 25
expected_n_ch = 100
expected_len_group = 4
self.assertEqual(expected_type, tetrode_probe.type)
self.assertEqual(expected_n_group, tetrode_probe.n_group)
self.assertEqual(expected_n_ch, tetrode_probe.n_ch)
self.assertEqual(expected_len_group, tetrode_probe._len_group)
expected_near_ch_1 = [0, 1, 2, 3]
near_ch_1 = tetrode_probe.get_group_ch(0)
self.assertListEqual(list(near_ch_1), expected_near_ch_1)
near_ch_1 = tetrode_probe.get_group_ch(3)
self.assertListEqual(list(near_ch_1), expected_near_ch_1)
expected_near_ch_2 = [4, 5, 6, 7]
near_ch_2 = tetrode_probe.get_group_ch(4)
self.assertListEqual(list(near_ch_2), expected_near_ch_2)
expected_near_ch_3 = [96, 97, 98, 99]
near_ch_3 = tetrode_probe.get_group_ch(96)
self.assertListEqual(list(near_ch_3), expected_near_ch_3)
near_ch_3 = tetrode_probe.get_group_ch(99)
self.assertListEqual(list(near_ch_3), expected_near_ch_3)
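# Entry point so the suite can also be run directly with `python <this file>`
# instead of only through a test runner.
if __name__ == '__main__':
    unittest.main()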
|
python
|
"""{{cookiecutter.project_description}}"""
import logging
from {{cookiecutter.package_name}}.wsgi import ApplicationLoader
from {{cookiecutter.package_name}}.version import __version__ # noqa: F401
# initialize logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Mar-27-21 01:06
# @Author : Kan HUANG ([email protected])
# @RefLink : https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html
from __future__ import unicode_literals, print_function, division
import os
import random
import string
import unicodedata
from io import open
import glob
import torch
def findFiles(path): return glob.glob(path)
all_letters = string.ascii_letters + " .,;'" # Five extra letters
n_letters = len(all_letters)
def unicodeToAscii(s):
"""unicodeToAscii
Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
For example, 'Ślusàrski' -> 'Slusarski'
"""
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
def readLines(filename):
"""
Read a file and split into lines.
Each file of 'data/names/*.txt' is a text file with names belonging to a category.
Every line of these files is a name string belonging to the category of the file.
"""
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
def letterToIndex(letter):
# Find letter index from all_letters, e.g. "a" = 0
return all_letters.find(letter)
def letterToIndex_test():
for l in all_letters:
index = all_letters.find(l)
print(f"{l}: {index}; ", end='')
def letterToTensor(letter):
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
tensor = torch.zeros(1, n_letters)
tensor[0][letterToIndex(letter)] = 1 # One-hot encoding
return tensor
def lineToTensor(line):
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
tensor = torch.zeros(len(line), 1, n_letters)
for li, letter in enumerate(line):
tensor[li][0][letterToIndex(letter)] = 1
return tensor
def categoryFromOutput(output):
top_n, top_i = output.topk(1)
category_i = top_i[0].item()
return all_categories[category_i], category_i
# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []
for filename in findFiles('data/names/*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
def randomTrainingExample():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
category_tensor = torch.tensor(
[all_categories.index(category)], dtype=torch.long)
line_tensor = lineToTensor(line)
return category, line, category_tensor, line_tensor
def main():
# All categories
print(f"n_categories: {n_categories}")
print(f"all_categories: {all_categories}")
# Prepare data's pipeline
print(findFiles('data/names/*.txt'))
print(unicodeToAscii('Ślusàrski'))
print("First 5 names in category Italian:")
print(category_lines['Italian'][:5])
# Turning Names into Tensors
# A letter is turned into a <1 x n_letters> one-hot Tensor
print(letterToTensor('J'))
# A name string is turned into a <line_length x 1 x n_letters> one-hot matrix, or an array of one-hot letter vectors
# This will make the encoded input matrix very sparse.
# print(lineToTensor('Jones'))
print(lineToTensor('Jones').size())
# Randomly chosen examples
for i in range(10):
category, line, category_tensor, line_tensor = randomTrainingExample()
print('category =', category, '/ line =', line)
# print(f"line_tensor.size(): {line_tensor.size()}")
if __name__ == "__main__":
main()
|
python
|
from . import db
from flask_login import UserMixin, AnonymousUserMixin
from datetime import datetime
class ScrapeCount(db.Model):
__tablename__ = 'scrapecounts'
id = db.Column(db.Integer, primary_key=True)
websitename = db.Column(db.String(200))
count = db.Column(db.Integer)
date = db.Column(db.DateTime(), default=datetime.utcnow)
#user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(70), unique=True, index=True)
username = db.Column(db.String(70), unique=True, index=True)
|
python
|
import json
import torchvision
import data as data_module
from examples.NIPS.generate_data_utils import gather_examples
import pandas as pd
preprocessable = pd.read_pickle('/home/roigvilamalam/projects/Urban-Sound-Classification/preprocessable.pkl')
preprocessable = preprocessable[preprocessable['preprocessable']]
preprocessable_filenames = set(preprocessable['filename'])
def sound_true_values(dataset):
return [
(
"'examples/NIPS/UrbanSounds8K{}'".format(sample['path'][2:]),
sample['class']
)
for sample in dataset.dataset.data_arr
if sample['path'][3:] in preprocessable_filenames
]
def generate_data():
config = json.load(open('../my-config_generate.json'))
data_manager = getattr(data_module, config['data']['type'])(config['data'])
t_loader = data_manager.get_loader('train', transfs=None)
v_loader = data_manager.get_loader('val', transfs=None)
def scenario_function(digit, last_digits, threshold, available_digits):
if digit == last_digits[-1]:
return digit, True
return None, None
gather_examples(
t_loader, 'in_train_data.txt', 'init_train_data.txt', 'holds_train_data.txt', 'sounds_train_data.txt',
'init_sound_train_data.txt', get_true_values=sound_true_values, network_clause='sound',
start_sequence=['air_conditioner', 'children_playing', 'drilling', 'gun_shot', 'siren'],
end_sequence=['car_horn', 'dog_bark', 'engine_idling', 'jackhammer', 'street_music'],
relevant_digits=1, scenario_function=scenario_function, threshold=None
)
gather_examples(
v_loader, 'in_test_data.txt', 'init_test_data.txt', 'holds_test_data.txt', 'sounds_test_data.txt',
'init_sound_test_data.txt', get_true_values=sound_true_values, network_clause='sound',
start_sequence=['air_conditioner', 'children_playing', 'drilling', 'gun_shot', 'siren'],
end_sequence=['car_horn', 'dog_bark', 'engine_idling', 'jackhammer', 'street_music'],
relevant_digits=1, scenario_function=scenario_function, threshold=None
)
if __name__ == '__main__':
generate_data()
|
python
|
from flask import Flask, render_template, url_for, request, redirect, g, jsonify, flash, session, Markup
from bs4 import BeautifulSoup
from pymongo import *
from functools import wraps
from datetime import datetime
import requests
import csv
import boto3
import ckapi
import validation
from settings import *
# FLASK CONFIG ###########################
app = Flask(__name__)
app.secret_key = APP_SECRET_KEY
# with app.app_context():
client = MongoClient(MONGODB_DATABASE['uri'])
db = client[MONGODB_DATABASE['database_name']]
collection = db[MONGODB_DATABASE['collection_name']]
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not session.get('logged_in'):
return redirect(url_for('index', next=request.url))
return f(*args, **kwargs)
return decorated_function
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
admin_domain = request.form['admin']
username = request.form['username']
password = request.form['password']
session['api_key'] = ckapi.get_api_key(
admin_domain, username, password)
if session['api_key']:
session['logged_in'] = True
session['username'] = username
return redirect(url_for('welcome'))
else:
flash_str = "Remote authentication failed. Please try again."
flash(flash_str)
return redirect(url_for('index'))
else:
if session.get('logged_in'):
return redirect(url_for('welcome'))
else:
return render_template('login.html')
@app.route("/logout")
def logout():
session.clear()
return redirect(url_for('index'))
@app.route('/welcome', methods=['GET', 'POST'])
def welcome():
if request.method == 'POST':
start_date = request.form['start_date']
end_date = request.form['end_date']
start_date_convert = date_convert_for_api(start_date)  # convert to mm/dd/yy for the reporting API
end_date_convert = date_convert_for_api(end_date)  # convert to mm/dd/yy for the reporting API
job_id = 'report_{}_{}'.format(start_date.replace('-', ''), end_date.replace('-', ''))
created_date = str(datetime.now().strftime('%d-%m-%Y %H:%M:%S'))
queue_message = QueueMessage(start_date_convert, end_date_convert, job_id, created_date)
sqs_job(queue_message)
#collection_name = db[MONGODB_DATABASE['collection_name']]
collection.insert({"job_id": job_id, "created_date": created_date, "start_date": start_date, "end_date": end_date, "status": "Queued", "file_link": ""})
message = "Job has been scheduled"
job_list = retrieve_scheduled_report(collection)
return render_template('welcome.html', message=message, jobs=job_list)
else:
job_list = retrieve_scheduled_report(collection)
return render_template('welcome.html', jobs=job_list)
class QueueJob(object):
def __init__(self, job_id, start_date, end_date, created_date, status, file_link):
self.job_id = job_id
self.start_date = start_date
self.end_date = end_date
self.created_date = created_date
self.status = status
self.file_link = file_link
class QueueMessage(object):
def __init__(self, start_date, end_date, job_id, created_date):
self.start_date = start_date
self.end_date = end_date
self.job_id = job_id
self.created_date = created_date
# TODO: experiment using an ORM so you don't have to create a list, directly pass Mongo query response to template
def retrieve_scheduled_report(collection):
job_list = []
for i in collection.find({}, {'_id':0}).sort('created_date', -1):
job_temp = QueueJob(i['job_id'], i['start_date'], i['end_date'], i['created_date'], i['status'], i['file_link'])
job_list.append(job_temp)
return job_list
def date_convert_for_api(date):
date_result = datetime.strptime(date, '%d-%m-%Y').strftime('%m/%d/%y')
return date_result
def sqs_job(queue_message):
message_content = str({
"start_date": "%s" % queue_message.start_date,
"end_date": "%s" % queue_message.end_date,
"job_id": "%s" %queue_message.job_id,
"created_date": "%s" % queue_message.created_date
})
client = boto3.client('sqs')
client.create_queue(QueueName = SQS_QUEUE['name'])
r = client.get_queue_url(QueueName = SQS_QUEUE['name'])
queue_url = r['QueueUrl']
client.send_message(QueueUrl=queue_url, MessageBody=message_content, DelaySeconds=10)
if __name__ == "__main__":
app.run(debug=False)
|
python
|
# @Author: Leeroy P. Williams
# @Date: 29/09/19
# @Problem: If we list all the natural numbers below 10 that are
# multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000
# @Solution: Correct
def multi(a, b, array):
"""
Collect the multiples of a or b found in array, then print their sum.
"""
MULTIPLES = []
for i in array:
if (i%a == 0) or (i%b == 0):
MULTIPLES.append(i)
print(sum(MULTIPLES))
if __name__ == "__main__":
nums = [nums for nums in range(1, 1000)]
a = 3
b = 5
multi(a, b, nums)
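# For reference, the same computation as a single expression:
# print(sum(n for n in range(1, 1000) if n % 3 == 0 or n % 5 == 0))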
|
python
|
# -*- coding: utf-8 -*-
"""
===============================================================================
Trajectory class for MD simulations (:mod:`sknano.core.atoms._trajectory`)
===============================================================================
Classes for analyzing the atom trajectories of molecular dynamics simulations.
.. currentmodule:: sknano.core.atoms._trajectory
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
from operator import attrgetter
import numpy as np
from sknano.core import BaseClass, UserList
from ._md_atoms import MDAtom as Atom, MDAtoms as Atoms
__all__ = ['Snapshot', 'Trajectory']
class AtomSelection:
""":class:`Trajectory` atom selection class.
Parameters
----------
traj : :class:`Trajectory`
"""
def __init__(self, traj):
self.traj = traj
def all(self, ts=None):
"""Select all atoms for all snapshots or snapshot at given timestep.
Parameters
----------
ts : {None, int}, optional
"""
if ts is None:
for snapshot in self.traj:
if not snapshot.selected:
continue
for i in range(snapshot.Natoms):
snapshot.atom_selection[i] = True
snapshot.nselected = snapshot.Natoms
else:
snapshot = self.traj.get_snapshot(ts)
for i in range(snapshot.Natoms):
snapshot.atom_selection[i] = True
snapshot.nselected = snapshot.Natoms
class TimeSelection:
""":class:`Trajectory` time selection class.
Parameters
----------
traj : :class:`Trajectory`
"""
def __init__(self, traj):
self.traj = traj
def all(self, ts=None):
"""Select all trajectory snapshots/timesteps."""
[setattr(snapshot, 'selected', True) for snapshot in self.traj]
self.traj.nselected = self.traj.Nsnaps
self.traj.atom_selection.all()
self.print_fraction_selected()
def one(self, ts):
"""Select only timestep `ts`."""
[setattr(snapshot, 'selected', False) for snapshot in self.traj]
try:
self.traj.get_snapshot(ts).selected = True
self.traj.nselected = 1
except AttributeError:
pass
self.traj.atom_selection.all()
self.print_fraction_selected()
def none(self):
"""Deselect all timesteps."""
[setattr(snapshot, 'selected', False) for snapshot in self.traj]
self.traj.nselected = 0
self.print_fraction_selected()
def skip(self, n):
"""Select every `n`\ th timestep from currently selected timesteps."""
count = n - 1
for snapshot in self.traj:
if not snapshot.selected:
continue
count += 1
if count == n:
count = 0
continue
snapshot.selected = False
self.traj.nselected -= 1
self.traj.atom_selection.all()
self.print_fraction_selected()
def print_fraction_selected(self):
print('{}/{} snapshots selected'.format(
self.traj.nselected, self.traj.Nsnaps))
class Snapshot(BaseClass):
"""Container class for :class:`Trajectory` data at single timestep"""
def __init__(self, trajectory=None):
super().__init__()
self.trajectory = trajectory
self.atomattrs = None
self.attr_dtypes = None
self.timestep = None
self._atoms = None
self.fmtstr = "trajectory={trajectory!r}"
@property
def atoms(self):
"""Snapshot atoms."""
atoms = Atoms()
for atom in self._atoms:
try:
reference_atom = \
self.trajectory.reference_atoms.get_atom(
int(atom[self.atomattrs.index('id')]))
except AttributeError:
reference_atom = None
try:
t0_atom = self.trajectory.t0_atoms.get_atom(
int(atom[self.atomattrs.index('id')]))
except AttributeError:
t0_atom = None
attrs = [dtype(value) for dtype, value in
zip(self.attr_dtypes, atom)]
atoms.append(Atom(reference_atom=reference_atom,
t0_atom=t0_atom,
**dict(list(zip(self.atomattrs, attrs)))))
return atoms
@atoms.setter
def atoms(self, value):
self._atoms = value
@property
def atom_selection(self):
""":class:`~numpy:numpy.ndarray` boolean array."""
return self._atom_selection
@atom_selection.setter
def atom_selection(self, value):
if not isinstance(value, (list, np.ndarray)):
raise ValueError('Expected an array_like object.')
self._atom_selection = np.asarray(value, dtype=bool)
@property
def aselect(self):
"""Alias for :attr:`Snapshot.atom_selection`."""
return self.atom_selection
@aselect.setter
def aselect(self, value):
self.atom_selection = value
@property
def selected(self):
"""True/False if this snapshot is selected."""
return self._selected
@selected.setter
def selected(self, value):
self._selected = bool(value)
@property
def tselect(self):
"""Alias for :attr:`Snapshot.selected`."""
return self.selected
@tselect.setter
def tselect(self, value):
self.selected = value
@property
def nselected(self):
"""Number of selected atoms in this snapshot."""
return self._nselected
@nselected.setter
def nselected(self, value):
self._nselected = int(value)
@property
def nselect(self):
"""Alias for :attr:`Snapshot.nselected`."""
return self.nselected
@nselect.setter
def nselect(self, value):
self.nselected = value
def get_atoms(self, asarray=False):
"""Get atoms.
Parameters
----------
asarray : :class:`~python:bool`
Returns
-------
:class:`~numpy:numpy.ndarray` or :class:`MDAtoms`
if `asarray` is `True`, the atoms are returned as an
:class:`~numpy:numpy.ndarray`, otherwise an :class:`MDAtoms`
instance is returned.
"""
if asarray:
return self._atoms
return self.atoms
def todict(self):
return dict(trajectory=self.trajectory)
class Trajectory(BaseClass, UserList):
"""Base class for trajectory analysis."""
def __init__(self, snapshots=None):
super().__init__(initlist=snapshots)
self.fmtstr = "snapshots={snapshots!r}"
self.time_selection = TimeSelection(self)
self.atom_selection = AtomSelection(self)
self.nselected = 0
self.reference_atoms = None
self._reference_snapshot = None
self.t0_atoms = None
self._t0_snapshot = None
@property
def Nsnaps(self):
"""Number of :class:`Snapshot`\ s in `Trajectory`."""
return len(self.data)
@property
def atom_selection(self):
"""`AtomSelection` class."""
return self._atom_selection
@atom_selection.setter
def atom_selection(self, value):
if not isinstance(value, AtomSelection):
raise ValueError('Expected an `AtomSelection` instance.')
self._atom_selection = value
@property
def time_selection(self):
return self._time_selection
@time_selection.setter
def time_selection(self, value):
if not isinstance(value, TimeSelection):
raise ValueError('Expected a `TimeSelection` instance.')
self._time_selection = value
@property
def aselect(self):
"""Alias for :attr:`Trajectory.atom_selection`."""
return self.atom_selection
@aselect.setter
def aselect(self, value):
self.atom_selection = value
@property
def tselect(self):
"""Alias for :attr:`Trajectory.time_selection`."""
return self.time_selection
@tselect.setter
def tselect(self, value):
self.time_selection = value
@property
def nselected(self):
"""Number of selected snapshots."""
return self._nselected
@nselected.setter
def nselected(self, value):
self._nselected = int(value)
@property
def nselect(self):
"""Alias for :attr:`Trajectory.nselected`."""
return self.nselected
@nselect.setter
def nselect(self, value):
self.nselected = value
@property
def snapshots(self):
"""Returns the list of :class:`Snapshot`\ s."""
return self.data
def sort(self, key=attrgetter('timestep'), reverse=False):
"""Sort the trajectory :class:`Snapshot`\ s."""
super().sort(key=key, reverse=reverse)
def cull(self):
"""Remove duplicate timesteps from `Trajectory`."""
i = 1
while i < len(self.data):
if self.data[i].timestep == self.data[i-1].timestep:
del self.data[i]
else:
i += 1
def get_snapshot(self, ts):
"""Return :class:`Snapshot` with timestep `ts`."""
for snapshot in self:
if snapshot.timestep == ts:
return snapshot
print("No snapshot at ts={:d} exists".format(ts))
def timestep_index(self, ts):
"""Return index of :class:`Snapshot` with timestep `ts`."""
for i, snapshot in enumerate(self):
if snapshot.timestep == ts:
return i
print("No timestep {:d} exists".format(ts))
@property
def reference_snapshot(self):
return self._reference_snapshot
@reference_snapshot.setter
def reference_snapshot(self, value):
if not isinstance(value, Snapshot):
raise TypeError('Expected a `Snapshot` instance.')
self._reference_snapshot = value
self.reference_atoms = self.reference_snapshot.atoms
self.reference_atoms.update_attrs()
@property
def t0_snapshot(self):
return self._t0_snapshot
@t0_snapshot.setter
def t0_snapshot(self, value):
if not isinstance(value, Snapshot):
raise TypeError('Expected a `Snapshot` instance.')
self._t0_snapshot = value
self.t0_atoms = self.t0_snapshot.atoms
self.t0_atoms.update_attrs()
@property
def timesteps(self):
    """Array of timesteps for the currently selected snapshots."""
    v = np.zeros(self.nselected, dtype=int)
    j = 0
    for snapshot in self.data:
        if snapshot.selected:
            v[j] = snapshot.timestep
            j += 1
    return v
def todict(self):
return dict(snapshots=self.data)
|
python
|
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import redirect, render, get_object_or_404
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from ranbo.forms import *
from django.core.exceptions import ObjectDoesNotExist
def index(request):
thoughts = Post.objects.order_by('-like_times')[:5]
context_dict = {'thoughts': thoughts}
return render(request, 'ranbo/index.html', context=context_dict)
def sort_thought(request):
thought = Post.objects.order_by('-post_id')[:5]
if request.method == "POST":
if 'like' in request.POST:
thought = Post.objects.order_by('like_times')[:5]
if 'view' in request.POST:
thought = Post.objects.order_by('view_times')[:5]
return render(request, 'ranbo/index.html', context={'thoughts': thought})
def user_login(request):
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
login(request, user)
return redirect(reverse('ranbo:index'))
else:
print(f"Invalid login details: {username}, {password}")
return HttpResponse("Invalid login details supplied.")
else:
return render(request, 'ranbo/login.html', context={'disable_login_card': True})
@login_required
def add_thought(request):
form = PostForm()
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
thought = form.save(commit=False)
thought.user = request.user
thought.save()
return redirect('/ranbo/')
else:
print(form.errors)
return render(request, 'ranbo/add_thought.html', {'form': form})
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(request.POST)
profile_form = UserProfileForm(request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
profile.save()
return render(request, 'ranbo/index.html')
else:
print(user_form.errors, profile_form.errors)
else:
user_form = UserForm()
profile_form = UserProfileForm()
return render(request, 'ranbo/register.html', context={
'user_form': user_form,
'profile_form': profile_form,
'disable_login_card': True,
})
def show_more(request):
if request.user.is_authenticated:
thought = Post.objects.order_by('like_times')
return render(request, 'ranbo/index.html', context={'thoughts': thought})
else:
return HttpResponse("You need to login")
# def add_comment(request, post_id):
# thought = get_object_or_404(Post, Post_id=post_id)
# if request.method == 'POST':
# form = commentForm(request.POST)
# if form.is_vaild():
# comment = form.save(commit=False)
# return redirect('ranbo:thought')
# else:
# return HttpResponse("fail to add comment")
def thought_detail(request, post_id):
context_dict = {}
thought = Post.objects.get(post_id=post_id)
context_dict['like'] = thought.like_times
context_dict['view'] = thought.view_times
context_dict['comment'] = thought.comment
return render(request, 'ranbo/thought_detail.html', context=context_dict)
def user_profile(request, user_id):
try:
user = User.objects.get(id=user_id)
except ObjectDoesNotExist:
return render(request, 'ranbo/user_profile.html')
thoughts = Post.objects.filter(user=user)
total_thoughts = 0
total_likes = 0
total_views = 0
for t in thoughts:
total_thoughts += 1
total_likes += t.like_times
total_views += t.view_times
context_dict = {
'username': user.username,
'total_thoughts': total_thoughts,
'total_likes': total_likes,
'total_views': total_views,
'thoughts': thoughts,
}
return render(request, 'ranbo/user_profile.html', context=context_dict)
@login_required
def like_thought(request):
if request.method == 'GET':
thought_id = request.GET['post_id']
likes = 0
if thought_id:
thought = Post.objects.get(post_id=int(thought_id))
if thought:
likes = thought.like_times + 1
thought.like_times = likes
thought.save()
return HttpResponse(likes)
def user_edit(request):
user = User.objects.get(id=request.user.id)
if request.method == 'POST':
user_form = UserForm(request.POST)
if user_form.is_valid():
user.username = user_form.cleaned_data['username']
user.password = user_form.cleaned_data['password']
user.save()
return HttpResponseRedirect('/ranbo/user_profile/')
@login_required
def user_logout(request):
logout(request)
return redirect(reverse('ranbo:index'))
|
python
|
import nltk, re, pprint
from urllib.request import urlopen
from bs4 import BeautifulSoup
url = "http://nrs-projects.humboldt.edu/~st10/s20cs328/328lect15-1/328lect15-1-projected_txt.html"
raw = urlopen(url).read().decode('utf8')
print(type(raw))
print(len(raw))
# natural language string (nltk.clean_html was removed from NLTK; use BeautifulSoup to strip the markup instead)
nls = BeautifulSoup(raw, 'html.parser').get_text()
tokens = nltk.word_tokenize(nls)
print(tokens)
|
python
|
import os
from setuptools import (
setup,
find_packages
)
curr_dir = os.path.dirname(os.path.abspath(__file__))
install_requirements = [
"pydantic>=1.8",
"cryptography>=3.4"
]
setup(
name="restless",
version="0.0.1-dev",
description="Just an easy-to-use cryptographic Python module",
long_description="nope",
license="WTFPL License",
url=None,
author="Paul Feuvraux",
author_email="[email protected]",
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Security :: Cryptography"
],
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires=">=3.8",
install_requires=install_requirements,
zip_safe=False,
)
|
python
|
from django.apps import apps
from rest_framework import serializers
from common.constants import models
Profile = apps.get_model(models.PROFILE_MODEL)
User = apps.get_model(models.USER_MODEL)
class ProfileSerializer(serializers.ModelSerializer):
"""
API serializer for :class:`Profile`.
"""
class Meta:
model = Profile
fields = (
'user', 'bio', 'birthday', 'language', 'alias', 'web', 'image',
)
class UserSerializer(serializers.ModelSerializer):
"""
API serializer for :class:`User`.
Parameter `auth_token` is taken as secure since nobody but admin and user itself can access this data.
"""
profile = ProfileSerializer()
class Meta:
model = User
fields = (
'id', 'last_login', 'username', 'first_name', 'last_name', 'is_active', 'date_joined', 'email',
'is_premium', 'profile',
)
class SimpleUserSerializer(serializers.ModelSerializer):
"""
Simplified API serializer for :class:`User`.
"""
class Meta:
model = User
fields = (
'id', 'username', 'first_name', 'last_name', 'email',
)
|
python
|
# -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : [email protected]
""" Caser
Reference:
"Personalized Top-N Sequential Recommendation via Convolutional Sequence Embedding"
Jiaxi Tang et al., WSDM'2018.
Reference code:
https://github.com/graytowne/caser_pytorch
Note:
We use a maximum of L (instead of history_max) horizontal filters to prevent excessive CNN layers.
Besides, to keep consistent with other sequential models, we do not use the sliding window to generate
training instances in the paper, and set the parameter T as 1.
CMD example:
python main.py --model_name Caser --emb_size 64 --L 5 --num_horizon 64 --num_vertical 32 --lr 1e-3 --l2 1e-4 \
--history_max 20 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
from torch import nn
import torch.nn.functional as F
from models.BaseModel import SequentialModel
class Caser(SequentialModel):
extra_log_args = ['emb_size', 'num_horizon', 'num_vertical', 'L']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--num_horizon', type=int, default=16,
help='Number of horizon convolution kernels.')
parser.add_argument('--num_vertical', type=int, default=8,
help='Number of vertical convolution kernels.')
parser.add_argument('--L', type=int, default=4,
help='Union window size.')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
self.emb_size = args.emb_size
self.max_his = args.history_max
self.num_horizon = args.num_horizon
self.num_vertical = args.num_vertical
self.l = args.L
assert self.l <= self.max_his # use L instead of max_his to avoid excessive conv_h
super().__init__(args, corpus)
def _define_params(self):
self.u_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size, padding_idx=0)
lengths = [i + 1 for i in range(self.l)]
self.conv_h = nn.ModuleList(
[nn.Conv2d(in_channels=1, out_channels=self.num_horizon, kernel_size=(i, self.emb_size)) for i in lengths])
self.conv_v = nn.Conv2d(in_channels=1, out_channels=self.num_vertical, kernel_size=(self.max_his, 1))
self.fc_dim_h = self.num_horizon * len(lengths)
self.fc_dim_v = self.num_vertical * self.emb_size
fc_dim_in = self.fc_dim_v + self.fc_dim_h
self.fc = nn.Linear(fc_dim_in, self.emb_size)
self.out = nn.Linear(self.emb_size * 2, self.emb_size)
def forward(self, feed_dict):
self.check_list = []
u_ids = feed_dict['user_id']
i_ids = feed_dict['item_id'] # [batch_size, -1]
history = feed_dict['history_items'] # [batch_size, history_max]
batch_size, seq_len = history.shape
pad_len = self.max_his - seq_len
history = F.pad(history, [0, pad_len])
his_vectors = self.i_embeddings(history).unsqueeze(1) # [batch_size, 1, history_max, emb_size]
# Convolution Layers
out, out_h, out_v = None, None, None
# vertical conv layer
if self.num_vertical > 0:
out_v = self.conv_v(his_vectors)
out_v = out_v.view(-1, self.fc_dim_v) # prepare for fully connect
# horizontal conv layer
out_hs = list()
if self.num_horizon > 0:
for conv in self.conv_h:
conv_out = conv(his_vectors).squeeze(3).relu()
pool_out = F.max_pool1d(conv_out, conv_out.size(2)).squeeze(2)
out_hs.append(pool_out)
out_h = torch.cat(out_hs, 1) # prepare for fully connect
# Fully-connected Layers
user_vector = self.u_embeddings(u_ids)
z = self.fc(torch.cat([out_v, out_h], 1)).relu()
his_vector = self.out(torch.cat([z, user_vector], 1))
i_vectors = self.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
return {'prediction': prediction.view(batch_size, -1)}
|
python
|
from pseudo_python.builtin_typed_api import builtin_type_check
from pseudo_python.errors import PseudoPythonTypeCheckError
class Standard:
'''
Standard classes should respond to expand and to return
valid nodes on expand
'''
pass
class StandardCall(Standard):
'''
converts to a standard call of the given namespace and function
'''
def __init__(self, namespace, function, expander=None):
self.namespace = namespace
self.function = function
self.expander = expander
def expand(self, args):
if not self.expander:
q = builtin_type_check(self.namespace, self.function, None, args)[-1]
return {'type': 'standard_call', 'namespace': self.namespace, 'function': self.function, 'args': args, 'pseudo_type': q}
else:
return self.expander(self.namespace, self.function, args)
class StandardMethodCall(Standard):
'''
converts to a method call of the same class
'''
def __init__(self, type, message, default=None, expander=None):
self.type = type
self.message = message
self.default = default
self.expander = expander
def expand(self, args):
if self.default and len(args) - 1 in self.default:
args += self.default[len(args) - 1]
if not self.expander:
q = builtin_type_check(self.type, self.message, args[0], args[1:])[-1]
return {'type': 'standard_method_call', 'receiver': args[0], 'message': self.message, 'args': args[1:], 'pseudo_type': q}
else:
return self.expander(self.type, self.message, args)
class StandardRegex(Standard):
'''
converts re.compile(r'literal') to {type: regex} node
re.compile(variable) to re.compile(variable) standard call
'''
def expand(self, args):
if args[0]['type'] == 'String':
return {'type': 'regex', 'value': args[0]['value'], 'pseudo_type': 'Regexp'}
else:
return {'type': 'standard_call', 'namespace': 'regexp', 'function': 'compile', 'args': [args[0]], 'pseudo_type': 'Regexp'}
class StandardSwapper(Standard):
def __init__(self, type, message):
self.type = type
self.message = message
def expand(self, args):
if len(args) < 2:
raise PseudoPythonTypeCheckError('%s expects more args' % self.message)
q = builtin_type_check(self.type, self.message, args[1], [args[0]])[-1]
return {'type': 'standard_method_call', 'receiver': args[1], 'args': [args[0]], 'message': self.message, 'pseudo_type': q}
def to_int_expander(type, message, args):
return len_expander(type, message, args)
def len_expander(type, message, args):
receiver_type = args[0]['pseudo_type']
if isinstance(receiver_type, list):
a = receiver_type[0]
else:
a = receiver_type
# print(a, message, args[0], args[1:])
# input(0)
if message == 'length' and 'special' in args[0]: # len(sys.argv)
return {'type': 'standard_call', 'namespace': 'system', 'function': 'arg_count', 'args': [], 'pseudo_type': 'Int'}
else:
q = builtin_type_check(a, message, args[0], args[1:])
return {'type': 'standard_method_call', 'receiver': args[0], 'message': message, 'args': [], 'pseudo_type': q[-1]}
FUNCTION_API = {
'global': {
'input': StandardCall('io', 'read'),
'print': StandardCall('io', 'display'),
'str': StandardCall('global', 'to_string'),
'len': StandardMethodCall('List', 'length', expander=len_expander),
'int': StandardMethodCall('String', 'to_int', expander=to_int_expander)
},
'math': {
'log': {
1: StandardCall('math', 'ln'),
2: StandardCall('math', 'log')
},
'sin': StandardCall('math', 'sin'),
'cos': StandardCall('math', 'cos'),
'tan': StandardCall('math', 'tan'),
'pow': lambda left, right, pseudo_type: Node('binary_op',
op='**', left=left, right=right, pseudo_type=pseudo_type)
},
're': {
'match': StandardMethodCall('Regexp', 'match'),
'sub': StandardMethodCall('Regexp', 'replace'),
'compile': StandardRegex(),
'escape': StandardCall('regexp', 'escape')
}
}
METHOD_API = {
'String': {
'split': StandardMethodCall('String', 'split'),
'join': StandardSwapper('List', 'join'),
'upper': StandardMethodCall('String', 'upper'),
'lower': StandardMethodCall('String', 'lower'),
'title': StandardMethodCall('String', 'title'),
'center': StandardMethodCall('String', 'center', default={1: [{'type': 'string', 'value': ' ', 'pseudo_type': 'String'}]}),
'index': {
1: StandardMethodCall('String', 'find'),
2: StandardMethodCall('String', 'find_from')
}
},
'List': {
'append': StandardMethodCall('List', 'push'),
'pop': StandardMethodCall('List', 'pop'),
'insert': {
1: StandardMethodCall('List', 'insert'),
2: StandardMethodCall('List', 'insert_at')
},
'remove': StandardMethodCall('List', 'remove'),
'extend': StandardMethodCall('List', 'push_many'),
'map': StandardMethodCall('List', 'map'),
'filter': StandardMethodCall('List', 'filter')
},
'Dictionary': {
'keys': StandardMethodCall('Dictionary', 'keys'),
'values': StandardMethodCall('Dictionary', 'values'),
'[]': StandardMethodCall('Dictionary', 'getitem'),
'[]=': StandardMethodCall('Dictionary', 'setitem')
},
'Array': {
},
'Tuple': {
},
'Set': {
'|': StandardMethodCall('Set', 'union')
},
'Regexp': {
'match': StandardMethodCall('Regexp', 'match')
},
'RegexpMatch': {
'group': StandardMethodCall('RegexpMatch', 'group')
}
}
OPERATOR_API = {
'List': {
'+': 'concat',
'*': 'repeat'
},
'Set': {
'|': 'union',
'&': 'intersection',
'^': 'symmetric_diff',
'-': 'diff'
},
'String': {
'+': 'concat',
'*': 'repeat',
'%': 'c_format'
}
}
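# Hedged illustration (comments only): a Python call such as `len(xs)` is
# looked up in FUNCTION_API['global']['len'], whose len_expander rewrites it
# into a standard_method_call node on the receiver, roughly:
#   {'type': 'standard_method_call', 'receiver': <xs node>,
#    'message': 'length', 'args': [], 'pseudo_type': 'Int'}
# (the pseudo_type is whatever builtin_type_check returns for the receiver).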
|
python
|
import tensorflow as tf
def MaxAvgPooling2D(m,n, model):
max = tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=1, padding='SAME')(model)
avg = tf.keras.layers.AveragePooling2D(pool_size=(2,2), strides=1, padding='SAME')(model)
model = (max*m + avg*n)/(m+n)
return model
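# Hedged usage sketch: dropping the mixed pooling into a small functional
# model; the input shape and layer sizes below are illustrative only.
if __name__ == "__main__":
    inputs = tf.keras.Input(shape=(32, 32, 3))
    x = tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu')(inputs)
    x = MaxAvgPooling2D(1, 1, x)
    model = tf.keras.Model(inputs, x)
    model.summary()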
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Preprocessing ZTF database to be saved as a samplesx21x21x3 numpy array in a pickle
TODO: clean_NaN once cropped
TODO: unit tests
ToDo: instead of cascade implement as pipeline, in order to have single call and definition
ToDo: smart way to shut down nans
@author: asceta
"""
import os
import sys
PROJECT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(PROJECT_PATH)
from parameters import param_keys
# from modules.data_set_alerce import DatasetAlerce as Dataset
from modules.data_set_generic import Dataset
import numpy as np
# Todo: refactor verbose
# compose as a pipeline to choose preprocessing steps
class ZTFDataPreprocessor(object):
"""
Constructor
"""
def __init__(self, params, verbose=True):
self.params = params
self.channels_to_select = params[param_keys.CHANNELS_TO_USE]
self.number_to_replace_nans = params[param_keys.NANS_TO]
self.crop_size = params[param_keys.CROP_SIZE]
self.preprocessing_pipeline = [self.identity]
self.verbose = verbose
"""
define your preprocessing strategy here
"""
def preprocess_dataset(self, dataset: Dataset):
print('%s' % self._get_string_label_count(dataset.data_label), flush=True)
for preprocessing_function in self.preprocessing_pipeline:
dataset = preprocessing_function(dataset)
self.verbose = False
return dataset
def append_to_pipeline(self, method):
self.preprocessing_pipeline.append(method)
return self
def set_pipeline(self, pipeline):
self.preprocessing_pipeline = pipeline
def identity(self, dataset: Dataset):
return dataset
def check_single_image(self, dataset: Dataset):
if len(dataset.data_array.shape) == 3:
dataset.data_array = dataset.data_array[np.newaxis, ...]
return dataset
# TODO: erase single image check; adding dummy at beginning
def select_channels(self, dataset: Dataset):
if len(dataset.data_array.shape) == 3:
dataset.data_array = dataset.data_array[np.newaxis, ...]
selected_images_channels = dataset.data_array[
..., self.channels_to_select]
if len(selected_images_channels.shape) == 3:
selected_images_channels = selected_images_channels[..., np.newaxis]
dataset.data_array = selected_images_channels
return dataset
# TODO: normalize template to avoid replication with by_image
def normalize_by_sample(self, dataset: Dataset):
images = dataset.data_array
images -= np.nanmin(images, axis=(1, 2, 3))[
..., np.newaxis, np.newaxis, np.newaxis]
images = images / np.nanmax(images, axis=(1, 2, 3))[
..., np.newaxis, np.newaxis, np.newaxis]
dataset.data_array = images
return dataset
def normalize_by_image(self, dataset: Dataset):
images = dataset.data_array
images -= np.nanmin(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
images = images / np.nanmax(images, axis=(1, 2))[
:, np.newaxis, np.newaxis, :]
dataset.data_array = images
return dataset
def nan_to_num(self, dataset: Dataset):
samples = dataset.data_array
nans_sample_idx = self._get_nans_samples_idx(samples)
if self.verbose:
print('%i samples with NaNs. NaNs replaced with number %s' % (
len(nans_sample_idx), str(self.number_to_replace_nans)))
samples[np.isnan(samples)] = self.number_to_replace_nans
dataset.data_array = samples
return dataset
def _check_all_removed(self, remove_name, samples_list, idxs_to_remove):
if len(samples_list) == len(idxs_to_remove):
raise OverflowError(
'All samples have %s, thus batch is empty and cannot be processed' %
remove_name)
def _check_misshape_all_removed(self, samples_list, idxs_to_remove):
self._check_all_removed('MISSHAPE', samples_list, idxs_to_remove)
def _check_nan_all_removed(self, samples_list, idxs_to_remove):
self._check_all_removed('NAN', samples_list, idxs_to_remove)
def _get_misshaped_samples_idx(self, samples):
miss_shaped_sample_idx = []
for i in range(len(samples)):
sample = samples[i]
if sample.shape[2] != 3 or sample.shape[1] != 63 or sample.shape[0] != 63:
# print("sample %i of shape %s" % (i, str(sample.shape)))
miss_shaped_sample_idx.append(i)
self._check_misshape_all_removed(samples, miss_shaped_sample_idx)
return miss_shaped_sample_idx
def clean_misshaped(self, dataset: Dataset):
samples_clone = list(dataset.data_array[:])
labels_clone = list(dataset.data_label[:])
metadata_clone = list(dataset.meta_data[:])
miss_shaped_sample_idx = self._get_misshaped_samples_idx(samples_clone)
for index in sorted(miss_shaped_sample_idx, reverse=True):
samples_clone.pop(index)
labels_clone.pop(index)
metadata_clone.pop(index)
if self.verbose:
print('%i misshaped samples removed\n%s' % (
len(miss_shaped_sample_idx),
self._get_string_label_count(labels_clone)),
flush=True)
dataset = Dataset(data_array=samples_clone, data_label=labels_clone,
meta_data=metadata_clone,
batch_size=dataset.batch_size)
return dataset
def _get_nans_samples_idx(self, samples):
nans_sample_idx = []
for i in range(len(samples)):
sample = samples[i]
if (np.isnan(sample).any()):
# print("sample %i of shape %s" %(i,str(sample.shape)))
nans_sample_idx.append(i)
return nans_sample_idx
# TODO: refactor; fuse with clean misshaped
def clean_nans(self, dataset: Dataset):
samples_clone = list(dataset.data_array[:])
labels_clone = list(dataset.data_label[:])
metadata_clone = list(dataset.meta_data[:])
nans_sample_idx = self._get_nans_samples_idx(samples_clone)
self._check_nan_all_removed(samples_clone, nans_sample_idx)
for index in sorted(nans_sample_idx, reverse=True):
samples_clone.pop(index)
labels_clone.pop(index)
metadata_clone.pop(index)
if self.verbose:
print('%i samples with NaNs removed\n%s' % (
len(nans_sample_idx), self._get_string_label_count(labels_clone)),
flush=True)
dataset = Dataset(data_array=samples_clone, data_label=labels_clone,
batch_size=dataset.batch_size,
meta_data=metadata_clone)
return dataset
def crop_at_center(self, dataset: Dataset):
if self.crop_size is None:
return dataset
samples = dataset.data_array
assert (samples.shape[1] % 2 == self.crop_size % 2)
center = int((samples.shape[1]) / 2)
crop_side = int(self.crop_size / 2)
crop_begin = center - crop_side
if samples.shape[1] % 2 == 0:
crop_end = center + crop_side
elif samples.shape[1] % 2 == 1:
crop_end = center + crop_side + 1
# print(center)
# print(crop_begin, crop_end)
cropped_samples = samples[:, crop_begin:crop_end, crop_begin:crop_end, :]
dataset.data_array = cropped_samples
return dataset
def _get_string_label_count(self, labels,
class_names=np.array(['AGN', 'SN', 'VS', 'asteroid', 'bogus'])):
label_values, label_counts = np.unique(labels, return_counts=True)
if len(label_values) != class_names.shape[0]:
return ""
count_dict = dict(zip(label_values, label_counts))
return_str = 'Label count '
for single_label_value in count_dict.keys():
return_str += '%s: %i -' % (class_names[single_label_value],
count_dict[single_label_value])
return return_str
def labels_to_real_bogus(self, dataset: Dataset):
bogus_label_value = self.params[param_keys.BOGUS_LABEL_VALUE]
if bogus_label_value is None:
label_values = np.unique(dataset.data_label)
bogus_label_value = label_values[-1]
bogus_indexes = np.where(dataset.data_label == bogus_label_value)[0]
real_indexes = np.where(dataset.data_label != bogus_label_value)[0]
dataset.data_label[bogus_indexes] = 0
dataset.data_label[real_indexes] = 1
if self.verbose:
print('Labels changed to Real - Bogus\n%s' % (
self._get_string_label_count(dataset.data_label,
np.array(['bogus', 'real']))),
flush=True)
return dataset
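# Hedged usage sketch (comments only): the fluent append_to_pipeline API lets
# callers compose a preprocessing strategy, e.g.
#   preprocessor = ZTFDataPreprocessor(params)
#   preprocessor.append_to_pipeline(preprocessor.check_single_image) \
#               .append_to_pipeline(preprocessor.clean_misshaped) \
#               .append_to_pipeline(preprocessor.clean_nans) \
#               .append_to_pipeline(preprocessor.select_channels) \
#               .append_to_pipeline(preprocessor.crop_at_center) \
#               .append_to_pipeline(preprocessor.normalize_by_image)
#   dataset = preprocessor.preprocess_dataset(dataset)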
|
python
|
import base64
import random
from django.conf import settings
from django.core.cache import caches
from django.core.mail import send_mail
from apps.user.models import User
from libs.yuntongxun.sms import CCP
from meiduoshop.celery import app
@app.task
def async_send_sms(to, datas, template_id):
"""异步发送短信验证码 注意结果不是直接返回,而是存到celery的backend中"""
result = CCP().send_template_sms(to, datas, template_id)
return result
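# Hedged usage sketch (comments only): the task is queued with .delay() and the
# CCP result is read back later from the configured Celery result backend
# (phone number, code and template id below are made up).
#   async_result = async_send_sms.delay('13800000000', ['123456', 5], 1)
#   sms_status = async_result.get(timeout=10)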
|
python
|
import os
from typing import Dict, List, Optional, Union, Callable
from tinydb.storages import MemoryStorage, JSONStorage
from ..custom_typings import DataDict, PathType
class BaseDB(JSONStorage, MemoryStorage):
"""Base storage class that reads which extensions are available to feed the
path handling functions
To create a new storage, you will need to inherit this class, create a
`extensions` variable containing a list of extensions the storage will
support for example:
.. code:: python
extensions = ["yml", "yaml"]
then implement a `read` and `write` method using the methods
:meth:`~panda_core_data.storages.base_db.BaseDB.base_read` and
:meth:`~panda_core_data.storages.base_db.BaseDB.base_write` all you need to
do is follow the instructions contained in them"""
extensions = False
def __init_subclass__(cls):
"""Automatically generate an extension list containing the available
raw extensions available together with their storage"""
available_storages.append({
"name": cls.__name__,
"extensions": cls.extensions,
"storage": cls,
})
def __init__(self, path: PathType, **kwargs):
"""Create a new instance
:param str path: Path to file"""
if(not globals().get("auto_convert_to_pathlib", False) or
locals().get("auto_convert_to_pathlib", False)):
from . import auto_convert_to_pathlib
current_path = auto_convert_to_pathlib(path)
self.path = current_path
MemoryStorage.__init__(self)
JSONStorage.__init__(self, current_path, **kwargs)
def base_read(self, load_method: Callable, use_handle: bool) -> DataDict:
"""Base method used by children classes to read the file and transforms
the string into a list of dictionaries, a good example of this method
is the built in python :func:`json.load` however, since it needs a
string as an input (or handler) you would need to set the parameter
use_handler so the string, which is the contents of the raw file, will
be passed to that method. For example the read method of our yaml
parser:
.. code:: python
def read(self):
return self.base_read(yaml.safe_load, True)
And since the function :func:`yaml.safe_load` needs a string as an
input, we set use_handle to True.
An example of list of dictionaries would be like this:
.. code:: python
{"data": [
{
'field_name': 'value',
'another_field': 10,
},
{
'field_name': 'value',
'another_field': 10,
},
]}
The dict keys are fields of a :mod:`~dataclasses.dataclass` and the
value, well, values
:param load_method: method used to transform the raw file into a list
of dictionaries
:param use_handle: TinyDB offers a handle (More specifically, the
handle of the class
:class:`~tinydb.storages.JSONStorage`) to load the
file and turn into a string automatically if you'd
like to use it, just set this parameter to True
:return: The generated data"""
if not self.memory:
if use_handle:
data = load_method(self._handle.read())
else:
data = load_method(self)
desired_data = {}
for table, table_items in data.items():
desired_data[table] = {}
for item_index, current_item in enumerate(table_items):
desired_data[table][item_index] = current_item
self.memory = desired_data
return self.memory
def base_write(self, write_method: Callable, data: DataDict,
use_handle: bool):
"""Transforms the data dictionary to a raw representation
:param write_method: method used to transform the raw file into a list
of dictionaries
:param data: data dictionary"""
if use_handle:
self._handle.seek(0)
serialized = write_method(data, **self.kwargs)
self._handle.write(serialized)
self._handle.flush()
os.fsync(self._handle.fileno())
self._handle.truncate()
else:
write_method(self, data, **self.kwargs)
#pylint: disable=invalid-name
available_storages: Optional[List[Dict[str, Union[str, BaseDB]]]] = []
"List of available storages"
|
python
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from elastic_transport import QueryParams
from .._utils import ( # noqa: F401
DEFAULT,
SKIP_IN_PATH,
to_array,
to_deep_object,
to_path,
)
from ._base import BaseClient
class AppSearch(BaseClient):
def create_api_key(
self,
body,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Create an API key
`<https://www.elastic.co/guide/en/app-search/master/credentials.html#credentials-create>`_
:arg body: API key details
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
params = QueryParams(params)
return self.perform_request(
"POST",
"/api/as/v1/credentials",
body=body,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def delete_api_key(
self,
api_key_name,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Delete an API key
`<https://www.elastic.co/guide/en/app-search/master/credentials.html#credentials-destroy>`_
:arg api_key_name: Name of an API key
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if api_key_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"DELETE",
to_path(
"api",
"as",
"v1",
"credentials",
api_key_name,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_api_key(
self,
api_key_name,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Get the details of an API key
`<https://www.elastic.co/guide/en/app-search/master/credentials.html#credentials-single>`_
:arg api_key_name: Name of an API key
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if api_key_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"credentials",
api_key_name,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def put_api_key(
self,
api_key_name,
body,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Update an API key
`<https://www.elastic.co/guide/en/app-search/master/credentials.html#credentials-update>`_
:arg api_key_name: Name of an API key
:arg body: API key details
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if api_key_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"PUT",
to_path(
"api",
"as",
"v1",
"credentials",
api_key_name,
),
body=body,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def list_api_keys(
self,
current_page=None,
page_size=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
List the details of all API keys
`<https://www.elastic.co/guide/en/app-search/master/credentials.html#credentials-all>`_
:arg current_page: The page to fetch. Defaults to 1
:arg page_size: The number of results per page
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
params = QueryParams(params)
if current_page is not None:
params.add("page[current]", current_page)
if page_size is not None:
params.add("page[size]", page_size)
return self.perform_request(
"GET",
"/api/as/v1/credentials",
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_api_logs(
self,
engine_name,
from_date,
to_date,
current_page=None,
page_size=None,
query=None,
http_status_filter=None,
http_method_filter=None,
sort_direction=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
The API Log displays API request and response data at the Engine level
`<https://www.elastic.co/guide/en/app-search/master/api-logs.html>`_
:arg engine_name: Name of the engine
:arg from_date: Filter date from
:arg to_date: Filter date to
:arg current_page: The page to fetch. Defaults to 1
:arg page_size: The number of results per page
:arg query: Use this to specify a particular endpoint, like analytics,
search, curations and so on
:arg http_status_filter: Filter based on a particular status code: 400,
401, 403, 429, 200
:arg http_method_filter: Filter based on a particular HTTP method: GET,
POST, PUT, PATCH, DELETE
        :arg sort_direction: Sort direction for results: ascending (oldest to
            newest) or descending (newest to oldest)
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
from_date,
to_date,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if from_date is not None:
params.add("filters[date][from]", from_date)
if to_date is not None:
params.add("filters[date][to]", to_date)
if current_page is not None:
params.add("page[current]", current_page)
if page_size is not None:
params.add("page[size]", page_size)
if query is not None:
params.add("query", query)
if http_status_filter is not None:
params.add("filters[status]", http_status_filter)
if http_method_filter is not None:
params.add("filters[method]", http_method_filter)
if sort_direction is not None:
params.add("sort_direction", sort_direction)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"logs",
"api",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_count_analytics(
self,
engine_name,
filters=None,
interval=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Returns the number of clicks and total number of queries over a period
`<https://www.elastic.co/guide/en/app-search/master/counts.html>`_
:arg engine_name: Name of the engine
:arg filters: Analytics filters
:arg interval: You can define an interval along with your date range.
Can be either hour or day
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if filters is not None:
for k, v in to_deep_object("filters", filters):
params.add(k, v)
if interval is not None:
params.add("interval", interval)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"analytics",
"counts",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def create_curation(
self,
engine_name,
queries,
promoted_doc_ids=None,
hidden_doc_ids=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Create a new curation
`<https://www.elastic.co/guide/en/app-search/master/curations.html#curations-create>`_
:arg engine_name: Name of the engine
:arg queries: List of affected search queries
:arg promoted_doc_ids: List of promoted document IDs
:arg hidden_doc_ids: List of hidden document IDs
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
queries,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if queries is not None:
for v in to_array(queries, param="queries"):
params.add("queries[]", v)
if promoted_doc_ids is not None:
for v in to_array(promoted_doc_ids, param="promoted_doc_ids"):
params.add("promoted[]", v)
if hidden_doc_ids is not None:
for v in to_array(hidden_doc_ids, param="hidden_doc_ids"):
params.add("hidden[]", v)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"curations",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def delete_curation(
self,
engine_name,
curation_id,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Delete a curation by ID
`<https://www.elastic.co/guide/en/app-search/master/curations.html#curations-destroy>`_
:arg engine_name: Name of the engine
:arg curation_id: Curation ID
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
curation_id,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"DELETE",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"curations",
curation_id,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_curation(
self,
engine_name,
curation_id,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieve a curation by ID
`<https://www.elastic.co/guide/en/app-search/master/curations.html#curations-read>`_
:arg engine_name: Name of the engine
:arg curation_id: Curation ID
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
curation_id,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"curations",
curation_id,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def put_curation(
self,
engine_name,
curation_id,
queries,
promoted_doc_ids=None,
hidden_doc_ids=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Update an existing curation
`<https://www.elastic.co/guide/en/app-search/master/curations.html#curations-update>`_
:arg engine_name: Name of the engine
:arg curation_id: Curation ID
:arg queries: List of affected search queries
:arg promoted_doc_ids: List of promoted document IDs
:arg hidden_doc_ids: List of hidden document IDs
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
curation_id,
queries,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if queries is not None:
for v in to_array(queries, param="queries"):
params.add("queries[]", v)
if promoted_doc_ids is not None:
for v in to_array(promoted_doc_ids, param="promoted_doc_ids"):
params.add("promoted[]", v)
if hidden_doc_ids is not None:
for v in to_array(hidden_doc_ids, param="hidden_doc_ids"):
params.add("hidden[]", v)
return self.perform_request(
"PUT",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"curations",
curation_id,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def list_curations(
self,
engine_name,
current_page=None,
page_size=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieve available curations for the engine
`<https://www.elastic.co/guide/en/app-search/master/curations.html#curations-read>`_
:arg engine_name: Name of the engine
:arg current_page: The page to fetch. Defaults to 1
:arg page_size: The number of results per page
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if current_page is not None:
params.add("page[current]", current_page)
if page_size is not None:
params.add("page[size]", page_size)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"curations",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def delete_documents(
self,
engine_name,
document_ids,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Delete documents by ID
`<https://www.elastic.co/guide/en/app-search/master/documents.html#documents-delete>`_
:arg engine_name: Name of the engine
:arg document_ids: List of document IDs
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"DELETE",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"documents",
),
body=document_ids,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_documents(
self,
engine_name,
document_ids,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieves one or more documents by ID
`<https://www.elastic.co/guide/en/app-search/master/documents.html#documents-get>`_
:arg engine_name: Name of the engine
:arg document_ids: List of document IDs
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"documents",
),
body=document_ids,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def index_documents(
self,
engine_name,
documents,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Create or update documents
`<https://www.elastic.co/guide/en/app-search/master/documents.html#documents-create>`_
:arg engine_name: Name of the engine
        :arg documents: List of documents to index
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"documents",
),
body=documents,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def list_documents(
self,
engine_name,
current_page=None,
page_size=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
List all available documents with optional pagination support
`<https://www.elastic.co/guide/en/app-search/master/documents.html#documents-list>`_
:arg engine_name: Name of the engine
:arg current_page: The page to fetch. Defaults to 1
:arg page_size: The number of results per page
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if current_page is not None:
params.add("page[current]", current_page)
if page_size is not None:
params.add("page[size]", page_size)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"documents",
"list",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def put_documents(
self,
engine_name,
documents,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Partial update of documents
`<https://www.elastic.co/guide/en/app-search/master/documents.html#documents-partial>`_
:arg engine_name: Name of the engine
:arg documents: List of documents to update
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"PATCH",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"documents",
),
body=documents,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def create_engine(
self,
engine_name,
language=None,
type=None,
source_engines=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Creates a new engine
`<https://www.elastic.co/guide/en/app-search/master/engines.html#engines-create>`_
:arg engine_name: Engine name
:arg language: Engine language (null for universal)
:arg type: Engine type
        :arg source_engines: List of source engines
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if engine_name is not None:
params.add("name", engine_name)
if language is not None:
params.add("language", language)
if type is not None:
params.add("type", type)
if source_engines is not None:
for v in to_array(source_engines, param="source_engines"):
params.add("source_engines[]", v)
return self.perform_request(
"POST",
"/api/as/v1/engines",
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def delete_engine(
self,
engine_name,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Delete an engine by name
`<https://www.elastic.co/guide/en/app-search/master/engines.html#engines-delete>`_
:arg engine_name: Name of the engine
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"DELETE",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_engine(
self,
engine_name,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieves an engine by name
`<https://www.elastic.co/guide/en/app-search/master/engines.html#engines-get>`_
:arg engine_name: Name of the engine
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def list_engines(
self,
current_page=None,
page_size=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieves all engines with optional pagination support
`<https://www.elastic.co/guide/en/app-search/master/engines.html#engines-list>`_
:arg current_page: The page to fetch. Defaults to 1
:arg page_size: The number of results per page
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
params = QueryParams(params)
if current_page is not None:
params.add("page[current]", current_page)
if page_size is not None:
params.add("page[size]", page_size)
return self.perform_request(
"GET",
"/api/as/v1/engines",
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def log_clickthrough(
self,
engine_name,
query_text,
document_id,
request_id=None,
tags=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Send data about clicked results
`<https://www.elastic.co/guide/en/app-search/master/clickthrough.html>`_
:arg engine_name: Name of the engine
:arg query_text: Search query text
:arg document_id: The ID of the document that was clicked on
:arg request_id: The request ID returned in the meta tag of a search API
response
:arg tags: Array of strings representing additional information you wish
to track with the clickthrough
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
query_text,
document_id,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if query_text is not None:
params.add("query", query_text)
if document_id is not None:
params.add("document_id", document_id)
if request_id is not None:
params.add("request_id", request_id)
if tags is not None:
for v in to_array(tags, param="tags"):
params.add("tags[]", v)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"click",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def add_meta_engine_source(
self,
engine_name,
source_engines,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Add a source engine to an existing meta engine
`<https://www.elastic.co/guide/en/app-search/master/meta-engines.html#meta-engines-add-source-engines>`_
:arg engine_name: Name of the engine
:arg source_engines: List of engine names
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"source_engines",
),
body=source_engines,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def delete_meta_engine_source(
self,
engine_name,
source_engines,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Delete a source engine from a meta engine
`<https://www.elastic.co/guide/en/app-search/master/meta-engines.html#meta-engines-remove-source-engines>`_
:arg engine_name: Name of the engine
:arg source_engines: List of engine names
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"DELETE",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"source_engines",
),
body=source_engines,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def multi_search(
self,
engine_name,
body,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
        Run several searches in the same request
`<https://www.elastic.co/guide/en/app-search/master/multi-search.html>`_
:arg engine_name: Name of the engine
:arg body: One or more queries to execute in parallel
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"multi_search",
),
body=body,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def query_suggestion(
self,
engine_name,
query,
fields=None,
size=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Provide relevant query suggestions for incomplete queries
`<https://www.elastic.co/guide/en/app-search/master/query-suggestion.html>`_
:arg engine_name: Name of the engine
:arg query: A partial query for which to receive suggestions
:arg fields: List of fields to use to generate suggestions. Defaults to
all text fields
:arg size: Number of query suggestions to return. Must be between 1 and
20. Defaults to 5
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
query,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if query is not None:
params.add("query", query)
if fields is not None:
for v in to_array(fields, param="fields"):
params.add("types[documents][fields][]", v)
if size is not None:
params.add("size", size)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"query_suggestion",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_schema(
self,
engine_name,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieve current schema for the engine
`<https://www.elastic.co/guide/en/app-search/master/schema.html#schema-read>`_
:arg engine_name: Name of the engine
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"schema",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def put_schema(
self,
engine_name,
schema,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Update schema for the current engine
`<https://www.elastic.co/guide/en/app-search/master/schema.html#schema-patch>`_
:arg engine_name: Name of the engine
:arg schema: Schema description
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"schema",
),
body=schema,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def search(
self,
engine_name,
body,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Allows you to search over, facet and filter your data
`<https://www.elastic.co/guide/en/app-search/master/search.html>`_
:arg engine_name: Name of the engine
:arg body: Search options including query text, pages, sorting, facets, and filters
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"search",
),
body=body,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_search_settings(
self,
engine_name,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieve current search settings for the engine
`<https://www.elastic.co/guide/en/app-search/master/search-settings.html#search-settings-show>`_
:arg engine_name: Name of the engine
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"search_settings",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def put_search_settings(
self,
engine_name,
body,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Update search settings for the engine
`<https://www.elastic.co/guide/en/app-search/master/search-settings.html#search-settings-update>`_
:arg engine_name: Name of the engine
:arg body: Search settings
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"PUT",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"search_settings",
),
body=body,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def reset_search_settings(
self,
engine_name,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Reset search settings for the engine
`<https://www.elastic.co/guide/en/app-search/master/search-settings.html#search-settings-reset>`_
:arg engine_name: Name of the engine
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"search_settings",
"reset",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def create_synonym_set(
self,
engine_name,
body,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Create a new synonym set
`<https://www.elastic.co/guide/en/app-search/master/synonyms.html#synonyms-create>`_
:arg engine_name: Name of the engine
:arg body: Synonym set description
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"POST",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"synonyms",
),
body=body,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def delete_synonym_set(
self,
engine_name,
synonym_set_id,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Delete a synonym set by ID
`<https://www.elastic.co/guide/en/app-search/master/synonyms.html#synonyms-delete>`_
:arg engine_name: Name of the engine
:arg synonym_set_id: Synonym set ID
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
synonym_set_id,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"DELETE",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"synonyms",
synonym_set_id,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_synonym_set(
self,
engine_name,
synonym_set_id,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieve a synonym set by ID
`<https://www.elastic.co/guide/en/app-search/master/synonyms.html#synonyms-list-one>`_
:arg engine_name: Name of the engine
:arg synonym_set_id: Synonym set ID
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
synonym_set_id,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"synonyms",
synonym_set_id,
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def put_synonym_set(
self,
engine_name,
synonym_set_id,
body,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Update a synonym set by ID
`<https://www.elastic.co/guide/en/app-search/master/synonyms.html#synonyms-update>`_
:arg engine_name: Name of the engine
:arg synonym_set_id: Synonym set ID
:arg body: Synonym set description
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
for param in (
engine_name,
synonym_set_id,
):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
return self.perform_request(
"PUT",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"synonyms",
synonym_set_id,
),
body=body,
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def list_synonym_sets(
self,
engine_name,
current_page=None,
page_size=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Retrieve available synonym sets for the engine
`<https://www.elastic.co/guide/en/app-search/master/synonyms.html#synonyms-get>`_
:arg engine_name: Name of the engine
:arg current_page: The page to fetch. Defaults to 1
:arg page_size: The number of results per page
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if current_page is not None:
params.add("page[current]", current_page)
if page_size is not None:
params.add("page[size]", page_size)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"synonyms",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_top_clicks_analytics(
self,
engine_name,
query=None,
current_page=None,
page_size=None,
filters=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Returns the number of clicks received by a document in descending order
`<https://www.elastic.co/guide/en/app-search/master/clicks.html>`_
:arg engine_name: Name of the engine
:arg query: Filter clicks over a search query
:arg current_page: The page to fetch. Defaults to 1
:arg page_size: The number of results per page
:arg filters: Analytics filters
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if query is not None:
params.add("query", query)
if current_page is not None:
params.add("page[current]", current_page)
if page_size is not None:
params.add("page[size]", page_size)
if filters is not None:
for k, v in to_deep_object("filters[]", filters):
params.add(k, v)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"analytics",
"clicks",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
def get_top_queries_analytics(
self,
engine_name,
current_page=None,
page_size=None,
filters=None,
params=None,
headers=None,
http_auth=DEFAULT,
request_timeout=DEFAULT,
ignore_status=(),
):
"""
Returns queries analytics by usage count
`<https://www.elastic.co/guide/en/app-search/master/queries.html#queries-top-queries>`_
:arg engine_name: Name of the engine
:arg current_page: The page to fetch. Defaults to 1
:arg page_size: The number of results per page
:arg filters: Analytics filters
:arg params: Additional query params to send with the request
:arg headers: Additional headers to send with the request
:arg http_auth: Access token or HTTP basic auth username
and password to send with the request
:arg request_timeout: Timeout in seconds
:arg ignore_status: HTTP status codes to not raise an error
"""
if engine_name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument")
params = QueryParams(params)
if current_page is not None:
params.add("page[current]", current_page)
if page_size is not None:
params.add("page[size]", page_size)
if filters is not None:
for k, v in to_deep_object("filters[]", filters):
params.add(k, v)
return self.perform_request(
"GET",
to_path(
"api",
"as",
"v1",
"engines",
engine_name,
"analytics",
"queries",
),
params=params,
headers=headers,
http_auth=http_auth,
request_timeout=request_timeout,
ignore_status=ignore_status,
)
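# Illustrative usage of the client above (a sketch, not from the original file):
# the host URL, engine name and API key below are placeholders, and the public
# import path `elastic_enterprise_search.AppSearch` is an assumption about how
# this generated class is exposed.
#
# from elastic_enterprise_search import AppSearch
#
# app_search = AppSearch("http://localhost:3002", http_auth="private-xxxxxxxx")
# app_search.index_documents("my-engine", documents=[{"id": "1", "title": "Hello"}])
# resp = app_search.search("my-engine", body={"query": "hello"})
# print(resp["results"])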
|
python
|
"""
utility functions
"""
import collections.abc
def flatten(d, parent_key="", sep="_"):
"""
flatten nested dictionary, preserving lists
Arguments:
parent_key (str):
sep (str):
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
        if v and isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def degToCompass(num):
val = int((num / 22.5) + 0.5)
arr = [
"N",
"NNE",
"NE",
"ENE",
"E",
"ESE",
"SE",
"SSE",
"S",
"SSW",
"SW",
"WSW",
"W",
"WNW",
"NW",
"NNW",
]
return arr[(val % 16)]
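# Illustrative behaviour of the helpers above (values derived from the code,
# shown here as comments rather than executable tests):
#   flatten({"a": {"b": 1}, "c": [2, 3]})  ->  {"a_b": 1, "c": [2, 3]}
#   degToCompass(95)                       ->  "E"    (int(95 / 22.5 + 0.5) == 4)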
|
python
|
import tensorflow as tf
import datetime as dt
import pandas as pd
import os
def load_data():
col_names = [
'id', 'event_timestamp', 'course_over_ground', 'machine_id',
'vehicle_weight_type', 'speed_gps_kph', 'latitude', 'longitude']
data = pd.DataFrame(columns=col_names)
files = os.listdir('./machine-data')
for f in files:
d = pd.read_csv('./machine-data/' + f, sep=';')
d.loc[d.course_over_ground == -1, 'course_over_ground'] = None
d['north_proportion'] = north_proportion(d.course_over_ground)
data = data.append(d)
# data.to_csv("./machine-data-processed/raw.csv", index=False)
# data = pd.read_csv('./machine-data-processed/raw.csv')
return(data)
def north_proportion(vehicle_course_over_ground):
north_count = sum(
(vehicle_course_over_ground < 90) |
(vehicle_course_over_ground > 269)
)
return(float(north_count) / len(vehicle_course_over_ground))
def clean(data):
data = data.drop(columns=['id'])
data = split_dates_and_times(data, 'event_timestamp')
data.speed_gps_kph = pd.to_numeric(data.speed_gps_kph)
data = week_days(data)
return(data)
def split_dates_and_times(data, column):
data['year'] = pd.to_numeric(year(data[column]))
data['month'] = pd.to_numeric(month(data[column]))
data['day'] = pd.to_numeric(day(data[column]))
data['hour'] = pd.to_numeric(hour(data[column]))
data['minute'] = pd.to_numeric(minute(data[column]))
data = data.drop(columns=[column])
return(data)
def year(strings):
return split(strings, 0, '-', 0)
def month(strings):
return split(strings, 0, '-', 1)
def day(strings):
return split(strings, 0, '-', 2)
def hour(strings):
return split(strings, 1, ':', 0)
def minute(strings):
return split(strings, 1, ':', 1)
def split(strings, first, separator, second):
return [s.split(' ')[first].split(separator)[second] for s in strings]
def week_days(data):
data.loc[:, 'weekday'] = data.apply(get_row_weekday, axis=1)
return(data)
WEEKDAYS = {
0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday',
4: 'Friday', 5: 'Saturday', 6: 'Sunday'
}
def get_row_weekday(row):
date = dt.datetime(row.year, row.month, row.day)
return WEEKDAYS[date.weekday()]
WEATHER = pd.read_csv('./weather.csv')
WEATHER = split_dates_and_times(WEATHER, 'dt_iso')
def join_with_weather(data):
for index, row in WEATHER.iterrows():
selection = (data.day == row.day) & (data.hour == row.hour)
data.loc[selection, 'weather_description'] = row.weather_description
data.loc[selection, 'weather_main'] = row.weather_main
data.loc[selection, 'temperature'] = row.temp
data.temperature = data.temperature - 273.15
# data = pd.read_csv("./machine-data-processed/clean.csv")
# data.to_csv("./machine-data-processed/clean.csv", index=False)
return(data)
def input_fn(train_X, train_Y, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((dict(train_X), train_Y))
dataset = dataset.batch(batch_size)
return dataset
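# Hypothetical end-to-end use of the pipeline above (a sketch only: the choice
# of 'speed_gps_kph' as the label column and the batch size are assumptions,
# and the ./machine-data and ./weather.csv files must exist as in the code above):
#
# data = join_with_weather(clean(load_data()))
# train_Y = data.pop('speed_gps_kph')
# dataset = input_fn(data, train_Y, batch_size=32)
# for features, labels in dataset.take(1):
#     print(features.keys(), labels.shape)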
|
python
|
from jenkinsapi.jenkins import Jenkins
import xml.etree.ElementTree as ET
J = Jenkins('http://localhost:8080')
EMPTY_JOB_CONFIG = '''
<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description></description>
<keepDependencies>false</keepDependencies>
<properties/>
<scm class="hudson.scm.NullSCM"/>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<triggers class="vector"/>
<concurrentBuild>false</concurrentBuild>
<builders/>
<publishers/>
<buildWrappers/>
</project>
'''
jobname = 'foo_job'
new_job = J.create_job(jobname, EMPTY_JOB_CONFIG)
new_conf = new_job.get_config()
root = ET.fromstring(new_conf.strip())
builders = root.find('builders')
shell = ET.SubElement(builders, 'hudson.tasks.Shell')
command = ET.SubElement(shell, 'command')
command.text = "ls"
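# After the two SubElement calls above, the job's <builders/> element contains:
#   <builders>
#     <hudson.tasks.Shell>
#       <command>ls</command>
#     </hudson.tasks.Shell>
#   </builders>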
print(ET.tostring(root))
J[jobname].update_config(ET.tostring(root))
#J.delete_job(jobname)
|
python
|
import os
from typing import List, Optional
from uuid import uuid4
import PIL
import pytest
import arcade
import arcade.gui
from arcade.gui import UIClickable, UIManager
from arcade.gui.ui_style import UIStyle
class TestUIManager(UIManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.event_history: List[arcade.gui.UIEvent] = []
self.push_handlers(on_ui_event=self._on_ui_event)
def move_mouse(self, x: int, y: int):
self.dispatch_ui_event(arcade.gui.UIEvent(
arcade.gui.MOUSE_MOTION,
x=x,
y=y,
button=1,
modifier=0
))
def click_and_hold(self, x: int, y: int, button=arcade.MOUSE_BUTTON_LEFT):
self.dispatch_ui_event(arcade.gui.UIEvent(
arcade.gui.MOUSE_PRESS,
x=x,
y=y,
button=button,
modifier=0
))
def release(self, x: int, y: int, button=arcade.MOUSE_BUTTON_LEFT):
self.dispatch_ui_event(arcade.gui.UIEvent(
arcade.gui.MOUSE_RELEASE,
x=x,
y=y,
button=button,
modifier=0
))
def click(self, x: int, y: int):
self.click_and_hold(x, y)
self.release(x, y)
def right_click(self, x: int, y: int):
self.click_and_hold(x, y, button=arcade.MOUSE_BUTTON_RIGHT)
self.release(x, y, button=arcade.MOUSE_BUTTON_RIGHT)
def _on_ui_event(self, event: arcade.gui.UIEvent):
self.event_history.append(event)
@property
def last_event(self):
return self.event_history[-1] if self.event_history else None
def T(name, *args):
return pytest.param(*args, id=name)
class MockHolder(dict):
"""
    MockHolder, a dict-like object with attribute access
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
class Env:
def __init__(self, **kwargs):
self.variables = kwargs
self.old_vars = {}
def __enter__(self):
for key, value in self.variables.items():
if key in os.environ:
self.old_vars[key] = os.environ[key]
os.environ[key] = value
def __exit__(self, exc_type, exc_val, exc_tb):
for key in self.variables.keys():
del os.environ[key]
for key, value in self.old_vars.items():
os.environ[key] = value
class MockButton(UIClickable):
on_hover_called = False
on_unhover_called = False
on_press_called = False
on_release_called = False
on_click_called = False
on_focus_called = False
on_unfocus_called = False
def __init__(self,
center_x=0,
center_y=0,
width=40,
height=40,
id: Optional[str] = None,
style: UIStyle = None,
**kwargs):
super().__init__(center_x=center_x, center_y=center_y, id=id, style=style, **kwargs)
self.event_history: List[arcade.gui.UIEvent] = []
self._width = width
self._height = height
def render(self):
self.normal_texture = arcade.Texture(
image=PIL.Image.new("RGBA", (self._width, self._height), color=(255, 0, 0)),
name=str(uuid4()))
self.hover_texture = arcade.Texture(
image=PIL.Image.new("RGBA", (self._width, self._height), color=(255, 0, 0)),
name=str(uuid4()))
self.press_texture = arcade.Texture(
image=PIL.Image.new("RGBA", (self._width, self._height), color=(255, 0, 0)),
name=str(uuid4()))
self.focus_texture = arcade.Texture(
image=PIL.Image.new("RGBA", (self._width, self._height), color=(255, 0, 0)),
name=str(uuid4()))
self.set_proper_texture()
def on_ui_event(self, event: arcade.gui.UIEvent):
self.event_history.append(event)
super().on_ui_event(event)
@property
def last_event(self):
return self.event_history[-1] if self.event_history else None
def on_hover(self):
super().on_hover()
self.on_hover_called = True
def on_unhover(self):
super().on_unhover()
self.on_unhover_called = True
def on_press(self):
super().on_press()
self.on_press_called = True
def on_release(self):
super().on_release()
self.on_release_called = True
def on_click(self):
super().on_click()
self.on_click_called = True
def on_focus(self):
super().on_focus()
self.on_focus_called = True
def on_unfocus(self):
super().on_unfocus()
self.on_unfocus_called = True
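# Hypothetical example of how these helpers are meant to be combined (a sketch:
# the `window` fixture and UIManager.add_ui_element are assumptions based on
# the arcade.gui API generation this file targets):
#
# def test_click_reaches_button(window):
#     mng = TestUIManager(window)
#     button = MockButton(center_x=50, center_y=50)
#     mng.add_ui_element(button)
#     mng.click(50, 50)
#     assert button.on_click_called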
|
python
|
class Solution(object):
def deleteDuplicates(self, head):
ht = {}
it = head
while head:
if head.val in ht:
ht[head.val] += 1
else:
ht[head.val] = 1
head = head.next
headRef = None
last = None
while it:
if ht[it.val] > 1:
if last:
last.next = it.next
it = it.next
else:
it = it.next
else:
if not last:
headRef = it
last = it
it = it.next
return headRef
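# Minimal check of the solution above. LeetCode normally supplies ListNode, so
# this small stand-in (and the helper functions) are illustrative additions,
# not part of the original snippet.
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def _build(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

def _to_list(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

# 1 -> 2 -> 2 -> 3 keeps only the values that occur exactly once.
assert _to_list(Solution().deleteDuplicates(_build([1, 2, 2, 3]))) == [1, 3]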
|
python
|
#!/usr/bin/env python
import sys
from os.path import exists as path_exists
from pyscaffold.api import create_project
from pyscaffold.cli import run
from pyscaffold.extensions.github_actions import GithubActions
def test_create_project_with_github_actions(tmpfolder):
# Given options with the GithubActions extension,
opts = dict(project_path="proj", extensions=[GithubActions()])
# when the project is created,
create_project(opts)
# then files from GithubActions extension should exist
assert path_exists("proj/.github/workflows/ci.yml")
def test_create_project_without_github_actions(tmpfolder):
# Given options without the GithubActions extension,
opts = dict(project_path="proj")
# when the project is created,
create_project(opts)
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml")
def test_cli_with_github_actions(tmpfolder):
# Given the command line with the GithubActions option,
sys.argv = ["pyscaffold", "--github-actions", "proj"]
# when pyscaffold runs,
run()
# then files from GithubActions and other extensions automatically added should
# exist
assert path_exists("proj/.github/workflows/ci.yml")
assert path_exists("proj/tox.ini")
assert path_exists("proj/.pre-commit-config.yaml")
def test_cli_with_github_actions_and_pretend(tmpfolder):
# Given the command line with the GithubActions and pretend options
sys.argv = ["pyscaffold", "--pretend", "--github-actions", "proj"]
# when pyscaffold runs,
run()
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml")
# (or the project itself)
assert not path_exists("proj")
def test_cli_without_github_actions(tmpfolder):
# Given the command line without the GithubActions option,
sys.argv = ["pyscaffold", "proj"]
# when pyscaffold runs,
run()
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml")
|
python
|
import glob
from os.path import join
import numpy as n
import astropy.io.fits as fits
import lib_functions_1pt as lib
import os
import sys
#Quantity studied
version = 'v4'
qty = "mvir"
# one point function lists
fileC = n.array(glob.glob( join(os.environ['MD_DIR'], "MD_*Gpc*", version, qty,"out_*_Central_JKresampling.pkl")))
fileB = n.array(glob.glob( join( os.environ['MD_DIR'], "MD_*Gpc*", version, qty,"out_*_"+qty+"_JKresampling.bins")))
fileS = n.array(glob.glob( join( os.environ['MD_DIR'], "MD_*Gpc*", version, qty,"out_*_Satellite_JKresampling.pkl")))
fileC.sort()
fileS.sort()
fileB.sort()
print len(fileC), len(fileB), len(fileS)
print "considers ",len(fileC), qty , " function files"
for ii, el in enumerate(fileC):
print el
print fileS[ii]
print fileB[ii]
lib.convert_pkl_mass(fileC[ii], fileS[ii], fileB[ii], qty)
fileC = n.array(glob.glob( join(os.environ['DS_DIR'], version, qty,"ds*_Central_JKresampling.pkl")))
fileB = n.array(glob.glob( join( os.environ['DS_DIR'], version, qty,"ds*_"+qty+"_JKresampling.bins")))
fileS = n.array(glob.glob( join( os.environ['DS_DIR'], version, qty,"ds*_Satellite_JKresampling.pkl")))
fileC.sort()
fileS.sort()
fileB.sort()
print len(fileC), len(fileB), len(fileS)
print "considers ",len(fileC), qty , " function files"
for ii, el in enumerate(fileC):
print el
print fileS[ii]
print fileB[ii]
lib.convert_pkl_mass(fileC[ii], fileS[ii], fileB[ii], qty)
print qty
af = n.array(glob.glob(join(os.environ['MVIR_DIR'], "data", "*_"+qty+".fits") ) )
print af[0]
d0 = fits.open(af[0])[1].data
#print len(d0['log_mvir']), d0['log_mvir']
for ii in range(1,len(af),1):
d1 = fits.open(af[ii])[1].data
d0 = n.hstack((d0,d1))
hdu2 = fits.BinTableHDU.from_columns(d0)
writeName = join(os.environ['MVIR_DIR'], qty+"_summary.fits")
if os.path.isfile(writeName):
os.remove(writeName)
hdu2.writeto( writeName )
"""
sys.exit()
# rebinning here
#solve bins = 0 problem
n.arange()
n.hstack((n.arange(8,14,0.25), n.arange(14,16,0.05)))
#if logmvir < 14 :
Nrb = 5.
idSplit = int(n.searchsorted(d0['log_mvir'],14)/Nrb)*Nrb
split_array= lambda array: [array[:idSplit], array[idSplit:]]
#variables :
#
def rebinMe(trb, mode, Nrb = 5):
    # split
    part1, part2 = split_array(trb)
    # rebin
    take_middle_val = lambda part: part[2::Nrb]
    take_mean_val = lambda part: (part[0::Nrb] + part[1::Nrb] + part[2::Nrb] + part[3::Nrb] + part[4::Nrb]) / Nrb
    take_sum_val = lambda part: part[0::Nrb] + part[1::Nrb] + part[2::Nrb] + part[3::Nrb] + part[4::Nrb]
if mode == 'middle' :
part1b = take_middle_val(part1)
if mode == 'mean' :
part1b = take_mean_val(part1)
if mode == 'sum' :
part1b = take_sum_val(part1)
return n.hstack((part1b, part2))
trb = d0['log_mvir']
mode = 'middle'
trb_o = rebinMe(trb, mode)
"""
|
python
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <[email protected]>
# Copyright 2007 Kenneth Loafman <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Functions for producing signatures and deltas of directories
Note that the main processes of this module have two parts. In the
first, the signature or delta is constructed from a ROPath iterator. In
the second, the ROPath iterator is put into tar block form.
"""
import cStringIO, types, math
from duplicity import statistics
from duplicity import util
from duplicity import globals
from duplicity.path import * #@UnusedWildImport
from duplicity.lazy import * #@UnusedWildImport
from duplicity import progress
# A StatsObj will be written to this from DirDelta and DirDelta_WriteSig.
stats = None
tracker = None
class DiffDirException(Exception):
pass
def DirSig(path_iter):
"""
Alias for SigTarBlockIter below
"""
return SigTarBlockIter(path_iter)
def DirFull(path_iter):
"""
Return a tarblock full backup of items in path_iter
A full backup is just a diff starting from nothing (it may be less
elegant than using a standard tar file, but we can be sure that it
will be easy to split up the tar and make the volumes the same
sizes).
"""
return DirDelta(path_iter, cStringIO.StringIO(""))
def DirFull_WriteSig(path_iter, sig_outfp):
"""
Return full backup like above, but also write signature to sig_outfp
"""
return DirDelta_WriteSig(path_iter, cStringIO.StringIO(""), sig_outfp)
def DirDelta(path_iter, dirsig_fileobj_list):
"""
Produce tarblock diff given dirsig_fileobj_list and pathiter
dirsig_fileobj_list should either be a tar fileobj or a list of
those, sorted so the most recent is last.
"""
global stats
stats = statistics.StatsDeltaProcess()
if type(dirsig_fileobj_list) is types.ListType:
sig_iter = combine_path_iters(map(sigtar2path_iter,
dirsig_fileobj_list))
else:
sig_iter = sigtar2path_iter(dirsig_fileobj_list)
delta_iter = get_delta_iter(path_iter, sig_iter)
if globals.dry_run or (globals.progress and not progress.tracker.has_collected_evidence()):
return DummyBlockIter(delta_iter)
else:
return DeltaTarBlockIter(delta_iter)
def delta_iter_error_handler(exc, new_path, sig_path, sig_tar = None):
"""
Called by get_delta_iter, report error in getting delta
"""
if new_path:
index_string = new_path.get_relative_path()
elif sig_path:
index_string = sig_path.get_relative_path()
else:
assert 0, "Both new and sig are None for some reason"
log.Warn(_("Error %s getting delta for %s") % (str(exc), util.ufn(index_string)))
return None
def get_delta_path(new_path, sig_path, sigTarFile = None):
"""
Return new delta_path which, when read, writes sig to sig_fileobj,
if sigTarFile is not None
"""
assert new_path
if sigTarFile:
ti = new_path.get_tarinfo()
index = new_path.index
delta_path = new_path.get_ropath()
log.Debug(_("Getting delta of %s and %s") % (new_path, sig_path))
def callback(sig_string):
"""
Callback activated when FileWithSignature read to end
"""
ti.size = len(sig_string)
ti.name = "signature/" + "/".join(index)
sigTarFile.addfile(ti, cStringIO.StringIO(sig_string))
if new_path.isreg() and sig_path and sig_path.isreg() and sig_path.difftype == "signature":
delta_path.difftype = "diff"
old_sigfp = sig_path.open("rb")
newfp = FileWithReadCounter(new_path.open("rb"))
if sigTarFile:
newfp = FileWithSignature(newfp, callback,
new_path.getsize())
delta_path.setfileobj(librsync.DeltaFile(old_sigfp, newfp))
else:
delta_path.difftype = "snapshot"
if sigTarFile:
ti.name = "snapshot/" + "/".join(index)
if not new_path.isreg():
if sigTarFile:
sigTarFile.addfile(ti)
if stats:
stats.SourceFileSize += delta_path.getsize()
else:
newfp = FileWithReadCounter(new_path.open("rb"))
if sigTarFile:
newfp = FileWithSignature(newfp, callback,
new_path.getsize())
delta_path.setfileobj(newfp)
new_path.copy_attribs(delta_path)
delta_path.stat.st_size = new_path.stat.st_size
return delta_path
def log_delta_path(delta_path, new_path = None, stats = None):
"""
Look at delta path and log delta. Add stats if new_path is set
"""
if delta_path.difftype == "snapshot":
if new_path and stats:
stats.add_new_file(new_path)
log.Info(_("A %s") %
(util.ufn(delta_path.get_relative_path())),
log.InfoCode.diff_file_new,
util.escape(delta_path.get_relative_path()))
else:
if new_path and stats:
stats.add_changed_file(new_path)
log.Info(_("M %s") %
(util.ufn(delta_path.get_relative_path())),
log.InfoCode.diff_file_changed,
util.escape(delta_path.get_relative_path()))
def get_delta_iter(new_iter, sig_iter, sig_fileobj=None):
"""
Generate delta iter from new Path iter and sig Path iter.
For each delta path of regular file type, path.difftype will be
set to "snapshot" or "diff". sig_iter will probably iterate ROPaths
instead of Paths.
If sig_fileobj is not None, will also write signatures to sig_fileobj.
"""
collated = collate2iters(new_iter, sig_iter)
if sig_fileobj:
sigTarFile = util.make_tarfile("w", sig_fileobj)
else:
sigTarFile = None
for new_path, sig_path in collated:
log.Debug(_("Comparing %s and %s") % (new_path and util.uindex(new_path.index),
sig_path and util.uindex(sig_path.index)))
if not new_path or not new_path.type:
# File doesn't exist (but ignore attempts to delete base dir;
# old versions of duplicity could have written out the sigtar in
# such a way as to fool us; LP: #929067)
if sig_path and sig_path.exists() and sig_path.index != ():
# but signature says it did
log.Info(_("D %s") %
(util.ufn(sig_path.get_relative_path())),
log.InfoCode.diff_file_deleted,
util.escape(sig_path.get_relative_path()))
if sigTarFile:
ti = ROPath(sig_path.index).get_tarinfo()
ti.name = "deleted/" + "/".join(sig_path.index)
sigTarFile.addfile(ti)
stats.add_deleted_file()
yield ROPath(sig_path.index)
elif not sig_path or new_path != sig_path:
# Must calculate new signature and create delta
delta_path = robust.check_common_error(delta_iter_error_handler,
get_delta_path,
(new_path, sig_path, sigTarFile))
if delta_path:
# log and collect stats
log_delta_path(delta_path, new_path, stats)
yield delta_path
else:
# if not, an error must have occurred
stats.Errors += 1
else:
stats.add_unchanged_file(new_path)
stats.close()
if sigTarFile:
sigTarFile.close()
def sigtar2path_iter(sigtarobj):
"""
Convert signature tar file object open for reading into path iter
"""
tf = util.make_tarfile("r", sigtarobj)
tf.debug = 1
for tarinfo in tf:
tiname = util.get_tarinfo_name(tarinfo)
for prefix in ["signature/", "snapshot/", "deleted/"]:
if tiname.startswith(prefix):
# strip prefix and '/' from name and set it to difftype
name, difftype = tiname[len(prefix):], prefix[:-1]
break
else:
raise DiffDirException("Bad tarinfo name %s" % (tiname,))
index = tuple(name.split("/"))
if not index[-1]:
index = index[:-1] # deal with trailing /, ""
ropath = ROPath(index)
ropath.difftype = difftype
if difftype == "signature" or difftype == "snapshot":
ropath.init_from_tarinfo(tarinfo)
if ropath.isreg():
ropath.setfileobj(tf.extractfile(tarinfo))
yield ropath
sigtarobj.close()
def collate2iters(riter1, riter2):
"""
Collate two iterators.
The elements yielded by each iterator must have an index
variable. This function returns pairs (elem1, elem2), (elem1,
None), or (None, elem2); the two elements in a pair will have the
same index, and earlier indices are yielded before later indices.
"""
relem1, relem2 = None, None
while 1:
if not relem1:
try:
relem1 = riter1.next()
except StopIteration:
if relem2:
yield (None, relem2)
for relem2 in riter2:
yield (None, relem2)
break
index1 = relem1.index
if not relem2:
try:
relem2 = riter2.next()
except StopIteration:
if relem1:
yield (relem1, None)
for relem1 in riter1:
yield (relem1, None)
break
index2 = relem2.index
if index1 < index2:
yield (relem1, None)
relem1 = None
elif index1 == index2:
yield (relem1, relem2)
relem1, relem2 = None, None
else:
# index2 is less
yield (None, relem2)
relem2 = None
def combine_path_iters(path_iter_list):
"""
Produce new iterator by combining the iterators in path_iter_list
This new iter will iterate every path that is in path_iter_list in
order of increasing index. If multiple iterators in
path_iter_list yield paths with the same index, combine_path_iters
will discard all paths but the one yielded by the last path_iter.
This is used to combine signature iters, as the output will be a
full up-to-date signature iter.
"""
path_iter_list = path_iter_list[:] # copy before destructive reverse
path_iter_list.reverse()
def get_triple(iter_index):
"""
Represent the next element as a triple, to help sorting
"""
try:
path = path_iter_list[iter_index].next()
except StopIteration:
return None
return (path.index, iter_index, path)
def refresh_triple_list(triple_list):
"""
Update all elements with path_index same as first element
"""
path_index = triple_list[0][0]
iter_index = 0
while iter_index < len(triple_list):
old_triple = triple_list[iter_index]
if old_triple[0] == path_index:
new_triple = get_triple(old_triple[1])
if new_triple:
triple_list[iter_index] = new_triple
iter_index += 1
else:
del triple_list[iter_index]
else:
break # assumed triple_list sorted, so can exit now
triple_list = filter(lambda x: x, map(get_triple,
range(len(path_iter_list))))
while triple_list:
triple_list.sort()
yield triple_list[0][2]
refresh_triple_list(triple_list)
def DirDelta_WriteSig(path_iter, sig_infp_list, newsig_outfp):
"""
Like DirDelta but also write signature into sig_fileobj
Like DirDelta, sig_infp_list can be a tar fileobj or a sorted list
of those. A signature will only be written to newsig_outfp if it
is different from (the combined) sig_infp_list.
"""
global stats
stats = statistics.StatsDeltaProcess()
if type(sig_infp_list) is types.ListType:
sig_path_iter = get_combined_path_iter(sig_infp_list)
else:
sig_path_iter = sigtar2path_iter(sig_infp_list)
delta_iter = get_delta_iter(path_iter, sig_path_iter, newsig_outfp)
if globals.dry_run or (globals.progress and not progress.tracker.has_collected_evidence()):
return DummyBlockIter(delta_iter)
else:
return DeltaTarBlockIter(delta_iter)
def get_combined_path_iter(sig_infp_list):
"""
Return path iter combining signatures in list of open sig files
"""
return combine_path_iters(map(sigtar2path_iter, sig_infp_list))
class FileWithReadCounter:
"""
File-like object which also computes amount read as it is read
"""
def __init__(self, infile):
"""FileWithReadCounter initializer"""
self.infile = infile
def read(self, length = -1):
try:
buf = self.infile.read(length)
except IOError, ex:
buf = ""
log.Warn(_("Error %s getting delta for %s") % (str(ex), util.ufn(self.infile.name)))
if stats:
stats.SourceFileSize += len(buf)
return buf
def close(self):
return self.infile.close()
class FileWithSignature:
"""
File-like object which also computes signature as it is read
"""
blocksize = 32 * 1024
def __init__(self, infile, callback, filelen, *extra_args):
"""
FileTee initializer
The object will act like infile, but whenever it is read it
adds infile's data to a SigGenerator object. When the file has
been read to the end the callback will be called with the
calculated signature, and any extra_args if given.
filelen is used to calculate the block size of the signature.
"""
self.infile, self.callback = infile, callback
self.sig_gen = librsync.SigGenerator(get_block_size(filelen))
self.activated_callback = None
self.extra_args = extra_args
def read(self, length = -1):
buf = self.infile.read(length)
self.sig_gen.update(buf)
return buf
def close(self):
# Make sure all of infile read
if not self.activated_callback:
while self.read(self.blocksize):
pass
self.activated_callback = 1
self.callback(self.sig_gen.getsig(), *self.extra_args)
return self.infile.close()
class TarBlock:
"""
Contain information to add next file to tar
"""
def __init__(self, index, data):
"""
TarBlock initializer - just store data
"""
self.index = index
self.data = data
class TarBlockIter:
"""
A bit like an iterator, yield tar blocks given input iterator
Unlike an iterator, however, control over the maximum size of a
tarblock is available by passing an argument to next(). Also the
get_footer() is available.
"""
def __init__(self, input_iter):
"""
TarBlockIter initializer
"""
self.input_iter = input_iter
self.offset = 0l # total length of data read
self.process_waiting = False # process_continued has more blocks
self.process_next_vol_number = None # next volume number to write in multivol
self.previous_index = None # holds index of last block returned
self.previous_block = None # holds block of last block returned
self.remember_next = False # see remember_next_index()
self.remember_value = None # holds index of next block
self.remember_block = None # holds block of next block
self.queued_data = None # data to return in next next() call
def tarinfo2tarblock(self, index, tarinfo, file_data = ""):
"""
Make tarblock out of tarinfo and file data
"""
tarinfo.size = len(file_data)
headers = tarinfo.tobuf(errors='replace')
blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE) #@UnusedVariable
if remainder > 0:
filler_data = "\0" * (tarfile.BLOCKSIZE - remainder)
else:
filler_data = ""
return TarBlock(index, "%s%s%s" % (headers, file_data, filler_data))
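# Worked example: with 700 bytes of file_data and tarfile.BLOCKSIZE == 512,
# remainder is 188, so 324 NUL bytes of filler are appended and the data
# region occupies exactly two 512-byte tar blocks.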
def process(self, val):
"""
Turn next value of input_iter into a TarBlock
"""
assert not self.process_waiting
XXX # Override in subclass @UndefinedVariable
def process_continued(self):
"""
Get more tarblocks
If processing val above would produce more than one TarBlock,
get the rest of them by calling process_continue.
"""
assert self.process_waiting
XXX # Override in subclass @UndefinedVariable
def next(self):
"""
Return next block and update offset
"""
if self.queued_data is not None:
result = self.queued_data
self.queued_data = None
# Keep rest of metadata as is (like previous_index)
return result
if self.process_waiting:
result = self.process_continued()
else:
# Below a StopIteration exception will just be passed upwards
result = self.process(self.input_iter.next())
block_number = self.process_next_vol_number
self.offset += len(result.data)
self.previous_index = result.index
self.previous_block = block_number
if self.remember_next:
self.remember_value = result.index
self.remember_block = block_number
self.remember_next = False
return result
def get_read_size(self):
# read size must always be the same, because if we are restarting a
# backup volume where the previous volume ended in a data block, we
# have to be able to assume its length in order to continue reading
# the file from the right place.
return 64 * 1024
def get_previous_index(self):
"""
Return index of last tarblock, or None if no previous index
"""
return self.previous_index, self.previous_block
def queue_index_data(self, data):
"""
Next time next() is called, we will return data instead of processing
"""
self.queued_data = data
def remember_next_index(self):
"""
When called, remember the index of the next block iterated
"""
self.remember_next = True
self.remember_value = None
self.remember_block = None
def recall_index(self):
"""
Retrieve index remembered with remember_next_index
"""
return self.remember_value, self.remember_block
def get_footer(self):
"""
Return closing string for tarfile, reset offset
"""
blocks, remainder = divmod(self.offset, tarfile.RECORDSIZE) #@UnusedVariable
self.offset = 0l
return '\0' * (tarfile.RECORDSIZE - remainder) # remainder can be 0
def __iter__(self):
return self
class DummyBlockIter(TarBlockIter):
"""
TarBlockIter that does no file reading
"""
def process(self, delta_ropath):
"""
Get a fake tarblock from delta_ropath
"""
ti = delta_ropath.get_tarinfo()
index = delta_ropath.index
# Return blocks of deleted files or fileless snapshots
if not delta_ropath.type or not delta_ropath.fileobj:
return self.tarinfo2tarblock(index, ti)
if stats:
# Since we don't read the source files, we can't analyze them.
# Best we can do is count them raw.
stats.SourceFiles += 1
stats.SourceFileSize += delta_ropath.getsize()
log.Progress(None, stats.SourceFileSize)
return self.tarinfo2tarblock(index, ti)
class SigTarBlockIter(TarBlockIter):
"""
TarBlockIter that yields blocks of a signature tar from path_iter
"""
def process(self, path):
"""
Return associated signature TarBlock from path
"""
ti = path.get_tarinfo()
if path.isreg():
sfp = librsync.SigFile(path.open("rb"),
get_block_size(path.getsize()))
sigbuf = sfp.read()
sfp.close()
ti.name = "signature/" + "/".join(path.index)
return self.tarinfo2tarblock(path.index, ti, sigbuf)
else:
ti.name = "snapshot/" + "/".join(path.index)
return self.tarinfo2tarblock(path.index, ti)
class DeltaTarBlockIter(TarBlockIter):
"""
TarBlockIter that yields parts of a deltatar file
Unlike SigTarBlockIter, the argument to __init__ is a
delta_path_iter, so the delta information has already been
calculated.
"""
def process(self, delta_ropath):
"""
Get a tarblock from delta_ropath
"""
def add_prefix(tarinfo, prefix):
"""Add prefix to the name of a tarinfo file"""
if tarinfo.name == ".":
tarinfo.name = prefix + "/"
else:
tarinfo.name = "%s/%s" % (prefix, tarinfo.name)
ti = delta_ropath.get_tarinfo()
index = delta_ropath.index
# Return blocks of deleted files or fileless snapshots
if not delta_ropath.type or not delta_ropath.fileobj:
if not delta_ropath.type:
add_prefix(ti, "deleted")
else:
assert delta_ropath.difftype == "snapshot"
add_prefix(ti, "snapshot")
return self.tarinfo2tarblock(index, ti)
# Now handle single volume block case
fp = delta_ropath.open("rb")
data, last_block = self.get_data_block(fp)
if stats:
stats.RawDeltaSize += len(data)
if last_block:
if delta_ropath.difftype == "snapshot":
add_prefix(ti, "snapshot")
elif delta_ropath.difftype == "diff":
add_prefix(ti, "diff")
else:
assert 0, "Unknown difftype"
return self.tarinfo2tarblock(index, ti, data)
# Finally, do multivol snapshot or diff case
full_name = "multivol_%s/%s" % (delta_ropath.difftype, ti.name)
ti.name = full_name + "/1"
self.process_prefix = full_name
self.process_fp = fp
self.process_ropath = delta_ropath
self.process_waiting = 1
self.process_next_vol_number = 2
return self.tarinfo2tarblock(index, ti, data)
def get_data_block(self, fp):
"""
Return pair (next data block, boolean last data block)
"""
read_size = self.get_read_size()
buf = fp.read(read_size)
if len(buf) < read_size:
if fp.close():
raise DiffDirException("Error closing file")
return (buf, True)
else:
return (buf, False)
def process_continued(self):
"""
Return next volume in multivol diff or snapshot
"""
assert self.process_waiting
ropath = self.process_ropath
ti, index = ropath.get_tarinfo(), ropath.index
ti.name = "%s/%d" % (self.process_prefix, self.process_next_vol_number)
data, last_block = self.get_data_block(self.process_fp)
if stats:
stats.RawDeltaSize += len(data)
if last_block:
self.process_prefix = None
self.process_fp = None
self.process_ropath = None
self.process_waiting = None
self.process_next_vol_number = None
else:
self.process_next_vol_number += 1
return self.tarinfo2tarblock(index, ti, data)
def write_block_iter(block_iter, out_obj):
"""
Write block_iter to filename, path, or file object
"""
if isinstance(out_obj, Path):
fp = open(out_obj.name, "wb")
elif type(out_obj) is types.StringType:
fp = open(out_obj, "wb")
else:
fp = out_obj
for block in block_iter:
fp.write(block.data)
fp.write(block_iter.get_footer())
assert not fp.close()
if isinstance(out_obj, Path):
out_obj.setdata()
def get_block_size(file_len):
"""
Return a reasonable block size to use on files of length file_len
If the block size is too big, deltas will be bigger than is
necessary. If the block size is too small, making deltas and
patching can take a really long time.
"""
if file_len < 1024000:
return 512 # set minimum of 512 bytes
else:
# Split file into about 2000 pieces, rounding to 512
file_blocksize = long((file_len / (2000 * 512)) * 512)
return min(file_blocksize, globals.max_blocksize)
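# Worked example: for a 100 MB file, 100000000 / (2000 * 512) == 97 with
# integer division, so the block size is 97 * 512 == 49664 bytes (subject to
# the globals.max_blocksize cap); files under 1024000 bytes use the 512-byte
# minimum.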
|
python
|
from django.test import TestCase
from rest_framework.test import APIClient
from teams.models import Team
class TeamTestCase(TestCase):
"""
Test set for Team CRD (there is no update here)
"""
client = APIClient()
trainer_id = 0
team_id = 0
def setUp(self):
"""
Set up case. Creates a trainer to work with.
"""
response = self.client.post('/trainer/create/',
{"name": "Blue",
"las_name": "Oak"})
self.trainer_id = response.json()["id"]
def test_create_team(self):
"""
Creates a new team for the trainer. Asserts that
the team is created.
"""
response = self.client.post("/teams/create/",
{
"trainer": str(self.trainer_id),
})
self.assertEqual(response.status_code, 201)
self.team_id = response.json()["id"]
team = Team.objects.get(pk=self.team_id,
trainer=self.trainer_id)
self.assertEqual(self.trainer_id, team.trainer.pk)
def test_create_team_empty_data(self):
"""
Tries to create a team with an empty json.
Expects a bad request error.
"""
response = self.client.post("/teams/create/",
{
})
self.assertEqual(response.status_code, 400)
def test_create_team_bad_data(self):
"""
Tries to create a team with bad data.
Expects a bad request error.
"""
response = self.client.post("/teams/create/",
{
"Pokemon_Master": str(self.trainer_id),
})
self.assertEqual(response.status_code, 400)
def test_create_team_trainer_not_exist(self):
"""
Tries to create a team with a trainer that doesn't
exist. Expects a bad request error.
"""
response = self.client.post("/teams/create/",
{
"trainer": "2",
})
self.assertEqual(response.status_code, 400)
def test_get_team(self):
"""
Retrieves a team using the team pk. Expects
a 200 code.
"""
response = self.client.post("/teams/create/",
{
"trainer": str(self.trainer_id),
})
team_pk = response.json()["id"]
response = self.client.get(
"/teams/get/"+str(team_pk)+"/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["id"], team_pk)
self.assertEqual(
response.json()["trainer"], self.trainer_id)
def test_get_team_bad_id(self):
"""
Tries to retrieve a team that doesn't exist.
Error not found expected.
"""
response = self.client.get(
"/teams/get/15555/")
self.assertEqual(response.status_code, 404)
def test_get_team_bad_param(self):
"""
Tries to retrieve a team with the wrong type of
path param, a string instead of a number.
Error not found expected.
"""
response = self.client.get(
"/teams/get/15555/")
self.assertEqual(response.status_code, 404)
def test_delete_team(self):
"""
Deletes an existing team. Code 204 expected
"""
response = self.client.post("/teams/create/",
{
"trainer": str(self.trainer_id),
})
team_pk = response.json()["id"]
response = self.client.delete(
"/teams/get/"+str(team_pk)+"/")
self.assertEqual(response.status_code, 204)
def test_delete_team_no_team_exists(self):
"""
Tries to delete a team that doesn't exist.
Not Found error expected.
"""
response = self.client.delete(
"/teams/get/50000/")
self.assertEqual(response.status_code, 404)
|
python
|
import os
import sqlite3
from datetime import datetime
from flask import g
DATABASE = 'test.db'
def get_db():
if not os.path.isfile(DATABASE):
create_database()
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
def save_msg(msg_text: str):
conn = get_db()
c = conn.cursor()
c.execute("INSERT INTO Message (Date, UserID, MessageText) VALUES (?, 0, ?)",
(datetime.utcnow().replace(microsecond=0).isoformat(),
msg_text))
conn.commit()
def create_database():
db = sqlite3.connect(DATABASE)
db.cursor().execute("""CREATE TABLE Message (
MessageID INTEGER PRIMARY KEY,
Date TEXT NOT NULL,
UserID INTEGER NOT NULL,
MessageText TEXT NOT NULL
)""")
db.commit()
db.close()
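# Hedged usage sketch (not part of this module): it assumes a Flask
# application object named `app`, which is defined elsewhere.
#
#     from flask import Flask, request
#     app = Flask(__name__)
#
#     @app.route("/message", methods=["POST"])
#     def post_message():
#         save_msg(request.form["text"])   # stores the message via get_db()
#         return "", 204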
|
python
|
__author__ = "Hangi,Kim"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" IBM LoadLeveler job adaptor implementation
based on the PBS and SGE job adaptor implementations
Hangi, Kim [email protected]
"""
import os
import re
import time
from urllib.parse import parse_qs
from datetime import datetime
import radical.utils as ru
from .. import base
from .. import cpi
from ...job import constants as c
from ...exceptions import *
from ... import job as sj
from ...utils import pty_shell as sups
from ..sge.sgejob import SgeKeyValueParser
SYNC_CALL = cpi.decorators.SYNC_CALL
ASYNC_CALL = cpi.decorators.ASYNC_CALL
# --------------------------------------------------------------------
#
def _ll_to_saga_jobstate(lljs):
""" translates a loadleveler one-letter state to saga
pbs_loadl_comparison.xlsx
"""
if lljs == 'C' : return c.DONE
elif lljs == 'S' : return c.PENDING
elif lljs == 'ST': return c.PENDING
elif lljs == 'I' : return c.PENDING
elif lljs == 'R' : return c.RUNNING
else : return c.UNKNOWN
def getId(out):
t = out.split('\n')
jobId = None
for line in t:
if line.startswith('Job'):
tmpStr = line.split(' ')
jobId = tmpStr[1]
break
elif re.search('The job ".+" has been submitted.', line):
# Format: llsubmit: The job "srv03-ib.443336" has been submitted.
jobId = re.findall(r'"(.*?)"', line)[0]
break
if not jobId:
raise Exception("Failed to detect jobId.")
return jobId
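# Illustrative example, based on the llsubmit output format noted above:
#
#     getId('llsubmit: The job "srv03-ib.443336" has been submitted.')
#     # -> 'srv03-ib.443336'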
# --------------------------------------------------------------------
# some private defs
#
_PTY_TIMEOUT = 2.0
# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "radical.saga.adaptors.loadljob"
_ADAPTOR_SCHEMAS = ["loadl", "loadl+ssh", "loadl+gsissh"]
# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
"jdes_attributes": [c.NAME,
c.EXECUTABLE,
c.ARGUMENTS,
c.ENVIRONMENT,
c.INPUT,
c.OUTPUT,
c.ERROR,
c.QUEUE,
c.PROJECT,
c.JOB_CONTACT,
c.WALL_TIME_LIMIT,
c.WORKING_DIRECTORY,
c.TOTAL_PHYSICAL_MEMORY,
c.PROCESSES_PER_HOST,
c.CANDIDATE_HOSTS,
c.TOTAL_CPU_COUNT],
"job_attributes": [c.EXIT_CODE,
c.EXECUTION_HOSTS,
c.CREATED,
c.STARTED,
c.FINISHED],
"metrics": [c.STATE],
"contexts": {"ssh": "SSH public/private keypair",
"x509": "GSISSH X509 proxy context",
"userpass": "username/password pair (ssh)"}
}
# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
"name": _ADAPTOR_NAME,
"capabilities": _ADAPTOR_CAPABILITIES,
"description": """
The LoadLeveler adaptor allows one to run and manage jobs on `IBM LoadLeveler <http://www-03.ibm.com/systems/software/loadleveler/>`_
controlled HPC clusters.
""",
"example": "examples/jobs/loadljob.py",
"schemas": {"loadl": "connect to a local cluster",
"loadl+ssh": "conenct to a remote cluster via SSH",
"loadl+gsissh": "connect to a remote cluster via GSISSH"}
}
# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
#
_ADAPTOR_INFO = {
"name" : _ADAPTOR_NAME,
"version": "v0.1",
"schemas": _ADAPTOR_SCHEMAS,
"cpis" : [
{
"type": "radical.saga.job.Service",
"class": "LOADLJobService"
},
{
"type": "radical.saga.job.Job",
"class": "LOADLJob"
}
]
}
###############################################################################
#
# The adaptor class
#
class Adaptor (base.Base):
""" this is the actual adaptor class, which gets loaded by SAGA (i.e. by
the SAGA engine), and which registers the CPI implementation classes
which provide the adaptor's functionality.
"""
# ----------------------------------------------------------------
#
def __init__(self):
base.Base.__init__(self, _ADAPTOR_INFO)
self.id_re = re.compile(r'^\[(.*)\]-\[(.*?)\]$')
self.epoch = datetime(1970,1,1)
self.purge_on_start = self._cfg['purge_on_start']
self.purge_older_than = self._cfg['purge_older_than']
# ----------------------------------------------------------------
#
def sanity_check(self):
# FIXME: also check for gsissh
pass
# ----------------------------------------------------------------
#
def parse_id(self, id):
# split the id '[rm]-[pid]' in its parts, and return them.
match = self.id_re.match(id)
if not match or len(match.groups()) != 2:
raise BadParameter("Cannot parse job id '%s'" % id)
return (match.group(1), match.group(2))
###############################################################################
#
class LOADLJobService (cpi.job.Service):
""" implements cpi.job.Service
"""
# ----------------------------------------------------------------
#
def __init__(self, api, adaptor):
self._cpi_base = super(LOADLJobService, self)
self._cpi_base.__init__(api, adaptor)
self._adaptor = adaptor
# ----------------------------------------------------------------
#
def __del__(self):
self.finalize(kill_shell=True)
# ----------------------------------------------------------------
#
@SYNC_CALL
def init_instance(self, adaptor_state, rm_url, session):
""" service instance constructor
"""
self.rm = rm_url
self.session = session
self.ppn = 0 # check for remove
self.jobs = dict()
self.cluster_option = ''
self.energy_policy_tag = None
self.island_count = None
self.node_usage = None
self.network_mpi = None
self.blocking = None
self.job_type = 'MPICH' # TODO: Is this a sane default?
self.enforce_resource_submission = False
self.enforce_consumable_cpus = False
self.enforce_consumable_memory = False
self.enforce_consumable_virtual_memory = False
self.enforce_consumable_large_page_memory = False
self.temp_path = "$HOME/.radical/saga/adaptors/loadl_job"
# LoadLeveler has two ways of specifying the executable and arguments.
# - Explicit: the executable and arguments are specified as parameters.
# - Implicit: the (remainder of the) job script is the task.
#
# Currently we don't know how this policy can be detected at runtime.
# We know that providing both will not work in all cases.
#
# As the IBM Red Book documents the explicit exec only,
# we should use that as a default.
# Currently we just use a hack to workaround Joule.
#
# Note: does this now simply become a Joule hack?
#
# TODO: Split script into submission file and script and use that for
# explicit exec?
self.explicit_exec = False
rm_scheme = rm_url.scheme
pty_url = ru.Url (rm_url)
# this adaptor supports options that can be passed via the
# 'query' component of the job service URL.
if rm_url.query is not None:
for key, val in parse_qs(rm_url.query).items():
if key == 'cluster':
self.cluster_option = " -X %s" % val[0]
elif key == 'energy_policy_tag':
self.energy_policy_tag = val[0]
elif key == 'island_count':
self.island_count = val[0]
elif key == 'node_usage':
self.node_usage = val[0]
elif key == 'network_mpi':
self.network_mpi = val[0]
elif key == 'blocking':
self.blocking = val[0]
elif key == 'job_type':
self.job_type = val[0]
elif key == 'enforce_consumable_cpus':
self.enforce_consumable_cpus = True
self.enforce_resource_submission = True
elif key == 'enforce_consumable_memory':
self.enforce_consumable_memory = True
self.enforce_resource_submission = True
elif key == 'enforce_consumable_virtual_memory':
self.enforce_consumable_virtual_memory = True
self.enforce_resource_submission = True
elif key == 'enforce_consumable_large_page_memory':
self.enforce_consumable_large_page_memory = True
self.enforce_resource_submission = True
elif key == 'explicit_exec':
self.explicit_exec = True
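# Illustrative job service URL (host and values are placeholders); the options
# parsed above are passed via the query string, e.g.:
#   loadl+ssh://login.example.org/?cluster=mycluster&node_usage=not_shared&explicit_exec=1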
# we need to extract the scheme for PTYShell. That's basically the
# job.Service Url without the loadl+ part. We use the PTYShell to execute
# loadleveler commands either locally or via gsissh or ssh.
if rm_scheme == "loadl":
pty_url.scheme = "fork"
elif rm_scheme == "loadl+ssh":
pty_url.scheme = "ssh"
elif rm_scheme == "loadl+gsissh":
pty_url.scheme = "gsissh"
# these are the commands that we need in order to interact with Load
# Leveler. the adaptor will try to find them during initialize(self)
# and bail out in case they are not available.
self._commands = {'llq' : None,
'llsubmit': None,
'llcancel': None}
self.shell = sups.PTYShell(pty_url, self.session)
# self.shell.set_initialize_hook(self.initialize)
# self.shell.set_finalize_hook(self.finalize)
self.initialize()
return self.get_api ()
# ----------------------------------------------------------------
#
def close (self) :
if self.shell :
self.shell.finalize (True)
# ----------------------------------------------------------------
#
def initialize(self):
# check if all required loadleveler tools are available
for cmd in self._commands:
ret, out, _ = self.shell.run_sync("which %s " % cmd)
self._logger.info(ret)
self._logger.info(out)
if ret != 0:
raise NoSuccess("Error finding LoadLeveler tools: %s" % out)
else:
path = out.strip() # strip removes newline
ret, out, _ = self.shell.run_sync("%s -v" % cmd)
if ret != 0:
raise NoSuccess("Error finding LoadLeveler tools: %s" % out)
else:
# version is reported as: "version: x.y.z"
version = out.strip().split()[1]
# add path and version to the command dictionary
self._commands[cmd] = {"path": path,
"version": version}
self._logger.info("Found LoadLeveler tools: %s" % self._commands)
# see if we can get some information about the cluster, e.g.,
# different queues, number of processes per node, etc.
# TODO: this is quite a hack. however, it *seems* to work quite
# well in practice.
# modi by hgkim
# purge temporary files
if self._adaptor.purge_on_start:
cmd = "find $HOME/.radical/saga/adaptors/loadl_job" \
" -type f -mtime +%d -print -delete | wc -l" \
% self._adaptor.purge_older_than
ret, out, _ = self.shell.run_sync(cmd)
if ret == 0 and out != "0":
self._logger.info("Purged %s temporary files" % out)
# ----------------------------------------------------------------
#
def finalize(self, kill_shell=False):
if kill_shell :
if self.shell :
self.shell.finalize (True)
def __remote_mkdir(self, path):
"""
Creates a directory on the remote host.
:param path: the remote directory to be created.
"""
# check if the path exists
ret, out, _ = self.shell.run_sync(
"(test -d %s && echo -n 0) || (mkdir -p %s && echo -n 1)"
% (path, path))
if ret == 0 and out == "1":
self._logger.info("Remote directory created: %s" % path)
elif ret != 0:
# something went wrong
raise NoSuccess("Couldn't create remote directory - %s\n%s" % (out, path))
def __remote_job_info_path(self, loadl_job_id="$LOADL_JOB_NAME"):
"""
Returns the path of the remote job info file.
:param loadl_job_id: the LoadLeveler job id.
if omitted an environment variable representing the job id will be used.
:return: path to the remote job info file
"""
return "%s/%s" % (self.temp_path, loadl_job_id)
def __clean_remote_job_info(self, loadl_job_id):
"""
Removes the temporary remote file containing job info.
:param loadl_job_id: the LoadLeveler job id
"""
path = self.__remote_job_info_path(loadl_job_id)
ret, out, _ = self.shell.run_sync("rm %s" % path)
if ret != 0:
self._logger.debug("Remote job info couldn't be removed: %s" % path)
def __get_remote_job_info(self, loadl_job_id):
"""
Obtains the job info from a temporary remote file created by the
llsubmit script.
:param loadl_job_id: the LoadLeveler job id
:return: a dictionary with the job info
"""
ret, out, _ = self.shell.run_sync("cat %s"
% self.__remote_job_info_path(loadl_job_id))
if ret != 0:
return None
qres = SgeKeyValueParser(out, key_suffix=":").as_dict()
if "signal" in qres : state = c.CANCELED
elif "exit_status" not in qres : state = c.RUNNING
elif not int(qres["exit_status"]): state = c.DONE
else : state = c.FAILED
job_info = {
'state' : state,
'exec_hosts' : qres.get("hostname"),
'create_time' : qres.get("qsub_time"),
'start_time' : qres.get("start_time"),
'end_time' : qres.get("end_time"),
'returncode' : int(qres.get("exit_status", -1)),
'gone' : False
}
return job_info
def __generate_llsubmit_script(self, jd):
"""
generates an IBM LoadLeveler script from a SAGA job description
:param jd: job descriptor
:return: the llsubmit script
"""
loadl_params = ''
exec_string = ''
args_strings = ''
if jd.executable is not None:
exec_string = "%s" % (jd.executable)
if jd.arguments is not None:
for arg in jd.arguments:
args_strings += "%s " % (arg)
if jd.name is not None:
loadl_params += "#@ job_name = %s \n" % jd.name
if jd.environment is not None:
variable_list = ''
for key in list(jd.environment.keys()):
variable_list += "%s=%s;" % (key, jd.environment[key])
loadl_params += "#@ environment = %s \n" % variable_list
# Energy
if self.energy_policy_tag:
loadl_params += "#@ energy_policy_tag = %s\n" % self.energy_policy_tag
loadl_params += "#@ minimize_time_to_solution = yes\n"
if jd.working_directory is not None:
loadl_params += "#@ initialdir = %s\n" % jd.working_directory
if jd.output is not None:
loadl_params += "#@ output = %s\n" % jd.output
if jd.error is not None:
loadl_params += "#@ error = %s\n" % jd.error
if jd.wall_time_limit is not None:
hours = int(jd.wall_time_limit / 60)
minutes = jd.wall_time_limit % 60
loadl_params += "#@ wall_clock_limit = %s:%s:00\n" \
% (str(hours), str(minutes))
if jd.total_cpu_count is None:
# try to come up with a sensible (?) default value
jd.total_cpu_count = 1
else:
if jd.total_cpu_count > 1:
if self.job_type not in ['bluegene']:
# 'bluegene' and total_tasks dont live well together
loadl_params += "#@ total_tasks = %s\n" % jd.total_cpu_count
loadl_params += "#@ job_type = %s\n" % self.job_type
if self.job_type == 'bluegene':
BGQ_CORES_PER_NODE = 16 # Only true for BG/Q
if jd.total_cpu_count % BGQ_CORES_PER_NODE > 0:
raise Exception("#cores requested is no multiple of 16.")
loadl_params += "#@ bg_size = %d\n" \
% (jd.total_cpu_count / BGQ_CORES_PER_NODE)
if self.blocking:
loadl_params += "#@ blocking = %s\n" % self.blocking
if self.enforce_resource_submission:
loadl_params += "#@ resources ="
if self.enforce_consumable_cpus:
loadl_params += " ConsumableCpus(%d)" % jd.total_cpu_count
if self.enforce_consumable_memory:
if jd.total_physical_memory is None:
raise Exception("total_physical_memory is not set, but required by enforce_consumable_memory.")
loadl_params += " ConsumableMemory(%dmb)" % jd.total_physical_memory
if self.enforce_consumable_large_page_memory:
# TODO: Not sure how to get a sensible value for this
if jd.total_physical_memory is None:
raise Exception("total_physical_memory is not set, but required by enforce_consumable_large_page_memory.")
loadl_params += " ConsumableLargePageMemory(%dmb)" % jd.total_physical_memory
if self.enforce_consumable_virtual_memory:
# TODO: Not sure how to get a sensible value for this
if jd.total_physical_memory is None:
raise Exception("total_physical_memory is not set, but required by enforce_consumable_virtual_memory.")
loadl_params += " ConsumableVirtualMemory(%dmb)" % jd.total_physical_memory
loadl_params += "\n"
# Number of islands to allocate resources on, can specify a number, or a min/max
if self.island_count:
loadl_params += "#@ island_count = %s\n" % self.island_count
# Specify network configuration
if self.network_mpi:
loadl_params += "#@ network.MPI = %s\n" % self.network_mpi
# Specify node usage policy
if self.node_usage:
loadl_params += "#@ node_usage = %s\n" % self.node_usage
if jd.job_contact is not None:
if len(jd.job_contact) > 1:
raise Exception("Only one notify user supported.")
loadl_params += "#@ notify_user = %s\n" % jd.job_contact[0]
loadl_params += "#@ notification = always\n"
# some default (?) parameter that seem to work fine everywhere...
if jd.queue is not None:
loadl_params += "#@ class = %s\n" % jd.queue
else:
loadl_params += "#@ class = edison\n"
# finally, we 'queue' the job
loadl_params += "#@ queue\n"
# Job info, executable and arguments
job_info_path = self.__remote_job_info_path()
script_body = [
'function aborted() {',
' echo Aborted with signal $1.',
' echo "signal: $1" >>%s' % job_info_path,
' echo "end_time: $(LC_ALL=en_US.utf8 date \'+%%s\')" >>%s' % job_info_path,
' exit -1',
'}',
'mkdir -p %s' % self.temp_path,
'for sig in SIGHUP SIGINT SIGQUIT SIGTERM SIGUSR1 SIGUSR2; do trap "aborted $sig" $sig; done',
'echo "hostname: $HOSTNAME" > %s' % job_info_path,
'echo "qsub_time: %s" >>%s' % (time.time(), job_info_path),
'echo "start_time: $(LC_ALL=en_US.utf8 date \'+%%s\')" >>%s' % job_info_path
]
script_body += ['%s %s' % (exec_string, args_strings)]
script_body += [
'echo "exit_status: $?" >>%s' % job_info_path,
'echo "end_time: $(LC_ALL=en_US.utf8 date \'+%%s\')" >>%s' % job_info_path
]
# convert exec and args into an string and
# escape all double quotes and dollar signs, otherwise 'echo |'
# further down won't work.
# only escape '$' in args and exe. not in the params
script_body = "\n".join(script_body).replace('$', '\\$')
# Dirty Trick for Joule: it expects an "executable" parameter,
# but doesn't really need it, therefore we pass it after the queue
# parameter, where it is not used anymore.
if self.explicit_exec:
loadl_params += "#@ executable = BOGUS\n"
loadlscript = "\n%s%s" % (loadl_params, script_body)
return loadlscript.replace('"', '\\"')
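# Rough shape of a generated script, as a hedged illustration only; the exact
# directives depend on the job description and the URL options handled above:
#
#     #@ job_name = my_job
#     #@ initialdir = /home/user/run
#     #@ output = job.out
#     #@ error = job.err
#     #@ wall_clock_limit = 1:30:00
#     #@ total_tasks = 64
#     #@ job_type = MPICH
#     #@ class = normal
#     #@ queue
#     (job info bookkeeping and the executable invocation follow)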
# ----------------------------------------------------------------
#
def _job_run(self, jd):
""" runs a job via llsubmit
"""
try:
# create a LoadLeveler job script from SAGA job description
script = self.__generate_llsubmit_script(jd)
self._logger.debug("Generated LoadLeveler script: %s" % script)
except Exception as e:
raise BadParameter('error creating batch script') from e
# try to create the working/output/error directories (if defined)
# WARNING: this assumes a shared filesystem between login node and
# compute nodes.
if jd.working_directory is not None and len(jd.working_directory) > 0:
self.__remote_mkdir(jd.working_directory)
if jd.output is not None and len(jd.output) > 0:
self.__remote_mkdir(os.path.dirname(jd.output))
if jd.error is not None and len(jd.error) > 0:
self.__remote_mkdir(os.path.dirname(jd.error))
# submit the LoadLeveler script
# Now we want to execute the script. This process consists of two steps:
# (1) we create a temporary file with 'mktemp' and write the contents of
# the generated Load Leveler script into it
# (2) we call 'llsubmit <tmpfile>' to submit the script to the queueing system
cmdline = """SCRIPTFILE=`mktemp -t RS-LOADLJobScript.XXXXXX` && echo "%s" > $SCRIPTFILE && %s%s $SCRIPTFILE && rm -f $SCRIPTFILE""" % (script, self._commands['llsubmit']['path'], self.cluster_option)
self._logger.info("cmdline: %r", cmdline)
ret, out, _ = self.shell.run_sync(cmdline)
if ret != 0:
# something went wrong
raise NoSuccess("Error running 'llsubmit' job: %s. Script: %s"
% (out, script))
else:
# stdout contains the job id
#job_id = "[%s]-[%s]" % (self.rm, out.strip().split('.')[0])
job_id = "[%s]-[%s]" % (self.rm, getId(out))
self._logger.info("Submitted LoadLeveler job with id: %s" % job_id)
# add job to internal list of known jobs.
self.jobs[job_id] = {
'state': c.PENDING,
'exec_hosts': None,
'returncode': None,
'create_time': None,
'start_time': None,
'end_time': None,
'gone': False
}
return job_id
# ----------------------------------------------------------------
#
def _retrieve_job(self, job_id, max_retries=10):
""" see if we can get some info about a job that we don't
know anything about
refactoring by referencing sgejob.py
"""
rm, pid = self._adaptor.parse_id(job_id)
# run the LoadLeveler 'llq' command to get some info about our job
ret, out, _ = self.shell.run_sync("%s -j %s -r %%st %%dd %%cc %%jt %%c %%Xs" %
(self._commands['llq']['path'], pid))
# output is something like
# R!03/25/2014 13:47!!Serial!normal!kisti.kim
# OR
# llq: There is currently no job status to report.
if ret != 0:
raise NoSuccess("Couldn't reconnect to job '%s': %s" % (job_id, out))
else:
# the job seems to exist on the backend. let's gather some data
job_info = {
'state': c.UNKNOWN,
'exec_hosts': None,
'returncode': None,
'create_time': None,
'start_time': None,
'end_time': None,
'gone': False
}
# lastStr = out.rstrip().split('\n')[-1]
lastStr = out.rstrip()
self._logger.debug(lastStr)
if lastStr.startswith('llq:'):
# llq: There is currently no job status to report
job_info = None
retries = 0
while job_info is None and retries < max_retries:
job_info = self.__get_remote_job_info(pid)
# print("llq:", job_info)
if job_info is None and retries > 0:
message = "__get_remote_job_info get None, pid: %s and retries: %d" % (pid, retries)
self._logger.debug(message)
# Exponential back-off
time.sleep(2**retries)
retries += 1
if job_info is None:
raise NoSuccess("__get_remote_job_info exceed %d times(s), "
"pid: %s" % (max_retries, pid))
self._logger.info("_retrieve_job: %r", job_info)
else:
# job is still in the queue
results = lastStr.split('!')
self._logger.info("results: %r",results)
job_info['state'] = _ll_to_saga_jobstate(results[0])
job_info['returncode'] = None # still running
job_info['start_time'] = results[1]
# job_info['exec_hosts'] = results[5]
return job_info
# ----------------------------------------------------------------
#
def _job_get_info(self, job_id):
""" get job attributes via llq
"""
# if we don't have the job in our dictionary, we don't want it
if job_id not in self.jobs:
raise NoSuccess("Unknown job ID: %s. Can't update state." % job_id)
# prev. info contains the info collect when _job_get_info
# was called the last time
prev_info = self.jobs[job_id]
# if the 'gone' flag is set, there's no need to query the job
# state again. it's gone forever
if prev_info['gone'] is True:
self._logger.warning("Job information is not available anymore.")
return prev_info
# if the job is in a terminal state don't expect it to change anymore
if prev_info["state"] in [c.CANCELED, c.FAILED, c.DONE]:
return prev_info
# retrieve updated job information
curr_info = self._retrieve_job(job_id)
if curr_info is None:
prev_info["gone"] = True
return prev_info
# update the job info cache and return it
self.jobs[job_id] = curr_info
return curr_info
# ----------------------------------------------------------------
#
def _job_get_state(self, job_id):
""" get the job's state
"""
# check if we have already reach a terminal state
if self.jobs[job_id]['state'] in [c.CANCELED, c.FAILED, c.DONE]:
return self.jobs[job_id]['state']
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['state']
# ----------------------------------------------------------------
#
def _job_get_exit_code(self, job_id):
""" get the job's exit code
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['returncode'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['returncode']
# ----------------------------------------------------------------
#
def _job_get_execution_hosts(self, job_id):
""" get the job's exit code
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['exec_hosts'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['exec_hosts']
# ----------------------------------------------------------------
#
def _job_get_create_time(self, job_id):
""" get the job's creation time
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['create_time'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['create_time']
# ----------------------------------------------------------------
#
def _job_get_start_time(self, job_id):
""" get the job's start time
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['start_time'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['start_time']
# ----------------------------------------------------------------
#
def _job_get_end_time(self, job_id):
""" get the job's end time
"""
# check if we can / should update
if not self.jobs[job_id]['gone'] and not self.jobs[job_id]['end_time']:
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['end_time']
# ----------------------------------------------------------------
#
def _job_cancel(self, job_id):
""" cancel the job via 'llcancel'
"""
rm, pid = self._adaptor.parse_id(job_id)
ret, out, _ = self.shell.run_sync("%s%s %s\n" \
% (self._commands['llcancel']['path'], self.cluster_option, pid))
if ret != 0:
raise NoSuccess("Error canceling job via 'llcancel': %s" % out)
#self.__clean_remote_job_info(pid)
# assume the job was successfully canceled
self.jobs[job_id]['state'] = c.CANCELED
# ----------------------------------------------------------------
#
def _job_wait(self, job_id, timeout):
""" wait for the job to finish or fail
"""
time_start = time.time()
time_now = time_start
rm, pid = self._adaptor.parse_id(job_id)
while True:
state = self._job_get_state(job_id=job_id)
if state == c.UNKNOWN :
raise IncorrectState("cannot get job state")
if state in [c.DONE, c.FAILED, c.CANCELED]:
# self.__clean_remote_job_info(pid)
return True
# avoid busy poll
time.sleep(0.5)
# check if we hit timeout
if timeout >= 0:
time_now = time.time()
if time_now - time_start > timeout:
return False
# ----------------------------------------------------------------
#
@SYNC_CALL
def create_job(self, jd):
""" implements cpi.job.Service.get_url()
"""
# check that only supported attributes are provided
for attribute in jd.list_attributes():
if attribute not in _ADAPTOR_CAPABILITIES["jdes_attributes"]:
raise BadParameter("'jd.%s' is not supported by this adaptor"
% attribute)
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
"job_description": jd,
"job_schema": self.rm.schema,
"reconnect": False
}
return sj.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_job(self, jobid):
""" Implements cpi.job.Service.get_job()
"""
self._logger.info("get_job: %r", jobid)
# try to get some information about this job and throw it into
# our job dictionary.
self.jobs[jobid] = self._retrieve_job(jobid)
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
# TODO: fill job description
"job_description": sj.Description(),
"job_schema": self.rm.schema,
"reconnect": True,
"reconnect_jobid": jobid
}
return sj.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_url(self):
""" implements cpi.job.Service.get_url()
"""
return self.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def list(self):
""" implements cpi.job.Service.list()
"""
ids = []
ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s | grep `whoami`" %
self._commands['llq']['path'])
if ret != 0 and len(out) > 0:
raise NoSuccess("failed to list jobs via 'llq': %s" % out)
elif ret != 0 and len(out) == 0:
# llq | grep `` exits with 1 if the list is empty
pass
else:
for line in out.split("\n"):
# output looks like this:
# v4c064.8637.0 ydkim 3/27 13:33 R 50 normal v4c064
# v4c064.8638.0 ydkim 3/27 13:37 R 50 normal v4c064
# v4c064.8639.0 ydkim 3/27 13:37 R 50 normal v4c065
# v4c064.8640.0 ydkim 3/27 13:37 R 50 normal v4c065
# v4c064.8641.0 ydkim 3/27 13:37 I 50 normal
lineArray=line.split()
if len(lineArray) > 1:
# lineArray[0] : v4c064.8637.0
tmpStr=lineArray[0].split('.')
jobid = "[%s]-[%s]" % (self.rm, ".".join(tmpStr[:2]))
ids.append(str(jobid))
return ids
# # ----------------------------------------------------------------
# #
# def container_run (self, jobs) :
# self._logger.debug ("container run: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.run ()
#
#
# # ----------------------------------------------------------------
# #
# def container_wait (self, jobs, mode, timeout) :
# self._logger.debug ("container wait: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.wait ()
#
#
# # ----------------------------------------------------------------
# #
# def container_cancel (self, jobs, timeout) :
# self._logger.debug ("container cancel: %s" % str(jobs))
# raise NoSuccess ("Not Implemented");
###############################################################################
#
class LOADLJob (cpi.job.Job):
""" implements cpi.job.Job
"""
def __init__(self, api, adaptor):
# initialize parent class
self._cpi_base = super(LOADLJob, self)
self._cpi_base.__init__(api, adaptor)
@SYNC_CALL
def init_instance(self, job_info):
""" implements cpi.job.Job.init_instance()
"""
# init_instance is called for every new sj.Job object
# that is created
self.jd = job_info["job_description"]
self.js = job_info["job_service"]
if job_info['reconnect'] is True:
self._id = job_info['reconnect_jobid']
self._name = self.jd.get(c.NAME)
self._started = True
else:
self._id = None
self._name = self.jd.get(c.NAME)
self._started = False
return self.get_api()
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_state(self):
""" implements cpi.job.Job.get_state()
"""
if self._started is False:
# jobs that are not started are always in 'NEW' state
return c.NEW
else:
return self.js._job_get_state(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def wait(self, timeout):
""" implements cpi.job.Job.wait()
"""
if self._started is False:
raise IncorrectState("Can't wait for job that hasn't been started")
else:
self.js._job_wait(self._id, timeout)
# ----------------------------------------------------------------
#
@SYNC_CALL
def cancel(self, timeout):
""" implements cpi.job.Job.cancel()
"""
if self._started is False:
raise IncorrectState("Can't wait for job that hasn't been started")
else:
self.js._job_cancel(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def run(self):
""" implements cpi.job.Job.run()
"""
self._id = self.js._job_run(self.jd)
self._started = True
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_service_url(self):
""" implements cpi.job.Job.get_service_url()
"""
return self.js.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_id(self):
""" implements cpi.job.Job.get_id()
"""
return self._id
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_name (self):
""" Implements cpi.job.Job.get_name() """
return self._name
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_exit_code(self):
""" implements cpi.job.Job.get_exit_code()
"""
if self._started is False:
return None
else:
return self.js._job_get_exit_code(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_created(self):
""" implements cpi.job.Job.get_created()
"""
if self._started is False:
return None
else:
# FIXME: convert to EPOCH
return self.js._job_get_create_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_started(self):
""" implements cpi.job.Job.get_started()
"""
if self._started is False:
return None
else:
# FIXME: convert to EPOCH
return self.js._job_get_start_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_finished(self):
""" implements cpi.job.Job.get_finished()
"""
if self._started is False:
return None
else:
return self.js._job_get_end_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_execution_hosts(self):
""" implements cpi.job.Job.get_execution_hosts()
"""
if self._started is False:
return None
else:
return self.js._job_get_execution_hosts(self._id)
# ------------------------------------------------------------------------------
|
python
|
# Privacy Policy Browser
# Credit: Josiah Baldwin, Eric Dale, Ethan Fleming, Jacob Hilt,
# Darian Hutchinson, Joshua Lund, Bennett Wright
#
# The alexa skill is implemented as a collection of handlers for certain types of user input.
# This is paired with an S3 bucket that holds data about location and acceptance in the privacy policy.
# These are paired together to implement a kind of state machine.
#
# The components of the privacy policy are handled in the privacy_policy module, go there for more info.
import logging
import ask_sdk_core.utils as ask_utils
import sys
import os
sys.path.append(os.path.dirname(__file__))
from ask_sdk_s3.adapter import S3Adapter
s3_adapter = S3Adapter(bucket_name=os.environ["S3_PERSISTENCE_BUCKET"])
from ask_sdk_core.skill_builder import CustomSkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler, AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model.ui import PlayBehavior, Reprompt, SsmlOutputSpeech, PlainTextOutputSpeech
from ask_sdk_model import Response, IntentRequest, Intent, Slot, SlotConfirmationStatus
from privacy_policy import PrivacyPolicy
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
policy = PrivacyPolicy("policy.xml")
max_section_num = len(policy.sections)
repeat = False
cont = False
quit = False
persistent_variables = {
"lastSectionRead": -1,
"acceptedSections": policy.accepted_sections
}
def load_attributes(handler_input):
"""Load persistent variables from S3 storage into the module-level dict."""
global persistent_variables
saved = handler_input.attributes_manager.persistent_attributes
if saved:
persistent_variables = saved
def save_attributes(handler_input):
"""Save persistent variables to S3 storage."""
attributes_manager = handler_input.attributes_manager
attributes_manager.persistent_attributes = persistent_variables
attributes_manager.save_persistent_attributes()
def create_section_response(handler_input, section_number):
"""Return a response object that reads the specified section number."""
if section_number >= max_section_num:
return (
handler_input.response_builder
.speak("The end of the policy was reached.")
.ask("Would you like to accept the policy?")
.response
)
section = policy.sections[section_number]
persistent_variables["lastSectionRead"] = section_number
speak_output = 'Starting from section {num}'.format(num = section_number + 1)
speak_output += ". " + section.all_atoms_as_string()
speak_output += "To accept or decline this section of the policy, say accept or decline. Otherwise, say continue."
return (
handler_input.response_builder
.speak(speak_output)
.ask("Would you like to continue reading the next section? To do so, say continue.")
.response
)
def have_read_section():
"""Return true if a section was previously read."""
return persistent_variables["lastSectionRead"] >= 0
def list_accepted_sections():
"""Return a string stating which sections of the policy have been read."""
count = 0
speak_output = ""
for i in range(len(policy.accepted_sections)):
if policy.is_section_accepted(i):
if count == 0:
speak_output += ": "
else:
speak_output += ", "
count += 1
speak_output += str(i + 1)
if count == len(policy.accepted_sections):
return "You have accepted all sections of this policy."
elif count == 1:
return "You have accepted section" + speak_output + "."
elif count > 0:
return "You have accepted sections" + speak_output + "."
else:
return "You have not accepted any sections in this policy."
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler run when the skill first launches."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
"""Return a response object that plays at launch."""
# Maybe we should change up this opening dialog to something like:
#
# "Welcome to the privacy policy reader. I can read through privacy policies and keep track
# of which sections you would like to accept or decline. Say, 'list options' to hear all that I can do for you,
# or say, 'Start from the beginning' to start reading."
#
# This might make it sound less overwhelming on startup - Darian
speak_output = "Welcome to the privacy policy reader. Here are some things I can do for you: 1. list options, " \
+ "2. hear the table of contents, " \
+ "3. Start from the beginning, " \
+ "4. start from section by saying the word section followed by the desired section number," \
+ "5. 'skip section' or 'continue', " \
+ "6. 'repeat that', " \
+ "7. 'accept or decline section' optionally followed by the section number, " \
+ "8. 'read accepted sections', " \
+ "or 'quit.'"
load_attributes(handler_input)
if persistent_variables["lastSectionRead"] >= 0:
speak_output = "Welcome back to the privacy policy reader. To continue from where you left off say continue, " \
+ "or to hear other options say 1, help, or menu."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# OUR CODE #################################################################################################
def get_toc_string():
"""Return a string stating the top-level titles of the privacy policy."""
titles = "Here are the Privacy Policy section titles"
i = 1
for title in policy.section_titles:
titles += f". {i}. " + str(title).rstrip(".")
i += 1
return titles
class TableOfContentsHandler(AbstractRequestHandler):
"""Handler run when the user requests the table of contents."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("TableOfContents")(handler_input)
def handle(self, handler_input):
"""Return a response object that plays the table of contents."""
return (
handler_input.response_builder
.speak(get_toc_string())
.ask("What would you like to do?")
.response
)
class ReadAcceptedHandler(AbstractRequestHandler):
"""Handler run when the user requests a list of accepted sections."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("readAccepted")(handler_input)
def handle(self, handler_input):
"""Return a response object that plays a list of accepted sections."""
load_attributes(handler_input)
speak_output = list_accepted_sections()
return (
handler_input.response_builder
.speak(speak_output)
.ask("What would you like to do?")
.response
)
class ListOptionsHandler(AbstractRequestHandler):
"""Handler run when the user requests help or menu options."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("ListOptions")(handler_input) or ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
"""Return a response object that plays a list of menu options."""
speak_output = "OK, you can tell me " \
+ "1. 'menu' to listen to this menu, " \
+ "2. 'table of contents', " \
+ "3. 'read from beginning', " \
+ "4. 'read section' followed by the section number, " \
+ "5. 'skip section' or 'continue', " \
+ "6. 'repeat that', " \
+ "7. 'accept or decline section' optionally followed by the section number, " \
+ "8. 'read accepted sections', " \
+ "or 'quit.'"
return (
handler_input.response_builder
.speak(speak_output)
.ask("What would you like to do?")
.response
)
class ResetHandler(AbstractRequestHandler):
"""Debug handle to reset AWS slots
Invoke with "Erase all data" """
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("Reset")(handler_input)
def handle(self, handler_input):
"""Delete persistent variables and return a response object that
reports success."""
policy.decline_all_sections()
persistent_variables["lastSectionRead"] = -1
persistent_variables["acceptedSections"] = policy.accepted_sections
response = "Okay, I've forgotten everything. What was my name again?"
save_attributes(handler_input)
return (
handler_input.response_builder
.speak(response)
.ask("What would you like to do?")
.response
)
class StartFromSectionHandler(AbstractRequestHandler):
"""Handler run when the user asks to start from a section."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("StartFromSection")(handler_input)
def handle(self, handler_input):
"""Update the last section read variable and return a response object
that plays the requested section."""
load_attributes(handler_input)
slots = handler_input.request_envelope.request.intent.slots
num = int(slots["num"].value) - 1
response = create_section_response(handler_input, num)
save_attributes(handler_input)
return response
class AcceptPolicyHandler(AbstractRequestHandler):
"""Handler run when the user asks to accept a section of the policy."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("AcceptPolicy")(handler_input)
def handle(self, handler_input):
"""Update the accepted sections variable and return a response object
that reports success or failure."""
load_attributes(handler_input)
slots = handler_input.request_envelope.request.intent.slots
user_acceptence = slots["userAcceptence"].value
speak_output = ""
if slots["acceptNum"].value:
num = int(slots["acceptNum"].value) - 1
elif slots["acceptWhat"].value:
num = -1
else:
if not have_read_section():
return (
handler_input.response_builder
.speak("Sorry, you haven't read any sections yet.")
.ask("What would you like to do?")
.response
)
num = persistent_variables["lastSectionRead"]
policy.set_accepted_sections(persistent_variables["acceptedSections"])
if user_acceptence == "accept":
if num >= 0:
policy.accept_section(num)
speak_output += "Ok. Section " + str(num + 1) + " has been accepted."
else:
policy.accept_all_sections()
speak_output += "Ok. All sections of the policy have been accepted."
elif user_acceptence == "decline":
if num >= 0:
policy.decline_section(num)
speak_output += "Ok. Section " + str(num + 1) + " has been declined."
else:
policy.decline_all_sections()
speak_output += "Ok. All sections of the policy have been declined."
persistent_variables["acceptedSections"] = policy.accepted_sections
save_attributes(handler_input)
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class StartFromBeginningHandler(AbstractRequestHandler):
"""Handler run when the user asks to start reading from the beginning the
policy."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("StartFromBeginning")(handler_input)
def handle(self, handler_input):
"""Update the last section read variable and return a response object
that plays the first section of the policy."""
load_attributes(handler_input)
response = create_section_response(handler_input, 0)
save_attributes(handler_input)
return response
# can we have 'next section' call this handler as well?
class ContinueHandler(AbstractRequestHandler):
"""Handler run when the user asks to continue reading the next section
of the policy."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("Continue")(handler_input)
def handle(self, handler_input):
"""Update the last section read variable and return a response object
that plays the next section of the policy."""
load_attributes(handler_input)
persistent_variables["lastSectionRead"] += 1
num = persistent_variables["lastSectionRead"]
response = create_section_response(handler_input, num)
save_attributes(handler_input)
return response
# INTENTS TO BE CALLED WHILE READING:
class RepeatWhileReadingHandler(AbstractRequestHandler):
"""Handler run when the user asks to repeat the current section of the
policy."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("RepeatWhileReading")(handler_input)
def handle(self, handler_input):
"""Return a response object that plays the current section of the
policy or reports failure if no section has been read."""
load_attributes(handler_input)
if not have_read_section():
return (
handler_input.response_builder
.speak("Sorry, I'm not sure which section you want me to repeat.")
.ask("What would you like to do?")
.response
)
num = persistent_variables["lastSectionRead"]
return create_section_response(handler_input, num)
# PREGEN CODE #################################################################################################
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Handler run when the user asks to cancel the skill."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
"""Return a response object that plays a goodbye message and leaves the
skill."""
speak_output = "Goodbye!"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FallbackIntentHandler(AbstractRequestHandler):
"""Handler run when the user issues a command we don't handle."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_intent_name("AMAZON.FallbackIntent")(handler_input)
def handle(self, handler_input):
"""Return a response object that reports we don't understand that
command, and await further commands."""
logger.info("In FallbackIntentHandler")
speech = "Hmm, I'm not sure. You can say Hello or Help. What would you like to do?"
reprompt = "I didn't catch that. What can I help you with?"
return handler_input.response_builder.speak(speech).ask(reprompt).response
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler run when the session ends."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
"""Return a blank response object that terminates the session."""
return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
"""The intent reflector is used for interaction model testing and
debugging. It will simply repeat the intent the user said. You can
create custom handlers for your intents by defining them above, then
also adding them to the request handler chain below."""
def can_handle(self, handler_input):
"""Return true if this handler can handle the specified handler
input."""
return ask_utils.is_request_type("IntentRequest")(handler_input)
def handle(self, handler_input):
"""Return a response object reporting the intent that was triggered."""
intent_name = ask_utils.get_intent_name(handler_input)
speak_output = "You just triggered " + intent_name + "."
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Generic error handling to capture any syntax or routing errors. If you
receive an error stating the request handler chain is not found, you
have not implemented a handler for the intent being invoked or included
it in the skill builder below."""
def can_handle(self, handler_input, exception):
"""Return true if this handler can handle the specified handler
input."""
return True
def handle(self, handler_input, exception):
"""Log an error and return a response object indicating an error
occurred."""
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# END PREGEN CODE #################################################################################################
# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = CustomSkillBuilder(persistence_adapter=s3_adapter)
sb.add_request_handler(LaunchRequestHandler())
# OUR INTENTS
sb.add_request_handler(ListOptionsHandler())
sb.add_request_handler(TableOfContentsHandler())
sb.add_request_handler(ReadAcceptedHandler())
sb.add_request_handler(StartFromSectionHandler())
sb.add_request_handler(StartFromBeginningHandler())
sb.add_request_handler(ContinueHandler())
sb.add_request_handler(ResetHandler())
# READING INTENTS
sb.add_request_handler(AcceptPolicyHandler())
sb.add_request_handler(RepeatWhileReadingHandler())
# OUR INTENTS ^
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler()
|
python
|
from sklearn.preprocessing import StandardScaler
from scipy import ndimage
import nilearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import PowerTransformer
from scipy.stats import variation
import numpy as np
from densratio import densratio
from sklearn.neighbors import LocalOutlierFactor
from sklearn.utils import resample
from sklearn.utils import shuffle
from imblearn.over_sampling import ADASYN
from sklearn import preprocessing
from scipy.stats import ks_2samp
from sklearn.decomposition import FastICA, PCA
import nibabel as nib
from skimage.util import random_noise
from scipy.signal import wiener
from skimage.filters import unsharp_mask
from scipy import signal
import math
import load_data
import data_augmentation
####2D transformation
def standarization(data):
scaler = StandardScaler()
scaler.fit(data)
data = scaler.transform(data)
return data
def quantile_transform(data, random_state):
quantile_transformer = preprocessing.QuantileTransformer(n_quantiles=36, random_state=random_state)
data = quantile_transformer.fit_transform(data)
return data
def gussian_filter(data, sigma):
for i in range(len(data)):
data[i] = ndimage.gaussian_filter(data[i], sigma)
return data
def signal_clean(data):
data = nilearn.signal.clean(data)
return data
def robust_scaler(data):
scaler = RobustScaler()
data = scaler.fit_transform(data)
return data
def MinMax_scaler(data):
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
return data
def dublicate(data, number):
data_stacked = data.copy()
for i in range(number):
data_stacked = np.vstack((data, data_stacked))
return data_stacked
def concat(data1, data2):
data1 = np.vstack((data1, data2))
return data1
def shuffling(data, labels):
idx = np.random.permutation(len(labels))
data, labels = data[idx], labels[idx]
return data, labels
def PowerTransform(data):
power_transform = PowerTransformer()
data = power_transform.fit_transform(data)
return data
def coefficient_of_variance(data):
data = MinMax_scaler(data)
data = variation(data, axis=1)
return data
def density_ratio_estimation(train_data, test_data):
result = densratio(train_data, test_data)
sample_weight = result.compute_density_ratio(train_data)
return sample_weight
def outliers(train_data, train_labels, number_of_neighbours):
neigh = LocalOutlierFactor(n_neighbors=number_of_neighbours)
indices = neigh.fit_predict(train_data)
train_data_inlier = train_data[np.where(indices == 1)]
train_labels_inlier = train_labels[np.where(indices == 1)]
outlier_indices = np.where(indices == -1)
return train_data_inlier, train_labels_inlier, outlier_indices
def novelty(train_data, train_labels, test_data, test_labels, number_of_neighbours):
neigh = LocalOutlierFactor(n_neighbors=number_of_neighbours, novelty=True)
indices = neigh.fit(train_data)
indices = indices.predict(test_data)
test_data_inlier = test_data[np.where(indices == 1)]
print('test_labels[np.where(indices == 1)]',np.shape(test_labels[np.where(indices == 1)]))
test_labels_inlier = test_labels[np.where(indices == 1)]
outlier_indices = np.where(indices == -1)
return test_data_inlier, test_labels_inlier, outlier_indices
def upsampling(data,labels):
X = np.hstack((data, labels))
if (len(X[X[:, -1] == 0])>len(X[X[:, -1]==1])):
not_fewsamples = X[np.where(X[:, -1] == 0)]
fewsamples = X[np.where(X[:, -1] == 1)]
else:
not_fewsamples = X[np.where(X[:, -1] == 1)]
fewsamples = X[np.where(X[:, -1] == 0)]
if len(fewsamples)==0:
return data,labels
print('np.shape(fewsamples)',np.shape(fewsamples))
if (np.shape((np.unique(fewsamples,axis=0)))[0]<(len(not_fewsamples[:, -1])-len(fewsamples[:, -1]))):
fewsamples_upsampled = resample(fewsamples,
replace=True, # sample with replacement
n_samples=len(not_fewsamples[:, -1]) - len(fewsamples[:, -1]),
# match number in majority class
random_state=1) # reproducible results
else:
fewsamples_upsampled = resample(fewsamples,
replace=False, # sample without replacement
n_samples=len(not_fewsamples[:, -1])-len(fewsamples[:, -1]), # match number in majority class
random_state=1) # reproducible results
fewsamples_upsampled=np.vstack((fewsamples_upsampled, fewsamples))
fewsamples_upsampled = np.vstack((fewsamples_upsampled, not_fewsamples))
fewsamples_upsampled = shuffle(fewsamples_upsampled, random_state=42)
labels = fewsamples_upsampled[:, -1]
data = fewsamples_upsampled[:, 0:np.shape(fewsamples_upsampled)[1] - 1]
return data,labels
def resampling(data, labels):
AD_data = data[np.where(labels == 1)]
AD_labels = labels[np.where(labels == 1)]
Con_data = data[np.where(labels == 0)]
Con_labels = labels[np.where(labels == 0)]
indices = np.random.randint(0, len(AD_labels), len(AD_labels))
AD_data = AD_data[indices].copy()
AD_labels = AD_labels[indices].copy()
indices = np.random.randint(0, len(Con_labels), len(Con_labels))
Con_data = Con_data[indices].copy()
Con_labels = Con_labels[indices].copy()
data = concat(Con_data, AD_data)
labels = concat(Con_labels[:, np.newaxis], AD_labels[:, np.newaxis])
data, labels = shuffling(data, labels)
return data, labels
def synthetic(data, labels, num):
smote = ADASYN(ratio='all', n_neighbors=num)
data, labels = smote.fit_sample(data, labels)
return data, labels
def KSTest(train_data, test_data, step):
index = []
for i in range(0, len(train_data) - step, step):
for j in range(train_data.shape[1]):
r = ks_2samp(train_data[i:i + step, j], test_data[:, j])
if r[1] > 0.05:
index = np.append(index, j)
if len(index) == 0:
return train_data, test_data
index = index[:, np.newaxis]
index = index.astype(int)
index = removeDuplicates(index)
train_data[:, index] = 0
test_data[:, index] = 0
return train_data, test_data
def removeDuplicates(listofElements):
# Create an empty list to store unique elements
uniqueList = []
# Iterate over the original list and for each element
# add it to uniqueList, if it's not already there.
for elem in listofElements:
if elem not in uniqueList:
uniqueList.append(elem)
# Return the list of unique elements
return uniqueList
def ica(data, number_of_combonents):
ica = FastICA(n_components=number_of_combonents)
ICA_combonents = ica.fit_transform(data)
# ICA_combonents = ica.inverse_transform(ICA_combonents)
return ICA_combonents
def pca(data, number_of_combonents):
pca_m = PCA(n_components=number_of_combonents)
PCA_combonents = pca_m.fit_transform(data)
# ICA_combonents = pca_m.inverse_transform(ICA_combonents)
return PCA_combonents
####3D transformation
def g_po_sk(input=None):
input = input
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
sigma = 0.155
input[:, :, :, i] = random_noise(input[:, :, :, i], var=sigma ** 2)
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='poisson')
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='speckle')
input = np.moveaxis(input, 2, 1)
return input
def sp(input):
input = input
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='s&p')
input = np.moveaxis(input, 2, 1)
return input
def po(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='poisson')
input = np.moveaxis(input, 2, 1)
return input
def g_sp(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
sigma = 0.155
input[:, :, :, i] = random_noise(input[:, :, :, i], var=sigma ** 2)
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='s&p')
input = np.moveaxis(input, 2, 1)
return input
def g_po(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
sigma = 0.155
input[:, :, :, i] = random_noise(input[:, :, :, i], var=sigma ** 2)
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='poisson')
input = np.moveaxis(input, 2, 1)
return input
def g_sk(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
sigma = 0.155
input[:, :, :, i] = random_noise(input[:, :, :, i], var=sigma ** 2)
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='speckle')
input = np.moveaxis(input, 2, 1)
return input
def sp_sk(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='s&p')
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='speckle')
input = np.moveaxis(input, 2, 1)
return input
def sp_po(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='s&p')
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='poisson')
input = np.moveaxis(input, 2, 1)
return input
def po_sk(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='poisson')
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='speckle')
input = np.moveaxis(input, 2, 1)
return input
def noise_all(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
sigma = 0.155
input[:, :, :, i] = random_noise(input[:, :, :, i], var=sigma ** 2)
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='s&p')
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='poisson')
input[:, :, :, i] = random_noise(input[:, :, :, i], mode='speckle')
input = np.moveaxis(input, 2, 1)
return input
def apply_noise_manytypes(data):
data_noised = concatination(data, sp(data.copy()))
data_noised = concatination(data_noised, g_po_sk(data.copy()))
data_noised = concatination(data_noised, po(data.copy()))
data_noised = concatination(data_noised, g_sp(data.copy()))
data_noised = concatination(data_noised, g_po(data.copy()))
data_noised = concatination(data_noised, g_sk(data.copy()))
data_noised = concatination(data_noised, sp_sk(data.copy()))
data_noised = concatination(data_noised, sp_po(data.copy()))
data_noised = concatination(data_noised, po_sk(data.copy()))
data_noised = concatination(data_noised, noise_all(data.copy()))
return data_noised
def g_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def m_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def us_f(input): # unsharp filter
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = unsharp_mask(input[:, :, :, i], radius=5, amount=2)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def c_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def w_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = wiener(input[:, :, :, i], mysize=7)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def g_m_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def g_us_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
input[:, :, :, i] = unsharp_mask(input[:, :, :, i], radius=5, amount=2)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def m_us_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
input[:, :, :, i] = unsharp_mask(input[:, :, :, i], radius=5, amount=2)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def g_c_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def g_w_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
input[:, :, :, i] = wiener(input[:, :, :, i], mysize=7)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def c_w_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
input[:, :, :, i] = wiener(input[:, :, :, i], mysize=7)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def m_w_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
input[:, :, :, i] = wiener(input[:, :, :, i], mysize=7)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def m_c_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def m_g_c_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def m_g_us_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
input[:, :, :, i] = unsharp_mask(input[:, :, :, i], radius=5, amount=2)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def c_w_us_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
input[:, :, :, i] = wiener(input[:, :, :, i], mysize=7)
input[:, :, :, i] = unsharp_mask(input[:, :, :, i], radius=5, amount=2)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def c_w_us_g_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
input[:, :, :, i] = wiener(input[:, :, :, i], mysize=7)
input[:, :, :, i] = unsharp_mask(input[:, :, :, i], radius=5, amount=2)
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def c_w_us_m_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
input[:, :, :, i] = wiener(input[:, :, :, i], mysize=7)
input[:, :, :, i] = unsharp_mask(input[:, :, :, i], radius=5, amount=2)
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def all_f(input):
shape = np.shape(input)
num_of_inputs = shape[3]
input = np.moveaxis(input, 1, 2)
# print('input shape', np.shape(input))
for i in range(num_of_inputs):
input[:, :, :, i] = ndimage.gaussian_filter(input[:, :, :, i], .5)
input[:, :, :, i] = nilearn.signal.clean(input[:, :, :, i])
input[:, :, :, i] = wiener(input[:, :, :, i], mysize=7)
input[:, :, :, i] = unsharp_mask(input[:, :, :, i], radius=5, amount=2)
input[:, :, :, i] = signal.medfilt(input[:, :, :, i])
# print(np.shape(input))
input = np.moveaxis(input, 2, 1)
return input
def apply_filter_manytypes(data):
data_filter = g_f(data.copy())
data_filter = concatination(data_filter, m_f(data.copy()))
data_filter = concatination(data_filter, us_f(data.copy()))
data_filter = concatination(data_filter, c_f(data.copy()))
data_filter = concatination(data_filter, w_f(data.copy()))
data_filter = concatination(data_filter, g_m_f(data.copy()))
data_filter = concatination(data_filter, g_us_f(data.copy()))
data_filter = concatination(data_filter, m_us_f(data.copy()))
data_filter = concatination(data_filter, g_c_f(data.copy()))
data_filter = concatination(data_filter, g_w_f(data.copy()))
data_filter = concatination(data_filter, c_w_f(data.copy()))
data_filter = concatination(data_filter, m_w_f((data.copy())))
data_filter = concatination(data_filter, m_c_f(data.copy()))
data_filter = concatination(data_filter, m_g_c_f(data.copy()))
data_filter = concatination(data_filter, m_g_us_f(data.copy()))
data_filter = concatination(data_filter, c_w_us_f(data.copy()))
data_filter = concatination(data_filter, c_w_us_g_f(data.copy()))
data_filter = concatination(data_filter, c_w_us_m_f(data.copy()))
data_filter = concatination(data_filter, all_f(data.copy()))
return data_filter
def concatination(data1, data2):
shape_data1 = np.shape(data1)
shape_data2 = np.shape(data2)
matrix_data = np.zeros((shape_data1[0], shape_data1[1], shape_data1[2], shape_data1[3] + shape_data2[3]))
matrix_data[:, :, :, 0:shape_data1[3]] = data1
matrix_data[:, :, :, shape_data1[3]:shape_data1[3] + shape_data2[3]] = data2
return matrix_data
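# Note: concatination() above is essentially np.concatenate((data1, data2), axis=3),
# up to the float dtype of the pre-allocated buffer; the explicit version is kept as written.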
def flatten(data):
data = np.reshape(data, (np.shape(data)[0], -1))
return data
def deflatten(data, shape):
data = np.reshape(data, (-1, shape[1], shape[2], shape[3]))
return data
def select_max_features(mask, number_of_featrues):
mask_reduces = np.zeros((len(mask)))
argsmask = np.argsort((-mask).copy())
for i in range(number_of_featrues): mask_reduces[argsmask[i]] = mask[argsmask[i]]
return mask_reduces
def transposnig(input_data, order):
return input_data.transpose(order)
def size_editing(data, final_height):
data_length = data.shape[1]
if (data_length > final_height):
diff = abs(data_length - final_height) / 2
if (round(diff) > diff):
start = round(diff)
end = data_length - round(diff) + 1
return data[:, start:end, start:end, :]
else:
start = int(diff)
end = int(data_length - diff)
return data[:, start:end, start:end, :]
else:
diff = abs(data_length - final_height) / 2
if (round(diff) > diff):
resized_data = np.pad(data,
((0, 0), (round(diff), round(diff) - 1), (round(diff), round(diff) - 1), (0, 0)),
'constant', constant_values=(0, 0))
else:
resized_data = np.pad(data, ((0, 0), (round(diff), round(diff)), (round(diff), round(diff)), (0, 0)),
'constant', constant_values=(0, 0))
return resized_data
def depth_reshapeing(data):
depth = int(data.shape[3])
dim0 = int(data.shape[0])
dim1 = int(data.shape[1])
dim2 = int(data.shape[2])
step = math.floor(depth / 3)
reshaped_data = np.empty((dim0, dim1, dim2, 3))
for i in range(3):
if i == 2:
reshaped_data[:, :, :, i] = np.mean(data[:, :, :, step * i:depth], axis=3)
else:
reshaped_data[:, :, :, i] = np.mean(data[:, :, :, step * i:step * (i + 1)], axis=3)
return reshaped_data
def converting_nii_to_npz(file_name):
file_path = load_data.find_path(file_name)
nii_file = data_augmentation.load_obj(file_path)
np.savez(file_path[0:len(file_path) - 7] + '.npz', masked_voxels=nii_file)
def labels_convert_one_hot(labels):
length = len(labels)
if (labels == 0).all():
ones = np.ones((length, 1))
labels = np.hstack((ones, labels))
elif (labels == 1).all():
zeros = np.zeros((length, 1))
labels = np.hstack((zeros, labels))
else:
zeros = np.zeros((length, 1))
labels = np.hstack((zeros, labels))
indecies = np.where(labels[:, 1] == 0)
labels[indecies[0], 0] = 1
return labels
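# Worked example for labels_convert_one_hot, assuming labels is a column vector:
#   labels_convert_one_hot(np.array([[0.], [1.], [0.]]))
#   -> [[1., 0.], [0., 1.], [1., 0.]]
# i.e. column 0 encodes label 0 and column 1 encodes label 1.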
|
python
|
class Player(object):
def __init__(self, name, connection):
self.connection = connection
self.name = name
self.score = 0
|
python
|
import json
from typing import List, Tuple
import boto3
from botocore.client import BaseClient
from logger.decorator import lambda_auto_logging
from logger.my_logger import MyLogger
from utils.lambda_tool import get_environ_values
from utils.s3_tool import (
create_key_of_eorzea_database_merged_item,
create_key_of_irregular_data,
create_key_of_match_data,
create_key_of_xivapi_merged_item,
)
environ_names = ["TMP_DATA_BUCKET_NAME"]
logger = MyLogger(__name__)
@lambda_auto_logging(*environ_names)
def handler(event, context):
main(event)
def main(event: dict, s3_client: BaseClient = boto3.client("s3")):
(tmp_data_bucket_name,) = get_environ_values(environ_names)
process_id = get_process_id_from_event(event)
key_of_eorzea_database = create_key_of_eorzea_database_merged_item(process_id)
key_of_xivapi = create_key_of_xivapi_merged_item(process_id)
eorzea_database = get_s3_data(
tmp_data_bucket_name, key_of_eorzea_database, s3_client
)
xivapi = get_s3_data(tmp_data_bucket_name, key_of_xivapi, s3_client)
match, irregular = exec_matching(eorzea_database, xivapi)
key_of_match = create_key_of_match_data(process_id)
put_s3_data(tmp_data_bucket_name, key_of_match, match, s3_client)
if len(irregular) > 0:
key_of_irregular = create_key_of_irregular_data(process_id)
put_s3_data(tmp_data_bucket_name, key_of_irregular, irregular, s3_client)
logger.error("has irregular data", count=len(irregular))
def get_process_id_from_event(event: dict) -> str:
return event["id"]
def get_s3_data(bucket_name: str, key: str, s3_client: BaseClient) -> List[dict]:
option = {"Bucket": bucket_name, "Key": key}
resp = s3_client.get_object(**option)
return json.load(resp["Body"])
def exec_matching(
eorzea_database: List[dict], xivapi: List[dict]
) -> Tuple[List[dict], List[dict]]:
match = []
irregular = []
for item in eorzea_database:
parsed = [x for x in xivapi if x["Name_en"] == item["name"]]
if len(parsed) == 1:
api_item = parsed[0]
match.append({**api_item, **{"EorzeaDatabaseId": item["id"]}})
else:
irregular.append({"eorzea_database": item, "xivapi": parsed})
return match, irregular
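# Toy illustration of exec_matching (field values here are made up): an eorzea_database
# item with exactly one xivapi record of the same Name_en is merged into `match` together
# with its EorzeaDatabaseId; items with zero or multiple candidates end up in `irregular`.
#   eorzea = [{"id": "a1", "name": "Iron Ingot"}]
#   xiv = [{"Name_en": "Iron Ingot", "ID": 10}]
#   match, irregular = exec_matching(eorzea, xiv)
#   # match == [{"Name_en": "Iron Ingot", "ID": 10, "EorzeaDatabaseId": "a1"}]
#   # irregular == []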
def put_s3_data(bucket_name: str, key: str, data: List[dict], s3_client: BaseClient):
option = {
"Bucket": bucket_name,
"Key": key,
"Body": json.dumps(data, ensure_ascii=False).encode(),
"ContentType": "application/json",
}
s3_client.put_object(**option)
|
python
|
#!/usr/bin/python
"""
`yap_ipython.external.mathjax` is deprecated with yap_ipython 4.0+
mathjax is now installed by default with the notebook package
"""
import sys
if __name__ == '__main__' :
sys.exit("yap_ipython.external.mathjax is deprecated, Mathjax is now installed by default with the notebook package")
|
python
|
from random import random
from player import Player
from pokerUtils import PokerUtils
from board import Board
def decide(playerdecision, player, callamount, raiseamount=0):
# fold
if playerdecision == 2:
print(player.getname() + " loses")
players.remove(player)
# raise
if playerdecision == 1:
print(player.getname() + " raises by " + str(raiseamount))
player.chips = player.chips - callamount + raiseamount
# call
if playerdecision == 0:
print(player.getname() + " called")
player.chips = player.chips - callamount
# init players
p1 = Player("Alice", True, 1500)
p2 = Player("Bob", False, 1500)
players = [p1, p2]
roundIndex = 0
blindSize = [10, 15, 20, 30, 50, 75, 100, 150, 200, 300, 400, 500, 600, 800, 1000]
startPlayerValue = round(random() * len(players))
while True:
# setup blinds and variables
roundIndex = roundIndex + 1
smallBlind = blindSize[roundIndex]
smallBlindPlayer = players[(startPlayerValue + roundIndex) % len(players)]
bigBlindPlayer = players[(startPlayerValue + roundIndex + 1) % len(players)]
bigBlind = smallBlind * 2
callamount = bigBlind
# set new order accordingly
oldPlayerOrder = players
players = [smallBlindPlayer, bigBlindPlayer]
for player in oldPlayerOrder:
if player is not smallBlindPlayer and player is not bigBlindPlayer:
players.append(player)
# init deck
deck = PokerUtils.createdeck()
""" START GAME """
smallBlindPlayer.chips = smallBlindPlayer.chips - smallBlind
bigBlindPlayer.chips = bigBlindPlayer.chips - bigBlind
# players draw
for player in players:
player.draw(deck)
player.draw(deck)
# decide
while not all(x.hasCalled for x in players):
for player in players:
if not player.hasCalled:
# call, raise or fold # returns: [max(rankings), decision, raiseamount]
decision = player.decide(player.gethandranking(), callamount)
# check if raised
if decision[1] == 1:
# reset callstatus
for x in (players[:players.index(player)] + players[players.index(player) + 1:]):
x.hasCalled = False
decide(decision[1], player, callamount, decision[2])
""" DRAW FLOP (first 3 cards) """
myBoard = Board(deck)
# decide
while not all(x.hasCalled for x in players):
for player in players:
if not player.hasCalled:
# call, raise or fold # returns: [max(rankings), decision, raiseamount]
decision = player.decide(player.gethandranking(), callamount)
# check if raised
if decision[1] == 1:
# reset callstatus
for x in (players[:players.index(player)] + players[players.index(player) + 1:]):
x.hasCalled = False
decide(decision[1], player, callamount, decision[2])
""" DRAW TURN (4th card) """
myBoard.draw(deck)
# decide
while not all(x.hasCalled for x in players):
for player in players:
if not player.hasCalled:
# call, raise or fold # returns: [max(rankings), decision, raiseamount]
decision = player.decide(player.gethandranking(myBoard.getcommunitycards()), callamount)
# check if raised
if decision[1] == 1:
# reset callstatus
for x in (players[:players.index(player)] + players[players.index(player) + 1:]):
x.hasCalled = False
decide(decision[1], player, callamount, decision[2])
""" DRAW RIVER (5th card) """
myBoard.draw(deck)
# decide
ranks = []
for player in players:
decision = player.decide(player.gethandranking(myBoard.getcommunitycards()))
decide(decision[1], player, callamount)
ranks.append([decision[0], player.getname()])
print(ranks)
winners = []
for rank in ranks:
if max(ranks)[0] == rank[0]:
winners.append(rank[1])
if len(winners) > 1:
print("Draw between " + ' & '.join(winners))
else:
print(max(ranks)[1] + " wins!")
|
python
|
#
#
# This script compiles and runs all examples in /examples
# by mpifort and mpirun on four processors.
#
#
# Glob: https://docs.python.org/2/library/glob.html
import glob
# OS: https://docs.python.org/2/library/os.html
import os
for ftest in glob.glob('examples/*.f90'):
print("compile "+ftest)
os.system('mpifort '+ftest+' -J./build/include -L./build/lib -lmpifw')
print("run "+ftest)
os.system('mpirun -n 4 a.out')
print("\n")
os.system('rm a.out')
|
python
|
from django_unicorn.components import UnicornView
import requests
from bs4 import BeautifulSoup
class ContcompView(UnicornView):
url = "https://example.com"
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
cuenta = dict()
palabras = []
def count(self,*args,**kwargs):
self.page = requests.get(self.url)
self.data = self.page.text
self.soup = BeautifulSoup(self.data,features="html.parser")
self.text = self.soup.get_text()
self.palabras = self.text.split()
self.auxpalabras = []
for palabra in self.palabras:
if palabra in self.cuenta:
self.cuenta[palabra] += 1
else:
self.cuenta[palabra] = 1
self.auxpalabras.append(palabra)
self.palabras = self.auxpalabras
# TODO: sort by the most repeated words
|
python
|
from flask import Blueprint, render_template, jsonify, app
from edinet import edinet_methods
from eve_docs.config import get_cfg
def edinet_docs():
blueprints = [edinet_methods.edinet_methods]
eve_docs = Blueprint('eve_docs', __name__,
template_folder='templates')
@eve_docs.route('/')
def index():
cfg = get_cfg(blueprints)
return render_template('index.html', cfg=cfg)
@eve_docs.route('/v1/docs')
def indexv1():
cfg = get_cfg(blueprints)
return render_template('indexv1.html', cfg=cfg)
@eve_docs.route('/v1/spec.json')
def spec():
cfg = get_cfg(blueprints)
return jsonify(cfg)
return eve_docs
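# Hedged usage sketch (the `app` name is an assumption): register the generated blueprint
# on an existing Flask/Eve application.
#   app.register_blueprint(edinet_docs())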
|
python
|
from app.dao.permissions_dao import permission_dao
from tests.app.db import create_service
def test_get_permissions_by_user_id_returns_all_permissions(sample_service):
permissions = permission_dao.get_permissions_by_user_id(user_id=sample_service.users[0].id)
assert len(permissions) == 8
assert sorted(["manage_users",
"manage_templates",
"manage_settings",
"send_texts",
"send_emails",
"send_letters",
"manage_api_keys",
"view_activity"]) == sorted([i.permission for i in permissions])
def test_get_permissions_by_user_id_returns_only_active_service(sample_user):
active_service = create_service(user=sample_user, service_name="Active service")
inactive_service = create_service(user=sample_user, service_name="Inactive service", active=False)
permissions = permission_dao.get_permissions_by_user_id(user_id=sample_user.id)
assert len(permissions) == 8
assert active_service in [i.service for i in permissions]
assert inactive_service not in [i.service for i in permissions]
|
python
|
from django.urls import path
from .views import *
from .import views
app_name = 'offers'
urlpatterns = [
# Fixed Income (Renda Fixa)
# ------------------------------------------------------------------------------------------------------------------
# List
path('rf/listar/', OfferRfListView.as_view(), name="listar-rf"),
# Add
path('rf/adicionar/', views.createrf, name="create-rf"),
# Edit
path('rf/editar/<int:pk>/', views.editrf, name="edit-rf"),
# Delete
path('rf/deletar/<int:pk>/', DeleteOfferRfView.as_view(), name="delete-rf"),
# Send Mail
path('rf/enviar-email-rf/<int:pk>/', views.sendemailrf, name="send-mail-rf"),
# Sent View
path('rf/email-enviado-rf/<int:id>/', views.sentmailrf, name="sent-mail-rf"),
# Variable Income (Renda Variável)
# ------------------------------------------------------------------------------------------------------------------
# IPO
# ------------------------------------------------------------------------------------------------------------------
# List
path('rv/ipo/listar/', OfferRvIpoListView.as_view(), name="listar-rv-ipo"),
# Add
path('rv/ipo/adicionar/', views.creatervipo, name="create-rv-ipo"),
# Edit
path('rv/ipo/editar/<int:pk>/', views.editrvipo, name="edit-rv-ipo"),
# Delete
path('rv/ipo/deletar/<int:pk>/', DeleteOfferRvIpoView.as_view(), name="delete-rv-ipo"),
# Send Mail
path('rv/ipo/enviar-email-rv-ipo/<int:pk>/', views.sendmailrvipo, name="send-mail-rv-ipo"),
# Sent View
path('rv/ipo/email-enviado-rv-ipo/<int:id>/', views.sentmailrvviewipo, name="sent-mail-rv-view-ipo"),
# Subscription Rights (Direito de Subscrição)
# ------------------------------------------------------------------------------------------------------------------
# List
path('rv/direito-de-subscricao/listar/', OfferRvSubscriptionListView.as_view(), name="listar-rv-subscription"),
# Add
path('rv/direito-de-subscricao/adicionar/', OfferRvSubscriptionCreateView.as_view(), name="create-rv-subscription"),
# Edit
path('rv/direito-de-subscricao/editar/<int:pk>/', OfferRvSubscriptionEditView.as_view(), name="edit-rv-subscription"),
# Delete
path('rv/direito-de-subscricao/deletar/<int:pk>/', OfferRvSubscriptionDeleteView.as_view(), name="delete-rv-subscription"),
# Get Ticker
path('rv/get_ticker/', views.get_ticker_price, name="get-ticker"),
# Fii
# ------------------------------------------------------------------------------------------------------------------
# List
path('rv/fii/listar/', views.ofertarvfiiview, name="listar-rv-fii"),
# Send Email
path('rv/fii/enviar-email-rv-fii/<str:id>/<str:ticker>/', views.sendemailrvfii, name="send-mail-rv-fii"),
# Send
path('rv/fii/envia_email_fii/', views.envia_email_fii, name="envia-email-fii"),
#EDIT FII
# Add (TEST)
path('adicionarFII', views.addFii, name="add-fii"),
# Add
path('rv/fii/adicionar/<str:ticker>/<str:emissor>/', OfferRvFiiCreateView.as_view(), name="create-rv-fii-edit"),
# Edit
path('rv/fii/editar/<int:pk>/', OfferRvFiiEditView.as_view(), name="edit-rv-fii-edit"),
# Delete
path('rv/fii/deletar/<int:pk>/', OfferRvFiiDeleteView.as_view(), name="delete-rv-fii-edit"),
# ---
# Scrape TICKER11
path('rv/fii/ticker11/', views.scrapy_ticker11, name="scrape-ticker11"),
# Scrape ClubeFii
path('rv/fii/clubefii/', views.scrapy_clubefii, name="scrape-clubefii"),
# Fiis File Upload
path('rv/fii/fii-files-upload/', views.fii_files_upload, name="fii-files-upload"),
]
|
python
|
from ..legend import Legend
def size_continuous_legend(title=None, description=None, footer=None, prop='size',
variable='size_value', dynamic=True, ascending=False, format=None):
"""Helper function for quickly creating a size continuous legend.
Args:
prop (str, optional): Allowed properties are 'size' and 'stroke_width'.
dynamic (boolean, optional):
Update and render the legend depending on viewport changes.
Defaults to ``True``.
title (str, optional):
Title of legend.
description (str, optional):
Description in legend.
footer (str, optional):
Footer of legend. This is often used to attribute data sources.
variable (str, optional):
If the information in the legend depends on a different value than the
information set to the style property, it is possible to set an independent
variable.
ascending (boolean, optional):
If set to ``True`` the values are sorted in ascending order.
Defaults to ``False``.
format (str, optional): Format to apply to number values in the widget, based on d3-format
specifier (https://github.com/d3/d3-format#locale_format).
Returns:
cartoframes.viz.legend.Legend
Example:
>>> size_continuous_legend(
... title='Legend title',
... description='Legend description',
... footer='Legend footer',
... dynamic=False,
... format='.2~s')
"""
return Legend('size-continuous', title, description, footer, prop, variable, dynamic, ascending, format)
|
python
|
import unittest
from app.models import Role
class RoleModelTest(unittest.TestCase):
def setUp(self):
self.new_role = Role('1','admin')
if __name__ == '__main__':
unittest.main()
|
python
|
"""
dotfiles
~~~~~~~~
Dotfiles is a tool to make managing your dotfile symlinks in $HOME easy,
allowing you to keep all your dotfiles in a single directory.
:copyright: (c) 2011-2016 by Jon Bernard.
:license: ISC, see LICENSE.rst for more details.
"""
__version__ = '0.9.dev0'
|
python
|
"""
"""
import sys
sys.path.append("..") # import one subdirectory up in files
import numpy as np
from networks.networks import IsoMPS
#%% XXZ Tenpy MPO
from tenpy.models.model import CouplingMPOModel, NearestNeighborModel
from tenpy.tools.params import asConfig
from tenpy.networks.site import SpinHalfSite
#__all__ = ['XXZModel', 'XXChain']
class XXZModel(CouplingMPOModel):
def init_sites(self, model_params):
conserve = model_params.get('conserve', 'parity')
assert conserve != 'Sz'
if conserve == 'best':
conserve = 'parity'
if self.verbose >= 1.:
print(self.name + ": set conserve to", conserve)
site = SpinHalfSite(conserve=conserve)
return site
def init_terms(self, model_params):
J = np.asarray(model_params.get('J', 1.))
d = np.asarray(model_params.get('d', 1.))
for u1, u2, dx in self.lat.pairs['nearest_neighbors']:
self.add_coupling(J, u1, 'Sigmax', u2, 'Sigmax', dx)
self.add_coupling(J, u1, 'Sigmay', u2, 'Sigmay', dx)
self.add_coupling(J*d, u1, 'Sigmaz', u2, 'Sigmaz', dx)
# done
class XXZChain(XXZModel, NearestNeighborModel):
def __init__(self, model_params):
model_params = asConfig(model_params, self.__class__.__name__)
model_params.setdefault('lattice', "Chain")
CouplingMPOModel.__init__(self, model_params)
def xxz_mpo(J,Delta,hz=0):
model_params = dict(J=J, d=Delta, bc_MPS='infinite', conserve=None, verbose=False)
return XXZChain(model_params).H_MPO
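# Minimal usage sketch (parameter values are placeholders): build the MPO of an infinite
# XXZ chain with coupling J and anisotropy Delta.
#   H_mpo = xxz_mpo(J=1.0, Delta=0.5)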
#%% (Free) Energy calculators
def energy_tp(param_vals,*args):
"""
function to calculate energy using MPO/MPS contraction in tenpy
inputs:
- param_vals = dict {parameter:value}
- *args,
args[0] should be psi: state as IsoMPS
args[1] should be H_mpo: Hamiltonian as MPO
(input made this way to be compatible w/ scipy.optimize)
outputs:
- float, <psi|H|psi> computed w/ tenpy
"""
# parse inputs
psi=args[0] # state as isoMPS
H_mpo = args[1] # Hamiltonian as tenpy MPO
param_dict = dict(zip(psi.param_list,param_vals))
# convert state from holoPy isoMPS to tenpy MPS
psi_tp = psi.to_tenpy(param_dict,L=np.inf)
# compute energy
E = (H_mpo.expectation_value(psi_tp)).real
return E
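# Hedged sketch of how energy_tp plugs into scipy.optimize (x0, psi and H_mpo are assumed
# to be defined already; x0 is an initial guess ordered as psi.param_list):
#   from scipy.optimize import minimize
#   res = minimize(energy_tp, x0, args=(psi, H_mpo))
#   E_opt, opt_param_vals = res.fun, res.x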
# def energy_tp_corr(param_vals,*args):
# psi=args[0]
# J,Delta,h = args[1]
# psi_tp=psi.to_tenpy(opt_params,L=np.inf)
# Cxx_tp = np.mean([4*psi_tp.correlation_function("Sx", "Sx",
# sites1=[j],
# sites2=[j+1],
# opstr=None, str_on_first=False, hermitian=False, autoJW=False)
# for j in range(l_uc)])
# Cyy_tp = np.mean([4*psi_tp.correlation_function("Sy", "Sy",
# sites1=[j],
# sites2=[j+1],
# opstr=None, str_on_first=False, hermitian=False, autoJW=False)
# for j in range(l_uc)])
# Czz_tp = np.mean([4*psi_tp.correlation_function("Sz", "Sz",
# sites1=[j],
# sites2=[j+1],
# opstr=None, str_on_first=False, hermitian=False, autoJW=False)
# for j in range(l_uc)])
# return J*(Cxx_tp+Cyy_tp+Delta*Czz_tp)
def entropy(prob_list,L):
# entropy function taken from Shahin's code; L should be viewed as L*l_uc in his convention
"""
Returns the von Neumann entropy (per site) of a given list
probability weight list (the form of Shannon entropy).
--------------
--the input assumes thermal_state_class-based prob_list--
L: int
Length (number) of repetitions of unit cell in the main network chain.
"""
new_prob_list = [np.array(j)[np.array(j) > 1.e-30] for j in prob_list] # avoiding NaN in numpy.log() function
s_list1 = []
d=1
for j in range(len(new_prob_list)):
for p in new_prob_list[j]:
s_list1.append(-p*np.log(p)) # converting to form of Shannon entropy
s_list2 = [sum(s_list1[j:j+d]) for j in range(0,len(s_list1),d)]
s_tot = sum(s_list2)/L # average entropy of chain
return s_tot
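# Worked example: a single site with a uniform two-outcome distribution gives the
# expected ln(2) entropy per site:
#   entropy([[0.5, 0.5]], L=1)  # ~0.6931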
def free_energy_tp(tot_param_vals,*args):
# We need to define psi each time we call the function under our current circuit construction.
# tot_param_vals is just an array of params whose first L*l_uc entries are the per-site
# probability list and whose remaining entries are the regular circuit parameters.
L,l_uc = args[0]
H_mpo = args[1]
prob, param_vals = np.split(tot_param_vals,[L*l_uc])
prob = np.reshape(prob,(L,l_uc,1))
psi = IsoMPS(preg,breg,site_pcircs,boundary_circuit=bond_prep_pcirc,L=L,thermal = True,thermal_prob=prob)
E = energy_tp(param_vals,psi,H_mpo)
S = entropy(prob,L*l_uc)
F = E - T*S
return F
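# Hedged sketch of the expected layout of tot_param_vals (values are placeholders; note
# that free_energy_tp also relies on module-level preg, breg, site_pcircs,
# bond_prep_pcirc and T being defined):
#   probs = np.random.rand(L * l_uc)              # per-site thermal probabilities
#   circuit_params = np.zeros(n_circuit_params)   # variational parameters (n_circuit_params is assumed)
#   tot_param_vals = np.concatenate([probs, circuit_params])
#   F = free_energy_tp(tot_param_vals, (L, l_uc), H_mpo)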
|
python
|
#!/usr/bin/env python
"""booleanRules.py: Infers network from pseudotime and binary expression.
To run type python booleanRules.py gene expressionFile step orderFile maxAct maxRep networkFile threshold thresholdStep
gene: name of gene e.g. Gata1. This should match gene names in expression and network files.
expressionFile: path to expression file. This is a binary gene expression matrix with colnames equal to genes and rownames equal to cell names.
step: e.g. 5. This is how far back in pseudotime to take as input for the boolean function evaluated at current time t. So for step=5 the input to the boolean function at time t is given by the gene expression states at time t=t-5.
orderFile: path to pseudotime order file. This should be a list of cell names indicating the pseudotime ordering of cells. These cells names should be the same form as those in expressionFile.
maxAct: e.g. 4. The number of activators allowed for each gene
maxRep: e.g. 2. The number of repressors allowed for each gene. Note, if these two parameters are high finding rules will be very slow.
networkFile: path to the network file. This indicates possible activators and repressors of each gene. Each line should be of the form "from.gene relation to.gene" where relation is either 1 (activation) or -1 (repression).
threshold: Starting threshold for agreement of rules.
thresholdStep: Increments in which the threshold will be lowered if no rules are found.
"""
__author__ = "Fiona Hamey"
import pandas as pd
import numpy as np
import math
import sys
sys.path.append('/Users/fiona/z3/build')
import z3
sys.path.append('/Users/fiona/Desktop/PhD/MRes/Rotation_2/Code/Python/')
gene = sys.argv[1]
expressionFile = sys.argv[2]
step = int(sys.argv[3])
orderFile = sys.argv[4]
maxAct = int(sys.argv[5])
maxRep = int(sys.argv[6])
networkFile = sys.argv[7]
threshold = float(sys.argv[8])
thresholdStep = float(sys.argv[9])
# Read in expression matrix
expression = pd.read_table(expressionFile, sep = "\t",
index_col = 0, header = 0)
# Read in path cells names as list
pathNames = list(pd.read_table(orderFile, sep = "\n").iloc[:,0])
from encodingFunctions import *
step = step
inputOutput = [(pathNames[i-step-1], pathNames[i-step], pathNames[i-step+1], pathNames[i]) for i in range(step+1,len(pathNames))]
inputOutputSelection = inputOutput
# Read in network
network = pd.read_table(networkFile, sep = "\t", header = 0)
# Define node class
class node:
def __init__(self, pos_in, neg_in):
self.p = pos_in
self.n = neg_in
# Node list for network
nodeList = {}
nodeNames = list(set(network['to.gene']))
for n in nodeNames:
allRelations = network[network['to.gene'] == n]
posRelations = allRelations[allRelations['relation'] == 1]
posGenes = list(posRelations['from.gene'])
if 'FoxH1' in posGenes:
posGenes.remove('FoxH1')
if 'FoxO4' in posGenes:
posGenes.remove('FoxO4')
negRelations = allRelations[allRelations['relation'] == -1]
negGenes = list(negRelations['from.gene'])
if 'FoxH1' in negGenes:
negGenes.remove('FoxH1')
if 'FoxO4' in negGenes:
negGenes.remove('FoxO4')
nodeList[n] = node(posGenes, negGenes)
# Need to add some penalties in, remember she allowed self-activation
# Penalties
penAll = 0
penSelf = 0.005
# Dictionary to look up node names number equivalent
allNames = list(expression.columns)
nodeLookUp = {allNames[j]:j+2 for j in range(len(allNames))}
# Add constraints to the solver
def constraintsBitVec(ctor, model, d):
x = z3.BitVecVal(int(str(model[d])), 32)
return ctor(str(d)) != x
def addConstraintsCircuitVar(solver, model, ds):
constraints = z3.Or([constraintsBitVec(makeCircuitVar, model, d) for d in ds])
return constraints
def modeCalc(lst):
return max(set(lst), key=lst.count)
# Function which enforces minimum agreeing counts
def totalAgreement(inputOutputPair, gene, aVars, rVars, expressionValues, counter):
inputName0 = inputOutputPair[0]
inputName1 = inputOutputPair[1]
inputName2 = inputOutputPair[2]
outputName = inputOutputPair[3]
input0 = expressionValues.loc[inputName0,:]
input1 = expressionValues.loc[inputName1,:]
input2 = expressionValues.loc[inputName2,:]
input = [modeCalc([input0[i], input1[i], input2[i]]) for i in range(len(input0))]
output = expressionValues.loc[outputName,:]
#counter += 1
score = makeEnforcedVar("counter_%i" %counter)
(encoding, match) = circuitEvaluatesTo(gene, aVars, rVars, input, output, counter)
return (z3.And(encoding, z3.If(match, score == z3.BitVecVal(1,32), score == z3.BitVecVal(0,32))), score)
def agreesAtLeastNTimes(inputOutputList, expressionValues, gene, aVars, rVars, n):
both = [totalAgreement(inputOutputList[p], gene, aVars, rVars, expressionValues, p) for p in range(len(inputOutputList))]
encodings = [both[i][0] for i in range(len(both))]
scoreValues = [both[i][1] for i in range(len(both))]
return z3.And(z3.And(encodings), z3.Sum(scoreValues) >= n)
# Genes seems a bit pointless?
def findFunctions(solver, gene, genes, nodeLookUp, nodeList, maxActivators, maxRepressors, inputOutput, expression, agreementThreshold):
expressionBool = expression == 1
possAct = nodeList[gene].p
possAct = [nodeLookUp[i] for i in possAct]
possRep = nodeList[gene].n
possRep = [nodeLookUp[i] for i in possRep]
gene = nodeLookUp[gene]
genes = [nodeLookUp[genes[i]] for i in range(len(genes))]
circuitEncoding, aVars, rVars = encodeUpdateFunction(gene, genes, maxActivators, maxRepressors, possAct, possRep)
circuitVars = aVars + rVars
# Choose number of agreement threshold
agreementThreshold = int(agreementThreshold*len(inputOutput))
solver.reset()
allConstraints = z3.And(circuitEncoding, agreesAtLeastNTimes(inputOutput, expressionBool, gene, aVars, rVars, \
agreementThreshold))
solver.add(allConstraints)
constraints = True
possibleRules = []
while str(solver.check()) == 'sat':
m = solver.model()
modelVariables = [m[i] for i in range(len(m))]
circuitDecls = filter(lambda x: str(x) in [str(s) for s in circuitVars], modelVariables)
enforceDecls = filter(lambda x: str(x)[0:7] == "counter", modelVariables)
totalScore = sum([int(str( m[d])) for d in enforceDecls])
newConstraints = addConstraintsCircuitVar(solver, m, circuitDecls)
constraints = z3.And(constraints, newConstraints)
solver.reset()
solver.add(z3.And(allConstraints, constraints))
possibleRules.append((m, totalScore))
print 'found rule'
if len(possibleRules) >= 100:
newThreshold = max([s[1] for s in possibleRules])
displayThreshold = float(newThreshold)/len(inputOutput)
print 'Finding too many rules. So now increase threshold to %f' %displayThreshold
solver.reset()
allConstraints = z3.And(circuitEncoding, agreesAtLeastNTimes(inputOutput, expressionBool, gene, aVars, rVars, \
newThreshold))
solver.add(allConstraints)
constraints = True
possibleRules = []
while str(solver.check()) == 'sat':
m = solver.model()
modelVariables = [m[i] for i in range(len(m))]
circuitDecls = filter(lambda x: str(x) in [str(s) for s in circuitVars], modelVariables)
enforceDecls = filter(lambda x: str(x)[0:7] == "counter", modelVariables)
totalScore = sum([int(str( m[d])) for d in enforceDecls])
newConstraints = addConstraintsCircuitVar(solver, m, circuitDecls)
constraints = z3.And(constraints, newConstraints)
solver.reset()
solver.add(z3.And(allConstraints, constraints))
possibleRules.append((m, totalScore))
print "found rule"
return possibleRules, z3.And(allConstraints)
def findBestRuleForGene(gene, genes, nodeLookUp, nodeList, maxActivators, maxRepressors, inputOutput, expression):
s = z3.Solver()
for k in range(20):
s.reset()
agreement = threshold - thresholdStep*k
print 'Lowered threshold to %f' %agreement
rules, allConstraints = findFunctions(s, gene, genes, nodeLookUp, nodeList, maxActivators, maxRepressors, inputOutput, expression, agreement)
convertedRules = [ruleConvert(r) for r in rules]
geneInRules = []
conRules = [x[0] for x in convertedRules]
for r in conRules:
            geneInRules.append([s[1] for s in r])
if len(rules) > 0 and not all(g in r for r in geneInRules):
break
print 'Found %d rules for gene satisfying threshold %f' %(len(rules),agreement)
return rules, agreement, allConstraints
def ruleConvert(model):
score = model[1]/len(inputOutput)
modelRules = model[0]
def int2gene(i):
i = int(str(i))
if i == AND:
return 'and'
elif i == OR:
return 'or'
else:
return allNames[i-2]
namedVariables = ["a%i" %i for i in range(7)] + ["r%i" %i for i in range(7)]
modelVariables = [modelRules[i] for i in range(len(modelRules))]
usefulVariables = filter(lambda x: str(x) in namedVariables, modelVariables)
modelConstraints = [(str(v), int2gene(modelRules[v])) for v in usefulVariables if str(modelRules[v]) != str(NOTHING)]
return modelConstraints, score
def evaluateRule(rule, input0, input1, input2):
# Remember to input boolean expression
rules = {'a%d'%i : NOTHING for i in range(7)}
rules.update({'r%d'%i : NOTHING for i in range(7)})
convertBack = {'and':0, 'or':1}
convertBack.update({gene:nodeLookUp[gene] for gene in allNames})
for r in rule:
rules[r[0]] = convertBack[r[1]]
def getValue(v):
return modeCalc([input0[v-2], input1[v-2], input2[v-2]])
# Find intermediate variables
v = {'va%d'%i: getValue(rules['a%d'%i]) for i in range(7) if rules['a%d'%i] in range(2,NOTHING)}
v.update({'vr%d'%i: getValue(rules['r%d'%i]) for i in range(7) if rules['r%d'%i] in range(2,NOTHING)})
# Evaluate activators
if rules['a1'] == 0:
inter_a1 = v['va3'] and v['va4']
elif rules['a1'] == 1:
inter_a1 = v['va3'] or v['va4']
elif rules['a1'] in range(2,NOTHING):
inter_a1 = v['va1']
else:
inter_a1 = True
if rules['a2'] == 0:
inter_a2 = v['va5'] and v['va6']
elif rules['a2'] == 1:
inter_a2 = v['va5'] or v['va6']
elif rules['a2'] in range(2,NOTHING):
inter_a2 = v['va2']
else:
inter_a2 = True
if rules['a0'] == 0:
inter_a0 = inter_a1 and inter_a2
elif rules['a0'] == 1:
inter_a0 = inter_a1 or inter_a2
else:
inter_a0 = v['va0']
# Evaluate repressors
if rules['r0'] != NOTHING:
if rules['r1'] == 0:
inter_r1 = v['vr3'] and v['vr4']
elif rules['r1'] == 1:
inter_r1 = v['vr3'] or v['vr4']
elif rules['r1'] in range(2,NOTHING):
inter_r1 = v['vr1']
else:
inter_r1 = True
if rules['r2'] == 0:
inter_r2 = v['vr5'] and v['vr6']
elif rules['r2'] == 1:
inter_r2 = v['vr5'] or v['vr6']
elif rules['r2'] in range(2,NOTHING):
inter_r2 = v['vr2']
else:
inter_r2 = True
if rules['r0'] == 0:
inter_r0 = inter_r1 and inter_r2
elif rules['r0'] == 1:
inter_r0 = inter_r1 or inter_r2
else:
inter_r0 = v['vr0']
return inter_a0 and not inter_r0
else:
return inter_a0
g = sys.argv[1]
print 'Trying to find rules for %s' %g
expressBool = expression == 1
geneRules, agreement, solver = findBestRuleForGene(g, allNames, nodeLookUp, nodeList, maxAct, maxRep, inputOutputSelection, expression)
rulesForGene = [ruleConvert(r) for r in geneRules]
print 'Converted to readable format'
def scoreForRule(rule):
raw = 0
for io in range(len(inputOutput)):
inputName0 = inputOutput[io][0]
inputName1 = inputOutput[io][1]
inputName2 = inputOutput[io][2]
outputName = inputOutput[io][3]
input0 = expressBool.loc[inputName0,:]
input1 = expressBool.loc[inputName1,:]
input2 = expressBool.loc[inputName2,:]
output = expressBool.loc[outputName,g]
predictedOutput = evaluateRule(rule, input0, input1, input2)
if predictedOutput == output:
raw += 1
    penalty = len(rule)*penAll
if any(g in x for x in rule):
penalty = penalty + (penSelf - penAll)
score = raw-penalty
# Might want to return more things?
return (score, rule)
scoreRules = [(scoreForRule(r[0]),'z3 score %f' %r[1]) for r in rulesForGene]
print 'Found agreement level for each rule'
bestRule = max(scoreRules, key=lambda x: x[0])
maxValue = bestRule[0][0]
allBest = []
for rule in scoreRules:
if rule[0][0] == maxValue:
allBest.append(rule)
print 'The best rules are %s' %str(allBest)
scoreRules = sorted(scoreRules, key=lambda x: x[0][0], reverse = True)
print 'Writing the rules to a file'
f=open('%s_boolean_rules_%d.txt' % (g, step),'w')
f.write('Agreement level = %f \n' %agreement)
f.write('The best rules were:\n')
for item in allBest:
f.write("%s\n" %str(item))
f.write('Other rules found were:\n')
for item in scoreRules:
f.write("%s\n" %str(item))
f.close()
print 'Found rules for %s' %g
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
python
|
"""Ekea for EAM model"""
import os, subprocess, json, shutil
from ekea.e3smapp import E3SMApp, here
from ekea.utils import xmlquery
# EAM app
class EAMKernel(E3SMApp):
_name_ = "eam"
_version_ = "0.1.0"
# main entry
def perform(self, args):
self.generate(args, "exclude_e3sm_eam.ini")
|
python
|
import os
import sys
from pathlib import Path
from data_pipeline.config import settings
from data_pipeline.utils import (
download_file_from_url,
get_module_logger,
)
logger = get_module_logger(__name__)
def check_score_data_source(
score_csv_data_path: Path,
score_data_source: str,
) -> None:
"""Checks if census data is present, and exits gracefully if it doesn't exist. It will download it from S3
if census_data_source is set to "aws"
Args:
score_csv_data_path (str): Path for local Score CSV data
score_data_source (str): Source for the score data
Options:
            - local: fetch score data from the local data directory
            - aws: fetch score data from AWS S3 J40 data repository
Returns:
None
"""
TILE_SCORE_CSV_S3_URL = (
settings.AWS_JUSTICE40_DATAPIPELINE_URL
+ "/data/score/csv/tiles/usa.csv"
)
TILE_SCORE_CSV = score_csv_data_path / "tiles" / "usa.csv"
    # download from s3 if score_data_source is aws
if score_data_source == "aws":
logger.info("Fetching Score Tile data from AWS S3")
download_file_from_url(
file_url=TILE_SCORE_CSV_S3_URL, download_file_name=TILE_SCORE_CSV
)
else:
# check if score data is found locally
if not os.path.isfile(TILE_SCORE_CSV):
logger.info(
"No local score tiles data found. Please use '-d aws` to fetch from AWS"
)
sys.exit()
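# Minimal usage sketch (the path is illustrative, not taken from this module):
#   check_score_data_source(score_csv_data_path=Path("data/score/csv"), score_data_source="aws")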
|
python
|
"""
This file contains the implementation of the class GameObject.
Author: Alejandro Mujica ([email protected])
Date: 07/22/20
"""
from src.mixins import DrawableMixin, CollisionMixin
class GameObject(DrawableMixin, CollisionMixin):
# Object sides
TOP = 'top'
RIGHT = 'right'
BOTTOM = 'bottom'
LEFT = 'left'
def __init__(self, x, y, width, height, texture, frame, solidness):
self.x = x
self.y = y
self.width = width
self.height = height
self.texture = texture
self.frame = frame
self.solidness = solidness
self.inverted = False
|
python
|
#!/usr/bin/env python2
# Fill in checksum/size of an option rom, and pad it to proper length.
#
# Copyright (C) 2009 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys
def alignpos(pos, alignbytes):
mask = alignbytes - 1
return (pos + mask) & ~mask
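# Illustrative values: alignpos(700, 512) == 1024, alignpos(512, 512) == 512.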
def checksum(data):
ords = map(ord, data)
return sum(ords)
def main():
inname = sys.argv[1]
outname = sys.argv[2]
# Read data in
f = open(inname, 'rb')
data = f.read()
count = len(data)
# Pad to a 512 byte boundary
data += "\0" * (alignpos(count, 512) - count)
count = len(data)
# Fill in size field; clear checksum field
data = data[:2] + chr(count/512) + data[3:6] + "\0" + data[7:]
# Checksum rom
newsum = (256 - checksum(data)) & 0xff
data = data[:6] + chr(newsum) + data[7:]
# Write new rom
f = open(outname, 'wb')
f.write(data)
if __name__ == '__main__':
main()
|
python
|
# We start by importing all the modules we will need, as well as the helper document with all our functions
import argparse
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
import os
from torchvision import datasets, transforms, models
from collections import OrderedDict
from PIL import Image
from torch.optim import lr_scheduler
import helper
def main():
# We display a short prompt
print('Hello! This script will train a neural network with a Resnet or VGG architecture with only one layer.' +
'\n' + 'You can consult the help for other command line arguments.')
nb_categories = int(input('Please input the number of categories of your target variable: '))
print('\n')
args = helper.get_input_args_train()
data_dir = args.dir
save_dir = args.save_dir
arch = args.arch
learning_rate = args.learning_rate
hidden_units = args.hidden_units
epochs = args.epochs
gpu = args.gpu
# We check the user used one of the possible architecture
while not arch in ('vgg16', 'resnet152'):
arch = input('You can only choose between vgg16 and resnet152 for an architecture: ')
# We now create our train, validation and test loaders with our images
# This assumes our files follow the appropriate structure
dataloaders = helper.load_images(data_dir)
# We create the architecture of our model
model = helper.build_model(arch, hidden_units, nb_categories)
# We choose our criterion and our optimizer
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
# Train our model
model = helper.train_model(model, criterion, optimizer, scheduler, dataloaders, epochs, gpu)
# Save the checkpoint
helper.save_checkpoint(model, criterion, optimizer, learning_rate, epochs, save_dir, arch)
main()
|
python
|
from django.contrib import admin
from django.urls import path, include
from django.contrib.sitemaps.views import sitemap
from website.sitemaps import PostSitemap
sitemaps = {
'posts': PostSitemap
}
urlpatterns = [
path('mkubwa/', admin.site.urls),
path('accounts/', include('accounts.urls', namespace='accounts')),
path('accounts/', include('django.contrib.auth.urls')),
path('', include('website.urls', namespace='website')),
path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
]
|
python
|
import multiprocessing
import time
from multiprocessing.connection import Listener, Client
from typing import Dict, Any, List
import tqdm
from autopandas_v2.ml.inference.interfaces import RelGraphInterface
class ModelStore:
def __init__(self, path_map: Dict[Any, str]):
self.path_map = path_map
self.model_store: Dict[str, RelGraphInterface] = {}
self.cache = None
self.setup()
def setup(self):
iterator = self.path_map.items()
if len(self.path_map) > 3:
iterator = tqdm.tqdm(iterator)
for key, path in iterator:
self.model_store[key] = RelGraphInterface.from_model_dir(path)
def wait_till_ready(self):
return True
def predict_graphs(self, key, encodings: List[Dict], top_k: int = 10):
return self.model_store[key].predict_graphs(encodings, top_k=top_k)
def __contains__(self, item):
return item in self.path_map
def get_path_map(self):
return self.path_map
def close(self):
for model in self.model_store.values():
pass
# model.close()
def start_caching(self):
self.cache = {}
def stop_caching(self):
self.cache = None
class ModelStoreServer(ModelStore):
def __init__(self, path_map: Dict[Any, str], port: int = 6543):
self.port = port
self.process: multiprocessing.Process = None
super().__init__(path_map)
def setup(self):
self.process = multiprocessing.Process(target=ModelStoreServer.serve, args=(self.path_map, self.port))
self.process.start()
def predict_graphs(self, key, encodings: List[Dict], **kwargs):
conn = Client(('localhost', self.port))
conn.send((key, encodings, kwargs))
result = conn.recv()
conn.close()
return result
def wait_till_ready(self):
ready = False
while not ready:
try:
conn = Client(('localhost', self.port))
conn.close()
ready = True
except ConnectionRefusedError:
time.sleep(2)
@staticmethod
def serve(path_map: Dict[Any, str], port: int):
model_store = ModelStore(path_map)
listener = Listener(('localhost', port))
while True:
conn = listener.accept()
try:
key, encodings, kwargs = conn.recv()
if key == 'autopandas-exit':
# Hacky AF
conn.close()
break
conn.send(model_store.predict_graphs(key, encodings, **kwargs))
conn.close()
except:
continue
finally:
conn.close()
listener.close()
def close(self):
conn = Client(('localhost', self.port))
conn.send(('autopandas-exit', [], {}))
conn.close()
self.process.join()
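# Hedged usage sketch (the model key, directory and encodings are placeholders):
#   store = ModelStoreServer({"next_graphs": "/path/to/model_dir"}, port=6543)
#   store.wait_till_ready()
#   predictions = store.predict_graphs("next_graphs", encodings, top_k=10)
#   store.close()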
|
python
|
# -*- coding: utf-8 -*-
"""This module populates the tables of bio2bel_reactome."""
import logging
from collections import defaultdict
from typing import Dict, List, Mapping, Optional, Set
import pandas as pd
from tqdm import tqdm
from bio2bel.compath import CompathManager
from pyobo import get_name_id_mapping
from .constants import MODULE_NAME, SPECIES_REMAPPING
from .models import Base, Chemical, Pathway, Protein, Species, chemical_pathway, protein_pathway
from .parsers.entity_pathways import get_procesed_chemical_pathways_df, get_procesed_proteins_pathways_df
from .parsers.pathway_hierarchy import get_pathway_hierarchy_df, parse_pathway_hierarchy
from .parsers.pathway_names import get_pathway_names_df, parse_pathway_names
logger = logging.getLogger(__name__)
__all__ = [
'Manager',
]
class Manager(CompathManager):
"""Protein-pathway and chemical-pathway memberships."""
module_name = MODULE_NAME
protein_model = Protein
_base = Base
edge_model = [protein_pathway, chemical_pathway]
namespace_model = pathway_model = Pathway
flask_admin_models = [Pathway, Protein, Species, Chemical]
has_hierarchy = True # Indicates that this manager can handle hierarchies with the Pathway Model
def __init__(self, *args, **kwargs) -> None: # noqa: D107
super().__init__(*args, **kwargs)
# Global dictionary
self.uniprot_id_to_protein: Dict[str, Protein] = {}
self.chebi_id_to_chemical: Dict[str, Chemical] = {}
def summarize(self) -> Mapping[str, int]:
"""Summarize the database."""
return {
'pathways': self.count_pathways(),
'proteins': self.count_proteins(),
'chemicals': self.count_chemicals(),
'species': self.count_species(),
}
def count_chemicals(self) -> int:
"""Count the chemicals in the database."""
return self.session.query(Chemical).count()
def count_species(self) -> int:
"""Count the species in the database."""
return self.session.query(Species).count()
def get_gene_sets(self, only_human: bool = False) -> Mapping[str, Set[str]]:
"""Return pathway - genesets mapping."""
if only_human:
pathways = self.get_human_pathways()
else:
pathways = self.list_pathways()
return {
pathway.name: {
protein.hgnc_symbol
for protein in pathway.proteins
if protein.hgnc_symbol
}
for pathway in pathways
if pathway.proteins
}
def get_or_create_pathway(
self,
*,
reactome_id: str,
name: str,
species: Species,
chemicals: Optional[List[Chemical]],
) -> Pathway:
"""Get a pathway from the database or creates it.
:param reactome_id: pathway identifier
:param name: name of the pathway
:param species: Species object
        :param chemicals: An optional list of chemicals that belong to this pathway
"""
pathway = self.get_pathway_by_id(reactome_id)
if pathway is None:
pathway = Pathway(
identifier=reactome_id,
name=name,
species=species,
chemicals=chemicals,
)
self.session.add(pathway)
return pathway
def get_or_create_chemical(self, *, chebi_id: str, chebi_name: str) -> Chemical:
"""Get a Chemical from the database or creates it.
:param chebi_id: ChEBI identifier
:param chebi_name: ChEBI name
"""
chemical = self.get_chemical_by_chebi_id(chebi_id)
if chemical is None:
chemical = Chemical(
chebi_id=chebi_id,
name=chebi_name,
)
self.session.add(chemical)
return chemical
def get_or_create_species(self, *, taxonomy_id: str, name: str) -> Species:
"""Get a Species from the database or creates it."""
species = self.get_species_by_name(name)
if species is None:
species = Species(taxonomy_id=taxonomy_id, name=name)
self.session.add(species)
return species
def get_or_create_protein(
self,
uniprot_id: str,
hgnc_symbol: Optional[str] = None,
hgnc_id: Optional[str] = None,
) -> Protein:
"""Get a protein from the database or creates it.
        :param uniprot_id: UniProt identifier
        :param hgnc_symbol: HGNC gene symbol
        :param hgnc_id: HGNC identifier
"""
protein = self.get_protein_by_uniprot_id(uniprot_id)
if protein is not None:
return protein
protein = self.uniprot_id_to_protein.get(uniprot_id)
if protein is not None:
self.session.add(protein)
return protein
protein = self.uniprot_id_to_protein[uniprot_id] = Protein(
uniprot_id=uniprot_id,
hgnc_symbol=hgnc_symbol,
hgnc_id=hgnc_id,
)
self.session.add(protein)
return protein
def get_species_by_name(self, species_name: str) -> Optional[Species]:
"""Get a Species by its species_name.
:param species_name: name
"""
return self.session.query(Species).filter(Species.name == species_name).one_or_none()
def get_pathway_names_to_ids(self, only_human: bool = False):
"""Return a dictionary of pathway names to ids.
:rtype: dict[str,str]
"""
if only_human:
pathways = self.get_human_pathways()
else:
pathways = self.list_pathways()
return {
pathway.name: pathway.identifier
for pathway in pathways
}
def get_pathway_parent_by_id(self, reactome_id: str) -> Optional[Pathway]:
"""Get parent pathway by its reactome id.
:param reactome_id: reactome identifier
"""
pathway = self.get_pathway_by_id(reactome_id)
if not pathway or not pathway.parent:
return None
return pathway.parent
def get_top_hiearchy_parent_by_id(self, reactome_id: str) -> Optional[Pathway]:
"""Get the oldest pathway at the top of the hierarchy a pathway by its reactome id.
:param reactome_id: reactome identifier
"""
pathway = self.get_pathway_by_id(reactome_id)
if not pathway.parent:
return pathway
return self.get_top_hiearchy_parent_by_id(pathway.parent.identifier)
def get_all_top_hierarchy_pathways(self) -> List[Pathway]:
"""Get all pathways without a parent (top hierarchy)."""
all_pathways = self.list_pathways()
return [
pathway
for pathway in all_pathways
if not pathway.parent_id
]
def get_human_pathways(self) -> List[Pathway]:
"""Get human pathways."""
return self.get_pathways_by_species('Homo sapiens')
def get_pathways_by_species(self, species_name: str) -> Optional[List[Pathway]]:
"""Get pathways by species."""
filtered_species = self.session.query(Species).filter(Species.name == species_name).one_or_none()
if not filtered_species:
return None
return filtered_species.pathways
def get_chemical_by_chebi_id(self, chebi_id: str) -> Optional[Chemical]:
"""Get chemical by ChEBI id."""
return self.session.query(Chemical).filter(Chemical.chebi_id == chebi_id).one_or_none()
def get_protein_by_uniprot_id(self, uniprot_id: str) -> Optional[Protein]:
"""Get protein by UniProt id."""
return self.session.query(Protein).filter(Protein.uniprot_id == uniprot_id).one_or_none()
"""Custom Methods to Populate the DB"""
def _populate_pathways(
self,
chemical_mapping: Mapping[str, List[Chemical]],
url: Optional[str] = None,
) -> None:
"""Populate the pathway table.
:param url: url from pathway table file
"""
df = get_pathway_names_df(url=url)
pathways_dict, species_set = parse_pathway_names(df)
species_name_to_id = get_name_id_mapping('ncbitaxon')
species_name_to_model = {}
for species_name in tqdm(species_set, desc='populating species'):
species_name = SPECIES_REMAPPING.get(species_name, species_name)
species_name_to_model[species_name] = self.get_or_create_species(
name=species_name,
taxonomy_id=species_name_to_id[species_name],
)
for reactome_id, (name, species_name) in tqdm(pathways_dict.items(), desc='populating pathways'):
species_name = SPECIES_REMAPPING.get(species_name, species_name)
pathway = self.get_or_create_pathway(
reactome_id=reactome_id,
name=name,
species=species_name_to_model[species_name],
chemicals=chemical_mapping.get(reactome_id, []),
)
self.session.add(pathway)
self.session.commit()
def _pathway_hierarchy(self, url: Optional[str] = None) -> None:
"""Links pathway models through hierarchy.
:param url: url from pathway hierarchy file
"""
df = get_pathway_hierarchy_df(url=url)
pathways_hierarchy = parse_pathway_hierarchy(df)
for parent_id, child_id in tqdm(pathways_hierarchy, desc='populating pathway hierarchy'):
if parent_id is None:
logger.warning('parent id is None')
continue
if child_id is None:
logger.warning('child id is None')
continue
parent = self.get_pathway_by_id(parent_id)
child = self.get_pathway_by_id(child_id)
parent.children.append(child)
self.session.commit()
def _pathway_protein(self, url: Optional[str] = None) -> None:
"""Populate UniProt tables.
:param url: url from pathway protein file
"""
pathways_proteins_df = get_procesed_proteins_pathways_df(url=url)
missing_reactome_ids = set()
missing_hgnc_info = set()
protein_info_df = pathways_proteins_df[
['uniprot_id', 'uniprot_accession', 'hgnc_id', 'hgnc_symbol']].drop_duplicates()
it = tqdm(protein_info_df.values, total=len(protein_info_df.index), desc='populating proteins')
for uniprot_id, uniprot_accession, hgnc_id, hgnc_symbol in it:
self.uniprot_id_to_protein[uniprot_id] = Protein(
uniprot_id=uniprot_id,
uniprot_accession=uniprot_accession,
hgnc_id=hgnc_id,
hgnc_symbol=hgnc_symbol,
)
it = tqdm(
pathways_proteins_df[['uniprot_id', 'reactome_id']].values,
total=len(pathways_proteins_df.index),
desc='populating proteins-pathway relations',
)
for uniprot_id, reactome_id in it:
if uniprot_id is None:
logger.debug('uniprot_id is none')
continue
protein = self.uniprot_id_to_protein[uniprot_id]
pathway = self.get_pathway_by_id(reactome_id)
if pathway is None:
if reactome_id not in missing_reactome_ids:
it.write(f'protein/pathway mapping: could not find reactome:{reactome_id}')
missing_reactome_ids.add(reactome_id)
continue
if pathway not in protein.pathways:
protein.pathways.append(pathway)
self.session.commit()
if missing_reactome_ids:
logger.warning('missing %d reactome ids', len(missing_reactome_ids))
if missing_hgnc_info:
logger.warning('missing %d hgncs', len(missing_hgnc_info))
def _get_chemical_mapping(self, url: Optional[str] = None) -> Mapping[str, List[Chemical]]:
"""Populate ChEBI tables.
:param url: url from pathway chemical file
"""
chemical_pathways_df = get_procesed_chemical_pathways_df(url=url)
chemicals_df = chemical_pathways_df[['chebi_id', 'chebi_name']].drop_duplicates()
it = tqdm(chemicals_df.values, total=len(chemicals_df.index), desc='populating chemicals')
chebi_id_to_chemical = {}
for chebi_id, chebi_name in it:
if pd.isna(chebi_id):
continue
chebi_id_to_chemical[chebi_id] = Chemical(chebi_id=chebi_id, name=chebi_name)
rv = defaultdict(list)
_slim_chemical_pathways_df = chemical_pathways_df[['chebi_id', 'reactome_id']].drop_duplicates()
it = tqdm(
_slim_chemical_pathways_df.values,
total=len(_slim_chemical_pathways_df.index),
desc='populating chemical/reactome',
)
for chebi_id, reactome_id in it:
chemical = chebi_id_to_chemical[chebi_id]
rv[reactome_id].append(chemical)
return dict(rv)
def populate(
self,
pathways_path: Optional[str] = None,
pathways_hierarchy_path: Optional[str] = None,
pathways_proteins_path: Optional[str] = None,
pathways_chemicals_path: Optional[str] = None,
) -> None:
"""Populate all tables.
:param pathways_path: url from pathway table file
:param pathways_hierarchy_path: url from pathway hierarchy file
:param pathways_proteins_path: url from pathway protein file
:param pathways_chemicals_path: url from pathway chemical file
"""
chemical_mapping = self._get_chemical_mapping(url=pathways_chemicals_path)
self._populate_pathways(url=pathways_path, chemical_mapping=chemical_mapping)
self._pathway_hierarchy(url=pathways_hierarchy_path)
self._pathway_protein(url=pathways_proteins_path)
def _add_admin(self, app, **kwargs):
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
class PathwayView(ModelView):
"""Pathway view in Flask-admin."""
column_searchable_list = (
Pathway.identifier,
Pathway.name,
)
class ProteinView(ModelView):
"""Protein view in Flask-admin."""
column_searchable_list = (
Protein.hgnc_symbol,
Protein.uniprot_id,
Protein.hgnc_id,
)
class SpeciesView(ModelView):
"""Species view in Flask-admin."""
column_searchable_list = (
Species.taxonomy_id,
Species.name,
)
class ChemicalView(ModelView):
"""Chemical view in Flask-admin."""
column_searchable_list = (
Chemical.chebi_id,
Chemical.name,
)
admin = Admin(app, **kwargs)
admin.add_view(PathwayView(Pathway, self.session))
admin.add_view(ProteinView(Protein, self.session))
admin.add_view(ChemicalView(Chemical, self.session))
admin.add_view(SpeciesView(Species, self.session))
return admin
|
python
|
from collections import namedtuple
from sqlalchemy.orm.exc import NoResultFound
from parks.models import DBSession
from parks.models import Park
from parks.models import StampCollection
from parks.models import User
from parks.models import UserEmail
def get_user_by_id(user_id):
return DBSession.query(
User,
).filter(
User.id == user_id
).one()
def get_user_by_username_or_email(username_or_email):
user = DBSession.query(
User,
).filter(
User.username == username_or_email
).all()
if len(user) == 1:
return user[0]
user = DBSession.query(
User,
).join(
UserEmail,
User.id == UserEmail.user_id
).filter(
UserEmail.email == username_or_email
).all()
if len(user) == 1:
return user[0]
raise NoResultFound('No user with that email or username was found')
def get_recent_user_collections(limit, after_date=None):
"""Returns up to limit recent stamp collections."""
if limit > 10 or limit < 0:
raise ValueError('Too much recent activity requested')
# We need to load the various types of activity, then sort by the
# time_created and just return a few items
# TODO(bskari|2013-02-02) Load collection data
CollectionNamedTuple = namedtuple(
'CollectionNamedTuple',
['username', 'park_name', 'park_url']
)
query = DBSession.query(
StampCollection.id,
User,
Park,
).join(
User,
User.id == StampCollection.user_id
).join(
Park,
Park.id == StampCollection.park_id
)
if after_date is not None:
query = query.filter(StampCollection.date_collected > after_date)
query = query.order_by(
# id and time_created should be correlated
StampCollection.id.desc()
).limit(
limit
)
recent_collections = query.all()
collections = [
CollectionNamedTuple(
rc.User.username,
rc.Park.name,
rc.Park.url
)
for rc in recent_collections
]
return collections
|
python
|
from tkinter import *
from tkinter.ttk import *
from cep_price_console.cntr_upload.CntrUploadTab import CntrUploadTab
from cep_price_console.utils.log_utils import CustomAdapter, debug
from cep_price_console.cntr_upload.Treeview import TreeviewConstructor, TreeColumn, TreeRow
import logging
class Step5ReviewUpload(CntrUploadTab):
logger = CustomAdapter(logging.getLogger(str(__name__)), None)
@debug(lvl=logging.NOTSET, prefix='')
def __init__(self, master, tab_text, tab_state='normal'):
CntrUploadTab.__init__(self,
master,
tab_text,
tab_state)
# Instructions Frame
self.instr_frame = Frame(self.frame_main)
self.instr_lbl = Label(self.instr_frame)
# Review upload values
self.review_frame = Frame(self.frame_main)
self.review_treeview = None
self.upload_matched_tbl = None
self.csv_results = None
self.column_lst = []
# noinspection PyProtectedMember
@debug(lvl=logging.DEBUG, prefix='')
def populate_frame(self):
# Main Frame
self.btn_prev.grid_remove()
self.frame_main.columnconfigure(0, weight=1)
self.frame_main.rowconfigure(1, weight=1)
self.upload_matched_tbl = self.cont.fetch_upload_matched_tbl()
self.instr_frame.grid(row=0, column=0)
self.instr_lbl.config(text="This is the header", font=('Verdana Bold', '20'))
self.instr_lbl.grid(row=0, column=0)
self.review_frame.grid(row=1, column=0, sticky=NSEW)
self.review_frame.columnconfigure(0, weight=1)
self.review_frame.rowconfigure(0, weight=1)
self.review_treeview = TreeviewConstructor(self, self.review_frame, False)
self.btn_next.state(['!disabled'])
self.btn_next.bind("<ButtonRelease-1>", self.proceeding)
for _ in self.upload_matched_tbl.column_descriptions:
name = _.get('name').replace(" ", "_").replace("'", "").replace('"', "")
Step5ReviewUpload.logger.log(logging.DEBUG, "Name: {0}".format(name))
self.column_lst.append(name)
col_order = 1
for col in self.column_lst:
Step5ReviewUpload.logger.log(logging.DEBUG, "Column ID: {0}".format(col))
obj = TreeColumn(order=col_order,
col_id=col,
hdr_txt=col.replace("_", " "))
if col in ("UploadMatched_ID", "UploadMono_ID", "UploadMulti_ID"):
Step5ReviewUpload.logger.log(logging.DEBUG, "Hiding Column.")
obj.display = False
self.review_treeview.col_obj_dict[col] = obj
col_order += 1
self.review_treeview.populate_cols()
for query_row in self.upload_matched_tbl.all():
Step5ReviewUpload.logger.log(logging.NOTSET, "Row Info: {0}".format(query_row))
item = TreeRow(
treeview_const=self.review_treeview,
iid=query_row.UploadMatched_ID
)
for col_obj in self.review_treeview.col_obj_dict.values():
item.values_dict[str(col_obj.order)] = query_row._asdict().get(col_obj.col_id)
self.review_treeview.item_obj_dict[str(item.iid)] = item
self.review_treeview.populate_items()
self.review_treeview.stripe_rows()
# noinspection PyUnusedLocal
def proceeding(self, events):
self.manager.busy()
self.cont.prepare_repository_and_filename()
self.cont.save_csv_export()
self.cont.move_contract_wb()
self.manager.not_busy()
|
python
|
# start_chrome -> input_date -> scroll_down-> find_cards_info -> save -> find_next (goto)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import csv
import os
def start_chrome():
driver = webdriver.Chrome(executable_path='./chromedriver.exe')
driver.start_client()
return driver
def q(st, et):
    return f'?is_text=1&is_pic=1&key_word=&start_time={st}&end_time={et}&is_search=1&is_searchadv=1#_0'
def scroll_down():
html_page = driver.find_element_by_tag_name('html')
for i in range(5):
print(i)
html_page.send_keys(Keys.END)
time.sleep(0.9)
def find_cards_info():
cards_sel = 'div.WB_feed_detail'
cards = driver.find_elements_by_css_selector(cards_sel)
info_list = []
for card in cards:
content_sel = 'div.WB_text.W_f14'
time_sel = 'div.WB_from.S_txt2'
link_sel = 'div.WB_from.S_txt2 > a:nth-child(1)'
content = card.find_element_by_css_selector(content_sel).text
time = card.find_element_by_css_selector(time_sel).text
link = card.find_element_by_css_selector(link_sel).get_attribute('href')
info_list.append([content, time, link])
return info_list
def find_next():
next_sel = 'a.page.next'
next_page = driver.find_elements_by_css_selector(next_sel)
if next_page:
return next_page[0].get_attribute('href')
def save(info_list, name):
full_path = './' + name + '.csv'
if os.path.exists(full_path):
with open(full_path, 'a') as f:
writer = csv.writer(f)
writer.writerows(info_list)
print('Done')
else:
with open(full_path, 'w+') as f:
writer = csv.writer(f)
            writer.writerows(info_list)
print('Done')
def run_crawler(base, duration):
if not base.endswith('feedtop'):
st, et = duration.split('~')
driver.get(base + q(st, et))
else:
driver.get(base)
time.sleep(5)
scroll_down()
time.sleep(5)
info_list = find_cards_info()
save(info_list, duration)
next_page = find_next()
if next_page:
run_crawler(next_page, duration)
base = 'https://weibo.com/5767028504/profile?rightmod=1&wvr=6&mod=personinfo'
driver = start_chrome()
input()
run_crawler(base, '2017-12-17~2018-12-17')
|
python
|
import client
import get_plant
import time
import RPi.GPIO as GPIO
import picamera
import math
import numpy as np
import argparse
import cv2
import json
import MPU9250
# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(25, GPIO.OUT)
servo=GPIO.PWM(17, 100)
# Connect to the server and take possession of the motors.
c = client.create_client("test")
c.conn()
import time
time.sleep(1)
c.send("take/9999999/motor1:1\n")
time.sleep(1)
c.send("take/9999999/odometry:1\n")
i = [0, 0, 0, 0]
last_time = time.time()
def update_pos():
"""Get odometry data and store the result in i.
"""
global i
while True:
msg = c.recv(10000)
try:
i = list(map(int, msg.rsplit(":", 1)[1].split(" ")))
except:
continue
time.sleep(0.25)
#set GPIO Pins
GPIO_TRIGGER = 23
GPIO_ECHO = 24
pos = [10, 10]
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
def distance():
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
return distance
import threading
a = threading.Thread(target=update_pos)
a.start()
def get_angle():
"""Get 400 readings from the MPU9250
"""
angles = []
xs = []
ys = []
data = mpu.readMagnet()
for i in range(400):
data = mpu.readMagnet()
while (data["x"] == 0 and data["y"] == 0):
time.sleep(0.01)
data = mpu.readMagnet()
data["x"] -= 39.66
data["x"] /= 24.47
data["y"] -= 2.8675
data["y"] /= 23.84
xs.append(data["x"])
ys.append(data["y"])
a = math.atan2(data["y"], data["x"])
angles.append(a)
time.sleep(0.015)
avg = sum(angles) / len(angles)
avg = math.atan2(sum(ys), sum(xs))
return avg
def get_pos(vals):
return (vals[1] + vals[2]) / 2, (vals[0] + vals[3]) / 2
def move(dist, to_left=1, to_right=1):
dist /= 0.90
vals = list(i)
left, right = get_pos(vals)
left_init = left
right_init = right
end_left = left + dist
end_right = right + dist
last_left, last_right = left, right
sl = 120
sr = 120
cu_l = 0
cu_r = 0
distance_obj_cm = 3000
while distance_obj_cm > 55 and (left < end_left or right < end_right):
# Stop if finished or an obstacle is nearby.
old_sl = sl
old_sr = sr
cur_left, cur_right = get_pos(i)
dl = cur_left - last_left
dr = cur_right - last_right
cu_l += dl
cu_r += dr
# Compute ratio used to choose the new speeds.
ratio = (cu_l + 0.1) / (cu_r + 0.1)
ratio2 = (cu_r + 0.1) / (cu_l + 0.1)
cur_ratio = (dl + 0.1) / (dr + 0.1)
cur_ratio2 = (dr + 0.1) / (dl + 0.1)
if cu_l < cu_r:
if sl < 125 or sr < 125:
sl *= ratio2
else:
sr /= ratio2
elif cu_l > cu_r:
if sr < 125 or sl < 125:
sr *= ratio
else:
sl /= ratio
if sl < 100:
sl = 100
if sr < 100:
sr = 100
if sl > 170:
sl = 170
if sr > 170:
sr = 170
# Send data to the arduino.
c.sendtoserial("motor1", int(sr) * to_left)
c.sendtoserial("motor2", int(sl) * to_right)
c.sendtoserial("motor3", int(sl) * to_right)
c.sendtoserial("motor4", int(sr) * to_left)
left, right = cur_left, cur_right
last_left, last_right = cur_left, cur_right
distance_obj_cm = distance()
print("DIST: ", distance_obj_cm)
time.sleep(0.25)
c.sendtoserial("motor1", "0")
c.sendtoserial("motor2", "0")
c.sendtoserial("motor3", "0")
c.sendtoserial("motor4", "0")
time.sleep(0.5)
if distance_obj_cm < 60:
return (1, left - left_init)
return (0, left - left_init)
def move_centimeter(cm):
"""Move n centimeters, recalibrating every meter.
"""
global init_angle, pos, tunny_right
unit_per_cm = 290 / 71 / 2 / 1.44
ret = []
while cm > 0.1:
cur = min(cm, 100)
cm -= cur
ret = move(unit_per_cm * cur)
if ret[0] == 1:
break
angle = get_angle()
old_angle = init_angle
init_angle = angle
turn(get_angle_diff(angle, old_angle)[1])
time.sleep(1)
if tunny_right == 1:
pos[1] += ret[1] / unit_per_cm
else:
pos[0] += ret[1] / unit_per_cm
if ret[0] == 1 and tunny_right:
found_obstacle(pos[0], pos[1] + 50)
else:
found_obstacle(pos[0] + 50, pos[1])
print(ret[1] / unit_per_cm)
def iset_servo_angle(angle_idx):
"""Set the servomotor angle to -90, -45, 0, 45 or 90 degrees.
"""
vals = [5, 9, 13, 17, 21]
servo.start(vals[angle_idx])
time.sleep(1.5)
servo.start(0)
mpu=MPU9250.MPU9250()
def get_angle_diff(angle1, angle2):
"""Return the angle, between 0 and 2*pi, between angle1 and angle2.
"""
diff = angle2 - angle1
while diff < -3.1415:
diff += 3.1415*2
while diff > 3.1415:
diff -= 3.1415*2
return abs(diff), diff
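# Illustrative values: the difference is wrapped into roughly (-pi, pi], e.g.
#   get_angle_diff(3.0, -3.0)  # -> (~0.283, ~0.283) rather than a naive difference of -6.0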
def turn(rad, first=True):
"""Turn until reaching rad angles difference from the current direction.
"""
global init_angle
target_angle = init_angle + rad
while target_angle > 3.1415:
target_angle -= 3.1415 * 2
while target_angle < -3.1415:
target_angle += 3.1415 * 2
rad *= -1
left_val = -1 if rad > 0 else 1
right_val = -left_val
c.sendtoserial("motor1", str(160 * left_val))
c.sendtoserial("motor2", str(160 * right_val))
c.sendtoserial("motor3", str(160 * right_val))
c.sendtoserial("motor4", str(160 * left_val))
time.sleep(abs(rad) / 2 + 0.1)
c.sendtoserial("motor1", "0")
c.sendtoserial("motor2", "0")
c.sendtoserial("motor3", "0")
c.sendtoserial("motor4", "0")
time.sleep(0.2)
angle = get_angle()
diff, dir = get_angle_diff(angle, target_angle)
if diff > 0.05:
time.sleep(0.1)
init_angle = angle
turn(dir, False)
if first:
init_angle=target_angle
time.sleep(0.5)
def overwrite_mapinit(x, y):
map_data = []
f_size = 4.5
print("x: " + str(x))
print("y: " + str(y))
with open('fetch/map.capture_init', 'r+') as map_file:
f_size = float(map_file.readline())
print(str(f_size))
map_data = list(map_file.read().replace('\n', ''))
n = y * f_size * 900 + x * f_size
for j in range(0, 60):
for i in range(0, 60):
map_data[int(900 * j + i + n)] = 'P'
map_file.close()
with open('fetch/map.capture_init', 'w+') as f:
f.write(str(f_size) + "\n")
map_str = ''.join(map_data)
f.write(map_str)
f.close()
def process_image(image_path):
name = image_path
source_image = cv2.imread(name)
average_color_per_row = np.average(source_image, axis=0)
average_color = np.average(average_color_per_row, axis=0)
average_color = np.uint8(average_color)
print(average_color)
average_color_img = np.array([[average_color]*100]*100, np.uint8)
return average_color
nb_photo = 0
camera = picamera.PiCamera()
camera.rotation = 90
camera.contrast = 60
def found_obstacle(x, y):
global nb_photo
camera.capture("fetch/pics/photo_" + str(nb_photo) + ".jpg");
is_plant = get_plant.get_plant("fetch/pics/photo_" + str(nb_photo) + ".jpg");
if is_plant:
# Water the plant
GPIO.output(25, True)
time.sleep(2)
GPIO.output(25, False)
print("True")
# Write data in data_json
try:
with open("fetch/data_json", "rt") as file:
data = json.load(file)
except IOError:
data = {}
if 'plants' not in data:
data['plants'] = []
str_pos = str(str(int(x)) + ',' + str(int(y)))
data['plants'].append({
'position' : str_pos,
'to_water' : '0',
'picture_path': "../fetch/pics/photo_" + str(nb_photo) + ".jpg"
})
with open("fetch/data_json", "wt") as outfile:
json.dump(data, outfile)
# Write data in map.capture_init
overwrite_mapinit(int(x), int(y))
else:
print("FALSE")
nb_photo += 1
init_angle = get_angle()
tunny_right = 0
move_centimeter(100)
time.sleep(2)
turn(-3.14/2)
tunny_right = 1
move_centimeter(100)
|
python
|
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from variance.models.muscle import MuscleModel, MuscleGroupModel
class MuscleSchema(SQLAlchemyAutoSchema):
class Meta:
model = MuscleModel
load_instance = False
class MuscleGroupSchema(SQLAlchemyAutoSchema):
class Meta:
model = MuscleGroupModel
load_instance = False
|
python
|
from django.contrib import admin
from .models import PageCheckResult
@admin.register(PageCheckResult)
class PageCheckResultAdmin(admin.ModelAdmin):
list_display = ('buschenschank', 'created', 'tag_name',
'website', 'return_code')
list_filter = ('created', 'return_code', 'tag_name')
search_fields = ('buschenschank__name', 'website')
|
python
|
from player import Player
class Human(Player):
def __init__(self, symbol: str, name: str = "You"):
super().__init__(name, symbol)
def move(self, state: list[str]):
print(f"{self.name} is to move!")
while True:
inp = input("Enter an integer between 1 and 9 representing an empty cell: ")
if not inp.isdigit():
print("Input is not integer!", end=" ")
continue
pos = int(inp)
if pos > 9 or pos < 1:
print("Input is not between 1 and 9!", end=" ")
continue
if state[pos - 1] == '-':
state[pos - 1] = self.symbol
return pos - 1
else:
print("Cell is not empty!", end=" ")
|
python
|
import mnist_loader
import network2
from task_gen import TaskGen
import numpy as np
training_data2, validation_data2, test_data2 = mnist_loader.load_data_wrapper()
task_gen = TaskGen()
training_data = task_gen.generate(1000)
validation_data = task_gen.generate(1000, False)
'''
print training_data2[0][1]
print training_data[0][1]
print validation_data2[0][1]
print validation_data[0][1]
'''
net = network2.Network([32, 30, 8])
net.SGD(training_data, 30, 10, 0.005, lmbda = 5.0,
evaluation_data=validation_data, monitor_evaluation_accuracy=True)
|
python
|
# -*- coding: utf-8 -*-
import hashlib
import unicodedata
from collections import Counter, defaultdict
from six import text_type
from hayes.analysis import builtin_simple_analyzer
from hayes.ext.stopwords import (
english_stopwords, finnish_stopwords, russian_stopwords, swedish_stopwords,
unicode_punctuation_chars)
from hayes.indexing import DocumentIndex, IntegerField, TextField
from hayes.search import Search
from hayes.search.queries import MatchAllQuery, PrefixQuery
def default_tokenizer(content):
return content.split()
def smart_tokenizer(content, stopwords=()):
words = text_type(content).split()
words = [word.strip(unicode_punctuation_chars) for word in words]
words = [word for word in words if len(
word) > 3 and word.lower() not in stopwords]
# Filter out full numbers
words = [word for word in words if not word.isdigit()]
return words
def smart_finnish_tokenizer(content):
return smart_tokenizer(content, stopwords=finnish_stopwords)
def smart_swedish_tokenizer(content):
return smart_tokenizer(content, stopwords=swedish_stopwords)
def smart_russian_tokenizer(content):
return smart_tokenizer(content, stopwords=russian_stopwords)
def smart_english_tokenizer(content):
return smart_tokenizer(content, stopwords=english_stopwords)
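# Rough example of the filtering above (assumes unicode_punctuation_chars covers ASCII
# punctuation; the sentence is made up):
#   smart_tokenizer("The 1999 report, quickly reviewed!", stopwords=("the",))
#   # -> ['report', 'quickly', 'reviewed']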
class WordGatherer(object):
def __init__(self, connection, target_type, coll_name=None):
"""
:type connection: hayes.conn.Hayes
"""
self.connection = connection
self.target_type = target_type
self.target_coll_name = (
coll_name or self.connection.default_coll_name)
self.index = index = DocumentIndex()
index.name = self.target_type
index.fields = {
"word": TextField(analyzer=builtin_simple_analyzer),
"count": IntegerField(),
}
def reset(self):
""" Reset target collection (rebuild index).
"""
self.connection.rebuild_index(
self.index, coll_name=self.target_coll_name)
def _tokenize_documents(self, index, fields, tokenizer=default_tokenizer):
search = Search(MatchAllQuery())
for doc in self.connection.search_iter(
search, indexes=[index], count=200):
for field in fields:
value = doc.get(field)
if not value:
continue
words = tokenizer(value)
yield doc, words
def _gather_words(self, index, fields, tokenizer=default_tokenizer):
word_counts = Counter()
for _doc, words in self._tokenize_documents(
index, fields, tokenizer=tokenizer):
if words:
for word in words:
word_counts[word] += 1
return word_counts
def update(self, index, fields, tokenizer=default_tokenizer, cutoff=1):
"""
Update (upsert) the wordgatherer collection.
:param index: Source index.
:param fields: Fields to read.
:param tokenizer: Tokenizer callable. Should split unicode to words
:param cutoff: Ignore words with less than this many occurrences.
"""
counts_by_uid = defaultdict(Counter)
for word, count in self._gather_words(
index, fields, tokenizer=tokenizer).items():
uid = hashlib.sha1(unicodedata.normalize(
"NFKD", word.lower()).encode("UTF-8")).hexdigest()
counts_by_uid[uid][word] += count
for uid, word_to_count in counts_by_uid.items():
word = word_to_count.most_common(1)[0][0]
count = sum(word_to_count.values())
if count <= cutoff:
continue
self.connection.session.post(
"/%s/%s/%s/_update" % (self.target_coll_name,
self.target_type, uid),
data={
"script": "ctx._source.count += count",
"params": {"count": count},
"upsert": {"word": word, "count": count}
})
def search(self, word, limit=30):
"""
Search for a word within the wordgatherer collection.
:param word: Word to search for.
:param limit: Maximum number of results to return.
"""
search = Search(PrefixQuery("word", word), sort={"count": "desc"})
for doc in self.connection.search(
search, indexes=[self.index], count=limit):
yield (doc["word"], doc["count"])
|
python
|
from app import db
from datetime import datetime
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from app import login
class User(UserMixin,db.Model):
id=db.Column(db.Integer,primary_key=True)
username = db.Column(db.String(64),index=True,unique=True)
email = db.Column(db.String(120),index=True,unique=True)
password_hash=db.Column(db.String(128))
posts = db.relationship('Post', backref='author', lazy='dynamic')
def __repr__(self):
        return '<User {}>'.format(self.username)
def set_password(self,password):
self.password_hash=generate_password_hash(password)
def check_password(self,password):
return check_password_hash(self.password_hash,password)
@login.user_loader
def load_user(id):
return User.query.get(int(id))
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
        return '<Post {}>'.format(self.body)
|
python
|
from dataclasses import dataclass
from typing import Sequence
from .phrase import Phrase
from .translation_sources import TranslationSources
from .word_alignment_matrix import WordAlignmentMatrix
@dataclass(frozen=True)
class TranslationResult:
source_segment: Sequence[str]
target_segment: Sequence[str]
word_confidences: Sequence[float]
word_sources: Sequence[TranslationSources]
alignment: WordAlignmentMatrix
phrases: Sequence[Phrase]
def __post_init__(self) -> None:
if len(self.word_confidences) != len(self.target_segment):
raise ValueError("The confidences must the same length as the target segment.")
if len(self.word_sources) != len(self.target_segment):
raise ValueError("The sources must the same length as the target segment.")
if self.alignment.row_count != len(self.source_segment):
raise ValueError("The alignment source length must be the same length as the source segment.")
if self.alignment.column_count != len(self.target_segment):
raise ValueError("The alignment target length must be the same length as the target segment.")
|
python
|
#!/usr/bin/env python
from collections import defaultdict
# 000000000111111111122222222222333333333344444444
# 123456789012345678901234567890123456789012345678
# Step G must be finished before step X can begin.
def parse_line(input_line):
return input_line[5], input_line[36]
def parse_input(content):
return [parse_line(line) for line in content]
def get_yet_unavailable(requirements):
unavailable_yet = set()
for required_step, for_steps in requirements.iteritems():
for step in for_steps:
unavailable_yet.add(step)
return unavailable_yet
def task1(steps):
backlog = set()
requirements = defaultdict(list)
for required_step, for_step in steps:
backlog.add(required_step)
backlog.add(for_step)
requirements[required_step].append(for_step)
instruction = ""
while backlog:
unavailable_yet = get_yet_unavailable(requirements)
as_list = sorted(list(backlog.difference(unavailable_yet)))
next_step, _ = as_list[0], set(as_list[1:])
backlog.discard(next_step)
instruction += next_step
requirements.pop(next_step, None)
return instruction
def task2(steps, workers, initial_step_work):
backlog = set()
requirements = defaultdict(list)
unavailable_yet = set()
for required_step, for_step in steps:
backlog.add(required_step)
backlog.add(for_step)
requirements[required_step].append(for_step)
unavailable_yet.add(for_step)
assigned = set() # (Step, Accomplish Time)
time = 0
while backlog or assigned:
accomplished = set([step for step, accomplish_time in assigned if accomplish_time <= time])
assigned = set([(step, accomplish_time) for step, accomplish_time in assigned if accomplish_time > time])
if accomplished:
for accomplished_item in accomplished:
requirements.pop(accomplished_item, None)
unavailable_yet = get_yet_unavailable(requirements)
backlog = backlog.difference(accomplished)
tmp = set([step for step, _ in assigned])
steps_to_go = sorted(list(backlog.difference(unavailable_yet).difference(tmp)))
while len(assigned) < workers and len(steps_to_go) > 0:
next, steps_to_go = steps_to_go[0], steps_to_go[1:]
assigned.add((next, time + initial_step_work + ord(next) - ord('A') + 1))
time += 1
return time - 1
# print task1(parse_input(open("input7.test").readlines()))
print task1(parse_input(open("input7.txt").readlines()))
# print task2(parse_input(open("input7.test").readlines()), 2, 0)
print task2(parse_input(open("input7.txt").readlines()), 5, 60)
|
python
|
from lxml.builder import E
from sciencebeam_lab.lxml_to_svg import (
iter_svg_pages_for_lxml,
SVG_TEXT,
SVG_G,
SVG_DOC
)
SOME_TEXT = "some text"
SOME_X = "10"
SOME_Y = "20"
SOME_BASE = "25"
SOME_HEIGHT = "30"
SOME_FONT_SIZE = "40"
SOME_FONT_FAMILY = "Fontastic"
SOME_FONT_COLOR = '#123'
class LXML(object):
X = 'x'
Y = 'y'
BASE = 'base'
HEIGHT = 'height'
FONT_SIZE = 'font-size'
FONT_NAME = 'font-name'
FONT_COLOR = 'font-color'
class SVG(object):
X = 'x'
Y = 'y'
HEIGHT = 'height'
FONT_SIZE = 'font-size'
FONT_FAMILY = 'font-family'
FILL = 'fill'
COMMON_LXML_TOKEN_ATTRIBS = {
LXML.X: SOME_X,
LXML.Y: SOME_Y,
LXML.HEIGHT: SOME_HEIGHT,
LXML.FONT_SIZE: SOME_FONT_SIZE,
LXML.FONT_NAME: SOME_FONT_FAMILY,
LXML.FONT_COLOR: SOME_FONT_COLOR
}
def dict_extend(*dicts):
d = dict()
for x in dicts:
d.update(x)
return d
class TestIterSvgPagesForLxml(object):
def test_should_return_one_page(self):
lxml_root = E.DOCUMENT(
E.PAGE(
)
)
svg_pages = list(iter_svg_pages_for_lxml(lxml_root))
assert len(svg_pages) == 1
def test_should_return_multiple_pages(self):
lxml_root = E.DOCUMENT(
E.PAGE(
),
E.PAGE(
),
E.PAGE(
)
)
svg_pages = list(iter_svg_pages_for_lxml(lxml_root))
assert len(svg_pages) == 3
def test_should_create_text_node_with_common_attributes(self):
lxml_root = E.DOCUMENT(
E.PAGE(
E.TEXT(
E.TOKEN(
SOME_TEXT,
COMMON_LXML_TOKEN_ATTRIBS
)
)
)
)
svg_pages = list(iter_svg_pages_for_lxml(lxml_root))
assert len(svg_pages) == 1
first_page = svg_pages[0]
svg_text = first_page.find('.//' + SVG_TEXT)
assert svg_text is not None
assert svg_text.text == SOME_TEXT
assert float(svg_text.attrib[SVG.X]) == float(SOME_X)
assert float(svg_text.attrib[SVG.Y]) == float(SOME_Y)
assert float(svg_text.attrib[SVG.FONT_SIZE]) == float(SOME_FONT_SIZE)
assert svg_text.attrib[SVG.FONT_FAMILY] == SOME_FONT_FAMILY
assert svg_text.attrib[SVG.FILL] == SOME_FONT_COLOR
def test_should_use_base_as_y_in_svg_if_available(self):
lxml_root = E.DOCUMENT(
E.PAGE(
E.TEXT(
E.TOKEN(
SOME_TEXT,
dict_extend(COMMON_LXML_TOKEN_ATTRIBS, {
LXML.BASE: SOME_BASE
})
)
)
)
)
svg_pages = list(iter_svg_pages_for_lxml(lxml_root))
assert len(svg_pages) == 1
first_page = svg_pages[0]
svg_text = first_page.find('.//' + SVG_TEXT)
assert float(svg_text.attrib[SVG.Y]) == float(SOME_BASE)
def test_should_keep_text_block_structure_without_block(self):
lxml_root = E.DOCUMENT(
E.PAGE(
E.TEXT(
E.TOKEN(
SOME_TEXT,
dict_extend(COMMON_LXML_TOKEN_ATTRIBS, {
LXML.BASE: SOME_BASE
})
)
)
)
)
svg_pages = list(iter_svg_pages_for_lxml(lxml_root))
assert len(svg_pages) == 1
first_page = svg_pages[0]
svg_text = first_page.find('.//' + SVG_TEXT)
assert svg_text is not None
assert svg_text.getparent().tag == SVG_G
assert svg_text.getparent().getparent().tag == SVG_DOC
def test_should_keep_text_block_structure_with_block(self):
lxml_root = E.DOCUMENT(
E.PAGE(
E.BLOCK(
E.TEXT(
E.TOKEN(
SOME_TEXT,
dict_extend(COMMON_LXML_TOKEN_ATTRIBS, {
LXML.BASE: SOME_BASE
})
)
)
)
)
)
svg_pages = list(iter_svg_pages_for_lxml(lxml_root))
assert len(svg_pages) == 1
first_page = svg_pages[0]
svg_text = first_page.find('.//' + SVG_TEXT)
assert svg_text is not None
assert svg_text.getparent().tag == SVG_G
assert svg_text.getparent().getparent().tag == SVG_G
assert svg_text.getparent().getparent().getparent().tag == SVG_DOC
|
python
|
# coding: utf8
from __future__ import print_function
import os, sys, re
from xml.dom import minidom
class HamshahriReader():
"""
    Interfaces the [Hamshahri Corpus](http://ece.ut.ac.ir/dbrg/hamshahri/files/HAM2/Corpus.zip), which you must download and extract first.
>>> hamshahri = HamshahriReader(root='corpora/hamshahri')
>>> next(hamshahri.docs())['id']
'HAM2-750403-001'
"""
def __init__(self, root):
self._root = root
self._invalids = set(['hamshahri.dtd', 'HAM2-960622.xml', 'HAM2-960630.xml', 'HAM2-960701.xml', 'HAM2-960709.xml', 'HAM2-960710.xml', 'HAM2-960711.xml', 'HAM2-960817.xml', 'HAM2-960818.xml', 'HAM2-960819.xml', 'HAM2-960820.xml', 'HAM2-961019.xml', 'HAM2-961112.xml', 'HAM2-961113.xml', 'HAM2-961114.xml', 'HAM2-970414.xml', 'HAM2-970415.xml', 'HAM2-970612.xml', 'HAM2-970614.xml', 'HAM2-970710.xml', 'HAM2-970712.xml', 'HAM2-970713.xml', 'HAM2-970717.xml', 'HAM2-970719.xml', 'HAM2-980317.xml', 'HAM2-040820.xml', 'HAM2-040824.xml', 'HAM2-040825.xml', 'HAM2-040901.xml', 'HAM2-040917.xml', 'HAM2-040918.xml', 'HAM2-040920.xml', 'HAM2-041025.xml', 'HAM2-041026.xml', 'HAM2-041027.xml', 'HAM2-041230.xml', 'HAM2-041231.xml', 'HAM2-050101.xml', 'HAM2-050102.xml', 'HAM2-050223.xml', 'HAM2-050224.xml', 'HAM2-050406.xml', 'HAM2-050407.xml', 'HAM2-050416.xml'])
self._paragraph_pattern = re.compile(r'(\n.{0,50})(?=\n)')
def docs(self):
for root, dirs, files in os.walk(self._root):
for name in sorted(files):
if name in self._invalids:
continue
try:
elements = minidom.parse(os.path.join(root, name))
for element in elements.getElementsByTagName('DOC'):
doc = {}
doc['id'] = element.getElementsByTagName('DOCID')[0].childNodes[0].data
doc['issue'] = element.getElementsByTagName('ISSUE')[0].childNodes[0].data
for cat in element.getElementsByTagName('CAT'):
doc['categories_'+ cat.attributes['xml:lang'].value] = cat.childNodes[0].data.split('.')
elm = element.getElementsByTagName('TITLE')[0]
doc['title'] = elm.childNodes[1].data if len(elm.childNodes) > 1 else ''
doc['text'] = ''
for item in element.getElementsByTagName('TEXT')[0].childNodes:
if item.nodeType == 4: # CDATA
doc['text'] += item.data
# refine text
doc['text'] = self._paragraph_pattern.sub(r'\1\n', doc['text']).replace('\no ', '\n')
yield doc
except Exception as e:
print('error in reading', name, e, file=sys.stderr)
def texts(self):
for doc in self.docs():
yield doc['text']
|
python
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# class declaration
class Mapping:
"""
Mix-in class that forms the basis of the representation of mappings
Mappings are dictionaries with arbitrary keys whose values are nodes
"""
# types
from .exceptions import CircularReferenceError
# constants
category = "mapping"
# public data
@property
def operands(self):
"""
Iterate over my operands
"""
# easy enough
yield from self.data.values()
# all done
return
# classifiers
@property
def mappings(self):
"""
Return a sequence over mappings in my dependency graph
"""
# i am one
yield self
# nothing further
return
# value management
def getValue(self, **kwds):
"""
Compute and return my value
"""
# return the value of each operand
return {name: op.value for name, op in self.data.items()}
def setValue(self, value):
"""
Add the {key, node} pair in {value} to the mapping
"""
# unpack
key, node = value
# store
        self.data[key] = node
# all done
return self
# meta-methods
def __init__(self, operands, **kwds):
# chain up with an empty pile of dependencies
super().__init__(operands=(), **kwds)
# my operands are in a dict
self.data = dict(**operands)
# all done
return
def __getitem__(self, key):
# return the value of the node stored under {key}
return self.data[key].value
def __setitem__(self, key, node):
# store {node} under {key}
self.data[key] = node
# all done
return
# implementation details
def _substitute(self, current, replacement):
"""
Adjust the operands by substituting {replacement} for {current} in the set of operands
"""
# go through my data
for name, operand in self.data.items():
# if we found the match
if operand is current:
# replace it
self.data[name] = replacement
# all done
return self
# end of file
|
python
|
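# Minimal Streamlit demo; typically launched with: streamlit run <this_file>.py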
import streamlit as st
st.title('titulo')
st.button('cl')
st.sidebar.radio('radio',[1,2,3])
for i in range(10):
st.write('holla')
|
python
|
#!python
from __future__ import print_function
import time
from arduino_device import findArduinoDevicePorts
from arduino_olfactometer import ArduinoOlfactometers
from arduino_olfactometer import isOlfactometerPortInfo
from faa_actuation import PwmController
from faa_actuation import CurrentController
from faa_actuation import isPwmControllerPortInfo
from faa_actuation import isCurrentControllerPortInfo
DEBUG = True
class Actuation(object):
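    """
    Detects attached Arduino devices and wires up whichever PWM controller,
    olfactometers, and current controller are found on the serial ports.
    """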
def __init__(self,*args,**kwargs):
t_start = time.time()
pwm_controller_port = None
olfactometer_ports = []
current_controller_port = None
self.pwm_controller = None
self.olfactometers = None
self.current_controller = None
arduino_device_ports = findArduinoDevicePorts()
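        # classify each detected serial port by the kind of device it reports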
for port in arduino_device_ports:
port_info = arduino_device_ports[port]
if isPwmControllerPortInfo(port_info):
pwm_controller_port = port
elif isOlfactometerPortInfo(port_info):
olfactometer_ports.append(port)
elif isCurrentControllerPortInfo(port_info):
current_controller_port = port
if pwm_controller_port is not None:
self.pwm_controller = PwmController(port=pwm_controller_port)
self.pwm_controller.setDeviceName('pwm_controller')
if len(olfactometer_ports) != 0:
self.olfactometers = ArduinoOlfactometers(use_ports=olfactometer_ports)
self.olfactometers.sortBySerialNumber()
try:
self.olfactometers[0].setDeviceName('olfactometer_odor1')
self.olfactometers[1].setDeviceName('olfactometer_odor2')
self.olfactometers[2].setDeviceName('olfactometer_ethanol')
except IndexError:
pass
if current_controller_port is not None:
self.current_controller = CurrentController(port=current_controller_port)
self.current_controller.setDeviceName('current_controller')
t_end = time.time()
print('Initialization time =', (t_end - t_start))
|
python
|