content
stringlengths 0
894k
| type
stringclasses 2
values |
---|---|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : download.py
@Time : 2020/11/08
@Author : Yaronzz
@Version : 1.0
@Contact : [email protected]
@Desc :
'''
import os
import aigpy
import logging
import lyricsgenius
from tidal_dl.settings import Settings
from tidal_dl.tidal import TidalAPI
from tidal_dl.enum import Type, AudioQuality, VideoQuality
from tidal_dl.model import Track, Video, Album
from tidal_dl.printf import Printf
from tidal_dl.decryption import decrypt_security_token
from tidal_dl.decryption import decrypt_file
API = TidalAPI()
def __loadAPI__(user):
    """Copy the stored user credentials into the shared track/album API client."""
    API.key.accessToken = user.accessToken
    API.key.userId = user.userid
    API.key.countryCode = user.countryCode
    #API.key.sessionId = user.sessionid1
def __loadVideoAPI__(user):
    """Copy the stored user credentials into the API client for video downloads."""
    API.key.accessToken = user.accessToken
    API.key.userId = user.userid
    API.key.countryCode = user.countryCode
    #API.key.sessionId = user.sessionid2 if not aigpy.string.isNull(user.sessionid2) else user.sessionid1
def __getIndexStr__(index):
pre = "0"
if index < 10:
return pre+str(index)
if index < 99:
return str(index)
return str(index)
def __getExtension__(url):
if '.flac' in url:
return '.flac'
if '.mp4' in url:
return '.mp4'
return '.m4a'
def __getArtists__(array):
ret = []
for item in array:
ret.append(item.name)
return ret
def __parseContributors__(roleType, Contributors):
if Contributors is None:
return None
try:
ret = []
for item in Contributors['items']:
if item['role'] == roleType:
ret.append(item['name'])
return ret
except:
return None
# Shared Genius client used for lyrics lookup.
# NOTE(review): this embeds a hard-coded API token in source control —
# consider moving it into the user settings/configuration.
GEMIUS = lyricsgenius.Genius('vNKbAWAE3rVY_48nRaiOrDcWNLvsxS-Z8qyG5XfEzTOtZvkTfg6P3pxOVlA2BjaW')
def __getLyrics__(trackName, artistName, proxy):
    """Fetch lyrics for a track from Genius; return "" on any failure.

    When `proxy` is non-empty, both http and https traffic of the shared
    Genius session is routed through it.
    """
    try:
        if not aigpy.string.isNull(proxy):
            GEMIUS._session.proxies = {
                'http': f'http://{proxy}',
                'https': f'http://{proxy}',
            }
        song = GEMIUS.search_song(trackName, artistName)
        return song.lyrics
    except:
        # best-effort: lyrics are optional, so any error yields empty lyrics
        return ""
def __setMetaData__(track, album, filepath, contributors, lyrics):
    """Write audio tags (and embedded cover art) into the downloaded file.

    `contributors` is the raw contributors payload from the API; `lyrics`
    may be an empty string.
    """
    obj = aigpy.tag.TagTool(filepath)
    obj.album = track.album.title
    obj.title = track.title
    # append the track version (e.g. remix name) to the title, as elsewhere
    if not aigpy.string.isNull(track.version):
        obj.title += ' (' + track.version + ')'
    obj.artist = __getArtists__(track.artists)
    obj.copyright = track.copyRight
    obj.tracknumber = track.trackNumber
    obj.discnumber = track.volumeNumber
    obj.composer = __parseContributors__('Composer', contributors)
    obj.isrc = track.isrc
    obj.albumartist = __getArtists__(album.artists)
    obj.date = album.releaseDate
    obj.totaldisc = album.numberOfVolumes
    obj.lyrics = lyrics
    # total track count only set for single-disc releases
    if obj.totaldisc <= 1:
        obj.totaltrack = album.numberOfTracks
    coverpath = API.getCoverUrl(album.cover, "1280", "1280")
    obj.save(coverpath)
    return
def __convertToM4a__(filepath, codec):
if 'ac4' in codec or 'mha1' in codec:
return filepath
if '.mp4' not in filepath:
return filepath
newpath = filepath.replace('.mp4', '.m4a')
aigpy.path.remove(newpath)
os.rename(filepath, newpath)
return newpath
def __stripPathParts__(stripped_path, separator):
result = ""
stripped_path = stripped_path.split(separator)
for stripped_path_part in stripped_path:
result += stripped_path_part.strip()
if not stripped_path.index(stripped_path_part) == len(stripped_path) - 1:
result += separator
return result.strip()
def __stripPath__(path):
    """Normalize whitespace around segments for both '/' and '\\' separators."""
    for separator in ("/", "\\"):
        path = __stripPathParts__(path, separator)
    return path.strip()
# "{ArtistName}/{Flag} [{AlbumID}] [{AlbumYear}] {AlbumTitle}"
def __getAlbumPath__(conf: Settings, album):
    """Build the album download directory from conf.albumFolderFormat.

    Substitutes the {ArtistName}, {Flag}, {AlbumID}, {AlbumYear} and
    {AlbumTitle} placeholders and returns downloadPath + '/Album/' + result.
    """
    base = conf.downloadPath + '/Album/'
    artist = aigpy.path.replaceLimitChar(album.artists[0].name, '-')
    # quality/explicit flag (e.g. "M", "E"), filtered down by settings
    flag = API.getFlag(album, Type.Album, True, "")
    if conf.audioQuality != AudioQuality.Master:
        flag = flag.replace("M", "")
    if not conf.addExplicitTag:
        flag = flag.replace("E", "")
    if not aigpy.string.isNull(flag):
        flag = "[" + flag + "] "
    sid = str(album.id)
    # album title and release year
    albumname = aigpy.path.replaceLimitChar(album.title, '-')
    year = ""
    if album.releaseDate is not None:
        # NOTE(review): getSubOnlyEnd(date, '-') presumably extracts the
        # year component of the release date — confirm for "YYYY-MM-DD".
        year = aigpy.string.getSubOnlyEnd(album.releaseDate, '-')
    # fill the user-configured (or default) folder format
    retpath = conf.albumFolderFormat
    if retpath is None or len(retpath) <= 0:
        retpath = Settings.getDefaultAlbumFolderFormat()
    retpath = retpath.replace(R"{ArtistName}", artist.strip())
    retpath = retpath.replace(R"{Flag}", flag)
    retpath = retpath.replace(R"{AlbumID}", sid)
    retpath = retpath.replace(R"{AlbumYear}", year)
    retpath = retpath.replace(R"{AlbumTitle}", albumname.strip())
    retpath = __stripPath__(retpath.strip())
    return base + retpath
def __getAlbumPath2__(conf, album):
    """Build a fixed-layout album directory: downloadPath/Album/artist/[flags] name/.

    Legacy variant of __getAlbumPath__ that does not use the configurable
    folder-format string.
    """
    # outputdir/Album/artist/
    artist = aigpy.path.replaceLimitChar(album.artists[0].name, '-').strip()
    base = conf.downloadPath + '/Album/' + artist + '/'
    # album folder prefix flags, e.g. "[M E] "
    flag = API.getFlag(album, Type.Album, True, "")
    if conf.audioQuality != AudioQuality.Master:
        flag = flag.replace("M", "")
    if not conf.addExplicitTag:
        flag = flag.replace("E", "")
    if not aigpy.string.isNull(flag):
        flag = "[" + flag + "] "
    sid = "[" + str(album.id) + "] " if conf.addAlbumIDBeforeFolder else ""
    # album title and optional "[year] " prefix
    albumname = aigpy.path.replaceLimitChar(album.title, '-').strip()
    year = ""
    if conf.addYear and album.releaseDate is not None:
        year = "[" + aigpy.string.getSubOnlyEnd(album.releaseDate, '-') + "] "
    return base + flag + sid + year + albumname + '/'
def __getPlaylistPath__(conf, playlist):
    """Return downloadPath/Playlist/<sanitized playlist title>/."""
    base = conf.downloadPath + '/Playlist/'
    # sanitize characters not allowed in file names
    name = aigpy.path.replaceLimitChar(playlist.title, '-')
    return base + name + '/'
# "{TrackNumber} - {ArtistName} - {TrackTitle}{ExplicitFlag}"
def __getTrackPath__(conf: Settings, track, stream, album=None, playlist=None):
if album is not None:
base = __getAlbumPath__(conf, album) + '/'
if album.numberOfVolumes > 1:
base += 'CD' + str(track.volumeNumber) + '/'
if playlist is not None and conf.usePlaylistFolder:
base = __getPlaylistPath__(conf, playlist)
# number
number = __getIndexStr__(track.trackNumber)
if playlist is not None and conf.usePlaylistFolder:
number = __getIndexStr__(track.trackNumberOnPlaylist)
# artist
artist = aigpy.path.replaceLimitChar(track.artists[0].name, '-')
# title
title = track.title
if not aigpy.string.isNull(track.version):
title += ' (' + track.version + ')'
title = aigpy.path.replaceLimitChar(title, '-')
# get explicit
explicit = "(Explicit)" if conf.addExplicitTag and track.explicit else ''
#album and addyear
albumname = aigpy.path.replaceLimitChar(album.title, '-')
year = ""
if album.releaseDate is not None:
year = aigpy.string.getSubOnlyEnd(album.releaseDate, '-')
# extension
extension = __getExtension__(stream.url)
retpath = conf.trackFileFormat
if retpath is None or len(retpath) <= 0:
retpath = Settings.getDefaultTrackFileFormat()
retpath = retpath.replace(R"{TrackNumber}", number)
retpath = retpath.replace(R"{ArtistName}", artist.strip())
retpath = retpath.replace(R"{TrackTitle}", title)
retpath = retpath.replace(R"{ExplicitFlag}", explicit)
retpath = retpath.replace(R"{AlbumYear}", year)
retpath = retpath.replace(R"{AlbumTitle}", albumname.strip())
retpath = retpath.strip()
return base + retpath + extension
def __getTrackPath2__(conf, track, stream, album=None, playlist=None):
    """Legacy track-path builder using fixed "number - artist - title" layout.

    NOTE(review): like __getTrackPath__, `base` is unbound when neither
    album nor (playlist + usePlaylistFolder) is given — confirm callers.
    """
    if album is not None:
        base = __getAlbumPath__(conf, album)
        if album.numberOfVolumes > 1:
            base += 'CD' + str(track.volumeNumber) + '/'
    if playlist is not None and conf.usePlaylistFolder:
        base = __getPlaylistPath__(conf, playlist)
    # separator between number/artist/title segments
    hyphen = ' - ' if conf.addHyphen else ' '
    # optional leading track number (playlist-relative inside playlists)
    number = ''
    if conf.useTrackNumber:
        number = __getIndexStr__(track.trackNumber) + hyphen
        if playlist is not None:
            number = __getIndexStr__(track.trackNumberOnPlaylist) + hyphen
    # optional leading artist name
    artist = ''
    if conf.artistBeforeTitle:
        artist = aigpy.path.replaceLimitChar(track.artists[0].name, '-') + hyphen
    explicit = "(Explicit)" if conf.addExplicitTag and track.explicit else ''
    # title with optional version suffix, sanitized
    title = track.title
    if not aigpy.string.isNull(track.version):
        title += ' - ' + track.version
    title = aigpy.path.replaceLimitChar(title, '-')
    extension = __getExtension__(stream.url)
    return base + number + artist.strip() + title + explicit + extension
def __getVideoPath__(conf, video, album=None, playlist=None):
    """Build the output .mp4 path for a video (album, playlist or Video folder)."""
    if album is not None and album.title is not None:
        base = __getAlbumPath__(conf, album)
    elif playlist is not None and conf.usePlaylistFolder:
        base = __getPlaylistPath__(conf, playlist)
    else:
        base = conf.downloadPath + '/Video/'
    # separator between number/artist/title segments
    hyphen = ' - ' if conf.addHyphen else ' '
    # optional leading track number
    number = ''
    if conf.useTrackNumber:
        number = __getIndexStr__(video.trackNumber) + hyphen
    # optional leading artist name
    artist = ''
    if conf.artistBeforeTitle:
        artist = aigpy.path.replaceLimitChar(video.artists[0].name, '-') + hyphen
    explicit = "(Explicit)" if conf.addExplicitTag and video.explicit else ''
    # sanitized title; videos are always saved as .mp4
    title = aigpy.path.replaceLimitChar(video.title, '-')
    extension = ".mp4"
    return base + number + artist.strip() + title + explicit + extension
def __isNeedDownload__(path, url):
    """Return True when the local file is absent/empty or smaller than the remote file."""
    localSize = aigpy.file.getSize(path)
    if localSize <= 0:
        return True
    # re-download only if the remote copy is larger than what we have
    return localSize < aigpy.net.getSize(url)
def __downloadVideo__(conf, video: Video, album=None, playlist=None):
    """Download a single video via its m3u8 stream; errors are printed, not raised."""
    if video.allowStreaming is False:
        Printf.err("Download failed! " + video.title + ' not allow streaming.')
        return
    msg, stream = API.getVideoStreamUrl(video.id, conf.videoQuality)
    # NOTE(review): Printf.video runs before the error check below; on an
    # API failure `stream` may be None here — confirm Printf.video tolerates it.
    Printf.video(video, stream)
    if not aigpy.string.isNull(msg):
        Printf.err(video.title + "." + msg)
        return
    path = __getVideoPath__(conf, video, album, playlist)
    logging.info("[DL Video] name=" + aigpy.path.getFileName(path) + "\nurl=" + stream.m3u8Url)
    check, msg = aigpy.m3u8.download(stream.m3u8Url, path)
    if check is True:
        Printf.success(aigpy.path.getFileName(path))
    else:
        Printf.err("\nDownload failed!" + msg + '(' + aigpy.path.getFileName(path) + ')')
def __downloadTrack__(conf: Settings, track: Track, album=None, playlist=None):
    """Download one track: fetch stream, save, decrypt if needed, then tag.

    All failures are reported via Printf.err; nothing propagates to the caller.
    """
    try:
        if track.allowStreaming is False:
            Printf.err("Download failed! " + track.title + ' not allow streaming.')
            return
        msg, stream = API.getStreamUrl(track.id, conf.audioQuality)
        Printf.track(track, stream)
        if not aigpy.string.isNull(msg) or stream is None:
            Printf.err(track.title + "." + msg)
            return
        path = __getTrackPath__(conf, track, stream, album, playlist)
        # skip files that are already fully downloaded
        if conf.checkExist and __isNeedDownload__(path, stream.url) == False:
            Printf.success(aigpy.path.getFileName(path) + " (skip:already exists!)")
            return
        logging.info("[DL Track] name=" + aigpy.path.getFileName(path) + "\nurl=" + stream.url)
        # download into a ".part" temp file, then move/decrypt into place
        tool = aigpy.download.DownloadTool(path + '.part', [stream.url])
        check, err = tool.start(conf.showProgress)
        if not check:
            Printf.err("Download failed! " + aigpy.path.getFileName(path) + ' (' + str(err) + ')')
            return
        # encrypted -> decrypt and remove encrypted file
        if aigpy.string.isNull(stream.encryptionKey):
            os.replace(path + '.part', path)
        else:
            key, nonce = decrypt_security_token(stream.encryptionKey)
            decrypt_file(path + '.part', path, key, nonce)
            os.remove(path + '.part')
        # .mp4 audio containers are renamed to .m4a (except AC-4/MPEG-H)
        path = __convertToM4a__(path, stream.codec)
        # tag with contributors and (optionally) Genius lyrics
        contributors = API.getTrackContributors(track.id)
        lyrics = ''
        if conf.addLyrics:
            lyrics = __getLyrics__(track.title, track.artists[0].name, conf.lyricsServerProxy)
        __setMetaData__(track, album, path, contributors, lyrics)
        Printf.success(aigpy.path.getFileName(path))
    except Exception as e:
        Printf.err("Download failed! " + track.title + ' (' + str(e) + ')')
def __downloadCover__(conf, album):
    """Download the 1280x1280 album cover to cover.jpg in the album folder.

    Idiom fix: `album == None` replaced with the identity check `is None`.
    """
    if album is None:
        return
    path = __getAlbumPath__(conf, album) + '/cover.jpg'
    url = API.getCoverUrl(album.cover, "1280", "1280")
    if url is not None:
        aigpy.net.downloadFile(url, path)
def __saveAlbumInfo__(conf, album, tracks):
    """Write a human-readable AlbumInfo.txt into the album folder.

    Fixes: `album == None` -> `is None`; the manual `while True` volume
    counter is replaced by an equivalent `for` over range (same 1-based
    CD numbering, same output).
    """
    if album is None:
        return
    path = __getAlbumPath__(conf, album) + '/AlbumInfo.txt'
    infos = ""
    infos += "[ID] %s\n" % (str(album.id))
    infos += "[Title] %s\n" % (str(album.title))
    # NOTE(review): uses album.artist (singular) while other code uses
    # album.artists — confirm the model exposes both.
    infos += "[Artists] %s\n" % (str(album.artist.name))
    infos += "[ReleaseDate] %s\n" % (str(album.releaseDate))
    infos += "[SongNum] %s\n" % (str(album.numberOfTracks))
    infos += "[Duration] %s\n" % (str(album.duration))
    infos += '\n'
    # one section per disc, listing its tracks in order
    for i in range(1, int(album.numberOfVolumes) + 1):
        infos += "===========CD %d=============\n" % i
        for item in tracks:
            if item.volumeNumber != i:
                continue
            infos += '{:<8}'.format("[%d]" % item.trackNumber)
            infos += "%s\n" % item.title
    aigpy.file.write(path, infos, "w+")
def __album__(conf, obj):
    """Download every track and video of an album (plus optional info/cover)."""
    Printf.album(obj)
    msg, tracks, videos = API.getItems(obj.id, Type.Album)
    if not aigpy.string.isNull(msg):
        Printf.err(msg)
        return
    if conf.saveAlbumInfo:
        __saveAlbumInfo__(conf, obj, tracks)
    if conf.saveCovers:
        __downloadCover__(conf, obj)
    for item in tracks:
        __downloadTrack__(conf, item, obj)
    for item in videos:
        __downloadVideo__(conf, item, obj)
def __track__(conf, obj):
    """Download a single track, fetching its album first for paths/tags."""
    # Printf.track(obj)
    msg, album = API.getAlbum(obj.album.id)
    # NOTE(review): `msg` is never checked — a failed getAlbum silently
    # proceeds with album possibly None; confirm downstream handles that.
    if conf.saveCovers:
        __downloadCover__(conf, album)
    __downloadTrack__(conf, obj, album)
def __video__(conf, obj):
    """Download a single video, using its album (if any) to build the path."""
    # Printf.video(obj)
    __downloadVideo__(conf, obj, obj.album)
def __artist__(conf, obj):
    """Download every album of an artist.

    Fix: validate the API result BEFORE using it — the original called
    Printf.artist(obj, len(albums)) first, which raises if `albums` is not
    a sequence on an error response.
    """
    msg, albums = API.getArtistAlbums(obj.id, conf.includeEP)
    if not aigpy.string.isNull(msg):
        Printf.err(msg)
        return
    Printf.artist(obj, len(albums))
    for item in albums:
        __album__(conf, item)
def __playlist__(conf, obj):
    """Download all tracks and videos of a playlist."""
    Printf.playlist(obj)
    msg, tracks, videos = API.getItems(obj.uuid, Type.Playlist)
    if not aigpy.string.isNull(msg):
        Printf.err(msg)
        return
    for index, item in enumerate(tracks):
        # NOTE(review): result bound to `mag` (typo for `msg`?) and never
        # checked; `album` may be None on failure — confirm downstream.
        mag, album = API.getAlbum(item.album.id)
        # playlist-relative numbering used by the path builders
        item.trackNumberOnPlaylist = index + 1
        __downloadTrack__(conf, item, album, obj)
    for item in videos:
        __downloadVideo__(conf, item, None)
def file(user, conf, string):
    """Treat `string` as a text file of links and download every entry.

    Blank lines and lines starting with '#' or '[' are skipped.
    """
    content = aigpy.file.getContent(string)
    if aigpy.string.isNull(content):
        Printf.err("Nothing can read!")
        return
    for line in content.split('\n'):
        if aigpy.string.isNull(line):
            continue
        if line[0] in ('#', '['):
            continue
        start(user, conf, line)
def start(user, conf, string):
    """Entry point: resolve each space-separated item (link/ID/file path) and download it."""
    __loadAPI__(user)
    if aigpy.string.isNull(string):
        Printf.err('Please enter something.')
        return
    strings = string.split(" ")
    for item in strings:
        if aigpy.string.isNull(item):
            continue
        # an existing local file is treated as a list of links to process
        if os.path.exists(item):
            file(user, conf, item)
            return
        msg, etype, obj = API.getByString(item)
        if etype == Type.Null or not aigpy.string.isNull(msg):
            Printf.err(msg + " [" + item + "]")
            return
        # dispatch on the resolved entity type
        if etype == Type.Album:
            __album__(conf, obj)
        if etype == Type.Track:
            __track__(conf, obj)
        if etype == Type.Video:
            # videos use separate credentials/session
            __loadVideoAPI__(user)
            __video__(conf, obj)
        if etype == Type.Artist:
            __artist__(conf, obj)
        if etype == Type.Playlist:
            __playlist__(conf, obj)
|
python
|
"""
plots how total workseting set increase over time
"""
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
from utils.common import *
import bisect
# Slab class sizes in bytes (memcached-style growth factors), used to round
# each object size up to the slab class that would actually store it.
SLAB_SIZES = [96, 120, 152, 192, 240, 304, 384, 480, 600, 752, 944, 1184, 1480, 1856, 2320, 2904, 3632, 4544, 5680, 7104, 8880,
              11104, 13880, 17352, 21696, 27120, 33904, 42384, 52984, 66232, 82792, 103496, 129376, 161720, 202152, 252696,
              315872, 394840, 524288, 655360, 819200, 1024000, 1280000, 1600000, 2000000, 2500000, 3125000, 3906250,
              ]
def _cal_total_workingset_size(trace_reader, window=300, consider_ttl=True, slab_sizes=None):
    """ calculate how working set size change over time

    Returns a list of working-set sizes (bytes), one sample per `window`
    seconds of trace time. When `consider_ttl` is set, objects are removed
    from the working set once their TTL expires. When `slab_sizes` is given,
    each object size is rounded up to its slab class. Results are cached via
    load_metadata/save_metadata keyed on the parameters and trace name.
    """
    metadata_name = "ttl_w{}_{}{}_{}.pickle".format(window, consider_ttl, "_slab" if slab_sizes is not None else "", trace_reader.trace_path.split("/")[-1])
    loaded = load_metadata(metadata_name)
    if loaded is not None:
        return loaded
    ttl_obj = defaultdict(list)  # expiry timestamp -> the objects that expire then
    workingset = {}  # obj -> size
    workingset_size = 0
    workingset_size_list = []
    sz_to_slab_mapping = {}  # memoized size -> slab-class size lookups
    start_ts, current_ts, last_window_ts = -1, 0, 0
    for req in trace_reader:
        current_ts = req.real_time
        if start_ts == -1:
            start_ts = req.real_time
        if req.op == "set" or req.op == "add":
            if req.obj_id not in workingset:
                sz = req.obj_size
                if slab_sizes is not None:
                    # round the object size up to the slab that stores it
                    if sz not in sz_to_slab_mapping:
                        # NOTE(review): bisect_right skips a slab that equals
                        # sz exactly (96 -> 120) and raises IndexError when
                        # sz exceeds the largest slab — confirm intended.
                        sz_slab = slab_sizes[bisect.bisect_right(slab_sizes, sz)]
                        sz_to_slab_mapping[sz] = sz_slab
                        sz = sz_slab
                    else:
                        sz = sz_to_slab_mapping[sz]
                workingset_size += sz
                workingset[req.obj_id] = sz
                if consider_ttl and req.ttl != 0:
                    ttl_obj[current_ts+req.ttl].append(req.obj_id)
        # expire every object whose TTL ends at this timestamp
        if consider_ttl and current_ts in ttl_obj:
            for obj in ttl_obj[current_ts]:
                workingset_size -= workingset[obj]
                del workingset[obj]
            del ttl_obj[current_ts]
        # emit one sample per window boundary (at most once per timestamp)
        if (req.real_time - start_ts) % window == 0 and req.real_time != last_window_ts:
            workingset_size_list.append(workingset_size)
            last_window_ts = req.real_time
    save_metadata(workingset_size_list, metadata_name)
    trace_reader.reset()
    return workingset_size_list
def plot_total_workingset_size(trace_reader, window, consider_ttl=True, slab_sizes=None):
    """Plot working-set size over time (no-ttl vs ttl, optionally slab-rounded).

    Returns a dict mapping curve label -> final working-set size.
    """
    figname = "{}/{}_{}_workingset".format(FIG_DIR, trace_reader.trace_path.split("/")[-1], window)
    if consider_ttl:
        figname = "{}_ttl".format(figname)
    if slab_sizes is not None and slab_sizes is not False:
        figname = "{}_slab".format(figname)
    if slab_sizes is True:
        # True means "use the default slab size table"
        slab_sizes = SLAB_SIZES
    n_color = 2
    if slab_sizes:
        n_color = 4
    plt.set_n_colors(n_color)
    ret_dict = {}
    # baseline curve: ignore TTL, raw object sizes
    workingset_size_list = _cal_total_workingset_size(trace_reader, window, False, slab_sizes=None)
    plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
             [sz/MB for sz in workingset_size_list], nomarker=True, label="no-ttl")
    ret_dict["no-ttl"] = workingset_size_list[-1]
    if consider_ttl:
        workingset_size_list = _cal_total_workingset_size(trace_reader, window, True, slab_sizes=None)
        plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
                 [sz/MB for sz in workingset_size_list], nomarker=True, label="ttl")
        ret_dict["ttl"] = workingset_size_list[-1]
    if slab_sizes:
        # repeat both curves with sizes rounded up to slab classes
        workingset_size_list = _cal_total_workingset_size(trace_reader, window, False, slab_sizes=slab_sizes)
        plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
                 [sz/MB for sz in workingset_size_list], nomarker=True, label="no-ttl-slab")
        ret_dict["no-ttl-slab"] = workingset_size_list[-1]
        workingset_size_list = _cal_total_workingset_size(trace_reader, window, True, slab_sizes=slab_sizes)
        plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
                 [sz/MB for sz in workingset_size_list], nomarker=True, label="ttl-slab")
        ret_dict["ttl-slab"] = workingset_size_list[-1]
    # switch to log scale when TTL shrinks the working set by > 100x
    if "ttl" in ret_dict and ret_dict["no-ttl"]/ret_dict["ttl"] > 100:
        plt.yscale("log")
    plt.xlabel("Time (hour)")
    plt.ylabel("Working set size (MB)")
    # plt.ylabel("Working set size (# million Obj)")
    plt.legend()
    plt.grid(linestyle="--")
    plt.savefig(figname, no_save_plot_data=True)
    plt.clf()
    return ret_dict
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--trace", type=str, help="trace path")
ap.add_argument("--window", type=int, default=300, help="window size")
p = ap.parse_args()
reader = TwrShortBinTraceReader(p.trace)
plot_total_workingset_size(reader, p.window)
|
python
|
from setuptools import setup

# Packaging metadata for the `carpet` package.
setup(name='carpet',
      version='2021',
      # description='',
      url='https://cfaed.tu-dresden.de/friedrich-home',
      author='Anton Solovev, Benjamin M Friedrich',
      license='MIT',
      packages=['carpet'],
      zip_safe=False)
|
python
|
'''
Convert a neighbors file to human-readable format,
optionally including preferred string expansion.
'''
from hedgepig_logger import log
from .. import nn_io
if __name__ == '__main__':
    def _cli():
        """Parse and validate command-line options; exits on missing required args."""
        import optparse
        parser = optparse.OptionParser(usage='Usage: %prog')
        parser.add_option('-i', '--input', dest='inputf',
                help='(REQUIRED) input neighbors file')
        parser.add_option('-o', '--output', dest='outputf',
                help='(REQUIRED) output remapped neighbors file')
        parser.add_option('-v', '--vocab', dest='vocabf',
                help='(REQUIRED) neighbor ID <-> key mapping file')
        parser.add_option('-k', '--nearest-neighbors', dest='k',
                help='number of nearest neighbors to use in statistics (default: %default)',
                type='int', default=5)
        parser.add_option('-m', '--string-map', dest='string_mapf',
                help='file mapping embedding keys to strings')
        parser.add_option('--with-distances', dest='with_distances',
                action='store_true', default=False,
                help='neighbor files have distance information')
        parser.add_option('-l', '--logfile', dest='logfile',
                help='name of file to write log contents to (empty for stdout)',
                default=None)
        (options, args) = parser.parse_args()
        if not options.inputf:
            parser.error('Must provide --input')
        elif not options.outputf:
            parser.error('Must provide --output')
        elif not options.vocabf:
            parser.error('Must provide --vocab')
        return options
    options = _cli()

    # log the effective configuration up front
    log.start(options.logfile)
    log.writeConfig([
        ('Input neighbors file', options.inputf),
        ('Remapped neighbors file', options.outputf),
        ('Number of nearest neighbors to pull', options.k),
        ('String map file', options.string_mapf),
        ('Vocab file', options.vocabf),
        ('Using distance information', options.with_distances),
    ], 'Neighborhood file remapping')

    # load ID<->key vocab, then the neighbor sets themselves
    node_map = nn_io.readNodeMap(options.vocabf)
    neighbors = nn_io.readNeighborFile(
        options.inputf,
        k=options.k,
        node_map=node_map,
        with_distances=options.with_distances
    )

    # optional key -> preferred-string expansion for human-readable output
    if options.string_mapf:
        log.writeln('Reading string map from %s...' % options.string_mapf)
        string_map = nn_io.readStringMap(options.string_mapf, lower_keys=True)
        log.writeln('Mapped strings for {0:,} keys.\n'.format(len(string_map)))
        remap_key = lambda key: '%s (%s)' % (key, string_map.get(key, '-UNKNOWN-'))
    else:
        string_map = None
        remap_key = lambda key: key

    log.writeln('Writing remapped neighbor info to %s...' % options.outputf)
    log.track(' >> Wrote {0:,} neighbor sets', writeInterval=100)
    with open(options.outputf, 'w') as stream:
        for (key, nbrs) in neighbors.items():
            # remap neighbor keys (and keep distances when present)
            if options.with_distances:
                nbrs = [
                    (remap_key(k), dist)
                    for (k,dist) in nbrs
                ]
            else:
                nbrs = [remap_key(k) for k in nbrs]
            stream.write('--------------------------------\n')
            stream.write('{0}\n'.format(remap_key(key)))
            for nbr_info in nbrs:
                if options.with_distances:
                    stream.write('  {0} --> {1}\n'.format(
                        remap_key(nbr_info[0]),
                        nbr_info[1]
                    ))
                else:
                    stream.write('  {0}\n'.format(remap_key(nbr_info)))
            #nn_io.writeNeighborFileLine(
            #    stream,
            #    remap_key(key),
            #    nbrs,
            #    with_distances=options.with_distances
            #)
            log.tick()
    log.flushTracker()
    log.stop()
|
python
|
from django.db import models
# Create your models here.
class Book(models.Model):
    """A book record with a title and a price (max 8 digits, 2 decimal places)."""
    title = models.CharField(max_length=32)
    price = models.DecimalField(max_digits=8, decimal_places=2)
|
python
|
#!/usr/bin/env python
import cv2
# from opencvutils.video import Camera
# Open the default camera and show frames until ESC is pressed.
cam = cv2.VideoCapture(0)
# cam.init(cameraNumber=0, win=(640, 480))
while True:
    ret, img = cam.read()
    # Fix: check the capture result explicitly instead of wrapping the loop
    # body in a bare `except` — the old code silently swallowed every error
    # (including KeyboardInterrupt-adjacent bugs) and would pass a None
    # frame to imshow.
    if not ret:
        break
    cv2.imshow('img', img)
    if cv2.waitKey(1) == 27:
        break  # esc to quit
cam.release()  # fix: release the capture device (resource leak)
cv2.destroyAllWindows()
print('bye ...')
|
python
|
import requests
import json
from xlwt import *
url = "https://api.github.com/users/andrewbeattycourseware/followers"
response = requests.get(url)
data = response.json()
filename = 'githubusers.json'
print(data)
for car in data:
print(car)
#write the Json to a file.
#import json
if filename:
with open(filename, 'w') as f:
json.dump(data,f,indent=4)
w = Workbook()
ws = w.add_sheet('githubusers')
row = 0
ws.write(row,0,"login")
ws.write(row,1,"id")
ws.write(row,2,"node_id")
ws.write(row,3,"avatar_url")
ws.write(row,4,"gravatar_id")
ws.write(row,5,"url")
ws.write(row,6,"html_url")
ws.write(row,7,"followers_url")
ws.write(row,8,"gists_url")
ws.write(row,9,"starred_url")
ws.write(row,10,"subscriptions_url")
ws.write(row,11,"organizations_url")
ws.write(row,12,"repos_url")
ws.write(row,13,"events_url")
ws.write(row,14,"received_events_url")
ws.write(row,15,"type")
ws.write(row,16,"site_admin")
row +=1
for car in data:
ws.write(row,0,car["login"])
ws.write(row,1,car["id"])
ws.write(row,2,car["node_id"])
ws.write(row,3,car["avatar_url"])
ws.write(row,4,car["gravatar_id"])
ws.write(row,5,car["url"])
ws.write(row,6,car["html_url"])
ws.write(row,7,car["followers_url"])
ws.write(row,8,car["gists_url"])
ws.write(row,9,car["starred_url"])
ws.write(row,10,car["subscriptions_url"])
ws.write(row,11,car["organizations_url"])
ws.write(row,12,car["repos_url"])
ws.write(row,13,car["events_url"])
ws.write(row,14,car["received_events_url"])
ws.write(row,15,car["type"])
ws.write(row,16,car["site_admin"])
row +=1
w.save('githubusers.xls')
print(response.status_code)
print(response.text)
|
python
|
from .cell_level_analysis import CellLevelAnalysis
from .pixel_level_analysis import PixellevelAnalysis
from .feature_extraction import InstanceFeatureExtraction
from .background_extraction import ExtractBackground
|
python
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2014, pietro partescano
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Redis.Cache.Py nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import redis
from redis.cache import config, errors, utilities
class _RedisDal(object):
    """Thin data-access wrapper around a StrictRedis connection.

    Every method validates its key argument (non-empty after strip) and
    re-raises any Redis error to the caller unchanged.
    """
    def __init__(self):
        # connection parameters come from the package-level config module
        self._db = redis.StrictRedis(host=config.RedisConnectionString, port=config.RedisConnectionStringPort, db=config.RedisDatabase)
        pass
    def SetTTL(self, key, ttl):
        """Set a TTL (seconds) on an existing key; returns the Redis EXPIRE result."""
        if( key is None or str(key).strip() == ""):
            raise errors.ArgumentError("Parameter is invalid (key)")
        try:
            result = self._db.expire(key, ttl)
            return result
        except (Exception):
            raise
        finally:
            pass
        pass
    def DeleteTTL(self, key):
        """Remove the TTL from a key (PERSIST); returns the Redis result."""
        if( key is None or str(key).strip() == ""):
            raise errors.ArgumentError("Parameter is invalid (key)")
        try:
            result = self._db.persist(key)
            return result
        except (Exception):
            raise
        finally:
            pass
        pass
    def ItemDelete(self, key):
        """Delete a key; returns the number of keys removed."""
        if( key is None or str(key).strip() == ""):
            raise errors.ArgumentError("Parameter is invalid (key)")
        try:
            result = self._db.delete(key)
            return result
        except (Exception):
            raise
        finally:
            pass
        pass
    def ItemExist(self, key):
        """Return whether the key exists (Redis EXISTS)."""
        if( key is None or str(key).strip() == ""):
            raise errors.ArgumentError("Parameter is invalid (key)")
        try:
            result = self._db.exists(key)
            return result
        except (Exception):
            raise
        finally:
            pass
        pass
    def AddListItem(self, key, value):
        """Append a value to the list stored at key (RPUSH); returns new length."""
        if( key is None or str(key).strip() == "" or value is None or str(value).strip() == ""):
            raise errors.ArgumentError("Parameter is invalid (key or value)")
        try:
            #value = utilities._ConvertObjToRedisValue(value) #TODO
            result = self._db.rpush(key, value)
            return result
        except (Exception):
            raise
        finally:
            pass
        pass
    def UpdateTTL_Item(self, key, ttl):
        """Overwrite list element 0 (the stored TTL slot) with a new ttl value."""
        if( key is None or str(key).strip() == "" or ttl is None or str(ttl).strip() == ""):
            raise errors.ArgumentError("Parameter is invalid (key or value)")
        try:
            result = self._db.lset(key, 0, ttl)
            return result
        except (Exception):
            raise
        finally:
            pass
        pass
    def AddListItemWithTTL(self, key, value, value_ttl):
        """Push the TTL marker then the value onto the list at key.

        Layout: element 0 is value_ttl, element 1 is the value — matching
        what GetListItem reads back.
        """
        if( key is None or str(key).strip() == ""
                or value is None or str(value).strip() == ""
                or value_ttl is None or str(value_ttl).strip() == ""
                ):
            raise errors.ArgumentError("Parameter is invalid (key or value or value_ttl)")
        try:
            result = self._db.rpush(key, value_ttl)
            result = self._db.rpush(key, value)
            return result
        except (Exception):
            raise
        finally:
            pass
        pass
    def GetListItem(self, key):
        """Return the (ttl, value) pair stored at key, or None when absent.

        NOTE(review): lrange(key, 0, 1) can return a single element; in that
        case val[1] raises IndexError — confirm lists are always written in
        pairs via AddListItemWithTTL.
        """
        if( key is None or str(key).strip() == "" ):
            raise errors.ArgumentError("Parameter is invalid (key)")
        try:
            val = self._db.lrange(key, 0, 1)
            if(val is None or len(val)==0):
                result = None
            else:
                #val = utilities._ConvertRedisValueToObject(value, t) #TODO
                result = (val[0], val[1])
            return result
        except (Exception):
            raise
        finally:
            pass
        pass
|
python
|
import xml.etree.ElementTree as ET
import urllib2
from sqlalchemy import and_
from datetime import datetime
from .ConnectDB_ParseExcel import *
from stp0_loadCVs import Load_CV_To_DB
from stp4_loadDataValue.helper import LoadingUtils
class CUAHSI_importer():
    '''
    This class is used to get data putting to WaMDaM database
    from data responded of web.

    It inserts the fixed CUAHSI provenance rows (Organizations, People,
    Sources, Methods, ResourceTypes, ObjectTypes, MasterNetworks, Scenarios)
    followed by the per-response rows (Attributes, Instances, ValuesMapper,
    Mappings, ScenarioMappings, TimeSeries, TimeSeriesValues).
    '''
    def __init__(self):
        # Shared DB session used by every query/insert in load_data().
        self.setup = DB_Setup()
        self.__session = self.setup.get_session()

    def load_data(self, response_data):
        '''
        :param resphonse_string: responded data from web
        :return: None

        Each section below follows the same pattern: compute a candidate
        primary key as COUNT(*) + 1, look the record up by name (swallowing
        any lookup error), and insert only if no match was found.
        NOTE(review): the COUNT(*)+1 key scheme is race-prone under concurrent
        writers and breaks after deletions — confirm single-writer usage.
        '''
        # Firstly, load CV data
        instance_cvData = Load_CV_To_DB(None)
        instance_cvData.load_data()
        instance_cvData.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data to add within Organizations table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Organizations;')
        organizationID = 0
        for n in recordCountResult:
            organizationID = int(n[0])
        organizationID += 1
        organizationName = 'CUAHSI' #response_data.timeSeries[0].values[0].source[0].organization
        organizationWebpage = 'http://hydroportal.cuahsi.org/nwisdv/cuahsi_1_1.asmx?op=GetValuesObject'
        # Check whether same name exist in Organizations table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.Organizations).filter(
                SqlAlchemy.Organizations.OrganizationName == organizationName).first().OrganizationID
        except:
            # .first() returns None when no row matches; the AttributeError is
            # swallowed and `exsting` stays None (i.e. "not found").
            pass
        if exsting is None:
            org = SqlAlchemy.Organizations()
            # org.OrganizationID = organizationID
            org.OrganizationName = organizationName
            org.OrganizationWebpage = organizationWebpage
            self.setup.push_data(org)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within People table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM People;')
        personID = 0
        for n in recordCountResult:
            personID = int(n[0])
        personID += 1
        personName = "Unknown"
        # Check whether same name exist in People table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.People).filter(
                SqlAlchemy.People.PersonName == personName).first().PersonID
        except:
            pass
        if exsting is None:
            people = SqlAlchemy.People()
            people.PersonID = personID
            people.PersonName = personName
            people.OrganizationID = organizationID
            self.setup.push_data(people)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within Sources table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Sources;')
        sourceID = 0
        for n in recordCountResult:
            sourceID = int(n[0])
        sourceID += 1
        source_name = "CUAHSI Water One Flow"
        # Check whether same name exist in Sources table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.Sources).filter(
                SqlAlchemy.Sources.SourceName == source_name).first().SourceID
        except:
            pass
        if exsting is None:
            sources = SqlAlchemy.Sources()
            sources.SourceID = sourceID
            sources.SourceName = source_name
            sources.SourceWebpage = "http://hydroportal.cuahsi.org/nwisdv/cuahsi_1_1.asmx?WSDL"
            sources.PersonID = personID
            self.setup.push_data(sources)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within Methods table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Methods;')
        methodID = 0
        for n in recordCountResult:
            methodID = int(n[0])
        methodID += 1
        method_name = "CUAHSI/ODM"
        # Check whether same name exist in Methods table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.Methods).filter(
                SqlAlchemy.Methods.MethodName == method_name).first().MethodID
        except:
            pass
        if exsting is None:
            methods = SqlAlchemy.Methods()
            methods.MethodID = methodID
            methods.MethodName = method_name
            methods.MethodWebpage = "https://water.usbr.gov/query.php"
            methods.MethodTypeCV = "Derivation"
            methods.PersonID = personID
            self.setup.push_data(methods)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within ResourceTypes table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM ResourceTypes;')
        resourceTypeID = 0
        for n in recordCountResult:
            resourceTypeID = int(n[0])
        resourceTypeID += 1
        resource_type = "CUAHSI web service"
        # Check whether same name exist in ResourceTypes table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.ResourceTypes).filter(
                SqlAlchemy.ResourceTypes.ResourceType == resource_type).first().ResourceTypeID
        except:
            pass
        if exsting is None:
            resourceTypes = SqlAlchemy.ResourceTypes()
            resourceTypes.ResourceTypeID = resourceTypeID
            resourceTypes.ResourceType = resource_type
            resourceTypes.ResourceTypeAcronym = "CUAHSI"
            resourceTypes.MethodID = methodID
            self.setup.push_data(resourceTypes)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within ObjectTypes table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM ObjectTypes;')
        objectTypeID = 0
        for n in recordCountResult:
            objectTypeID = int(n[0])
        objectTypeID += 1
        objecttype = "site"
        # Check whether same name exist in ObjectTypes table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.ObjectTypes).filter(
                SqlAlchemy.ObjectTypes.ObjectType == objecttype).first().ObjectTypeID
        except:
            pass
        if exsting is None:
            objectTypes = SqlAlchemy.ObjectTypes()
            objectTypes.ObjectTypeID = objectTypeID
            objectTypes.ObjectType = objecttype
            objectTypes.ObjectTypeCV = 'Site'
            objectTypes.ObjectTypologyCV = "Node"
            objectTypes.ResourceTypeID = resourceTypeID
            self.setup.push_data(objectTypes)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within Attributes table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Attributes;')
        attributesID = 0
        for n in recordCountResult:
            attributesID = int(n[0])
        attributesID += 1
        # The attribute name comes straight from the WaterML response.
        attribute_name = response_data.timeSeries[0].variable.variableName
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.Attributes).filter(
                SqlAlchemy.Attributes.AttributeName == attribute_name).first().AttributeID
        except:
            pass
        if exsting is None:
            attributes = SqlAlchemy.Attributes()
            attributes.AttributeID = attributesID
            attributes.AttributeName = attribute_name
            try:
                attributes.ObjectTypeID = self.__session.query(SqlAlchemy.ObjectTypes).filter(
                    SqlAlchemy.ObjectTypes.ObjectType == 'site').first().ObjectTypeID
            except:
                raise Exception('Error \n Could not find {} in ObjectTypes'
                                .format('site'))
            # NOTE(review): units are hard-coded to ft3/s (flow) regardless of
            # the response's variable — confirm only discharge series are loaded.
            attributes.UnitName = 'ft3/s'
            if attributes.UnitName=='ft3/s':
                attributes.UnitNameCV = 'cubic foot per second'
            attributes.AttributeDataTypeCV = 'TimeSeries'
            attributes.AttributeNameCV = 'Flow'
            self.setup.push_data(attributes)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within MasterNetworks table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM MasterNetworks;')
        masterNetworkID = 0
        for n in recordCountResult:
            masterNetworkID = int(n[0])
        masterNetworkID += 1
        masternetwork_name = "CUAHSI"
        # Check whether same name exist in MasterNetworks table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.MasterNetworks).filter(
                SqlAlchemy.MasterNetworks.MasterNetworkName == masternetwork_name).first().MasterNetworkID
        except:
            pass
        if exsting is None:
            masterNetworks = SqlAlchemy.MasterNetworks()
            masterNetworks.MasterNetworkID = masterNetworkID
            masterNetworks.MasterNetworkName = masternetwork_name
            self.setup.push_data(masterNetworks)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within Scenarios table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Scenarios;')
        scenarioID = 0
        for n in recordCountResult:
            scenarioID = int(n[0])
        scenarioID += 1
        scenario_name = "CUAHSI data as-is"
        # Check whether same name exist in Scenarios table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.Scenarios).filter(
                SqlAlchemy.Scenarios.ScenarioName == scenario_name).first().ScenarioID
        except:
            pass
        if exsting is None:
            scenarios = SqlAlchemy.Scenarios()
            scenarios.ScenarioID = scenarioID
            # NOTE(review): the dedup lookup above uses "CUAHSI data as-is"
            # but the inserted row is named 'AS-is', so the lookup can never
            # match the inserted row on later runs, and get_ids_from_names
            # below is given scenario_name ("CUAHSI data as-is") — confirm
            # which name is intended.
            scenarios.ScenarioName = 'AS-is'
            scenarios.MasterNetworkID = masterNetworkID
            scenarios.ScenarioParentName = 'self'
            scenarios.ScenarioType = 'Baseline'
            scenarios.ScenarioStartDate = '1900-10-01'
            scenarios.ScenarioEndDate = '2020-10-01'
            scenarios.TimeStepValue = 1
            scenarios.TimeStepUnitCV = 'day'
            self.setup.push_data(scenarios)
            self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within Instances table
        recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Instances;')
        instanceID = 0
        for n in recordCountResult:
            instanceID = int(n[0])
        instanceID += 1
        # The node instance is the CUAHSI site that produced the series.
        node_instance_name = response_data.timeSeries[0].sourceInfo.siteName
        # not working
        # node_instance_Longitude = response_data.timeSeries[0].sourceInfo.Longitude
        # Check whether same name exist in Instances table
        exsting = None
        try:
            exsting = self.__session.query(SqlAlchemy.Instances).filter(
                SqlAlchemy.Instances.InstanceName == node_instance_name).first().InstanceID
        except:
            pass
        if exsting is None:
            instances = SqlAlchemy.Instances()
            instances.InstanceID = instanceID
            instances.InstanceName = node_instance_name
            # Site-specific hard-coded metadata (CV name, coordinates) for
            # two known Bear River gauges; other sites get an empty CV name.
            if node_instance_name=='BEAR RIVER BL STEWART DAM NR MONTPELIER, ID':
                instances.InstanceNameCV = 'USGS 10046500 BEAR RIVER BL STEWART DAM NR MONTPELIER, ID'
            else:
                instances.InstanceNameCV =''
            if node_instance_name=='BEAR RIVER NEAR UTAH-WYOMING STATE LINE':
                instances.Longitude_x='-111.062'
                instances.Latitude_y = '42.211'
            self.setup.push_data(instances)
            self.setup.add_data()
        instance_name = node_instance_name

        #////////////////////////////////////////////////////////////////////#
        # Load data for ValuesMapper, Mapping, ScenarioMapping, TimeSeries and TimeSeriesValues table
        # Add data for ValuesMapper
        valuesMapper = SqlAlchemy.ValuesMapper()
        try:
            # Next ID = current max + 1; falls back to 1 on an empty table.
            valuesMapper.ValuesMapperID = int(self.__session.query(SqlAlchemy.ValuesMapper).order_by(
                SqlAlchemy.ValuesMapper.ValuesMapperID.desc()).first().ValuesMapperID)
            valuesMapper.ValuesMapperID += 1
        except:
            valuesMapper.ValuesMapperID = 1
        self.setup.push_data(valuesMapper)
        self.setup.add_data()

        #///////////////////////////////////#
        # Add data for Mapping
        attrib_id, instance_id, scenario_id, source_id, method_id = LoadingUtils.get_ids_from_names({'ObjectType': objecttype,
                                                                                                     'AttributeName': attribute_name,
                                                                                                     'InstanceName': instance_name,
                                                                                                     'ScenarioName': scenario_name,
                                                                                                     'SourceName': source_name,
                                                                                                     'MethodName': method_name}, self.__session)
        dataval_map = SqlAlchemy.Mappings()
        dataval_map.AttributeID = attrib_id
        dataval_map.InstanceID = instance_id
        dataval_map.SourceID = source_id
        dataval_map.MethodID = method_id
        dataval_map.ValuesMapperID = valuesMapper.ValuesMapperID
        self.setup.push_data(dataval_map)
        self.setup.add_data()

        #///////////////////////////////////#
        # Add data for ScenarioMappings
        scenariomap = SqlAlchemy.ScenarioMappings()
        scenariomap.ScenarioID = scenario_id
        datavalues = self.__session.query(SqlAlchemy.Mappings).filter(
            and_(
                SqlAlchemy.Mappings.AttributeID == attrib_id,
                SqlAlchemy.Mappings.InstanceID == instance_id,
                SqlAlchemy.Mappings.SourceID == source_id,
                SqlAlchemy.Mappings.MethodID == method_id
            )
        ).first()
        if datavalues:
            scenariomap.MappingID = datavalues.MappingID
        else:
            # NOTE(review): this else branch re-runs the identical query that
            # just returned a falsy result, so .first() is None and this line
            # raises AttributeError — confirm it is unreachable in practice.
            scenariomap.MappingID = self.__session.query(SqlAlchemy.Mappings).filter(
                and_(
                    SqlAlchemy.Mappings.AttributeID == attrib_id,
                    SqlAlchemy.Mappings.InstanceID == instance_id,
                    SqlAlchemy.Mappings.SourceID == source_id,
                    SqlAlchemy.Mappings.MethodID == method_id
                )
            ).first().MappingID
        # if the current mappingid - scenarioid does not exist, a new
        # one is created else the old is reused.
        try:
            test = self.__session.query(SqlAlchemy.ScenarioMappings).filter(
                and_(
                    SqlAlchemy.ScenarioMappings.MappingID == scenariomap.MappingID,
                    SqlAlchemy.ScenarioMappings.ScenarioID == scenariomap.ScenarioID
                )
            ).first().ScenarioMappingID
        except:
            self.setup.push_data(scenariomap)
            self.setup.add_data()

        #///////////////////////////////////#
        # Add data within TimeSeries table
        timeSeries = SqlAlchemy.TimeSeries()
        timeSeries.YearType = 'CalendarYear'
        timeSeries.AggregationStatisticCV = "Average"
        timeSeries.AggregationInterval = 1
        timeSeries.IntervalTimeUnitCV = "day"
        timeSeries.ValuesMapperID = valuesMapper.ValuesMapperID
        self.setup.push_data(timeSeries)
        self.setup.add_data()

        #////////////////////////////////////////////////////////////////////#
        # Add data within TimeSeriesValues table
        values = response_data.timeSeries[0].values[0].value
        for value in values:
            timeSeriesValues = SqlAlchemy.TimeSeriesValues()
            # TimeSeriesID is populated on timeSeries by the flush above.
            timeSeriesValues.TimeSeriesID = timeSeries.TimeSeriesID
            timeSeriesValues.DateTimeStamp = datetime(value._dateTime.year, value._dateTime.month, value._dateTime.day,
                                                      value._dateTime.hour, value._dateTime.minute, value._dateTime.second)
            try:
                timeSeriesValues.DataValue = value.value
            except:
                # Missing data point: store 0.0 rather than failing the load.
                timeSeriesValues.DataValue = 0.0
            self.setup.push_data(timeSeriesValues)
            self.setup.add_data()
        #////////////////////////////////////////////////////////////////////#
|
python
|
"""Fsubs config."""
|
python
|
"""Utility functions to check attributes returned in API responses and read from the AWS S3."""
import datetime
import re
def check_attribute_presence(node, attribute_name):
    """Check the attribute presence in the given dictionary or list.

    To be used to check the deserialized JSON data etc.

    Raises AssertionError (listing the attributes actually found) when the
    attribute is missing; returns None otherwise.
    """
    # For a list the members themselves are the "attributes"; for a mapping
    # its keys are.  isinstance() also accepts list subclasses, which the
    # previous `type(node) is list` check did not.
    found_attributes = node if isinstance(node, list) else node.keys()
    assert attribute_name in node, \
        "'%s' attribute is expected in the node, " \
        "found: %s attributes " % (attribute_name, ", ".join(found_attributes))
def check_attributes_presence(node, attribute_names):
    """Check the presence of all attributes in the dictionary or in the list.

    To be used to check the deserialized JSON data etc.

    Raises AssertionError on the first missing attribute; returns None when
    all attributes are present.
    """
    # Hoisted out of the loop: the found-attributes listing is invariant,
    # the original recomputed it on every iteration.
    found_attributes = node if isinstance(node, list) else node.keys()
    for attribute_name in attribute_names:
        assert attribute_name in node, \
            "'%s' attribute is expected in the node, " \
            "found: %s attributes " % (attribute_name, ", ".join(found_attributes))
def check_and_get_attribute(node, attribute_name):
    """Check the attribute presence and if the attribute is found, return its value."""
    # Presence check inlined from check_attribute_presence, message included.
    found_attributes = node if type(node) is list else node.keys()
    assert attribute_name in node, \
        "'%s' attribute is expected in the node, " \
        "found: %s attributes " % (attribute_name, ", ".join(found_attributes))
    return node[attribute_name]
def check_uuid(uuid):
    """Check if the string contains a proper UUID.

    Supported format: 71769af6-0a39-4242-94be-1f84f04c8a56
    Returns True for a valid version-4 UUID, False otherwise.
    """
    # BUG FIX: the pattern must be a raw string — '\Z' (and '\d'-style
    # sequences generally) in a plain literal is an invalid escape that
    # triggers a SyntaxWarning/DeprecationWarning on modern Python.
    regex = re.compile(
        r'^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z',
        re.I)
    match = regex.match(uuid)
    return bool(match)
def check_timestamp(timestamp):
    """Check if the string contains proper timestamp value.

    The following four formats are supported:
    2017-07-19 13:05:25.041688
    2017-07-17T09:05:29.101780
    2017-07-19 13:05:25
    2017-07-17T09:05:29
    """
    assert timestamp is not None
    assert isinstance(timestamp, str)

    # Some attributes carry a timestamp without the millisecond part, so a
    # zero fraction is appended to normalize them before parsing.
    if len(timestamp) == len("YYYY-mm-dd HH:MM:SS") and '.' not in timestamp:
        timestamp += '.0'

    assert len(timestamp) >= len("YYYY-mm-dd HH:MM:SS.")

    # The date and time parts are separated either by a space or by 'T'
    # (character at index 10); pick the matching strptime format.
    # (See https://www.tutorialspoint.com/python/time_strptime.htm for the
    # format directives.)
    separator = timestamp[10]
    timeformat = "%Y-%m-%dT%H:%M:%S.%f" if separator == "T" else "%Y-%m-%d %H:%M:%S.%f"

    # Parsing raises ValueError for a malformed timestamp; a clean parse is
    # the success condition.
    datetime.datetime.strptime(timestamp, timeformat)
def check_job_token_attributes(token):
    """Check that the given JOB token contains all required attributes."""
    # Every rate-limit attribute must be present and non-negative.
    for attribute in ("limit", "remaining", "reset"):
        assert attribute in token
        assert int(token[attribute]) >= 0
def check_status_attribute(data):
    """Check the value of the status attribute, that should contain just two allowed values."""
    # Presence check inlined from check_and_get_attribute.
    found_attributes = data if type(data) is list else data.keys()
    assert "status" in data, \
        "'%s' attribute is expected in the node, " \
        "found: %s attributes " % ("status", ", ".join(found_attributes))
    assert data["status"] in ("success", "error")
def check_summary_attribute(data):
    """Check the summary attribute that can be found all generated metadata."""
    # Presence check inlined from check_and_get_attribute.
    found_attributes = data if type(data) is list else data.keys()
    assert "summary" in data, \
        "'%s' attribute is expected in the node, " \
        "found: %s attributes " % ("summary", ", ".join(found_attributes))
    assert type(data["summary"]) in (list, dict)
def release_string(ecosystem, package, version=None):
    """Construct a string with ecosystem:package or ecosystem:package:version tuple."""
    # A missing version renders as the literal "None", matching str.format.
    return f"{ecosystem}:{package}:{version}"
def check_release_attribute(data, ecosystem, package, version=None):
    """Check the content of _release attribute.

    Check that the attribute _release contains proper release string for given ecosystem
    and package.
    """
    # Presence check inlined from check_attribute_presence.
    found_attributes = data if type(data) is list else data.keys()
    assert "_release" in data, \
        "'%s' attribute is expected in the node, " \
        "found: %s attributes " % ("_release", ", ".join(found_attributes))
    # Expected form inlined from release_string().
    expected = "{e}:{p}:{v}".format(e=ecosystem, p=package, v=version)
    assert data["_release"] == expected
def check_schema_attribute(data, expected_schema_name, expected_schema_version):
    """Check the content of the schema attribute.

    This attribute should contains dictionary with name and version that are checked as well.
    """
    def _get(node, attribute_name):
        # Inlined equivalent of check_and_get_attribute.
        found = node if type(node) is list else node.keys()
        assert attribute_name in node, \
            "'%s' attribute is expected in the node, " \
            "found: %s attributes " % (attribute_name, ", ".join(found))
        return node[attribute_name]

    schema = _get(data, "schema")
    name = _get(schema, "name")
    version = _get(schema, "version")

    assert name == expected_schema_name, "Schema name '{n1}' is different from " \
        "expected name '{n2}'".format(n1=name, n2=expected_schema_name)
    # ATM we are able to check just one fixed version.
    assert version == expected_schema_version, "Schema version {v1} is different from expected " \
        "version {v2}".format(v1=version, v2=expected_schema_version)
def check_audit_metadata(data):
    """Check the metadata stored in the _audit attribute.

    Check if all common attributes can be found in the audit node
    in the component or package metadata.
    """
    check_attribute_presence(data, "_audit")
    audit = data["_audit"]

    check_attribute_presence(audit, "version")
    assert audit["version"] == "v1"

    # Both timestamps must be present and parseable.
    for stamp in ("started_at", "ended_at"):
        check_attribute_presence(audit, stamp)
        check_timestamp(audit[stamp])
def get_details_node(context):
    """Get content of details node, given it exists."""
    data = context.s3_data
    # Presence check inlined from check_and_get_attribute.
    found_attributes = data if type(data) is list else data.keys()
    assert 'details' in data, \
        "'%s' attribute is expected in the node, " \
        "found: %s attributes " % ('details', ", ".join(found_attributes))
    return data['details']
def check_cve_value(cve, with_score=False):
    """Check CVE values in CVE records.

    Accepts "CVE-YYYY-NNNN..." and, when with_score=True,
    "CVE-YYYY-NNNN...:score" (score 0.0-10.0).  Raises AssertionError for a
    malformed CVE, an out-of-range year, or an out-of-range score.
    """
    # BUG FIX: the patterns must be raw strings — '\d' in a plain literal is
    # an invalid escape sequence that warns on modern Python.
    if with_score:
        # please note that in graph DB, the CVE entries have the following format:
        # CVE-2012-1150:5.0
        # don't ask me why, but the score is stored in one field together with ID itself
        # the : character is used as a separator
        pattern = r"CVE-(\d{4})-\d{4,}:(\d+\.\d+)"
    else:
        pattern = r"CVE-(\d{4})-\d{4,}"
    match = re.fullmatch(pattern, cve)
    assert match is not None, "Improper CVE number %s" % cve

    year = int(match.group(1))
    current_year = datetime.datetime.now().year
    # well the lower limit is a bit arbitrary
    # (according to SRT guys it should be 1999)
    assert year >= 1999 and year <= current_year

    if with_score:
        score = float(match.group(2))
        assert score >= 0.0 and score <= 10.0
|
python
|
# UFO Lorentz structures for effective scalar -> gamma gamma vertices.
# Spins use UFO's 2s+1 convention: [3, 3, 1] means two vector photons plus a
# scalar.  The FTriPhoton* form factors take 2*P1.P2 as argument (presumably
# the di-photon invariant mass squared — confirm against the model's
# form-factor definitions elsewhere in the UFO files).

# CP-even structure, top-quark loop form factor.
gagaStop = Lorentz(name = 'gagaStop',
                   spins = [ 3, 3, 1 ],
                   structure = 'FTriPhotonTop(2*P(-1,1)*P(-1,2)) * (Metric(1,2)*P(-1,1)*P(-1,2) - P(2,1)*P(1,2))')

# CP-even structure, bottom-quark loop form factor.
gagaSbot = Lorentz(name = 'gagaSbot',
                   spins = [ 3, 3, 1 ],
                   structure = 'FTriPhotonBot(2*P(-1,1)*P(-1,2)) * (Metric(1,2)*P(-1,1)*P(-1,2) - P(2,1)*P(1,2))')

# CP-even structure, W-boson loop form factor.
gagaSW = Lorentz(name = 'gagaSW',
                 spins = [ 3, 3, 1 ],
                 structure = 'FTriPhotonW(2*P(-1,1)*P(-1,2)) * (Metric(1,2)*P(-1,1)*P(-1,2) - P(2,1)*P(1,2))')

# CP-even structure, scalar loop form factor.
gagaSS = Lorentz(name = 'gagaSS',
                 spins = [ 3, 3, 1 ],
                 structure = 'FTriPhotonS(2*P(-1,1)*P(-1,2)) * (Metric(1,2)*P(-1,1)*P(-1,2) - P(2,1)*P(1,2))')

# CP-odd (epsilon-tensor) structure, top-quark loop form factor.
gagaSOddtop = Lorentz(name = 'gagaSOddtop',
                      spins = [ 3, 3, 1 ],
                      structure = 'FTriPhotonOddTop(2*P(-1,1)*P(-1,2)) * (Epsilon(1,2,-1,-2)*P(-1,1)*P(-2,2))')

# CP-odd (epsilon-tensor) structure, bottom-quark loop form factor.
gagaSOddbot = Lorentz(name = 'gagaSOddbot',
                      spins = [ 3, 3, 1 ],
                      structure = 'FTriPhotonOddBot(2*P(-1,1)*P(-1,2)) * (Epsilon(1,2,-1,-2)*P(-1,1)*P(-2,2))')
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Zero-DCE++: Learning to Enhance Low-Light Image via Zero-Reference Deep Curve
Estimation
Zero-DCE++ has a fast inference speed (1000/11 FPS on single GPU/CPU for an
image with a size of 1200*900*3) while keeping the enhancement performance of
Zero-DCE.
References:
https://github.com/Li-Chongyi/Zero-DCE_extension
"""
from __future__ import annotations
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from onevision.factory import ACT_LAYERS
from onevision.factory import IMAGE_ENHANCEMENT
from onevision.factory import LOW_LIGHT_IMAGE_ENHANCEMENT
from onevision.factory import MODELS
from onevision.models.enhancement.image_enhancer import ImageEnhancer
from onevision.models.enhancement.zerodce.loss import CombinedLoss
from onevision.nn import DepthwiseConv
from onevision.nn import PointwiseConv
from onevision.type import Indexes
from onevision.type import Pretrained
from onevision.type import Tensors
__all__ = [
"ZeroDCEPP",
]
# MARK: - Modules
class CSDNTem(nn.Module):
    """Depthwise-separable convolution block used by Zero-DCE++.

    A depthwise convolution (one filter per input channel, padding 1) is
    followed by a pointwise 1x1 projection to `out_channels`.
    """

    # MARK: Magic Functions

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.dw_conv = DepthwiseConv(
            in_channels=in_channels, out_channels=in_channels, padding=1,
            groups=in_channels, bias=True,
        )
        self.pw_conv = PointwiseConv(
            in_channels=in_channels, out_channels=out_channels, padding=0,
            groups=1
        )

    # MARK: Forward Pass

    def forward(self, x: Tensor) -> Tensor:
        """Apply the depthwise convolution, then the pointwise projection."""
        return self.pw_conv(self.dw_conv(x))
# MARK: - ZeroDCE++

@MODELS.register(name="zerodce++")
@IMAGE_ENHANCEMENT.register(name="zerodce++")
@LOW_LIGHT_IMAGE_ENHANCEMENT.register(name="zerodce++")
class ZeroDCEPP(ImageEnhancer):
    """Zero-DCE++ low-light image enhancer.

    References:
        https://github.com/Li-Chongyi/Zero-DCE

    Args:
        scale_factor (int):
            Downscale factor applied to the input before curve estimation;
            the curve map is upsampled back afterwards. Default: `1`.
        channels (int):
            Number of feature channels in each CSDNTem block. Default: `32`.
        act:
            Activation module (or registry name resolved via ACT_LAYERS).
            Default: `nn.ReLU(inplace=True)`.
        name (str, optional):
            Name of the backbone. Default: `zerodce++`.
        out_indexes (Indexes):
            List of output tensors taken from specific layers' indexes.
            If `>= 0`, return the ith layer's output.
            If `-1`, return the final layer's output. Default: `-1`.
        pretrained (Pretrained):
            Use pretrained weights. Default: `False`.
            - If `True`, returns a model pre-trained on ImageNet.
            - If `str` and is a weight file(path), then load weights from
              saved file.
            - In each inherited model, `pretrained` can be a dictionary's
              key to get the corresponding local file or url of the weight.
    """

    model_zoo = {
        "sice": dict(
            path="https://github.com/Li-Chongyi/Zero-DCE_extension/blob/main/Zero-DCE%2B%2B/snapshots_Zero_DCE%2B%2B/Epoch99.pth",
            file_name="zerodce++_sice.pth", num_classes=None,
        ),
    }

    # MARK: Magic Functions

    def __init__(
        self,
        # Hyperparameters
        scale_factor: int = 1,
        channels : int = 32,
        act = nn.ReLU(inplace=True),
        # BaseModel's args
        basename : Optional[str] = "zerodce++",
        name : Optional[str] = "zerodce++",
        num_classes : Optional[int] = None,
        out_indexes : Indexes = -1,
        pretrained : Pretrained = False,
        *args, **kwargs
    ):
        # Loss weights follow the Zero-DCE++ reference implementation.
        kwargs["loss"] = CombinedLoss(
            spa_weight = 1.0,
            exp_patch_size = 16,
            exp_mean_val = 0.6,
            exp_weight = 10.0,
            col_weight = 5.0,
            tv_weight = 1600.0,
        )
        super().__init__(
            basename = basename,
            name = name,
            num_classes = num_classes,
            out_indexes = out_indexes,
            pretrained = pretrained,
            *args, **kwargs
        )
        # NOTE: Get Hyperparameters
        self.scale_factor = scale_factor
        self.channels = channels

        # NOTE: Features
        # Zero-DCE DWC + p-shared
        self.e_conv1 = CSDNTem(3, self.channels)
        self.e_conv2 = CSDNTem(self.channels, self.channels)
        self.e_conv3 = CSDNTem(self.channels, self.channels)
        self.e_conv4 = CSDNTem(self.channels, self.channels)
        self.e_conv5 = CSDNTem(self.channels * 2, self.channels)
        self.e_conv6 = CSDNTem(self.channels * 2, self.channels)
        self.e_conv7 = CSDNTem(self.channels * 2, 3)
        self.upsample = nn.UpsamplingBilinear2d(scale_factor=self.scale_factor)
        self.act = act
        if isinstance(self.act, str):
            self.act = ACT_LAYERS.build(name=self.act)

        # NOTE: Load Pretrained
        if self.pretrained:
            self.load_pretrained()
        else:
            self.apply(self.weights_init)

    # MARK: Configure

    def weights_init(self, m):
        """Initialize conv weights N(0, 0.02) and batch-norm weights N(1, 0.02)."""
        classname = m.__class__.__name__
        if classname.find("Conv") != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find("BatchNorm") != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    # MARK: Forward Pass

    def forward_once(self, x: Tensor, *args, **kwargs) -> Tensors:
        """Forward pass once. Implement the logic for a single forward pass.

        Args:
            x (Tensor):
                Input of shape [B, C, H, W].

        Returns:
            yhat (Tensors):
                Tuple of (curve map x_r, enhanced image).
        """
        if self.scale_factor == 1:
            x_down = x
        else:
            x_down = F.interpolate(
                x, scale_factor=1.0 / self.scale_factor, mode="bilinear"
            )

        x1 = self.act(self.e_conv1(x_down))
        x2 = self.act(self.e_conv2(x1))
        x3 = self.act(self.e_conv3(x2))
        x4 = self.act(self.e_conv4(x3))
        x5 = self.act(self.e_conv5(torch.cat([x3, x4], 1)))
        x6 = self.act(self.e_conv6(torch.cat([x2, x5], 1)))
        # BUG FIX: F.tanh is deprecated in PyTorch; torch.tanh is the
        # supported, numerically identical replacement.
        x_r = torch.tanh(self.e_conv7(torch.cat([x1, x6], 1)))

        # Simplified: the original `if scale_factor == 1: x_r = x_r` branch
        # was a no-op.
        if self.scale_factor != 1:
            x_r = self.upsample(x_r)

        # NOTE: Enhance — apply the estimated LE-curve 8 times.
        x = x + x_r * (torch.pow(x, 2) - x)
        x = x + x_r * (torch.pow(x, 2) - x)
        x = x + x_r * (torch.pow(x, 2) - x)
        enhance_image_1 = x + x_r * (torch.pow(x, 2) - x)
        x = enhance_image_1 + x_r * (torch.pow(enhance_image_1, 2) - enhance_image_1)
        x = x + x_r * (torch.pow(x, 2) - x)
        x = x + x_r * (torch.pow(x, 2) - x)
        enhance_image = x + x_r * (torch.pow(x, 2) - x)
        return x_r, enhance_image
|
python
|
"""Number constraint names."""
from jsonvl._utilities.venum import Venum
class NumberConstraintNames(Venum):
"""Constraints applied to number types."""
LT = 'lt'
GT = 'gt'
LTE = 'lte'
GTE = 'gte'
EQ = 'eq'
|
python
|
import time
import logging
import numpy as np
import torch
import torch.nn as nn
from data import augment, TensorDataset
from diffaugment import DiffAugment
from utils import get_time
def epoch(mode, dataloader, net, optimizer, criterion, args, aug):
    """Run one epoch in 'train' or eval mode; return (mean loss, accuracy)."""
    total_loss, total_correct, total_samples = 0, 0, 0
    net = net.to(args.device)
    criterion = criterion.to(args.device)

    if mode == 'train':
        net.train()
    else:
        net.eval()

    for batch in dataloader:
        images = batch[0].float().to(args.device)
        if aug:
            # DSA (differentiable siamese augmentation) vs. classic augmentation.
            if args.dsa:
                images = DiffAugment(images, args.dsa_strategy, param=args.dsa_param)
            else:
                images = augment(images, args.dc_aug_param, device=args.device)
        labels = batch[1].long().to(args.device)
        batch_size = labels.shape[0]

        output = net(images)
        loss = criterion(output, labels)
        predictions = np.argmax(output.cpu().data.numpy(), axis=-1)

        total_correct += np.sum(np.equal(predictions, labels.cpu().data.numpy()))
        total_loss += loss.item() * batch_size
        total_samples += batch_size

        if mode == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    return total_loss / total_samples, total_correct / total_samples
def evaluate_synset(it_eval, net, images_train, labels_train, testloader, args):
    """Train *net* on the synthetic set, then evaluate on *testloader*.

    Returns (trained net, final train accuracy, test accuracy).
    """
    net = net.to(args.device)
    images_train = images_train.to(args.device)
    labels_train = labels_train.to(args.device)

    lr = float(args.lr_net)
    total_epochs = int(args.epoch_eval_train)
    # The learning rate is dropped 10x once, just past the halfway point.
    decay_epochs = [total_epochs // 2 + 1]
    optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
    criterion = nn.CrossEntropyLoss().to(args.device)

    trainloader = torch.utils.data.DataLoader(
        TensorDataset(images_train, labels_train),
        batch_size=args.batch_train, shuffle=True, num_workers=0)

    start = time.time()
    for ep in range(total_epochs + 1):
        loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug = True)
        if ep in decay_epochs:
            lr *= 0.1
            optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
    time_train = time.time() - start

    loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug = False)
    logging.info('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, test acc = %.4f' % (get_time(), it_eval, total_epochs, int(time_train), loss_train, acc_train, acc_test))
    return net, acc_train, acc_test
|
python
|
#!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
    """Connect to the PostgreSQL database. Returns a database connection."""
    # Database name is fixed; the schema (members, matches, tournaments,
    # standings view) is presumably created by an accompanying SQL file —
    # confirm against tournament.sql.
    return psycopg2.connect("dbname=tournament")
def deleteMatches():
    """Remove all the match records from the database."""
    db = connect()
    cursor = db.cursor()
    cursor.execute("DELETE FROM matches")
    db.commit()
    db.close()
def deletePlayers():
    """Remove all the player records from the database."""
    db = connect()
    cursor = db.cursor()
    cursor.execute("DELETE FROM members")
    db.commit()
    db.close()
def deleteTournaments():
    """Remove all the tournament records from the database."""
    db = connect()
    cursor = db.cursor()
    cursor.execute("DELETE FROM tournaments")
    db.commit()
    db.close()
def countTournaments():
    """Returns the number of tournaments currently registered."""
    db = connect()
    cursor = db.cursor()
    cursor.execute("SELECT COUNT(*) FROM tournaments")
    (total,) = cursor.fetchone()
    db.commit()
    db.close()
    return total
def countPlayers():
    """Returns the number of players currently registered."""
    db = connect()
    cursor = db.cursor()
    cursor.execute("SELECT COUNT(*) FROM members")
    (total,) = cursor.fetchone()
    db.commit()
    db.close()
    return total
def registerTournament(name):
    """Adds a tournament to the database.

    Args:
      name: the name of the tournament (need not be unique).
    """
    db = connect()
    cursor = db.cursor()
    # Parameterized query protects against SQL injection.
    cursor.execute("INSERT INTO tournaments (name) VALUES (%s)", (name,))
    db.commit()
    db.close()
# Add this to be able to reference unique id of each tournament
def getTournamentId(name):
    """Returns unique ID of registered tournament.

    Args:
      name: the name of the tournament.
    """
    db = connect()
    cursor = db.cursor()
    cursor.execute("SELECT id FROM tournaments WHERE name = (%s)", (name,))
    (idNumber,) = cursor.fetchone()
    db.commit()
    db.close()
    return idNumber
def registerPlayer(name, tournamentId):
    """Adds a player to the tournament database.

    The database assigns a unique serial id number for the player.

    Args:
      name: the player's full name (need not be unique).
      tournamentId: the unique ID (number) of the tournament the player is registering for.
    """
    db = connect()
    cursor = db.cursor()
    cursor.execute("INSERT INTO members (name, tournament) VALUES (%s, %s)", (name, tournamentId))
    db.commit()
    db.close()
def playerStandings(tournamentId):
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a player
    tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played

    Args:
      tournamentId: the unique ID (number) of the tournament.
    """
    db = connect()
    cursor = db.cursor()
    # "standings" is a database view ordered here by win count.
    cursor.execute("SELECT * FROM standings WHERE tournament = %s ORDER BY wins DESC", (tournamentId,))
    standings = cursor.fetchall()
    db.commit()
    db.close()
    return standings
def reportMatch(tournamentId, winner, loser):
    """Records the outcome of a single match between two players.

    Args:
      tournamentId: the unique ID of the tournament.
      winner:  the ID number of the player who won.
      loser:  the ID number of the player who lost.
    """
    db = connect()
    cursor = db.cursor()
    cursor.execute("INSERT INTO matches (tournament, winner, loser) VALUES (%s, %s, %s)", (tournamentId, winner, loser))
    # Credit the win to the winner's record.
    cursor.execute("UPDATE members SET wins = wins + 1 WHERE id = (%s)", (winner,))
    # Credit the loss to the loser's record.
    cursor.execute("UPDATE members SET losses = losses + 1 WHERE id = (%s)", (loser,))
    db.commit()
    db.close()
def swissPairings(tournamentId):
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings.  Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name

    Args:
      tournamentId: the unique ID of the tournament.
    """
    db = connect()
    cursor = db.cursor()
    cursor.execute("SELECT id, name FROM members WHERE tournament = (%s) ORDER BY wins DESC", (tournamentId,))
    results = cursor.fetchall()

    # BUG FIX: the original iterated range(result_length / 2); "/" is true
    # division on Python 3 and yields a float, so range() raises TypeError.
    # Stepping by two over even indexes pairs adjacent players — (0,1),
    # (2,3), ... — identically on Python 2 and 3, and a trailing odd player
    # is ignored exactly as before.
    pairings = []
    for first in range(0, len(results) - 1, 2):
        pairings.append(results[first] + results[first + 1])

    db.commit()
    db.close()
    return pairings
|
python
|
# Read two integers and print the sum of all even numbers in [a, b].
a, b = [int(x) for x in input().split()]
# Normalize so that a <= b.
if a > b:
    a, b = b, a
# Snap both bounds inward to the nearest even number.
if a & 1:
    a += 1
if b & 1:
    b -= 1
# Arithmetic series: (b - a) // 2 + 1 even terms, summed as count * (first + last) / 2.
# NOTE(review): if the range contains no even number (e.g. a == b and odd),
# the snapped bounds cross and this prints a wrong value — confirm the input
# always contains at least one even number.
print(((b - a) // 2 + 1) * (a + b) // 2)
|
python
|
from ..SimpleSymbolDownloader import SymbolDownloader
from ..symbols.Generic import Generic
from time import sleep
from ..compat import text
import requests
class TigerDownloader(SymbolDownloader):
    """Symbol downloader for the 'tiger' source, querying a fixed ticker set."""

    def __init__(self):
        SymbolDownloader.__init__(self, "tiger")

    def _add_queries(self, prefix=''):
        """Seed the query list with the fixed tickers, skipping duplicates."""
        # NOTE(review): 'APPL' looks like a typo for 'AAPL' -- confirm the
        # intended ticker before changing it.
        elements = ['A', 'APPL', 'MSFT']
        for element in elements:
            if element not in self.queries:  # Avoid having duplicates in list
                self.queries.append(element)

    def nextRequest(self, insecure=False, pandantic=False):
        """Fetch the next query with exponential back-off on transient
        network errors: up to 5 retries sleeping 5, 25, 125, 625, 3125
        seconds respectively. Returns the decoded symbols."""
        self._nextQuery()
        success = False
        retryCount = 0
        json = None
        maxRetries = 5
        while not success:
            try:
                json = self._fetch(insecure)
                success = True
            except (requests.HTTPError,
                    requests.exceptions.ChunkedEncodingError,
                    requests.exceptions.ReadTimeout,
                    requests.exceptions.ConnectionError):
                if retryCount >= maxRetries:
                    raise
                attempt = retryCount + 1
                # Bug fix: the original called math.pow() but never imported
                # math, so every retry raised NameError. Integer
                # exponentiation needs no import.
                sleepAmt = 5 ** attempt
                print("Retry attempt: " + str(attempt) + " of " + str(maxRetries) + "."
                      " Sleep period: " + str(
                    sleepAmt) + " seconds."
                      )
                sleep(sleepAmt)
                retryCount = attempt
        (symbols, count) = self.decodeSymbolsContainer(json)
        for symbol in symbols:
            self.symbols[symbol.ticker] = symbol
        if count > 10:
            # This should never happen with this API, it always returns at most 10 items
            raise Exception("Funny things are happening: count "
                            + text(count)
                            + " > 10. "
                            + "Content:"
                            + "\n"
                            + repr(json))
        # Done once the last query in the list has been consumed.
        self.done = self._getQueryIndex() + 1 >= len(self.queries)
        return symbols

    def decodeSymbolsContainer(self, json):
        """Decode the API payload into (list of Generic symbols, item count)."""
        items = json['data']['items']
        symbols = [
            Generic(text(row['symbol']), row['name'], row['exch'],
                    row['exchDisp'], row['type'], row['typeDisp'])
            for row in items
        ]
        return (symbols, len(items))

    def getRowHeader(self):
        """CSV header: the base downloader columns plus Generic's extras."""
        return SymbolDownloader.getRowHeader(self) + ["exchangeDisplay", "Type", "TypeDisplay"]
|
python
|
from flask import Blueprint
from app.actor import get_hello
import jsonpickle
hello_resource = Blueprint('hello_resource', __name__)
@hello_resource.route('/rest/hello/', defaults={'name': 'world'})
@hello_resource.route('/rest/hello/<name>')
def get(name):
    """Encode the result of get_hello.run(name) as a JSON string.

    *name* defaults to 'world' when the URL omits it; unpicklable=False
    strips jsonpickle's py/object type metadata from the output.
    """
    return jsonpickle.encode(get_hello.run(name), unpicklable=False)
|
python
|
"""
Copyright 2020 Alexander Brauckmann.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from absl import logging as lg
from yacos.info.compy.llvm_seq import LLVMSeqBuilder
program_1fn_2 = """
int bar(int a) {
if (a > 10)
return a;
return -1;
}
"""
program_fib = """
int fib(int x) {
switch(x) {
case 0:
return 0;
case 1:
return 1;
default:
return fib(x-1) + fib(x-2);
}
}
"""
def verify_data_dir():
    """Exit the process unless the YaCoS data directory (~/.local/yacos) exists."""
    # Robustness fix: os.environ.get('HOME') returns None when HOME is unset
    # (e.g. on Windows), which made os.path.join raise TypeError;
    # expanduser handles that case.
    top_dir = os.path.join(os.path.expanduser('~'), '.local')
    if not os.path.isdir(os.path.join(top_dir, 'yacos')):
        lg.error('YaCoS data does not exist.')
        sys.exit(1)
def test_construct_with_custom_visitor():
    """Construction: source -> info -> representation must not raise."""
    verify_data_dir()
    source = os.path.join(os.environ.get('HOME'),
                          '.local',
                          'yacos',
                          'tests',
                          'program_1fn_2.c')
    seq_builder = LLVMSeqBuilder()
    info = seq_builder.source_to_info(source)
    _ = seq_builder.info_to_representation(info)
def test_plot(tmpdir):
    """General tests: Plot.

    Renders the fib program's sequence representation to a PNG inside the
    pytest-provided tmpdir and asserts the file was written.
    """
    verify_data_dir()
    filename = os.path.join(os.environ.get('HOME'),
                            '.local',
                            'yacos',
                            'tests',
                            'program_fib.c')
    builder = LLVMSeqBuilder()
    info = builder.source_to_info(filename)
    seq = builder.info_to_representation(info)
    outfile = os.path.join(tmpdir, "syntax_seq.png")
    seq.draw(path=outfile, width=8)
    assert os.path.isfile(outfile)
|
python
|
# coding=utf-8
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import pytest
import requests
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper import \
ResourceType
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def services():
    """Load the canned neutron service requests for the listener-update test."""
    neutron_services_filename = (
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            '../../testdata/service_requests/listener_update.json')
    )
    # Resource fix: the original json.load(open(...)) never closed the file
    # handle; a context manager closes it deterministically.
    with open(neutron_services_filename) as f:
        return json.load(f)
def get_next_listener(service_iterator, icontrol_driver, bigip, env_prefix):
    """Apply the next canned service and return the resulting BIG-IP virtual.

    Advances *service_iterator*, feeds the service through the agent's
    common handler, and fetches the listener's virtual server from the
    tenant's partition.
    """
    # The next() builtin works on Python 2.6+ and 3.x iterators, unlike the
    # Python-2-only .next() method the original called.
    service = next(service_iterator)
    listener = service['listeners'][0]
    folder = '{0}_{1}'.format(env_prefix, listener['tenant_id'])
    icontrol_driver._common_service_handler(service)
    listener_name = '{0}_{1}'.format(env_prefix, listener['id'])
    return bigip.get_resource(
        ResourceType.virtual, listener_name, partition=folder)
def get_folder_name(service, env_prefix):
    """Partition/folder name: '<env_prefix>_<loadbalancer tenant id>'."""
    tenant = service['loadbalancer']['tenant_id']
    return '{0}_{1}'.format(env_prefix, tenant)
def test_listener_update(
        track_bigip_cfg,
        bigip,
        services,
        icd_config,
        icontrol_driver):
    """Walk the canned service list end-to-end: create a loadbalancer and
    listener, apply a series of listener updates (name, description,
    connection limit, admin state), then delete everything and check the
    partition is gone. Each get_next_listener call consumes one service
    definition from the iterator.
    """
    env_prefix = 'TEST'
    service_iter = iter(services)
    # Create loadbalancer
    service = service_iter.next()
    icontrol_driver._common_service_handler(service)
    # Create listener (no name, description)
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert not hasattr(l, 'description')
    assert l.connectionLimit == 0
    assert l.enabled
    # Update name ('spring'). Description is changed to include name.
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert l.description == 'spring:'
    assert l.connectionLimit == 0
    assert l.enabled
    # Update description ('has sprung')
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert l.description == 'spring: has-sprung'
    assert l.connectionLimit == 0
    assert l.enabled
    # Update connection limit (200)
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert l.description == 'spring: has-sprung'
    assert l.connectionLimit == 200
    assert l.enabled
    # Update admin_state_up (False)
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert l.description == 'spring: has-sprung'
    assert l.connectionLimit == 200
    assert l.disabled
    # Delete listener
    service = service_iter.next()
    folder = get_folder_name(service, env_prefix)
    icontrol_driver._common_service_handler(service)
    # Delete loadbalancer
    service = service_iter.next()
    icontrol_driver._common_service_handler(service, delete_partition=True)
    # All objects deleted
    assert not bigip.folder_exists(folder)
|
python
|
from __future__ import annotations
import logging
import re
from dataclasses import dataclass
from datetime import date
from wordgame_bot.attempt import Attempt, AttemptParser
from wordgame_bot.exceptions import InvalidFormatError, ParsingError
from wordgame_bot.guess import Guesses, GuessInfo
INCORRECT_GUESS_SCORE = 8
@dataclass
class HeardleAttemptParser(AttemptParser):
    """Parses a shared Heardle result message into a HeardleAttempt."""
    attempt: str
    error: str = ""  # TODO
    def parse(self) -> HeardleAttempt:
        """Parse self.attempt; logs and re-raises on malformed input."""
        try:
            return self.parse_attempt()
        except ParsingError as e:
            self.handle_error(e)
    def parse_attempt(self) -> HeardleAttempt:
        # Line 0 is the '#Heardle #<day>' header; line 1 holds the guess
        # squares (its first character is skipped before scoring).
        lines = self.get_lines()
        info = HeardleGuessInfo(lines[0])
        # NOTE(review): the emoji/score arguments mirror the Guesses
        # constructor -- confirm their meaning against wordgame_bot.guess.
        guesses = Guesses(lines[1][1:], INCORRECT_GUESS_SCORE, "🟩", "🟩🟥⬜️", 1)
        info.score = guesses.correct_guess
        return HeardleAttempt(info, guesses)
    def get_lines(self) -> list[str]:
        """Split the attempt into non-blank, stripped lines; 2 or 3 expected."""
        lines = [
            line.strip()
            for line in self.attempt.strip().split("\n")
            if line.strip()
        ]
        if len(lines) <= 1 or len(lines) > 3:
            raise InvalidFormatError(self.attempt)
        return lines
    def handle_error(self, error: ParsingError):
        """Record the error message on the parser, then re-raise."""
        logging.warning(f"{error!r}")
        self.error = str(error.message)
        raise error
@dataclass
class HeardleGuessInfo(GuessInfo):
    """Header-line parser for Heardle: validates '#Heardle #<day>'."""
    # NOTE(review): presumably the Heardle launch date, used by the base
    # class for day arithmetic/validation -- confirm against GuessInfo.
    creation_day: date = date(2022, 2, 25)
    valid_format = re.compile("^#Heardle #[0-9]+$")
    def validate_format(self):
        """Raise InvalidFormatError unless the stripped header matches."""
        self.info = self.info.strip()
        if self.valid_format.match(self.info) is None:
            raise InvalidFormatError(self.info)
    def extract_day_and_score(self):
        # Header '#Heardle #123' -> day '123'; the score is filled in later
        # by HeardleAttemptParser from the guesses.
        info_parts = self.info.split(" ")
        self.day = info_parts[1][1:]
        self.score = None
    def parse_day(self) -> int:
        self.validate_day()
        return int(self.day)
    def parse_score(self) -> int:
        # NOTE(review): always returns None despite the -> int annotation;
        # the score is assigned externally by the attempt parser.
        return None
@dataclass
class HeardleAttempt(Attempt):
    """A parsed Heardle attempt: gamemode tag 'H', maximum score 10."""
    @property
    def maxscore(self):
        """Maximum achievable score for a Heardle round."""
        return 10
    @property
    def gamemode(self):
        """Single-character gamemode tag for Heardle."""
        return "H"
|
python
|
# https://leetcode.com/problems/split-array-into-fibonacci-sequence/description/
# (the original header linked magic-squares-in-grid, which does not match
# the splitIntoFibonacci solution below)
#
# algorithms
# Medium (35.25%)
# Total Accepted: 12,752
# Total Submissions: 36,179
# beats 66.41% of python submissions
class Solution(object):
    def splitIntoFibonacci(self, S):
        """
        :type S: str
        :rtype: List[int]

        Split the digit string S into a Fibonacci-like sequence of ints:
        at least 3 terms, every value <= 2**31 - 1, and no piece with a
        leading zero except the number 0 itself. Returns [] when no split
        exists. Uses DFS with res[0] as a found-flag to stop early.
        """
        length = len(S)
        if length < 3:
            return []
        res = [[]]
        threshold = 2 ** 31 - 1

        def recursive(idx, path):
            if res[0]:
                # A full split was already found; prune all other branches.
                return
            if idx == length:
                if len(path) >= 3:
                    res[0] = path[:]
                return
            if S[idx] == '0':
                # A piece starting with '0' can only be the single number 0.
                if len(path) < 2 or path[-1] + path[-2] == 0:
                    recursive(idx + 1, path + [0])
                return
            # Bug fix for Python 3: xrange() no longer exists; range()
            # behaves identically here (and also works on Python 2).
            for i in range(idx, length):
                tmp = int(S[idx:i + 1])
                if tmp > threshold:
                    break
                if len(path) < 2:
                    recursive(i + 1, path + [tmp])
                elif path[-1] + path[-2] == tmp:
                    recursive(i + 1, path + [tmp])
                elif path[-1] + path[-2] < tmp:
                    # Pieces only grow as i advances; no later match possible.
                    break
        recursive(0, [])
        return res[0]
|
python
|
from django import forms
from formfactory import clean_methods
@clean_methods.register
def check_if_values_match(form_instance, **kwargs):
    """Clean method for when a contact updates password.
    """
    cleaned = form_instance.cleaned_data
    if cleaned["first_field"] != cleaned["second_field"]:
        raise forms.ValidationError(
            "The values you entered are not equal."
        )
|
python
|
# File: worker.py
# Aim: Backend worker of the http server
# Imports
import os
import sys
from . import CONFIG
from .local_tools import Tools
tools = Tools()
CONFIG.logger.debug('Worker imported in HTTP package')
# Import other workers
other_folders = [
os.path.join(
os.path.dirname(__file__), # HTTP
'..', # Server
'..', # SocketServerInPython
'..', # [some parDir]
'PinYinInputMethod')
]
sys.path.append(other_folders[0])
from inputMethod.web_compat import Worker as PinYinWorker
pinYin_worker = PinYinWorker()
# Defines
class Worker(object):
    # Backend worker object
    """HTTP backend worker: answers GET requests either via the PinYin
    worker or by serving a file from the configured source directories."""
    def __init__(self):
        CONFIG.logger.info(f'Worker initialized')
    def _synchronize_settings(self):
        """Re-read directory and MIME-type settings from CONFIG; called at
        the start of every response so setting changes take effect."""
        self.src_dir = CONFIG.get('Runtime', 'srcDir')
        self.default_src_dir = CONFIG.get('Default', 'srcDir')
        self.known_types = CONFIG.get_section('KnownTypes')
        CONFIG.logger.debug(
            'Worker synchronized settings: src_dir={}, default_src_dir={}, known_types={}'
            .format(self.src_dir, self.default_src_dir, self.known_types))
    def fullpath(self, path):
        # Make full path based on [path] in request,
        # the method also checks the existence of the file on [path],
        # will return None, if it fails to pass the check,
        # the check is of two-step:
        # 1. check if the file exists in srcDir,
        #    will return the fullpath directly if it passes,
        # 2. check if the file exists in defaultSrcDir
        for dir, name in zip([self.src_dir, self.default_src_dir],
                             ['srcDir', 'defaultSrcDir']):
            # Try src_dir and default_src_dir in order
            full = os.path.join(dir, path)
            if os.path.isfile(full):
                CONFIG.logger.debug(
                    f'Found {path} in {name}, using it in response')
                return full
        # Can not find the file in two dirs
        CONFIG.logger.warning(
            f'Can not find {path} in known dirs, return None')
        return None
    def response(self, request):
        # Make response of [request]
        """Build a response dict for *request* (keys 'method' and 'path').

        GET requests are first offered to the PinYin worker; otherwise the
        path is resolved via fullpath() and the file is served with a
        Content-Type looked up from the KnownTypes settings (default
        text/html). Missing files yield a 404 response.
        """
        # Synchronize settings
        self._synchronize_settings()
        # Fetch method and path
        method = request['method']
        path = request['path'][1:]
        # Make response
        if method == 'GET':
            # Response to 'GET' request
            # Customized workers response
            res = pinYin_worker.response(path)
            if res is not None:
                return tools.make_response(resType='application/json',
                                           resContent=res)
            # Get useable fullpath
            full = self.fullpath(path)
            # Can not find file
            if full is None:
                return tools.make_response(resCode='HTTP/1.1 404',
                                           resContent=f'Not Found {path}')
            # Found file
            # Get ext
            ext = path.split('.')[-1]
            # Find file type
            resType = 'Content-Type: {}'.format(
                self.known_types.get(ext, 'text/html'))
            # Read file
            with open(full, 'rb') as f:
                resContent = f.read()
            # Make response and return
            return tools.make_response(resType=resType, resContent=resContent)
|
python
|
from .forms import UserProfileForm, ProfileUpdateForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .models import Profile
def display_login(request):
    """Render the login landing page."""
    return render(request, 'login/index.html', {})
@login_required
def make_profile(request):
    """Create a profile for the logged-in user.

    POST: validate and save the profile (owner forced to request.user),
    then redirect to the profile page. GET (or invalid POST): render the
    blank-profile form.
    """
    if request.method == "POST":
        form = UserProfileForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.user = request.user
            post.save()
            return redirect('login:display_profile')
    else:
        # Bug fix: the original passed the dict as bound *data*
        # (UserProfileForm({'user': ...})), which made the empty form
        # validate and display errors before the user ever submitted.
        # initial= is Django's mechanism for prefilling an unbound form.
        form = UserProfileForm(initial={'user': request.user})
    return render(request, 'login/blank_profile.html', {'profile_form': form})
@login_required
def display_profile(request):
    """Render the current user's profile information page."""
    return render(request, 'login/info_profile.html', {'user': request.user})
@login_required
def update_profile(request):
    """Update the logged-in user's profile.

    POST: validate and save the uploaded data against the user's profile
    instance, then redirect. GET: render the form prefilled from the
    existing profile.
    """
    if request.method == 'POST':
        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
        if p_form.is_valid():
            p_form.save()
            return redirect('login:display_profile')
    else:
        # Bug fix: the original also passed request.POST/request.FILES on
        # GET, binding an empty form and rendering validation errors
        # before the user submitted anything.
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'p_form': p_form
    }
    return render(request, 'login/update_profile.html', context)
|
python
|
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
from epi.util import dbg_check
import matplotlib.pyplot as plt
# import tensorflow_probability as tfp
FANO_EPS = 1e-6
neuron_inds = {"E": 0, "P": 1, "S": 2, "V": 3}
def load_SSSN_variable(v, ind=0):
    """Load entry *ind* of variable *v* from data/AgosBiorxiv2.mat as a
    float32 tensor.

    # NOTE(review): assumes the .mat file maps v to an indexable array of
    # parameter sets -- confirm against the data file.
    """
    # npzfile = np.load("data/V1_Zs.npz")
    matfile = loadmat(os.path.join("data", "AgosBiorxiv2.mat"))
    _x = matfile[v][ind]
    x = tf.constant(_x, dtype=tf.float32)
    return x
def euler_sim_stoch(f, x_init, dt, T):
    """Integrate dx/dt = f(x) with forward Euler for T steps of size dt.

    Returns the final state with the trailing singleton axis dropped
    (the state has shape [..., 1]; the result is state[:, :, :, 0]).
    """
    state = x_init
    for _ in range(T):
        state = state + f(state) * dt
    return state[:, :, :, 0]
def tf_ceil(x, max):
    """Clamp x from above at max using a ReLU (differentiable ceiling)."""
    overshoot = tf.nn.relu(max - x)
    return max - overshoot
def tf_floor(x, min):
    """Clamp x from below at min using a ReLU (differentiable floor)."""
    excess = tf.nn.relu(x - min)
    return min + excess
def euler_sim_stoch_traj(f, x_init, dt, T):
    """Forward-Euler integrate dx/dt = f(x) and return the full trajectory
    (initial state plus T steps) concatenated along axis 3."""
    state = x_init
    trajectory = [x_init]
    for _ in range(T):
        state = state + f(state) * dt
        trajectory.append(state)
    return tf.concat(trajectory, axis=3)
def euler_sim_stoch_traj_bound(f, x_init, dt, T, min=None, max=None):
    """Like euler_sim_stoch_traj, but clamps the state to [min, max]
    (whichever bounds are given) after every Euler step."""
    state = x_init
    trajectory = [x_init]
    for _ in range(T):
        state = state + f(state) * dt
        if min is not None:
            state = tf_floor(state, min)
        if max is not None:
            state = tf_ceil(state, max)
        trajectory.append(state)
    return tf.concat(trajectory, axis=3)
# Parameter-set index into the .mat file.
# NOTE(review): the assignment is duplicated -- the second line is redundant.
ind = 1070
ind = 1070
# Weight matrix (applied as tf.matmul(W, x) in the simulators) and inputs.
W_mat = load_SSSN_variable("W", ind=ind)
HB = load_SSSN_variable("hb", ind=ind)
HC = load_SSSN_variable("hc", ind=ind)
n = 2.0  # supralinear transfer exponent: rate = relu(u) ** n
N = 1
# dt = 0.00025
# T = 100
# Rate time constants, shaped [1, 1, 4, 1] to broadcast over [M, N, |r|, T].
tau = 0.001 * np.array([1.0, 1.0, 1.0, 1.0], np.float32)
tau = tau[None, None, :, None]
# Noise-process time constants, same broadcast shape.
tau_noise = 0.005 * np.array([1.0, 1.0, 1.0, 1.0], np.float32)
tau_noise = tau_noise[None, None, :, None]
# Scale factor applied to the noise amplitude in the simulators.
sigma_fac = np.sqrt(1.0 + (tau / tau_noise))
# Dim is [M,N,|r|,T]
def SSSN_sim_traj(sigma_eps, W_mat, N=1, dt=0.0005, T=150, x_init=None):
    """Build a stochastic SSN trajectory simulator for a fixed noise level.

    Args:
        sigma_eps: per-population noise amplitudes (indexed [:, None, :, None],
            so a 2-D [M, 4] array is expected -- TODO confirm).
        W_mat: 4x4 weight matrix applied as tf.matmul(W, x).
        N: number of independent noise realisations per parameter set.
        dt: Euler step size.
        T: number of Euler steps.
        x_init: optional initial rates of shape [M, N, 4]; random uniform
            in [0.1, 0.25) when omitted.

    Returns:
        A function h -> trajectory of shape [M, N, 8, T+1], where channels
        0-3 are the rates x and channels 4-7 are the OU noise eps.
    """
    sigma_eps = sigma_eps[:, None, :, None]
    def _SSSN_sim_traj(h, x_init=x_init):
        h = h[:, None, :, None]
        W = W_mat[None, None, :, :]
        _x_shape = tf.ones((h.shape[0], N, 4, 1), dtype=tf.float32)
        if x_init is None:
            x_init = tf.random.uniform((h.shape[0], N, 4, 1), 0.1, 0.25)
        else:
            x_init = x_init[:, :, :, None]
        eps_init = 0.0 * _x_shape  # noise process starts at zero
        # State y stacks rates and noise along axis 2: y = [x; eps].
        y_init = tf.concat((x_init, eps_init), axis=2)
        def f(y):
            x = y[:, :, :4, :]
            eps = y[:, :, 4:, :]
            # Brownian increment with variance dt.
            B = tf.random.normal(eps.shape, 0.0, np.sqrt(dt))
            # Supralinear rate dynamics: tau dx/dt = -x + relu(Wx + h + eps)^n.
            dx = (-x + (tf.nn.relu(tf.matmul(W, x) + h + eps) ** n)) / tau
            # Ornstein-Uhlenbeck-style noise dynamics.
            deps = (
                -eps + (np.sqrt(2.0 * tau_noise) * sigma_eps * sigma_fac * B / dt)
            ) / tau_noise
            return tf.concat((dx, deps), axis=2)
        x_t = euler_sim_stoch_traj(f, y_init, dt, T)
        # x_t = euler_sim_stoch_traj_bound(f, y_init, dt, T, None, 1000)
        return x_t
    return _SSSN_sim_traj
def SSSN_sim_traj_sigma(h, W_mat, N=1, dt=0.0005, T=150):
    """Build a trajectory simulator parameterised by noise level instead of
    input: h is fixed here and the returned function takes sigma_eps.

    Unlike SSSN_sim_traj, this variant clamps the state from above at 1000
    via euler_sim_stoch_traj_bound. Returned trajectories have shape
    [M, N, 8, T+1] (rates in channels 0-3, noise in 4-7).
    """
    h = h[:, None, :, None]
    def _SSSN_sim_traj(sigma_eps):
        sigma_eps = sigma_eps[:, None, :, None]
        W = W_mat[None, None, :, :]
        _x_shape = tf.ones((sigma_eps.shape[0], N, 4, 1), dtype=tf.float32)
        x_init = tf.random.uniform((sigma_eps.shape[0], N, 4, 1), 0.1, 0.25)
        eps_init = 0.0 * _x_shape  # noise process starts at zero
        # State y stacks rates and noise along axis 2: y = [x; eps].
        y_init = tf.concat((x_init, eps_init), axis=2)
        def f(y):
            x = y[:, :, :4, :]
            eps = y[:, :, 4:, :]
            # Brownian increment with variance dt.
            B = tf.random.normal(eps.shape, 0.0, np.sqrt(dt))
            dx = (-x + (tf.nn.relu(tf.matmul(W, x) + h + eps) ** n)) / tau
            deps = (
                -eps + (np.sqrt(2.0 * tau_noise) * sigma_eps * sigma_fac * B / dt)
            ) / tau_noise
            return tf.concat((dx, deps), axis=2)
        # x_t = euler_sim_stoch_traj(f, y_init, dt, T)
        x_t = euler_sim_stoch_traj_bound(f, y_init, dt, T, None, 1000)
        return x_t
    return _SSSN_sim_traj
def SSSN_sim(sigma_eps, W_mat, N=1, dt=0.0005, T=150):
    """Build a simulator that returns only the state after T Euler steps.

    sigma_eps is a scalar noise amplitude shared across the 4 populations.
    The returned function maps h to the final state of shape [M, N, 8]
    (rates in channels 0-3, noise in 4-7; the trailing time axis is
    dropped by euler_sim_stoch).
    """
    sigma_eps = sigma_eps * np.array([1.0, 1.0, 1.0, 1.0], np.float32)
    sigma_eps = sigma_eps[None, None, :, None]
    def _SSSN_sim(h):
        h = h[:, None, :, None]
        W = W_mat[None, None, :, :]
        _x_shape = tf.ones((h.shape[0], N, 4, 1), dtype=tf.float32)
        x_init = tf.random.uniform((h.shape[0], N, 4, 1), 0.1, 0.25)
        eps_init = 0.0 * _x_shape  # noise process starts at zero
        # State y stacks rates and noise along axis 2: y = [x; eps].
        y_init = tf.concat((x_init, eps_init), axis=2)
        def f(y):
            x = y[:, :, :4, :]
            eps = y[:, :, 4:, :]
            # Brownian increment with variance dt.
            B = tf.random.normal(eps.shape, 0.0, np.sqrt(dt))
            dx = (-x + (tf.nn.relu(tf.matmul(W, x) + h + eps) ** n)) / tau
            deps = (
                -eps + (np.sqrt(2.0 * tau_noise) * sigma_eps * sigma_fac * B / dt)
            ) / tau_noise
            return tf.concat((dx, deps), axis=2)
        x_ss = euler_sim_stoch(f, y_init, dt, T)
        return x_ss
    return _SSSN_sim
def get_drdh(alpha, eps, W_mat, N=1, dt=0.0005, T=150, delta_step=0.01):
    """Build a finite-difference estimator of d(rate)/d(input) for
    population *alpha* ('E', 'P', 'S' or 'V').

    The returned function maps h to T_x = stack([drdh, drdh**2]), where
    drdh is the forward difference of population alpha's simulated rate
    (averaged over the N realisations) under a delta_step perturbation of
    that population's input.

    # NOTE(review): dt and T are accepted but not forwarded to SSSN_sim,
    # which therefore runs with its own defaults -- confirm intended.
    """
    alpha_ind = neuron_inds[alpha]
    sssn_sim = SSSN_sim(eps, W_mat, N=N)
    # Perturb only population alpha's input channel.
    delta_h = np.zeros((1, 4))
    delta_h[0, alpha_ind] = delta_step
    def _drdh(h):
        # Mean rate over realisations before and after the perturbation.
        x1 = tf.reduce_mean(sssn_sim(h)[:, :, alpha_ind], axis=1)
        x2 = tf.reduce_mean(sssn_sim(h + delta_h)[:, :, alpha_ind], axis=1)
        diff = (x2 - x1) / delta_step
        T_x = tf.stack((diff, diff ** 2), axis=1)
        return T_x
    return _drdh
def get_Fano(
    alpha, sigma_eps, W_mat, N=100, dt=0.0005, T=150, T_ss=100, mu=0.01, k=100.0
):
    """Build a statistic function measuring Fano factors after step T_ss.

    alpha selects one population ('E'/'P'/'S'/'V') or 'all'. Simulated
    rates are scaled by k; the Fano factor var/mean (FANO_EPS guards the
    division) is computed over the post-T_ss samples and averaged over the
    N realisations. The returned statistic is [fano, (fano - mu)**2]
    (concatenated over populations when alpha == 'all').
    """
    if not (alpha == "all"):
        alpha_ind = neuron_inds[alpha]
    sssn_sim_traj = SSSN_sim_traj(sigma_eps, W_mat, N=N, dt=dt, T=T)
    def Fano(h):
        # Discard the transient: keep only time steps from T_ss on.
        if alpha == "all":
            x_t = k * sssn_sim_traj(h)[:, :, :4, T_ss:]
        else:
            x_t = k * sssn_sim_traj(h)[:, :, alpha_ind, T_ss:]
        _means = tf.math.reduce_mean(x_t, axis=-1)
        _vars = tf.square(tf.math.reduce_std(x_t, axis=-1))
        fano = _vars / (_means + FANO_EPS)
        # Average the per-realisation Fano factors over the N realisations.
        vars_mean = tf.reduce_mean(fano, axis=1)
        if alpha == "all":
            T_x = tf.concat((vars_mean, tf.square(vars_mean - mu)), axis=1)
        else:
            T_x = tf.stack((vars_mean, tf.square(vars_mean - mu)), axis=1)
        return T_x
    return Fano
def get_stddev_sigma(
    alpha, W_mat, h, N=100, dt=0.0005, T=150, T_ss=100, mu=0.01, k=100.0
):
    """Build a statistic function mapping noise level sigma_eps to rate
    standard deviations after step T_ss, for a fixed input h.

    alpha selects one population ('E'/'P'/'S'/'V') or 'all'. Rates are
    scaled by k; the per-realisation standard deviation over the post-T_ss
    samples is averaged over the N realisations. The statistic returned is
    [stddev, (stddev - mu)**2] (concatenated when alpha == 'all').
    """
    if not (alpha == "all"):
        alpha_ind = neuron_inds[alpha]
    sssn_sim_traj = SSSN_sim_traj_sigma(h, W_mat, N=N, dt=dt, T=T)
    def get_stddev(sigma_eps):
        # Discard the transient: keep only time steps from T_ss on.
        if alpha == "all":
            x_t = k * sssn_sim_traj(sigma_eps)[:, :, :4, T_ss:]
        else:
            x_t = k * sssn_sim_traj(sigma_eps)[:, :, alpha_ind, T_ss:]
        stddevs = tf.math.reduce_std(x_t, axis=-1)
        stddevs_mean = tf.reduce_mean(stddevs, axis=1)
        if alpha == "all":
            T_x = tf.concat((stddevs_mean, tf.square(stddevs_mean - mu)), axis=1)
        else:
            T_x = tf.stack((stddevs_mean, tf.square(stddevs_mean - mu)), axis=1)
        return T_x
    return get_stddev
def plot_contrast_response(
    c, x, title="", ax=None, linestyle="-", colors=None, fontsize=14
):
    """Plot rate (Hz) against contrast (%) for the four populations.

    c and x must have matching leading lengths; a new axes is created when
    ax is None. Returns the axes plotted into.
    """
    colors = colors if colors is not None else 4 * ["k"]
    assert x.shape[0] == c.shape[0]
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    for pop in range(4):
        ax.plot(100 * c, x[:, pop], linestyle, c=colors[pop], lw=4)
    ticksize = fontsize - 4
    ax.set_xlabel("contrast (%)", fontsize=fontsize)
    ax.set_ylabel("rate (Hz)", fontsize=fontsize)
    ax.set_title(title, fontsize=fontsize)
    plt.setp(ax.get_xticklabels(), fontsize=ticksize)
    plt.setp(ax.get_yticklabels(), fontsize=ticksize)
    return ax
def ISN_coeff(dh, H):
    """Inhibition-stabilised-network coefficient for the E population at
    input H + dh: 1 - 2 * relu(u_E) * W_EE (the factor 2*u comes from the
    derivative of the module's n = 2 supralinear transfer function).
    """
    # Bug fix: SSSN_sim requires the weight matrix as its second argument;
    # the original called SSSN_sim(0.0) and raised TypeError on every call.
    # Use the module-level W_mat with noiseless dynamics (sigma_eps = 0).
    sssn_sim = SSSN_sim(0.0, W_mat)
    h = H + dh
    h_E = h[:, 0]
    r_ss = sssn_sim(h)
    # Net input to E: recurrent drive from the steady-state rates plus h_E.
    u_E = tf.linalg.matvec(r_ss[:, 0, :4], W_mat[0, :])
    u_E = u_E + h_E
    u_E = tf.nn.relu(u_E)
    isn_coeff = 1.0 - 2.0 * (u_E) * W_mat[0, 0]
    return isn_coeff
|
python
|
from schematics.types import ModelType, StringType, PolyModelType, FloatType, DateTimeType
from spaceone.inventory.model.sqldatabase.data import Database
from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, DateTimeDyField, EnumDyField, \
ListDyField
from spaceone.inventory.libs.schema.metadata.dynamic_layout import ItemDynamicLayout, TableDynamicLayout, \
ListDynamicLayout, SimpleTableDynamicLayout
from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse, CloudServiceMeta
'''
SQL DATABASES
'''
# TAB - Default
# Resource Group, Location, Subscription, Subscription ID, SKU, Backend pool, Health probe,
# Load balancing rule, NAT Rules, Public IP Addresses, Load Balancing Type
sql_databases_info_meta = ItemDynamicLayout.set_fields('SQL Databases', fields=[
TextDyField.data_source('Database Name', 'name'),
EnumDyField.data_source('Status', 'data.status', default_state={
'safe': ['Online', 'Creating', 'Copying', 'Creating', 'OnlineChangingDwPerformanceTiers', 'Restoring',
'Resuming', 'Scaling', 'Standby'],
'warning': ['AutoClosed', 'Inaccessible', 'Offline', 'OfflineChangingDwPerformanceTiers', 'OfflineSecondary',
'Pausing', 'Recovering', 'RecoveryPending', 'Suspect'],
'disable': ['Disabled', 'Paused', 'Shutdown'],
'alert': ['EmergencyMode']
}),
TextDyField.data_source('Resource ID', 'data.id'),
TextDyField.data_source('Resource Group', 'data.resource_group'),
TextDyField.data_source('Location', 'data.location'),
TextDyField.data_source('Subscription ID', 'account'),
TextDyField.data_source('Server Name', 'data.server_name'),
TextDyField.data_source('Elastic Pool', 'data.elastic_pool_id'),
TextDyField.data_source('Pricing Tier', 'data.pricing_tier_display'),
DateTimeDyField.data_source('Earliest Restore Point', 'data.earliest_restore_date'),
TextDyField.data_source('Collation', 'data.collation'),
DateTimeDyField.data_source('Creation Date', 'launched_at'),
TextDyField.data_source('Server Admin Login', 'data.administrator_login'),
])
# TAB - Configure
sql_databases_configure = ItemDynamicLayout.set_fields('Configure', fields=[
TextDyField.data_source('Service Tier', 'data.service_tier_display'),
TextDyField.data_source('Compute Tier', 'data.compute_tier'),
TextDyField.data_source('Compute Hardware', 'data.sku.family'),
TextDyField.data_source('Licence Type', 'data.license_type'),
TextDyField.data_source('vCores', 'data.current_sku.capacity'),
TextDyField.data_source('Data max size', 'instance_size'),
TextDyField.data_source('Zone Redundant', 'data.zone_redundant'),
ListDyField.data_source('Sync Groups', 'data.sync_group_display'),
ListDyField.data_source('Sync Agents', 'data.sync_agent_display'),
TextDyField.data_source('Collation', 'data.collation'),
DateTimeDyField.data_source('Creation Date', 'data.creation_date')
])
# TAB - Diagnostic Settings
sql_databases_diagnostic_settings = SimpleTableDynamicLayout.set_fields('Diagnostic Settings', 'data.diagnostic_settings_resource', fields=[
TextDyField.data_source('Name', 'name'),
TextDyField.data_source('Storage Account', 'storage_account_id'),
TextDyField.data_source('Event Hub', 'event_hub_name'),
TextDyField.data_source('Log Analytics Workspace', 'workspace_id'),
])
# TAB - tags
sql_databases_info_tags = TableDynamicLayout.set_fields('Tags', 'data.tags', fields=[
TextDyField.data_source('Key', 'key'),
TextDyField.data_source('Value', 'value')
])
sql_databases_meta = CloudServiceMeta.set_layouts(
[sql_databases_info_meta, sql_databases_configure, sql_databases_diagnostic_settings, sql_databases_info_tags])
class DatabaseResource(CloudServiceResource):
    """Base cloud-service resource for the 'Database' service group."""
    cloud_service_group = StringType(default='Database')
class SqlDatabaseResource(DatabaseResource):
    """Inventory resource for a SQL Database, rendered with the layouts
    defined at module level (sql_databases_meta)."""
    cloud_service_type = StringType(default='SQLDatabase')
    data = ModelType(Database)
    # Serialized under the key 'metadata'; drives the console tab layout.
    _metadata = ModelType(CloudServiceMeta, default=sql_databases_meta, serialized_name='metadata')
    name = StringType()
    # Shown as 'Subscription ID' in the info layout above.
    account = StringType(serialize_when_none=False)
    instance_type = StringType(serialize_when_none=False)
    # Shown as 'Data max size' in the configure layout.
    instance_size = FloatType(serialize_when_none=False)
    # Shown as 'Creation Date' in the info layout.
    launched_at = DateTimeType(serialize_when_none=False)
class SqlDatabaseResponse(CloudServiceResponse):
    """Collector response wrapping a SqlDatabaseResource."""
    resource = PolyModelType(SqlDatabaseResource)
|
python
|
"""Adam Integer Check"""
def prime_adam_check(number: int) -> bool:
    """
    Check if a number is an Adam Integer.

    A number is Adam if the square of the number and the square of the
    reverse of the number are reverses of each other.
    Example: 12 -> 12**2 == 144, 21**2 == 441, and "144" reversed is "441".
    """
    mirrored = int(str(number)[::-1])
    # Compare the square with the reversed square of the mirrored number.
    return str(number * number) == str(mirrored * mirrored)[::-1]
if __name__ == "__main__":
    # Simple CLI: read one integer from stdin and report whether it is an
    # Adam integer.
    print("Program to check whether a number is an Adam Int or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if prime_adam_check(number) else 'not '}an Adam Int.")
|
python
|
import re
import urllib2
import urllib
import urlparse
import socket
# sniff for python2.x / python3k compatibility "fixes'
try:
    # On Python 2 this simply re-binds the existing builtin.
    basestring = basestring
except NameError:
    # 'basestring' is undefined, must be python3k
    basestring = str
try:
    # On Python 2.6+ and 3.x the builtin exists; re-bind it.
    next = next
except NameError:
    # builtin next function doesn't exist
    def next (iterable):
        # Fallback for very old Python 2: delegate to the iterator method.
        return iterable.next()
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
def validate_ip(s):
    """Return True iff *s* is a (possibly partial, 1-4 quad) dotted-quad
    IPv4 string whose quads are all <= 255."""
    if re.match(r'^(\d{1,3}\.){0,3}\d{1,3}$', s) is None:
        return False
    return all(int(part) <= 255 for part in s.split('.'))
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
def validate_cidr(s):
    """Return True iff *s* is valid CIDR notation: a (possibly partial)
    dotted-quad address, '/', and a prefix length <= 32."""
    if _CIDR_RE.match(s) is None:
        return False
    addr, mask = s.split('/')
    if not validate_ip(addr):
        return False
    return int(mask) <= 32
def ip2long(ip):
    """Convert a (possibly partial) dotted-quad IP string to a 32-bit int.

    Partial addresses: a single quad is the network byte ('127' ->
    127.0.0.0); otherwise the last supplied quad is the host part and the
    middle is zero-filled ('127.1' -> 127.0.0.1). Returns None for
    invalid input.
    """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    if len(quads) == 1:
        # only a network quad
        quads.extend([0, 0, 0])
    elif len(quads) < 4:
        # partial form: last supplied quad is the host, the rest network
        host = quads.pop()
        quads.extend([0] * (4 - len(quads) - 1))
        quads.append(host)
    value = 0
    for quad in quads:
        value = (value << 8) | int(quad)
    return value
_MAX_IP = 0xffffffff
def long2ip(l):
    """Convert a 32-bit integer to a dotted-quad IPv4 string.

    Raises:
        TypeError: if l is outside [0, 0xffffffff].
    """
    # Bug fix: the original guard `_MAX_IP < l < 0` is a chained comparison
    # that is always False, so out-of-range values were silently formatted
    # instead of rejected.
    if l < 0 or l > 0xffffffff:
        raise TypeError("expected int between 0 and %d inclusive" % 0xffffffff)
    return '%d.%d.%d.%d' % (l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255)
def cidr2block(cidr):
    """Return the (start, end) dotted-quad addresses covered by *cidr*.

    Partial network addresses are zero-extended on the right ('127.1/16'
    is treated as 127.1.0.0/16). Returns None when the CIDR string is
    invalid.
    """
    if not validate_cidr(cidr):
        return None
    addr, prefix = cidr.split('/')
    prefix = int(prefix)
    # Expand the (possibly partial) address treating every supplied quad as
    # network bytes -- ip2long cannot be used here because it treats the
    # last quad of a partial address as the host part instead.
    base = 0
    quads = addr.split('.')
    for pos in range(4):
        quad = int(quads[pos]) if pos < len(quads) else 0
        base = (base << 8) | quad
    host_bits = 32 - prefix
    # Keep the leftmost `prefix` bits for the network address...
    network = (base >> host_bits) << host_bits
    # ...and set the remaining host bits to 1 for the broadcast address.
    broadcast = network | ((1 << host_bits) - 1)
    return (long2ip(network), long2ip(broadcast))
_RIPE_WHOIS = 'riswhois.ripe.net'
_ASN_CACHE = {}
def ip2asn(ip):
    """Resolve one IP (str) or many IPs (list) to AS numbers via the RIPE
    RIS whois service.

    Returns the ASN string for a single IP, or a dict {ip: asn} for a
    list. Results are memoised in the module-level _ASN_CACHE. Raises
    AsnResolutionError when the whois server cannot be reached.
    """
    global _ASN_CACHE
    # Single-address fast path: answer straight from the cache without
    # opening a connection.
    if type(ip) is not list and ip in _ASN_CACHE:
        return _ASN_CACHE[ip]
    try:
        ripeip = socket.gethostbyname(_RIPE_WHOIS)
        s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        s.connect((ripeip,43))
        #s.recv(4096)
    except socket.gaierror:
        raise AsnResolutionError('Could not resolve RIPE server name')
    except socket.error:
        raise AsnResolutionError('Error connecting to whois server')
    if type(ip) is list:
        # List query: each address is checked individually, consulting the
        # cache first and storing fresh answers in it.
        result = {}
        for i in ip:
            if i in _ASN_CACHE:
                result[i] = _ASN_CACHE[i]
            else:
                try:
                    s.send('-k -F -M %s\r\n' % i)
                    result[i] = _ASN_CACHE[i] = _parse_whois(s.recv(4096))
                # Bug fix: the old comma form `except AsnResolutionError,
                # socket.error:` caught ONLY AsnResolutionError and rebound
                # the caught exception to the name socket.error; the tuple
                # form catches both exception types as intended.
                except (AsnResolutionError, socket.error):
                    # TODO: improve this -- on failure an empty string is
                    # currently returned, meaning "no ASN".
                    result[i] = ''
        s.close()
        return result
    else:
        # Single address that was not in the cache.
        s.send('-F -M %s\r\n' % ip)
        _ASN_CACHE[ip] = asn = _parse_whois(s.recv(4096))
        s.close()
        return asn
def url2host(url):
    """Extract the hostname from *url*, retrying with an 'http://' prefix
    for scheme-less URLs. Raises HostResolutionError on failure."""
    host = urlparse.urlparse(url).hostname
    if host:
        return host
    host = urlparse.urlparse('http://' + url).hostname
    if host:
        return host
    raise HostResolutionError('Could not parse hostname')
def url2ip(url):
    """Resolve *url*'s hostname to an IPv4 address string.

    Raises HostResolutionError when the name does not resolve."""
    host = url2host(url)
    try:
        return socket.gethostbyname(host)
    except socket.gaierror:
        raise HostResolutionError('No IP address for host')
def url2tld(url):
    """Return the top-level domain of *url*'s hostname.

    Raises HostResolutionError when the hostname contains no dot."""
    host = url2host(url)
    if '.' not in host:
        raise HostResolutionError('No valid TLD in hostname')
    return host.split('.')[-1]
#_GOOGLE_KEY = 'ABQIAAAAxYjVDAFhAe3o3ORFz0M4WhSRANfPA86NpChaGS3JPxvpQtPEMg'
_GOOGLE_KEY = 'ABQIAAAA91BfexGg9gwOzbZ1zsgJOBQDSU0_BEb6BufZ5pmVD4AMkVBbaA'
_GOOGLE_URL = 'https://sb-ssl.google.com/safebrowsing/api/lookup?client=python&apikey=%s&appver=1.5.2&pver=3.0&url=%s'
def _check_google(url):
    """Ask the Google Safe Browsing lookup API (pver=3.0) about *url*.

    Returns True when the URL is flagged (HTTP 200), False when clean
    (HTTP 204), and implicitly None for any other response code.

    # NOTE(review): the API key is hard-coded at module level and the v3
    # lookup API has long been deprecated -- confirm the endpoint works.
    """
    gurl = _GOOGLE_URL % (_GOOGLE_KEY, urllib.quote(url))
    result = urllib2.urlopen(gurl)
    if result.getcode() == 204:
        return False
    if result.getcode() == 200:
        return True
_BLACKLIST_CACHE = {}
def url_blacklisted(url):
    """Return True if Google Safe Browsing flags *url*; memoised in the
    module-level _BLACKLIST_CACHE.

    Raises BlacklistCheckError when the lookup request fails.
    """
    global _BLACKLIST_CACHE
    if url in _BLACKLIST_CACHE:
        return _BLACKLIST_CACHE[url]
    try:
        _BLACKLIST_CACHE[url] = _check_google(url)
        return _BLACKLIST_CACHE[url]
    # Compatibility fix: `except X as e` works on Python 2.6+ and 3.x,
    # whereas the original comma form is Python-2-only syntax.
    except urllib2.URLError as e:
        raise BlacklistCheckError(str(e))
class IpRange(object):
    """An inclusive range of IPv4 addresses.

    Accepts a start/end pair of dotted-quads, a CIDR string, a single
    address (degenerate range), or a (start, end) 2-tuple as the first
    argument. Supports `in` tests with a dotted-quad string or 32-bit
    integer, and iteration over every address in the range.
    """
    def __init__ (self, start, end=None):
        if end is None:
            if isinstance(start, tuple):
                # occurs when IpRangeList calls via map to pass start and end
                start, end = start
            elif validate_cidr(start):
                # CIDR notation range
                start, end = cidr2block(start)
            else:
                # degenerate range
                end = start
        start = ip2long(start)
        end = ip2long(end)
        # Normalise the order so containment and iteration always work.
        self.startIp = min(start, end)
        self.endIp = max(start, end)
    def __repr__ (self):
        return (long2ip(self.startIp), long2ip(self.endIp)).__repr__()
    def __contains__ (self, item):
        if isinstance(item, basestring):
            item = ip2long(item)
        # Rejects anything that is not an int (or long on Python 2).
        if type(item) not in [type(1), type(_MAX_IP)]:
            raise TypeError("expected dotted-quad ip address or 32-bit integer")
        return self.startIp <= item <= self.endIp
    def __iter__ (self):
        # Yields every address in the range as a dotted-quad string.
        i = self.startIp
        while i <= self.endIp:
            yield long2ip(i)
            i += 1
class IpRangeList(object):
    """An immutable collection of IpRange objects supporting `in` tests
    and iteration over every contained address."""
    def __init__ (self, args):
        self.ips = tuple(IpRange(arg) for arg in args)
    def __repr__ (self):
        return repr(self.ips)
    def __contains__ (self, item):
        return any(item in ip_range for ip_range in self.ips)
    def __iter__ (self):
        for ip_range in self.ips:
            for ip in ip_range:
                yield ip
_RIPE_IP_URL = 'ftp://ftp.ripe.net/ripe/stats/delegated-ripencc-latest'
_RANGE_CACHE = {}
_RIPE_DATA = None
class AddressSpace(object):
    """IPv4 address space of a country code (TLD), backed by RIPE NCC
    delegation data.

    `item in space` is True when item is an IP inside the ranges, or a
    URL whose TLD matches or whose resolved IP falls inside the ranges.
    """
    def __init__(self, tld, range=None):
        self._tld = tld.upper()
        if range:
            self._range = IpRangeList(range)
        else:
            self._range = self._load_range(tld.upper())

    def _load_range(self, cc):
        """Fetch (and cache) the IPv4 ranges delegated to country code *cc*."""
        global _RANGE_CACHE
        global _RIPE_DATA
        cc = cc.upper()
        if cc in _RANGE_CACHE:
            return _RANGE_CACHE[cc]
        if _RIPE_DATA is None:
            # Downloaded once per process, then reused for later lookups.
            # Bug fix: the original compared with `==None`; `is None` is
            # the correct identity test.
            _RIPE_DATA = urllib.urlopen(_RIPE_IP_URL).readlines()
        # Bug fix: the original tested `x.find(cc) is not -1`, an identity
        # comparison against an int literal that only works by accident of
        # CPython's small-int caching; plain substring tests are correct.
        ranges = [line for line in _RIPE_DATA
                  if cc in line and 'ipv4' in line]
        ranges_list = []
        for r in ranges:
            fields = r.split('|')
            start = fields[3]
            # NOTE(review): the delegation covers count addresses starting
            # at `start`, so `start + count` is one past the last address
            # -- confirm whether the inclusive end should be count - 1.
            end = long2ip(ip2long(start) + int(fields[4]))
            ranges_list.append((start, end))
        _RANGE_CACHE[cc] = IpRangeList(ranges_list)
        return _RANGE_CACHE[cc]

    def __contains__ (self, item):
        if validate_ip(item):
            if item in self._range:
                return True
        else:
            # Not an IP: treat it as a URL; match by TLD first, then by
            # the resolved address.
            if url2tld(item).lower() == self._tld:
                return True
            if url2ip(item) in self._range:
                return True
        return False
class HostResolutionError(Exception):
    """Raised when a hostname cannot be resolved to an IP address."""
    pass
class AsnResolutionError(Exception):
    """Raised when a whois reply cannot be parsed into an AS number."""
    pass
class BlacklistCheckError(Exception):
    """Raised when the Safe Browsing blacklist lookup fails."""
    pass
# interna funkcija koja parsira reply whois servera i iz njega
# vadi van ASN broj, baca iznimku ako to ne moze napraviti
def _parse_whois(data):
data = filter(lambda x: x and not x.startswith('%'), ''.join(data).split('\n'))
if len(data) <> 1:
raise AsnResolutionError('Invalid reply from whois server')
asn = data[0].split('\t')
if len(asn) <> 2:
raise AsnResolutionError('Invalid reply from whois server')
if asn[0] == '3303':
return ''
else:
return asn[0]
def ip_in_tlds(ipurl, tlds):
    """Check whether the IP/URL falls within the address range of any of the
    provided TLDs (countries); returns the matching TLD upper-cased, or
    False when none matches.  (translated from Croatian, (c) fvlasic)
    """
    for tld in tlds:
        if ipurl in AddressSpace(tld=tld):
            return tld.upper()
    return False
def cymruIP2ASN(ips):
    """Bulk-resolve IPs via the Team Cymru whois service (port 43).

    Accepts one IP string or a list of them; returns a dict keyed by IP.
    Each value is a pair of the first and third pipe-separated reply fields
    -- presumably (AS number, country code); confirm against the service.
    NOTE(review): Python 2 code -- under Python 3 `basestring` is gone and
    sock.recv() returns bytes, so `data += more` would fail.
    """
    addr = ('whois.cymru.com', '43')
    sock = socket.create_connection(addr)
    # Cymru bulk query protocol; [insert] is replaced with the IP list below.
    query='begin\ncountrycode\nasnumber\nnoasname\n[insert]end\n'
    ips_on_wire = ""
    if isinstance(ips, basestring):
        ips = [ips]
    for ip in ips:
        ips_on_wire = ips_on_wire + ip + "\n"
    query = query.replace('[insert]', ips_on_wire)
    sent = 0
    # send() may transmit fewer bytes than asked; loop until done.
    while not sent==len(query):
        sendbytes = sock.send(query[sent:])
        sent += sendbytes
    data=''
    more = True
    # Read until the server closes the connection (recv returns '').
    while more:
        more = sock.recv(8192)
        data += more
    sock.close()
    # Keep only pipe-separated data lines; key on field[1] (the IP).
    return dict(map(lambda x: (x.split('|')[1].strip(), \
        (x.split('|')[0].strip(), x.split('|')[2].strip())), filter(lambda x: '|' in x, data.split('\n'))))
|
python
|
class SPI(object):
    """Bundle of the signal lines that make up an SPI bus."""

    def __init__(self, clk, MOSI=None, MISO=None):
        # Clock is required; either data line may be omitted (half-duplex).
        self.clk, self.MOSI, self.MISO = clk, MOSI, MISO
|
python
|
from core.entity.entity_exceptions import EntityOperationNotPermitted, EntityNotFoundException
from core.test.media_files_test_case import MediaFilesTestCase
class BaseTestClass(MediaFilesTestCase):
    """
    provides the base class for all test cases
    _check_default_fields and _check_default_change must be re-implemented since they test whether the entity
    re-implement all its default fields after reloading. Given these fields were re-implemented the diagram state
    test cases will be defined automatically by inheritance mechanism. (see test_project how)
    However, field validation tests must be written. Use _test_field function for convenience.
    (see test_project).
    If some field contains file use FileMixin
    If some field contains password use PasswordMixin
    _check_reload function usually tests whether object fields were preserved during the object reload.
    If you need additional checks extend this function (see test_project)
    Use setUpTestData to create all related entities if you want. However, don't forget to call the same method
    from the superclass because it temporally moves all media root files to some safe directory.
    Also, tearDownClass and tearDown method must call similar ones from the superclass.
    DO NOT FORGET to use: del BaseTestClass AFTER EACH subclass declaration
    """
    # Route numbers through the entity state diagram, used by _test_field.
    TEST_CREATE_AND_LOAD = 0
    TEST_CHANGE_CREATE_AND_LOAD = 1
    TEST_CREATE_CHANGE_AND_LOAD = 2
    TEST_CREATE_LOAD_AND_CHANGE = 3
    _entity_object_class = None
    """ The entity object class. New entity object will be created from this class """
    _entity_model_class = None
    """ The entity model class is a Django model that is used for storing entities """
    def test_object_creating_default(self):
        """
        Tests how the object will be created with default values
        :return: nothing
        """
        obj = self.get_entity_object_class()()
        self._check_creating_entity(obj.entity, False)
        self._check_fields_changed(obj.entity, obj.default_field_key)
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="entity update() is possible when it was still creating"):
            obj.entity.update()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="entity delete() is possible when is was still creating"):
            obj.entity.delete()
        obj.create_entity()
    def test_object_creating_default_plus_changed(self):
        """
        This test case will create new entity then changes some entity fields and at last store entity data
        to the database
        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.change_entity_fields()
        self._check_creating_entity(obj.entity, True)
        self._check_fields_changed(obj.entity, obj.entity_fields.keys())
        obj.create_entity()
    def test_object_created_default(self):
        """
        Tests how the object can be created with default values
        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.create_entity()
        self._check_created_entity(obj.entity)
        self._check_fields_changed(obj.entity, [])
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="entity object can be created twice"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="the 'saved' object can be saved again"):
            obj.entity.update()
    def test_object_created_plus_changed_default(self):
        """
        Tests if the object has been created and correctly changed
        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.change_entity_fields()
        self._check_changed_entity(obj.entity, obj.id)
        self._check_fields_changed(obj.entity, obj.changed_field_key)
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be created, changed plus created again"):
            obj.create_entity()
    def test_object_created_plus_updated_default(self):
        """
        Tests if the object can be both created and updated
        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.create_entity()
        self._do_entity_update(obj)
        self._check_updated_entity(obj.entity, obj.id)
        self._check_fields_changed(obj.entity, [])
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be re-created when this is created -> changed -> saved"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be re-updated when this is created -> changed -> updated"):
            obj.entity.update()
    def test_object_created_updated_and_loaded_default(self):
        """
        Tests whether the object can be successfully loaded after it has been created and updated
        :return: nothing
        """
        obj = self.get_entity_object_class()()
        obj.create_entity()
        self._do_entity_update(obj)
        obj.reload_entity()
        self._check_reload(obj)
        self._check_fields_changed(obj.entity, [])
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be re-created when loaded"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be updated when it still loaded and not changed"):
            obj.entity.update()
    def test_object_created_and_loaded_default(self):
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.reload_entity()
        self._check_reload(obj)
        self._check_fields_changed(obj.entity, [])
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be re-created when loaded"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can be updated when it still loaded and not changed"):
            obj.entity.update()
    def test_object_created_loaded_and_changed(self):
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.reload_entity()
        obj.change_entity_fields()
        self._check_changed_entity(obj.entity, obj.id)
        self._check_fields_changed(obj.entity, obj.changed_field_key)
        with self.assertRaises(EntityOperationNotPermitted,
                               msg="The entity can't be re-created when 'changing'"):
            obj.create_entity()
    def test_object_created_and_deleted(self):
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.entity.delete()
        self._check_entity_delete(obj)
    def test_object_created_loaded_and_deleted(self):
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.reload_entity()
        obj.entity.delete()
        self._check_entity_delete(obj)
    def test_object_created_changed_and_deleted(self):
        obj = self.get_entity_object_class()()
        obj.create_entity()
        obj.change_entity_fields()
        obj.entity.delete()
        self._check_entity_delete(obj)
    def _do_entity_update(self, obj):
        # Helper: change the fields and push the change to the storage.
        obj.change_entity_fields()
        obj.entity.update()
    def _test_field(self, field_name, field_value, updated_value, exception_to_throw, route_number, use_defaults=True,
                    **additional_kwargs):
        """
        Provides the test for a standalone field
        :param field_name: the field name
        :param field_value: the field value
        :param updated_value: another field value to set
        :param exception_to_throw: None if the field value shall be assigned successfully (positive test). An exception
        class if attempt of field assignment must throw an exception (negative test).
        :param route_number: Number of route in the transient state diagram (TEST_CREATE_AND_LOAD,
        TEST_CHANGE_CREATE_AND_LOAD, TEST_CREATE_CHANGE_AND_LOAD, TEST_CREATE_LOAD_AND_CHANGE)
        :param use_defaults: True to use defaults put into the entity_object. False if additional arguments shall be
        put instead of defaults
        :param additional_kwargs: Some additional create object arguments to put
        :return: nothing
        """
        initial_kwargs = {field_name: field_value}
        initial_kwargs.update(additional_kwargs)
        if exception_to_throw is None:
            obj = self.get_entity_object_class()(use_defaults=use_defaults, **initial_kwargs)
        else:
            with self.assertRaises(exception_to_throw,
                                   msg="An invalid value '%s' was successfully assigned to field '%s'" %
                                       (field_value, field_name)):
                self.get_entity_object_class()(use_defaults=use_defaults, **initial_kwargs)
            return
        if route_number == self.TEST_CHANGE_CREATE_AND_LOAD:
            obj.change_entity_fields(use_defaults=False, **{field_name: updated_value})
        obj.create_entity()
        if route_number == self.TEST_CREATE_CHANGE_AND_LOAD:
            obj.change_entity_fields(use_defaults=False, **{field_name: updated_value})
            obj.entity.update()
        obj.reload_entity()
        if route_number == self.TEST_CREATE_LOAD_AND_CHANGE:
            obj.change_entity_fields(use_defaults=False, **{field_name: updated_value})
            obj.entity.update()
        if route_number == self.TEST_CREATE_AND_LOAD:
            last_value = field_value
        else:
            last_value = updated_value
        actual_value = getattr(obj.entity, field_name)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is used throughout this class instead.
        self.assertEqual(actual_value, last_value,
                         "The value '%s' for field '%s' doesn't either stored or retrieved correctly" %
                         (last_value, field_name))
    def _test_read_only_field(self, field_name, sample_value, throwing_exception=ValueError):
        """
        Checks whether some value can be assigned to the read-only field
        :param field_name: the field name
        :param sample_value: value to assign
        :param throwing_exception: exception to throw
        :return: nothing
        """
        with self.assertRaises(throwing_exception,
                               msg="The read-only field '%s' has been successfully changed" % field_name):
            self.get_entity_object_class()(**{field_name: sample_value})
    def get_entity_object_class(self):
        """
        Returns new entity object class. New entity object will be created exactly from such a class
        :return: the entity object class
        """
        if self._entity_object_class is None:
            raise NotImplementedError("Please, define the _entity_object_class protected variable")
        else:
            return self._entity_object_class
    def get_entity_model_class(self):
        """
        Returns the entity model class. The entity model class is used for storing entities
        :return: the entity model class
        """
        if self._entity_model_class is None:
            raise NotImplementedError("Please, define the _entity_model_class protected variable")
        else:
            return self._entity_model_class
    def _check_creating_entity(self, entity, fields_changed):
        """
        Checks whether all entity fields were in place when the entity is 'CREATING'.
        The entity fields will be checked given that the entity object was created with no keyword arguments.
        :return: nothing
        """
        self.assertIsNone(entity.id, "Entity ID is not None before the entity create")
        self.assertEqual(entity.state, "creating", "Entity state is not 'creating' before the entity create")
        self.assertIsNone(entity._wrapped, "Somebody has wrapped to the entity when the entity is creating")
        if fields_changed:
            self._check_default_change(entity)
        else:
            self._check_default_fields(entity)
    def _check_created_entity(self, entity):
        """
        Checks whether all entity fields were in place when the entity has already been created
        :param entity: the entity to check
        :return: nothing
        """
        self.assertIsNotNone(entity.id, "Entity ID is None when the entity has been created")
        self.assertEqual(entity.state, "saved", "The entity state is not 'saved' after the entity create")
        self._check_default_fields(entity)
    def _check_changed_entity(self, entity, expected_id):
        """
        Checks whether the entity was changed
        :param entity: the entity to check
        :param expected_id: the entity ID to be expected
        :return: nothing
        """
        self.assertEqual(entity.id, expected_id, "The entity ID is not correct")
        self.assertEqual(entity.state, "changed",
                         "The entity state is not 'changed' after entity fields were corrected")
        self._check_default_change(entity)
    def _check_updated_entity(self, entity, expected_id):
        """
        Checks whether the entity can be updated
        :param entity: the entity to be checked
        :return: nothing
        """
        self.assertEqual(entity.id, expected_id, "The entity ID changed during the update process")
        self.assertEqual(entity.state, "saved", "The entity state was not proper")
        self._check_default_change(entity)
    def _check_reload(self, obj):
        """
        Checks whether the entity is successfully and correctly reloaded.
        :param obj: the entity object within which the entity was reloaded
        :return: nothing
        """
        self.assertIsInstance(obj.entity, obj.get_entity_class(),
                              "Unexpected entity class")
        self.assertEqual(obj.entity.id, obj.id, "The entity ID was not properly retrieved")
        self.assertEqual(obj.entity.state, "loaded", "The entity state is not 'loaded' when the entity is loaded")
        self._check_field_consistency(obj)
    def _check_field_consistency(self, obj):
        # Every expected field value must survive the reload round-trip.
        for name, expected_value in obj.entity_fields.items():
            actual_value = getattr(obj.entity, name)
            self.assertEqual(actual_value, expected_value,
                             "The entity field '%s' doesn't retrieved correctly" % name)
    def _check_entity_delete(self, obj):
        """
        Checks whether the entity is properly deleted
        :param obj: the entity object
        :return: nothing
        """
        self.assertIsNone(obj.entity.id, "The deleted entity still have an ID")
        self.assertEqual(obj.entity.state, "deleted", "The deleted entity has incorrect status")
        with self.assertRaises(EntityOperationNotPermitted, msg="the deleted entity can be created"):
            obj.create_entity()
        with self.assertRaises(EntityOperationNotPermitted, msg="The deleted entity can be saved"):
            obj.entity.update()
        with self.assertRaises(EntityOperationNotPermitted, msg="The deleted entity can be deleted again"):
            obj.entity.delete()
        with self.assertRaises(EntityNotFoundException,
                               msg="The entity can't be deleted carefully since the entity already deleted can be "
                                   "easily re-created"):
            obj.reload_entity()
    def _check_fields_changed(self, entity, field_list):
        """
        Checks whether the certain and only certain fields in the entity was changed
        :param entity: the entity to test
        :param field_list: field list to check in the entity object
        :return: nothing
        """
        self.assertEqual(len(entity._edited_fields), len(field_list),
                         "the Entity._edited_fields doesn't contain appropriate field number")
        for field in field_list:
            # BUG FIX: the message template was never interpolated (missing
            # `% field`), so failures printed a literal '%s'.
            self.assertIn(field, entity._edited_fields,
                          "The field '%s' is not within the list of the edited fields" % field)
    def _check_default_fields(self, entity):
        """
        Checks whether the default fields were properly stored.
        The method deals with default data only.
        :param entity: the entity which default fields shall be checked
        :return: nothing
        """
        raise NotImplementedError("The _check_default_fields method must be implemented when extending this base class")
    def _check_default_change(self, entity):
        """
        Checks whether the fields were properly change.
        The method deals with default data only.
        :param entity: the entity to store
        :return: nothing
        """
        raise NotImplementedError("The _check_default_change method must be implemented when extending this base class")
del MediaFilesTestCase
|
python
|
import numpy as np
import torch
from .mask_gen import BoxMaskGenerator
class CutmixCollateWrapper(object):
    """Collate wrapper that attaches CutMix masks to each collated batch.

    Optionally applies a preceding batch augmentation function before the
    masks are generated.
    """

    def __init__(self, batch_aug_fn=None):
        self.batch_aug_fn = batch_aug_fn
        # Fixed CutMix policy: 3 inverted boxes spanning 25-50% per dimension.
        self.mask_generator = BoxMaskGenerator(
            prop_range=(0.25, 0.5),
            n_boxes=3,
            random_aspect_ratio=True,
            prop_by_area=False,
            within_bounds=False,
            invert=True,
        )

    def _generate_cutmix_mask(self, num_masks: int, width: int, height: int):
        # Delegate mask creation to the generator for the given batch size.
        return self.mask_generator.generate_params(num_masks, (width, height))

    def __call__(self, batch):
        if self.batch_aug_fn is not None:
            batch = self.batch_aug_fn(batch)
        num, _, width, height = batch['inputs'].shape
        cutmix = self._generate_cutmix_mask(num, width, height)
        batch['cutmix_masks'] = torch.from_numpy(cutmix.astype(np.float32))
        return batch
class CutmixLoader(torch.utils.data.DataLoader):
    r"""DataLoader that additionally returns a CutMix mask with every batch.

    dataset: `torch.utils.data.Dataset`
        dataset, must have classes_dict and collate_fn attributes
    batch_size: `int`
        number of samples in one batch
    """

    def __init__(self,
                 dataset: torch.utils.data.Dataset,
                 batch_size: int,
                 **kwargs):
        # Reuse the dataset's own collate function when it provides one.
        pre_collate_fn = getattr(dataset, 'collate_fn', None)
        super(CutmixLoader, self).__init__(
            dataset,
            batch_size=batch_size,
            collate_fn=CutmixCollateWrapper(pre_collate_fn),
            **kwargs
        )
|
python
|
"""
# BEGIN BINGO_DEMO
>>> bingo = BingoCage(range(3))
>>> bingo()
2
>>> bingo()
0
>>> callable(bingo)
True
# END BINGO_DEMO
"""
# BEGIN BINGO
import random
class BingoCage:
    """A callable "cage" that hands out its items one at a time in random order."""

    def __init__(self, items):
        self._items = list(items)      # <1> private copy; caller's sequence untouched
        random.shuffle(self._items)    # <2> randomize the draw order once, up front

    def __call__(self):
        # <3> guard clause: an empty cage is an error
        if self._items:
            return self._items.pop()
        raise IndexError('pop from empty BingoCage')
# END BINGO
|
python
|
from json import dumps
class Node:
    """Base class for all AST nodes; records the source line/column position."""
    def __init__(self, line = 0, column = 0):
        self.line = line
        self.column = column
    @property
    def clsname(self):
        # Concrete class name; used by to_tuple/to_readable in subclasses.
        return str(self.__class__.__name__)
    def to_tuple(self):
        return tuple([
            ("node_class_name", self.clsname)
        ])
    def to_readable(self):
        return "{}".format(self.clsname)
    def __repr__(self):
        # repr is the full JSON dump; str is the short readable form.
        return self.toJSON()
    def __str__(self):
        return str(self.to_readable())
    def __getitem__(self, x):
        # Dict-like access to node attributes by name.
        return self.__dict__[x]
    def __setitem__(self, x, y):
        self.__dict__[x]= y
    def __iter__(self):
        # Iterates over attribute names, like a dict.
        return self.__dict__.__iter__()
    def __eq__(self, other):
        # NOTE(review): relies on self.idName, which the base Node does not
        # define -- comparing two bare Nodes raises AttributeError.  Also,
        # defining __eq__ without __hash__ makes instances unhashable.
        return type(self) == type(other) and self.idName == other.idName
    def toJSON(self):
        # Serializes the whole attribute dict recursively.
        return dumps(self, default=lambda o: o.__dict__,
            sort_keys=True, indent=4, separators=(',', ': '))
class NodeProgram(Node):
    """Root AST node: an ordered collection of class declarations."""

    def __init__(self, class_list):
        super().__init__()
        self.class_list = class_list

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("classes", self.class_list),
        )

    def to_readable(self):
        return "{0}(classes={1})".format(self.clsname, self.class_list)
class NodeClassTuple(Node, tuple):
    # NOTE(review): inherits both Node and tuple; the tuple contents come
    # from __new__, so `classes` is presumably the same iterable that was
    # passed at construction -- confirm against callers.
    def __init__(self, classes):
        self.classes = classes
class NodeClass(Node):
    """AST node for a class declaration: name, parent and its features."""

    def __init__(self, idName: str, methods, attributes, parent,
                 line = 0, column = 0, parent_col = -1):
        super().__init__(line=line, column=column)
        self.idName = idName
        self.methods = methods
        self.attributes = attributes
        self.parent = parent
        self.parent_col = parent_col

    def to_readable(self):
        template = "{}(name='{}', parent={}, methods={}, attributes={})"
        return template.format(self.clsname, self.idName, self.parent,
                               self.methods, self.attributes)
class NodeFeature(Node):
    """Common base for class features (methods, attributes, formal params)."""
    def __init__(self, line, column):
        super(NodeFeature, self).__init__(line= line, column= column)
# (translated from Spanish:) I am not sure whether to add a class to inherit
# from here, a feature_class.  I need to see whether in the future I must
# iterate over the elements of a class in an abstract way.
class NodeClassMethod(NodeFeature):
    """AST node for a method: name, formal parameters, return type and body."""

    def __init__(self, idName: str, formal_param_list, returnType: str,
                 body, line, column, columnType):
        super().__init__(line=line, column=column)
        self.idName = idName
        self.formal_param_list = formal_param_list
        self.returnType = returnType
        self.body = body
        self.columnType = columnType

    def to_readable(self):
        template = "{}(name='{}', formal_param_list={}, returnType={}, body={})"
        return template.format(self.clsname, self.idName,
                               self.formal_param_list, self.returnType,
                               self.body)
class NodeAttr(NodeFeature):
    """AST node for a class attribute, optionally with an initializer expression."""

    def __init__(self, idName, _type, line, column, expr=None, columnTypeAttr=None):
        super().__init__(line=line, column=column)
        self.idName = idName
        self._type = _type
        # Initializer expression and its type column may be absent.
        self.expr = expr
        self.columnTypeAttr = columnTypeAttr
class NodeFormalParam(NodeFeature):
    """AST node for a formal (declared) parameter of a method."""

    def __init__(self, idName, param_type, line, column):
        super().__init__(line= line, column= column)
        self.idName = idName
        self._type = param_type

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            # BUG FIX: was `self.idNme` (typo), which raised AttributeError
            # whenever to_tuple() was called.
            ("name", self.idName),
            ("param_type", self._type)
        ])

    def to_readable(self):
        return "{}(name='{}', param_type={})".format(self.clsname,
            self.idName, self._type)
class NodeObject(Node):
    """AST node for an identifier (object) reference."""

    def __init__(self, idName, line, column):
        super().__init__(line=line, column=column)
        self.idName = idName

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("name", self.idName),
        )

    def to_readable(self):
        return "{0}(name='{1}')".format(self.clsname, self.idName)
class NodeSelf(NodeObject):
    """AST node for the `self` reference -- a specialized NodeObject."""

    def __init__(self, line, column):
        super().__init__(idName="SELF", line=line, column=column)

    def to_tuple(self):
        return (("class_name", self.clsname),)

    def to_readable(self):
        return self.clsname
class NodeConstant(Node):
    """Common base for literal constant nodes."""
    def __init__(self, line, column):
        super().__init__(line= line, column= column)

class _NodeLiteral(NodeConstant):
    """Shared implementation for literal nodes carrying a `content` payload.

    Deduplicates the identical __init__/to_tuple/to_readable bodies that
    NodeInteger, NodeBoolean and NodeString previously copy-pasted.
    """
    def __init__(self, content, line, column):
        super().__init__(line= line, column= column)
        self.content = content
    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("content", self.content),
        )
    def to_readable(self):
        return "{}(content={})".format(self.clsname, self.content)

class NodeInteger(_NodeLiteral):
    """Integer literal."""

class NodeBoolean(_NodeLiteral):
    """Boolean literal."""

class NodeString(_NodeLiteral):
    """String literal; its readable form shows the repr of the content."""
    def to_readable(self):
        return "{}(content={})".format(self.clsname, repr(self.content))
# (translated from Spanish:) Every expression must have an associated
# evaluation function, with some return value x.
class NodeExpr(Node):
    """Common base for all expression nodes."""
    def __init__(self, line, column):
        super().__init__(line= line, column= column)
class NodeNewObject(NodeExpr):
    """AST node for a `new T` expression."""

    def __init__(self, new_type, line, column):
        super().__init__(line=line, column=column)
        self.type = new_type

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("type", self.type),
        )

    def to_readable(self):
        return "{0}(type={1})".format(self.clsname, self.type)
class NodeIsVoid(NodeExpr):
    """AST node for an `isvoid expr` test."""

    def __init__(self, expr, line, column):
        super().__init__(line=line, column=column)
        self.expr = expr

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("expr", self.expr),
        )

    def to_readable(self):
        return "{0}(expr={1})".format(self.clsname, self.expr)
class NodeAssignment(NodeExpr):
    """AST node for an `object <- expr` assignment."""

    def __init__(self, nodeObject, expr, line, column, columnAssign):
        super().__init__(line=line, column=column)
        self.nodeObject = nodeObject
        self.expr = expr
        self.columnAssign = columnAssign

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("nodeObject", self.nodeObject),
            ("expr", self.expr),
        )

    def to_readable(self):
        return "{0}(nodeObject={1}, expr={2})".format(
            self.clsname, self.nodeObject, self.expr)
class NodeBlock(NodeExpr):
    """AST node for a `{ expr; ...; expr; }` block of expressions."""

    def __init__(self, expr_list, line, column):
        super().__init__(line=line, column=column)
        self.expr_list = expr_list

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("expr_list", self.expr_list),
        )

    def to_readable(self):
        return "{0}(expr_list={1})".format(self.clsname, self.expr_list)
class NodeDynamicDispatch(NodeExpr):
    """AST node for a dynamic dispatch `expr.method(arguments)`."""

    def __init__(self, expr, method, arguments, line, column):
        super().__init__(line=line, column=column)
        self.expr = expr
        self.method = method
        # None is normalized to an empty argument tuple.
        self.arguments = tuple() if arguments is None else arguments

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("expr", self.expr),
            ("method", self.method),
            ("arguments", self.arguments),
        )

    def to_readable(self):
        return "{0}(expr={1}, method={2}, arguments={3})".format(
            self.clsname, self.expr, self.method, self.arguments)
class NodeStaticDispatch(NodeExpr):
    """AST node for a static dispatch `expr@Type.method(arguments)`."""

    def __init__(self, expr, dispatch_type, method, arguments,
                 line, column, columnType, columnIdMethod):
        super().__init__(line=line, column=column)
        self.expr = expr
        self.dispatch_type = dispatch_type
        self.method = method
        # None is normalized to an empty argument tuple.
        self.arguments = tuple() if arguments is None else arguments
        self.columnType = columnType
        self.columnIdMethod = columnIdMethod

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("expr", self.expr),
            ("dispatch_type", self.dispatch_type),
            ("method", self.method),
            ("arguments", self.arguments),
        )

    def to_readable(self):
        return "{0}(expr={1}, dispatch_type={2}, method={3}, arguments={4})".format(
            self.clsname, self.expr, self.dispatch_type,
            self.method, self.arguments)
class NodeLetComplex(NodeExpr):
    """AST node for a let expression with one or more nested bindings."""

    def __init__(self, nested_lets, body, line, column):
        super().__init__(line= line, column= column)
        # Normalize to a list; isinstance (rather than `type(...) is list`)
        # also accepts list subclasses.
        self.nestedLets = nested_lets if isinstance(nested_lets, list) else [nested_lets]
        self.body = body

    def to_tuple(self):
        return tuple([
            ("class_name", self.clsname),
            ("nested_lets", self.nestedLets),
            ("body", self.body)
        ])

    def to_readable(self):
        return "{}(nested_lets={}, body={})".format(
            self.clsname, self.nestedLets, self.body)
class NodeLet(NodeExpr):
    """AST node for a single let binding `id : type` and its body."""

    def __init__(self, idName, returnType, body, line, column):
        super().__init__(line=line, column=column)
        self.idName = idName
        self.type = returnType
        self.body = body

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("idName", self.idName),
            ("returnType", self.type),
            ("body", self.body),
        )

    def to_readable(self):
        return "{0}(idName={1}, returnType={2}, body={3})".format(
            self.clsname, self.idName, self.type, self.body)
class NodeIf(NodeExpr):
    """AST node for an `if pred then ... else ... fi` conditional."""

    def __init__(self, predicate, then_body, else_body, line, column):
        super().__init__(line=line, column=column)
        self.predicate = predicate
        self.then_body = then_body
        self.else_body = else_body

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("predicate", self.predicate),
            ("then_body", self.then_body),
            ("else_body", self.else_body),
        )

    def to_readable(self):
        return "{0}(predicate={1}, then_body={2}, else_body={3})".format(
            self.clsname, self.predicate, self.then_body, self.else_body)
class NodeWhileLoop(NodeExpr):
    """AST node for a `while pred loop body pool` expression."""

    def __init__(self, predicate, body, line, column):
        super().__init__(line=line, column=column)
        self.predicate = predicate
        self.body = body

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("predicate", self.predicate),
            ("body", self.body),
        )

    def to_readable(self):
        return "{0}(predicate={1}, body={2})".format(
            self.clsname, self.predicate, self.body)
class NodeCase(NodeExpr):
    """AST node for a `case expr of ... esac` expression."""

    def __init__(self, expr, actions, line, column):
        super().__init__(line=line, column=column)
        self.expr = expr
        self.actions = actions

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("expr", self.expr),
            ("actions", self.actions),
        )

    def to_readable(self):
        return "{0}(expr={1}, actions={2})".format(
            self.clsname, self.expr, self.actions)
class NodeCaseAction(NodeExpr):
    """AST node for a single `id : type => expr` branch of a case expression."""

    def __init__(self, idName, expr, _type, line, column, typeColumn):
        super().__init__(line=line, column=column)
        self.idName = idName
        self.expr = expr
        self.type = _type
        self.typeColumn = typeColumn
# ############################## UNARY OPERATIONS ##################################
class NodeUnaryOperation(NodeExpr):
    """Common base for unary operator nodes."""
    def __init__(self, line, column):
        super().__init__(line= line, column= column)
class NodeIntegerComplement(NodeUnaryOperation):
    """AST node for the `~expr` integer complement."""

    def __init__(self, integer_expr, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "~"
        self.integer_expr = integer_expr

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("integer_expr", self.integer_expr),
        )

    def to_readable(self):
        return "{0}(expr={1})".format(self.clsname, self.integer_expr)
class NodeBooleanComplement(NodeUnaryOperation):
    """AST node for the `not expr` boolean complement."""

    def __init__(self, boolean_expr, line, column):
        super().__init__(line=line, column=column)
        self.symbol = "!"
        self.boolean_expr = boolean_expr

    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("boolean_expr", self.boolean_expr),
        )

    def to_readable(self):
        return "{0}(expr={1})".format(self.clsname, self.boolean_expr)
# ############################## BINARY OPERATIONS ##################################
class NodeBinaryOperation(NodeExpr):
    """Common base for binary operator nodes."""
    def __init__(self, line, column):
        super().__init__(line= line, column= column)
        self.type= ''

class _NodeBinaryOperands(NodeBinaryOperation):
    """Shared implementation for binary operators with two operands.

    Deduplicates the identical __init__/to_tuple/to_readable bodies that
    were previously copy-pasted across all arithmetic and comparison node
    classes; each concrete subclass only declares its operator via _SYMBOL.
    """
    _SYMBOL = ''
    def __init__(self, first, second, line, column):
        super().__init__(line= line, column= column)
        # Kept as an instance attribute (self.symbol) for compatibility
        # with code that reads the old per-instance field.
        self.symbol = self._SYMBOL
        self.first = first
        self.second = second
    def to_tuple(self):
        return (
            ("class_name", self.clsname),
            ("first", self.first),
            ("second", self.second),
        )
    def to_readable(self):
        return "{}(first={}, second={})".format(self.clsname,
            self.first, self.second)

class NodeAddition(_NodeBinaryOperands):
    _SYMBOL = "+"

class NodeSubtraction(_NodeBinaryOperands):
    _SYMBOL = "-"

class NodeMultiplication(_NodeBinaryOperands):
    _SYMBOL = "*"

class NodeDivision(_NodeBinaryOperands):
    _SYMBOL = "/"

class NodeEqual(_NodeBinaryOperands):
    _SYMBOL = "="

class NodeLessThan(_NodeBinaryOperands):
    _SYMBOL = "<"

class NodeLessThanOrEqual(_NodeBinaryOperands):
    _SYMBOL = "<="
|
python
|
from ckan.common import config
def get_ytp_recommendation_recaptcha_sitekey():
    """Return the reCAPTCHA site key for the ytp_recommendation extension.

    Reads ``ckanext.ytp_recommendation.recaptcha_sitekey`` from the CKAN
    config; returns None when the option is not set.
    """
    sitekey = config.get('ckanext.ytp_recommendation.recaptcha_sitekey')
    return sitekey
|
python
|
# The MIT License(MIT)
# Copyright (c) 2013-2014 Matt Thomson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Run the test suite under the 'pyembed' package.
        self.test_args = ['pyembed']
        self.test_suite = True

    def run_tests(self):
        # Import here, because outside this method the eggs aren't loaded yet.
        import pytest
        sys.exit(pytest.main(self.test_args))
def _read(file_name):
    """Read a text file and close it promptly.

    The original passed bare ``open(...).read()`` expressions to setup(),
    leaking the file handles until garbage collection.
    """
    with open(file_name) as f:
        return f.read()


setup(
    name='pyembed',
    version='1.3.3',
    author='Matt Thomson',
    author_email='[email protected]',
    url='http://pyembed.github.io',
    description='Python OEmbed consumer library with automatic discovery of ' +
                'producers',
    long_description=_read('README.rst') + '\n\n' + _read('CHANGES.rst'),
    download_url='https://pypi.python.org/pypi/pyembed/',
    license=_read('LICENSE.txt'),
    provides=['pyembed.core'],
    packages=['pyembed.core'],
    namespace_packages=['pyembed'],
    package_data={
        "pyembed.core": [
            "config/providers.json"
        ]
    },
    install_requires=[
        'beautifulsoup4',
        'requests'
    ],
    tests_require=[
        'mock',
        'pytest',
        'vcrpy'
    ],
    cmdclass={'test': PyTest},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Topic :: Text Processing'
    ]
)
|
python
|
import itertools

# Probability that a random n-letter combination of the given letters
# contains at least one 'a' (HackerRank-style stdin input).
a = input()
s = input().split()
n = int(input())
L = list(itertools.combinations(s, n))
hits = [combo for combo in L if 'a' in combo]
print("{:.4f}".format(len(hits) / len(L)))
|
python
|
from django.conf.urls import include, url

# URL configuration: mounts the browsable REST API and the version endpoint.
urlpatterns = [
    # browsable REST API
    url(r'^api/', include('osmaxx.rest_api.urls')),
    url(r'^version/', include('osmaxx.version.urls', namespace='version')),
]
|
python
|
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Lab 1 - Driving in Shapes
"""
########################################################################################
# Imports
########################################################################################
import sys
sys.path.insert(1, "../../library")
import racecar_core
# import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
# Global racecar object shared by start() and update().
rc = racecar_core.create_racecar()

# Put any global variables here
########################################################################################
# Functions
########################################################################################
def start():
    """
    This function is run once every time the start button is pressed
    """
    # Begin at a full stop
    rc.drive.stop()
    # Print start message (the Y button line below covers the main-challenge
    # TODO of documenting the extra shape).
    print(
        ">> Lab 1 - Driving in Shapes\n"
        "\n"
        "Controls:\n"
        "   Right trigger = accelerate forward\n"
        "   Left trigger = accelerate backward\n"
        "   Left joystick = turn front wheels\n"
        "   A button = drive in a circle\n"
        "   B button = drive in a square\n"
        "   X button = drive in a figure eight\n"
        "   Y button = drive in a wave\n"
    )
# Setting up constants
counter = 0                  # per-shape elapsed-time accumulator (seconds)
driveA = False               # True while the circle routine is running
driveB = False               # True while the square routine is running
driveX = False               # True while the figure-eight routine is running
driveY = False               # True while the wave routine is running
turnTimeIncrement = 6.3      # seconds allotted to each turn segment of the square
forwardsTimeIncrement = 5    # seconds allotted to each straight segment
limits = [0]                 # cumulative segment end-times, rebuilt on each B press
def update():
    """
    After start() is run, this function is run every frame until the back button
    is pressed
    """
    # Use global constants
    global counter
    global driveA
    global driveB
    global driveX
    global driveY
    # for square challenge
    global turnTimeIncrement
    global forwardsTimeIncrement
    global limits
    # TODO (warmup): Implement manual acceleration and steering
    # Triggers give speed in [-1, 1]; left joystick x-axis gives the angle.
    # Any shape routine below overrides this manual command for the frame.
    manual_speed = 0
    manual_angle = 0
    manual_speed -= rc.controller.get_trigger(rc.controller.Trigger.LEFT)
    manual_speed += rc.controller.get_trigger(rc.controller.Trigger.RIGHT)
    manual_angle = rc.controller.get_joystick(rc.controller.Joystick.LEFT)[0]
    # print(manual_speed, manual_angle)
    rc.drive.set_speed_angle(manual_speed, manual_angle)
    # TODO (main challenge): Drive in a circle
    if rc.controller.was_pressed(rc.controller.Button.A):
        print("Driving in a circle...")
        driveA = True
        counter = 0
    if driveA == True:
        # Hold full left turn; ~11.7 s completes one lap at this speed.
        rc.drive.set_speed_angle(0.5, -1)
        counter += rc.get_delta_time()
        if counter > 11.7:
            rc.drive.stop()
            driveA = False
            print("STOPPED CIRCLE")
    # TODO (main challenge): Drive in a square when the B button is pressed
    if rc.controller.was_pressed(rc.controller.Button.B):
        print("Driving in a square...")
        counter = 0
        driveB = True
        # Build cumulative time limits alternating straight / turn segments.
        limits = [0]
        for i in range(8):
            if i % 2 == 0:
                limits.append(limits[-1] + forwardsTimeIncrement)
            else:
                limits.append(limits[-1] + turnTimeIncrement)
        print(limits)
        # First straight gets an extra second to build up speed from rest.
        limits[1] += 1
    if driveB == True:
        forward_speed = 0.5
        turn_speed = 0.15
        counter += rc.get_delta_time()
        # Walk through the 8 segments (straight, turn, straight, ...).
        if counter < limits[1]:
            rc.drive.set_speed_angle(forward_speed, 0)
        elif counter < limits[2]:
            rc.drive.set_speed_angle(turn_speed, -1)
        elif counter < limits[3]:
            rc.drive.set_speed_angle(forward_speed, 0)
        elif counter < limits[4]:
            rc.drive.set_speed_angle(turn_speed, -1)
        elif counter < limits[5]:
            rc.drive.set_speed_angle(forward_speed, 0)
        elif counter < limits[6]:
            rc.drive.set_speed_angle(turn_speed, -1)
        elif counter < limits[7]:
            rc.drive.set_speed_angle(forward_speed, 0)
        elif counter < limits[8]:
            rc.drive.set_speed_angle(turn_speed, -1)
        else:
            driveB = False
            rc.drive.stop()
            print("STOPPED SQUARE")
    # TODO (main challenge): Drive in a figure eight when the X button is pressed
    if rc.controller.was_pressed(rc.controller.Button.X):
        print("Driving in a figure eight...")
        driveX = True
        counter = 0
    if driveX == True:
        # Second loop (left turn) after 11.7 s; stop after two full loops.
        rc.drive.set_speed_angle(0.5, -1)
        counter += rc.get_delta_time()
        if counter < 11.7:
            rc.drive.set_speed_angle(0.5, 1)
        elif counter > 23.4:
            rc.drive.stop()
            driveX = False
            print("STOPPED FIGURE EIGHT")
    # TODO (main challenge): Drive in a shape of your choice when the Y button
    # is pressed
    if rc.controller.was_pressed(rc.controller.Button.Y):
        print("Driving in a wave...")
        driveY = True
        counter = 0
    if driveY == True:
        print(counter)
        counter += rc.get_delta_time()
        # Alternate hard left/right every 3 seconds for 15 seconds total.
        if counter < 3:
            rc.drive.set_speed_angle(1, -1)
        elif counter < 6:
            rc.drive.set_speed_angle(1, 1)
        elif counter < 9:
            rc.drive.set_speed_angle(1, -1)
        elif counter < 12:
            rc.drive.set_speed_angle(1, 1)
        elif counter < 15:
            rc.drive.set_speed_angle(1, -1)
        else:
            driveY = False
            rc.drive.stop()
            print("STOPPED WAVE")
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
# Register the callbacks and hand control to the racecar framework.
if __name__ == "__main__":
    rc.set_start_update(start, update)
    rc.go()
|
python
|
import pyOcean_cpu as ocean

# Demonstrates ellipsis indexing on a 2-D tensor: for a 2-D array the
# ellipsis matches no axes, so a[1, ..., 3] addresses the single element
# at row 1, column 3.
a = ocean.zeros([5,5])
a[1,...,3] = 99
print(a)
|
python
|
import sqlite3
import pandas as pd
from .. import CONFIG
import datetime
from ..scrape.utils import PROPER_DATE_FORMAT
class DB_Query():
    """Result of a SQL query: the column names plus the fetched rows."""

    def __init__(self, column_names, rows):
        self.column_names = column_names
        self.rows = rows

    def to_df(self):
        """Return the result set as a pandas DataFrame."""
        frame = pd.DataFrame(self.rows, columns=self.column_names)
        return frame
def execute_sql(sql, input_con=None, params=()):
    """
    Executes a sql query and commits the result.

    params is a list/tuple of values used in place of question marks in the
    sql statement; a bare scalar is wrapped into a 1-tuple for convenience.
    The input_con parameter is useful for creating temporary tables that
    will be persisted across a connection; when omitted, a fresh connection
    is opened, committed and closed before returning.
    Returns a DB_Query.
    """
    if input_con is None:
        con = get_db_connection()
    else:
        con = input_con
    # Allow passing a single scalar parameter without wrapping it.
    # isinstance (instead of type() comparison) also accepts subclasses.
    if not isinstance(params, (tuple, list)):
        params = (params, )
    cur = con.execute(sql, params)
    results = cur.fetchall()
    # cur.description is None for statements that return no rows (e.g. DELETE).
    column_names = [description[0] for description in cur.description] if cur.description is not None else None
    # Only close (and commit) connections this function opened itself.
    if input_con is None:
        close_db_connection(con)
    return DB_Query(column_names, results)


# Backwards-compatible alias; persistence is selected via input_con.
execute_sql_persist = execute_sql
def execute_many_sql(sql, seq_of_params):
    """
    Executes a sql statement for a batch of values.
    Returns a DB_Query.
    """
    con = get_db_connection()
    cur = con.executemany(sql, seq_of_params)
    rows = cur.fetchall()
    # Statements without a result set leave cur.description as None.
    if cur.description is None:
        names = None
    else:
        names = [entry[0] for entry in cur.description]
    close_db_connection(con)
    return DB_Query(names, rows)
def get_table_column_names(table_name: str):
    """
    Returns a list of column names of the specified table.

    This function is in this module because it deals with the cursor and
    connection abstraction layer.

    NOTE: table_name is interpolated directly into the statement, so it must
    come from a trusted source (e.g. get_table_names()).
    """
    con = get_db_connection()
    try:
        cur = con.execute("""SELECT * FROM {} LIMIT 1;""".format(table_name))
        return [description[0] for description in cur.description]
    finally:
        # Fix: the original never closed this connection, leaking it.
        close_db_connection(con)
def clear_scrape_logs(date=None):
    """
    Removes all entries from the table scrape_log that come before the
    given date; with date=None every entry is removed.

    date must be in YYYY-MM-DD format, otherwise a ValueError is raised.
    """
    if date is None:
        execute_sql("""DELETE FROM scrape_log WHERE TRUE;""")
        return
    try:
        datetime.datetime.strptime(date, PROPER_DATE_FORMAT)
    except ValueError:
        raise ValueError('date was not in the correct format: YYYY-MM-DD')
    execute_sql("""DELETE FROM scrape_log WHERE date < ?;""", date)
def drop_tables():
    """
    Drops all tables in the database.
    """
    # Reuse the sibling helper instead of duplicating the master-table query.
    for table_name in get_table_names():
        execute_sql("""DROP TABLE {};""".format(table_name))
def get_table_names():
    """
    Returns a list of table names (list of strings).
    """
    query = execute_sql("""SELECT name FROM sqlite_master WHERE type='table';""")
    return [row[0] for row in query.rows]
def execute_sql_file(file_name, input_con=None):
    """
    Executes sql in the given file, statement by statement.
    Returns the output of the last sql statement in the file.
    Statements are separated by the semicolon (;) character.
    """
    con = get_db_connection() if input_con is None else input_con
    output = None
    with open(file_name, 'r') as f:
        # Trailing split element after the final ';' is empty -> drop it.
        commands = f.read().split(';')[:-1]
    for cmd in commands:
        try:
            output = execute_sql(cmd, con)
        except Exception as e:
            print('Tried to execute this command but failed: {}'.format(cmd))
            raise e
    # close the con if it was created by this function
    if input_con is None:
        close_db_connection(con)
    return output


# Backwards-compatible alias; persistence is selected via input_con.
execute_sql_file_persist = execute_sql_file
def get_db_connection():
    # Open a sqlite3 connection to <DB_PATH>/<DB_NAME>.db. The caller is
    # responsible for closing it, normally via close_db_connection().
    con = sqlite3.connect('{}/{}.db'.format(CONFIG['DB_PATH'], CONFIG['DB_NAME']))
    return con
def close_db_connection(con):
    # Commit any pending transaction before closing so writes are persisted.
    con.commit()
    con.close()
|
python
|
#!/usr/bin/env python3
"""
This is the player application called by remctl or
through SSH; it is the replacement for the Perl command
'acoustics' and provides the same functionality.
"""
import sys
import os
import importlib
sys.path.append(os.path.dirname(sys.argv[0]) + '/lib')
from amp import db, config
if __name__ == "__main__":
    # Arguments are [player] [command] [arguments]
    if len(sys.argv) < 2:
        print("Expected player_id")
        sys.exit(1)
    # And we expect at least a player and a command
    if len(sys.argv) < 3:
        print("Expected command")
        sys.exit(1)
    conf = config.AcousticsConfig()
    # Refuse player ids not listed in the config.
    # NOTE(review): the "{}" section key looks unusual -- confirm it is the
    # intended default-section name in AcousticsConfig.
    if sys.argv[1] not in conf["{}"]['players'].split(","):
        sys.exit(1)
    # Dynamically load the backend module configured for this player.
    player_module = importlib.import_module(
        conf.translate(conf['player.' + sys.argv[1]]["module"]))
    # data_source presumably looks like "dbi:...:<path>"; the last ':' field
    # is used as the sqlite file path -- verify against the config format.
    DB = db.Sqlite(conf['database']['data_source'].split(":")[-1])
    player = player_module.PlayerImpl(sys.argv[1], DB)
    # Execute the player command.
    player.execute(sys.argv[2], sys.argv[3:])
|
python
|
import argparse
import yaml
import os
import anndata as ad
from morphelia.preprocessing import *
from morphelia.features import *
def run(inp):
    """Preprocess morphological annotated data.

    Reads the AnnData file named by inp's 'pp_inp' entry from inp['output'],
    cleans, normalizes and feature-selects it, and writes the result to
    inp['pp_name'] in the same directory. Figures go to <output>/figures/.
    """
    # where to save figures
    figdir = os.path.join(inp['output'], './figures/')
    # load data
    # NOTE(review): double lookup inp[inp['pp_inp']] -- confirm the config
    # stores the input file name under the key named by 'pp_inp'.
    inp_data = os.path.join(inp['output'], inp[inp['pp_inp']])
    if not os.path.exists(inp_data):
        raise OSError(f"File with subsample data does not exist: {inp_data}")
    adata = ad.read_h5ad(inp_data)
    # clean data
    print("[STEP 1] Clean data: Drop features containing Nan values, "
          "duplicated features or invariant features.")
    adata = drop_nan(adata, verbose=True)
    adata = drop_duplicates(adata, verbose=True)
    adata = drop_invariant(adata, verbose=True)
    # filter debris
    print("[STEP 2] Filter debris.")
    if inp['filter_debris']:
        adata = filter_debris(adata, show=True, save=figdir, verbose=True)
    else:
        print('Skipped.')
    # normalize cells
    print("[STEP 3] Normalize data.")
    adata = normalize(adata, method=inp['norm_method'],
                      by=inp['batch_id'],
                      pop_var=inp['treat_var'],
                      norm_pop=inp['ctrl_name'],
                      drop_nan=True,
                      verbose=True)
    print("[STEP 4] Drop noise.")
    adata = drop_noise(adata, verbose=True,
                       by=inp['condition_group'])
    print("[STEP 5] Drop features with near zero variance.")
    adata = drop_near_zero_variance(adata, verbose=True)
    print("[STEP 6] Drop outlier.")
    adata = drop_outlier(adata, verbose=True)
    # Fix: "[STEP 6]" was printed twice; renumber the remaining steps.
    print("[STEP 7] Drop highly correlated features.")
    adata = drop_highly_correlated(adata,
                                   thresh=0.95,
                                   verbose=True,
                                   show=True,
                                   save=figdir)
    # write file
    print("[STEP 8] Write file.")
    adata.write(os.path.join(inp['output'], inp['pp_name']))
def main(args=None):
    """Implements the commandline tool to Preprocess an AnnData object
    with morphological data from single cells.

    args: optional argument list (defaults to sys.argv via argparse).
    """
    # initiate the arguments parser
    # Fix: the description was an f-string with no placeholders (F541).
    parser = argparse.ArgumentParser(description='Preprocess data.')
    parser.add_argument('config', type=str, help='config file in yaml format.')
    # parse
    args = parser.parse_args(args)
    yml_path = args.config
    with open(yml_path, 'r') as f:
        data = yaml.load(f, Loader=yaml.FullLoader)
    # run
    run(data)
|
python
|
"""Encoder definition for transformer-transducer models."""
import torch
from espnet.nets.pytorch_backend.transducer.blocks import build_blocks
from espnet.nets.pytorch_backend.transducer.vgg2l import VGG2L
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling
class Encoder(torch.nn.Module):
    """Transformer encoder module.

    Args:
        idim (int): input dim
        enc_arch (list): list of encoder blocks (type and parameters)
        input_layer (str): input layer type
        repeat_block (int): repeat provided block N times if N > 1
        self_attn_type (str): type of self-attention
        positional_encoding_type (str): positional encoding type
        positionwise_layer_type (str): linear
        positionwise_activation_type (str): positionwise activation type
        conv_mod_activation_type (str): convolutional module activation type
        normalize_before (bool): whether to use layer_norm before the first block
        padding_idx (int): padding_idx for embedding input layer (if specified)
    """

    def __init__(
        self,
        idim,
        enc_arch,
        input_layer="linear",
        repeat_block=0,
        self_attn_type="selfattn",
        positional_encoding_type="abs_pos",
        positionwise_layer_type="linear",
        positionwise_activation_type="relu",
        conv_mod_activation_type="relu",
        normalize_before=True,
        padding_idx=-1,
    ):
        """Construct an Transformer encoder object."""
        super().__init__()
        # build_blocks returns the input-embedding layer, the stack of
        # encoder blocks, and the output dimension of that stack.
        self.embed, self.encoders, self.enc_out = build_blocks(
            "encoder",
            idim,
            input_layer,
            enc_arch,
            repeat_block=repeat_block,
            self_attn_type=self_attn_type,
            positional_encoding_type=positional_encoding_type,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_activation_type=positionwise_activation_type,
            conv_mod_activation_type=conv_mod_activation_type,
            padding_idx=padding_idx,
        )
        self.normalize_before = normalize_before
        if self.normalize_before:
            # Final LayerNorm applied after the block stack.
            self.after_norm = LayerNorm(self.enc_out)

    def forward(self, xs, masks):
        """Encode input sequence.

        Args:
            xs (torch.Tensor): input tensor
            masks (torch.Tensor): input mask

        Returns:
            xs (torch.Tensor): position embedded input
            mask (torch.Tensor): position embedded mask
        """
        # Subsampling front-ends consume and reshape the mask as well.
        if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):
            xs, masks = self.embed(xs, masks)
        else:
            xs = self.embed(xs)
        xs, masks = self.encoders(xs, masks)
        # Some block types return (output, positional embedding); keep output.
        if isinstance(xs, tuple):
            xs = xs[0]
        if self.normalize_before:
            xs = self.after_norm(xs)
        return xs, masks
|
python
|
import gym
import quadruped_gym.gym # noqa: F401
def test_reset():
    # reset() must return an observation inside the declared observation space.
    env = gym.make('A1BulletGymEnv-v0')
    observation = env.reset()
    assert observation in env.observation_space
def test_step():
    # Smoke test: stepping with random sampled actions must not raise.
    env = gym.make('A1BulletGymEnv-v0')
    env.reset()
    for _ in range(10):
        env.step(env.action_space.sample())
|
python
|
import numpy as np
if __name__=="__main__":
    # Read the array shape as whitespace-separated integers, then print an
    # all-zeros and an all-ones integer array of that shape.
    N=list(map(int,input().split()))
    print(np.zeros(N,int))
    print(np.ones(N,int))
|
python
|
from jivago.jivago_application import JivagoApplication
from jivago.wsgi.annotations import Resource
from jivago.wsgi.methods import GET
@Resource("/")
class HelloResource(object):
    """Root resource: responds to GET / with a plain-text greeting."""

    @GET
    def get_hello(self) -> str:
        return "Hello World!"
app = JivagoApplication()

# Run the development server when executed directly.
if __name__ == '__main__':
    app.run_dev()
|
python
|
import pandas
from matplotlib import pyplot

# One-row frame: 325 complete vs 106 incomplete items (431 total), rendered
# as a single stacked bar.
df = pandas.DataFrame([431-106,106])
df = df.transpose()
df.columns=['complete','incomplete']
df.plot(kind='bar', stacked=True, legend=False)
pyplot.show()
|
python
|
from typing import Tuple, Union, List, Optional
from sectionproperties.pre import sections
import numpy as np
import shapely
def create_line_segment(
    point_on_line: Union[Tuple[float, float], np.ndarray],
    vector: np.ndarray,
    bounds: tuple,
):
    """
    Return a LineString for the line through 'point_on_line' in the
    direction of 'vector', clipped to 'bounds'.

    'bounds' is a tuple of floats supplying the min and max ordinate.
    """
    p_x, p_y = point_on_line
    b_1 = min(bounds)
    b_2 = max(bounds)
    if vector[0] == 0:  # Vertical line: parameterize by y instead of x.
        scale_factor_1 = (b_1 - p_y) / vector[1]
        x_1 = scale_factor_1 * vector[0] + p_x
        scale_factor_2 = (b_2 - p_y) / vector[1]
        x_2 = scale_factor_2 * vector[0] + p_x
        return shapely.geometry.LineString([(x_1, b_1), (x_2, b_2)])
    # General case: solve for the y-ordinates at the two x bounds.
    scale_factor_1 = (b_1 - p_x) / vector[0]
    y_1 = scale_factor_1 * vector[1] + p_y
    scale_factor_2 = (b_2 - p_x) / vector[0]
    y_2 = scale_factor_2 * vector[1] + p_y
    return shapely.geometry.LineString([(b_1, y_1), (b_2, y_2)])
def group_top_and_bottom_polys(
    polys: shapely.geometry.GeometryCollection, line: shapely.geometry.LineString,
) -> Tuple[list, list]:
    """
    Returns tuple of two lists representing the list of Polygons in 'polys' on the "top" side of 'line' and the
    list of Polygons on the "bottom" side of the 'line' after the original geometry has been split by 'line'.
    The 0-th tuple element is the "top" polygons and the 1-st element is the "bottom" polygons.
    In the event that 'line' is a perfectly vertical line, the "top" polys are the polygons on the "right" of the
    'line' and the "bottom" polys are the polygons on the "left" of the 'line'.
    """
    top_acc = []
    bot_acc = []
    # Perf fix: (m, b) is invariant across polys; the original recomputed it
    # inside the loop on every iteration.
    m, b = line_mx_plus_b(line)
    for poly in polys:
        px, py = poly.representative_point().coords[0]
        if b is not None:  # Not a vertical line (special case)
            y_test = m * px + b
            if py < y_test:
                bot_acc.append(poly)
            elif py > y_test:
                top_acc.append(poly)
        else:  # The special case of vertical line
            lx, _ = line.coords[0]
            if px < lx:
                bot_acc.append(poly)
            elif px > lx:
                top_acc.append(poly)
    return top_acc, bot_acc
def line_mx_plus_b(line: shapely.geometry.LineString,) -> Tuple[float, float]:
    """
    Return (m, b) such that 'line' satisfies y = m*x + b.
    For a perfectly vertical line the slope is undefined and the sentinel
    (1, None) is returned instead.
    """
    (x1, y1) = line.coords[0]
    (x2, y2) = line.coords[1]
    if x2 - x1 == 0:
        return (1, None)
    m_slope = (y2 - y1) / (x2 - x1)
    # Solve the line equation for b using the known point (x1, y1).
    b_intercept = y1 - m_slope * x1
    return (m_slope, b_intercept)
def perp_mx_plus_b(
    m_slope: float, point_on_line: Tuple[float, float],
) -> Tuple[float, float]:
    """
    Return (m, b) of the line perpendicular to a line of slope 'm_slope'
    that passes through 'point_on_line', an (x, y) coordinate.
    """
    p_x, p_y = point_on_line
    m_perp = -1 / m_slope  # negative reciprocal
    return (m_perp, p_y - m_perp * p_x)
def line_intersection(
m_1: float, b_1: float, m_2: float, b_2: float,
) -> Optional[float]:
"""
Returns a float representing the x-ordinate of the intersection
point of the lines defined by y = m_1*x + b_1 and y = m_2*x + b_2.
Returns None if the lines are parallel.
"""
try:
x = (b_2 - b_1) / (m_1 - m_2)
except ZeroDivisionError:
x = None
return x
def sum_poly_areas(lop: List[shapely.geometry.Polygon],) -> float:
    """
    Returns a float representing the total area of all polygons
    in 'lop', the list of polygons.
    """
    # Idiom: sum() replaces the manual accumulator loop.
    return sum(poly.area for poly in lop)
|
python
|
# -*- coding: utf-8 -*-
class Config(object):
    """JSON-backed configuration object with dict-style read access."""

    def __init__(self, config_path):
        config_path = Config.validate_path(config_path)
        self.config_path = config_path
        self._config = Config.validate_format_and_parse(config_path)

    def __getitem__(self, key):
        # dict.get: unknown keys yield None instead of raising KeyError.
        return self._config.get(key)

    @staticmethod
    def validate_path(path):
        """Ensure 'path' names an existing .json file; return it unchanged."""
        import os
        if os.path.splitext(path)[1] != '.json':
            raise Exception('Config file must be json format')
        if not os.path.exists(path):
            raise FileNotFoundError
        return path

    @staticmethod
    def validate_format_and_parse(path):
        """Parse the file as UTF-8 JSON and return the resulting object."""
        import json
        import codecs
        with codecs.open(path, 'rb+', 'utf-8') as rcf:
            return json.load(rcf)
def concat_config_path(file_located, filename):
    """Return the path of 'filename' placed in the parent directory of the
    directory that contains 'file_located'."""
    import os
    containing_dir = os.path.split(file_located)[0]
    parent_dir = os.path.abspath(os.path.join(containing_dir, os.pardir))
    return parent_dir + '/' + filename
|
python
|
from django.conf.urls import include, url
from django.contrib import admin
#handler400 = 'world.views.error400page'
#AEN: THIS doesn't work!
import voxel_globe.main.views
# URL routing for the voxel_globe project: admin, per-app mounts, REST auth,
# downloads, and protected static assets.
urlpatterns = [
    #Admin site apps
    url(r'^admin/', include(admin.site.urls)),
    #Test app for development reasons
    url(r'^world/', include('voxel_globe.world.urls', namespace='world')),
    # pages
    #Main home page
    url(r'', include('voxel_globe.main.urls', namespace='main')),
    #Placeholders
    # url(r'^apps/imageIngest/$', voxel_globe.main.views.imageIngest,
    #     name='imageIngest'),
    url(r'^apps/voxelCreator/$', voxel_globe.main.views.voxelCreator,
        name='voxelCreator'),
    url(r'^apps/voxelWorldViewer/$', voxel_globe.main.views.voxelWorldViewer,
        name='voxelWorldViewer'),
    # url(r'^apps/ingest/upload$', 'voxel_globe.ingest.views.upload',
    #     name="uploadEndpoint"),
    #REST auth endpoint
    url(r'^rest/', include('rest_framework.urls', namespace='rest_framework')),
    #apps
    url(r'^meta/', include('voxel_globe.meta.urls', namespace='meta')),
    url(r'^apps/task/', include('voxel_globe.task.urls', namespace='task')),
    url(r'^apps/tiepoint/', include('voxel_globe.tiepoint.urls',
                                    namespace='tiepoint')),
    url(r'^apps/voxel_viewer/', include('voxel_globe.voxel_viewer.urls',
                                        namespace='voxel_viewer')),
    url(r'^apps/ingest/', include('voxel_globe.ingest.urls',
                                  namespace='ingest')),
    url(r'^apps/sfm/', include('voxel_globe.visualsfm.urls',
                               namespace='visualsfm')),
    url(r'^apps/voxel_world/',
        include('voxel_globe.build_voxel_world.urls',
                namespace='build_voxel_world')),
    url(r'^apps/generate_point_cloud/',
        include('voxel_globe.generate_point_cloud.urls',
                namespace='generate_point_cloud')),
    url(r'^apps/tiepoint_registration/',
        include('voxel_globe.tiepoint_registration.urls',
                namespace='tiepoint_registration')),
    url(r'^apps/height_map/',
        include('voxel_globe.height_map.urls',
                namespace='height_map')),
    url(r'^apps/filter_number_observations/',
        include('voxel_globe.filter_number_observations.urls',
                namespace='filter_number_observations')),
    url(r'^download/',
        include('voxel_globe.download.urls',
                namespace='download')),
    url(r'^apps/create_site/',
        include('voxel_globe.create_site.urls',
                namespace='create_site')),
    url(r'^apps/image_view/',
        include('voxel_globe.image_view.urls',
                namespace='image_view')),
    url(r'^apps/event_trigger/',
        include('voxel_globe.event_trigger.urls',
                namespace='event_trigger')),
    url(r'^apps/channels/',
        include('voxel_globe.channel_test.urls',
                namespace='channel_test')),
    url(r'^apps/websockets/',
        include('voxel_globe.websockets.urls',
                namespace='websockets')),
    #Other static protected assets
    url(r'^images/',
        include('voxel_globe.security.urls',
                namespace='security')) ,
]
|
python
|
"""added column to DT and created NewTable
Revision ID: 1100598db8eb
Revises: 66362e7784fd
Create Date: 2021-02-21 13:56:05.307362
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# This migration (1100598db8eb) follows directly after 66362e7784fd.
revision = '1100598db8eb'
down_revision = '66362e7784fd'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the nullable mobile_number column (max 15 chars) to dummy_table.
    op.add_column('dummy_table', sa.Column('mobile_number', sa.String(length=15), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Revert upgrade(): drop the mobile_number column again.
    op.drop_column('dummy_table', 'mobile_number')
    # ### end Alembic commands ###
|
python
|
import time
import multiprocessing
import subprocess
import sys
import os
def run(name):
    """Launch test_remote.py (located next to this file) as a detached
    subprocess, passing 'name' plus a fixed extra argument."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    script = os.path.join(dir_path, "test_remote.py")
    subprocess.Popen((sys.executable, script, name, "etc etc"))
if __name__ == '__main__':
    # Fire the launcher from a separate process, then idle forever so the
    # parent stays alive while the spawned children run.
    multiprocessing.Process(target=run, args=("subprocess",)).start()
    while True:
        time.sleep(0.1)
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from concurrent.futures import TimeoutError
from kikimr.public.sdk.python import client as ydb
import random
# Number of expiration_queue_%d tables; each queue can be drained by its own worker.
EXPIRATION_QUEUE_COUNT = 4
# Uniform-partition count for the documents table.
DOC_TABLE_PARTITION_COUNT = 4
# Upsert a document row and enqueue (timestamp, doc_id) into one expiration
# queue. Formatted with (table path prefix, queue index).
ADD_DOCUMENT_TRANSACTION = """PRAGMA TablePathPrefix("%s");
DECLARE $url AS Utf8;
DECLARE $html AS Utf8;
DECLARE $timestamp AS Uint64;
$doc_id = Digest::CityHash($url);
REPLACE INTO documents
(doc_id, url, html, timestamp)
VALUES
($doc_id, $url, $html, $timestamp);
REPLACE INTO expiration_queue_%d
(timestamp, doc_id)
VALUES
($timestamp, $doc_id);
"""
# Look a document up by the CityHash of its URL. Formatted with (prefix,).
READ_DOCUMENT_TRANSACTION = """PRAGMA TablePathPrefix("%s");
DECLARE $url AS Utf8;
$doc_id = Digest::CityHash($url);
SELECT doc_id, url, html, timestamp
FROM documents
WHERE doc_id = $doc_id;
"""
# Page through one expiration queue: up to 100 entries with
# timestamp <= $timestamp, resuming after the ($prev_timestamp, $prev_doc_id)
# cursor. Formatted with (prefix, queue index, queue index).
READ_EXPIRED_BATCH_TRANSACTION = """PRAGMA TablePathPrefix("%s");
DECLARE $timestamp AS Uint64;
DECLARE $prev_timestamp AS Uint64;
DECLARE $prev_doc_id AS Uint64;
$data = (
SELECT *
FROM expiration_queue_%d
WHERE
timestamp <= $timestamp
AND
timestamp > $prev_timestamp
ORDER BY timestamp, doc_id
LIMIT 100
UNION ALL
SELECT *
FROM expiration_queue_%d
WHERE
timestamp = $prev_timestamp AND doc_id > $prev_doc_id
ORDER BY timestamp, doc_id
LIMIT 100
);
SELECT timestamp, doc_id
FROM $data
ORDER BY timestamp, doc_id
LIMIT 100;
"""
# Remove a document and its queue entry; the timestamp match protects a
# freshly re-added document from deletion. Formatted with (prefix, queue index).
DELETE_EXPIRED_DOCUMENT = """PRAGMA TablePathPrefix("%s");
DECLARE $doc_id AS Uint64;
DECLARE $timestamp AS Uint64;
DELETE FROM documents
WHERE doc_id = $doc_id AND timestamp = $timestamp;
DELETE FROM expiration_queue_%d
WHERE timestamp = $timestamp AND doc_id = $doc_id;
"""
def is_directory_exists(driver, path):
    # True when 'path' exists in the scheme and is a directory; describe_path
    # raises SchemeError for nonexistent paths, which is mapped to False.
    try:
        return driver.scheme_client.describe_path(path).is_directory()
    except ydb.SchemeError:
        return False
def ensure_path_exists(driver, database, path):
    # Create every missing directory component of 'path' under 'database',
    # deepest-missing last (like os.makedirs).
    paths_to_create = list()
    path = path.rstrip("/")
    # Walk upwards collecting components until one already exists.
    while path != "":
        full_path = os.path.join(database, path)
        if is_directory_exists(driver, full_path):
            break
        paths_to_create.append(full_path)
        path = os.path.dirname(path).rstrip("/")
    # Create from the shallowest collected component down.
    while len(paths_to_create) > 0:
        full_path = paths_to_create.pop(-1)
        driver.scheme_client.make_directory(full_path)
# Creates Documents table and multiple ExpirationQueue tables
def create_tables(table_client, path):
    session = table_client.session().create()
    # Documents table stores the contents of web pages.
    # The table is partitioned by hash(Url) in order to evenly distribute the load.
    session.create_table(
        os.path.join(path, "documents"),
        ydb.TableDescription()
        .with_primary_keys("doc_id")
        .with_columns(
            ydb.Column("doc_id", ydb.OptionalType(ydb.PrimitiveType.Uint64)),
            ydb.Column("url", ydb.OptionalType(ydb.PrimitiveType.Utf8)),
            ydb.Column("html", ydb.OptionalType(ydb.PrimitiveType.Utf8)),
            ydb.Column("timestamp", ydb.OptionalType(ydb.PrimitiveType.Uint64)),
        )
        .with_profile(
            ydb.TableProfile()
            # Partition Documents table by DocId
            .with_partitioning_policy(
                ydb.PartitioningPolicy().with_uniform_partitions(
                    DOC_TABLE_PARTITION_COUNT
                )
            )
        )
    )
    # Multiple ExpirationQueue tables allow to scale the load.
    # Each ExpirationQueue table can be handled by a dedicated worker.
    # Primary key (timestamp, doc_id) keeps each queue ordered by expiry time.
    for expiration_queue in range(EXPIRATION_QUEUE_COUNT):
        session.create_table(
            os.path.join(path, "expiration_queue_%d" % expiration_queue),
            ydb.TableDescription()
            .with_primary_keys("timestamp", "doc_id")
            .with_columns(
                ydb.Column("doc_id", ydb.OptionalType(ydb.PrimitiveType.Uint64)),
                ydb.Column("timestamp", ydb.OptionalType(ydb.PrimitiveType.Uint64)),
            )
        )
# Insert or replaces a document.
def add_document(session, path, url, html, timestamp):
    # Spread the expiry bookkeeping across the queues at random.
    queue = random.randint(0, EXPIRATION_QUEUE_COUNT - 1)
    # this will keep prepared query in cache
    prepared = session.prepare(ADD_DOCUMENT_TRANSACTION % (path, queue))
    print(
        "> AddDocument: \n"
        " Url: %s\n"
        " Timestamp %d" % (
            url,
            timestamp,
        )
    )
    session.transaction().execute(
        prepared,
        {'$url': url, '$html': html, '$timestamp': timestamp},
        commit_tx=True,
    )
# Reads document contents.
def read_document(session, path, url):
    # The query hashes the URL itself, so only $url is bound here.
    prepared = session.prepare(READ_DOCUMENT_TRANSACTION % path)
    print("> ReadDocument %s:" % url)
    result_sets = session.transaction().execute(prepared, {'$url': url}, commit_tx=True)
    result_set = result_sets[0]
    if len(result_set.rows) > 0:
        document = result_sets[0].rows[0]
        print(
            " DocId: %s\n"
            " Url: %s\n"
            " Timestamp: %d\n"
            " Html: %s" % (
                document.doc_id,
                document.url,
                document.timestamp,
                document.html,
            )
        )
    else:
        print(" Not found")
def read_expired_document(session, path, expiration_queue, timestamp, last_timestamp, last_doc_id):
    # Fetch the next page (up to 100 rows) of entries in the given queue that
    # expired at or before 'timestamp', resuming after the
    # (last_timestamp, last_doc_id) cursor.
    prepared = session.prepare(READ_EXPIRED_BATCH_TRANSACTION % (path, expiration_queue, expiration_queue))
    result_sets = session.transaction().execute(
        prepared, {'$timestamp': timestamp, '$prev_timestamp': last_timestamp, '$prev_doc_id': last_doc_id},
        commit_tx=True,
    )
    return result_sets[0]
def delete_expired_document(session, path, expiration_queue, doc_id, timestamp):
    # Delete the document row and its queue entry in one transaction; the
    # timestamp match guards against deleting a freshly re-added document.
    prepared = session.prepare(DELETE_EXPIRED_DOCUMENT % (path, expiration_queue))
    session.transaction().execute(
        prepared, {'$doc_id': doc_id, '$timestamp': timestamp},
        commit_tx=True,
    )
def delete_expired(session, path, expiration_queue, timestamp):
    """Drain one expiration queue: page through all entries expired at or
    before 'timestamp' and delete each corresponding document.

    The (last_timestamp, last_doc_id) pair is the paging cursor fed back
    into READ_EXPIRED_BATCH_TRANSACTION.
    """
    print("> DeleteExpired from queue #%d:" % expiration_queue)
    last_timestamp = 0
    last_doc_id = 0
    while True:
        result_set = read_expired_document(
            session, path, expiration_queue, timestamp,
            last_timestamp, last_doc_id
        )
        if not result_set.rows:
            break
        for document in result_set.rows:
            last_doc_id = document.doc_id
            last_timestamp = document.timestamp
            # Fix: report the document's own timestamp; the original printed
            # the cutoff argument 'timestamp' for every row.
            print(" DocId: %s Timestamp: %d" % (last_doc_id, last_timestamp))
            delete_expired_document(
                session, path, expiration_queue, last_doc_id, last_timestamp)
def _run(driver, database, path):
    """Demo scenario: create the tables, insert documents, read them back,
    and purge expired rows from every expiration queue.

    Relies on helpers defined elsewhere in this module:
    ensure_path_exists, create_tables, EXPIRATION_QUEUE_COUNT.
    """
    ensure_path_exists(driver, database, path)
    full_path = os.path.join(database, path)
    create_tables(driver.table_client, full_path)
    session = driver.table_client.session().create()
    # Seed two documents with timestamps 1 and 2.
    add_document(
        session, full_path,
        "https://yandex.ru/",
        "<html><body><h1>Yandex</h1></body></html>",
        1,
    )
    add_document(
        session, full_path,
        "https://ya.ru/",
        "<html><body><h1>Yandex</h1></body></html>",
        2
    )
    read_document(session, full_path, "https://yandex.ru/")
    read_document(session, full_path, "https://ya.ru/")
    # Purge everything strictly older than timestamp 1 from each queue.
    for expiration_queue in range(EXPIRATION_QUEUE_COUNT):
        delete_expired(
            session,
            full_path,
            expiration_queue,
            1
        )
    read_document(session, full_path, "https://ya.ru/")
    # Re-add the same URL twice; the later add (timestamp 3) should win.
    add_document(
        session, full_path,
        "https://yandex.ru/",
        "<html><body><h1>Yandex</h1></body></html>",
        2
    )
    add_document(
        session, full_path,
        "https://yandex.ru/",
        "<html><body><h1>Yandex</h1></body></html>",
        3
    )
    # Purge with cutoff 2; the timestamp-3 document should survive.
    for expiration_queue in range(EXPIRATION_QUEUE_COUNT):
        delete_expired(
            session, full_path,
            expiration_queue,
            2
        )
    read_document(session, full_path, "https://yandex.ru/")
    read_document(session, full_path, "https://ya.ru/")
def run(endpoint, database, path):
    """Connect to YDB at *endpoint*/*database* and execute the demo scenario.

    Raises RuntimeError if the driver cannot connect within 5 seconds.
    """
    driver_config = ydb.DriverConfig(endpoint, database, credentials=ydb.construct_credentials_from_environ())
    with ydb.Driver(driver_config) as driver:
        try:
            # Fail fast if the endpoint is unreachable.
            driver.wait(timeout=5)
        except TimeoutError:
            raise RuntimeError("Connect failed to YDB")
        _run(driver, database, path)
|
python
|
#Elsa by Frostmeister
import discord
import math
import time
import googlesearch as gs
import urbandictionary as ud
import random
import asyncio
from discord.ext import commands
####### General
class General:
    """General informational commands for the Elsa bot.

    Written against the discord.py 0.16-era API (``bot.say``,
    ``pass_context``, cog classes without a base class).
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def invite(self):
        """Invite link for Elsa"""
        embed = discord.Embed(title="Elsa's Invite Link ", description="You can invite me to your server", color=0xf5f5dc)
        embed.add_field(name="Name ", value="Elsa", inline=True)
        embed.add_field(name="Prefix for commands", value="e! , E!", inline=True)
        embed.add_field(name="Invite Link", value=" https://discordapp.com/oauth2/authorize?client_id=396540743877001216&scope=bot&permissions=2146958591", inline=True)
        embed.set_thumbnail(url="https://cdn.discordapp.com/avatars/396540743877001216/85e72bb348b8e0646f25c2926cd7cea5.jpg?size=1024 ")
        embed.set_footer(text=" Feel free to uncheck some permissions ")
        await self.bot.say(embed=embed)

    @commands.command(pass_context=True)
    async def info(self, ctx, user: discord.Member = None):
        """Gives info about someone"""
        # Default to the invoking author when no member is given.
        user = user or ctx.message.author
        try:
            embed = discord.Embed(title="{}'s info".format(user.name), description="Here's what I could find in my bag...", color=0xf5f5dc)
            embed.add_field(name="Username", value=user.name, inline=True)
            embed.add_field(name="Nickname", value=user.nick, inline=True)
            embed.add_field(name="ID", value=user.id, inline=True)
            embed.add_field(name="Bot", value=user.bot, inline=True)
            embed.add_field(name="Status", value=user.status, inline=True)
            embed.add_field(name="Highest role", value=user.top_role)
            embed.add_field(name="Joined Server", value=user.joined_at)
            embed.add_field(name="Joined Discord", value=user.created_at)
            embed.set_thumbnail(url=user.avatar_url)
            await self.bot.say(embed=embed)
        except Exception:
            # FIX: was a bare `except:`, which also swallows SystemExit,
            # KeyboardInterrupt and asyncio cancellation.
            await self.bot.say("Error")

    @commands.command(pass_context=True, aliases=['svinfo'])
    async def serverinfo(self, ctx):
        """Gives info about the server"""
        embed = discord.Embed(title="{}'s info".format(ctx.message.server.name), description="Here's what I could find in my bag...", color=0xf5f5dc)
        embed.add_field(name="Servername", value=ctx.message.server.name, inline=True)
        embed.add_field(name="ID", value=ctx.message.server.id, inline=True)
        embed.add_field(name="Verification Level", value=ctx.message.server.verification_level, inline=True)
        embed.add_field(name="Server Region", value=ctx.message.server.region, inline=True)
        embed.add_field(name="Owner", value=ctx.message.server.owner, inline=True)
        embed.add_field(name="Channels", value=len(ctx.message.server.channels))
        embed.add_field(name="Roles", value=len(ctx.message.server.roles))
        embed.add_field(name="Members", value=len(ctx.message.server.members))
        embed.add_field(name="Emojis", value=len(ctx.message.server.emojis))
        embed.set_thumbnail(url=ctx.message.server.icon_url)
        await self.bot.say(embed=embed)
###### SETUP
def setup(bot):
    """discord.py extension entry point: register the General cog with the bot."""
    bot.add_cog(General(bot))
|
python
|
# -*- coding:utf-8 -*-
"""
jsondict <-> dict <-> model object
\______________________/
"""
def _datetime(*args):
    """Build a UTC-aware datetime from positional datetime components."""
    import pytz
    from datetime import datetime

    # tzinfo is datetime's last positional parameter, so append pytz.utc.
    return datetime(*args, pytz.utc)
def _getTarget():
    """Return the class under test (Draft4MappingFactory)."""
    from alchemyjsonschema.mapping import Draft4MappingFactory
    return Draft4MappingFactory
def _makeOne(schema_factory, model, *args, **kwargs):
    """Build a mapping for *model*, resolving relations against the test models module."""
    import alchemyjsonschema.tests.models as models
    module = models
    mapping_factory = _getTarget()(schema_factory, module, *args, **kwargs)
    return mapping_factory(model)
def test_it__dict_from_model_object():
    """Mapping direction: model object -> plain dict (nested users included)."""
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group, User
    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)
    group = Group(
        name="ravenclaw", color="blue", created_at=_datetime(2000, 1, 1, 10, 0, 0, 0)
    )
    group.users = [User(name="foo", created_at=_datetime(2000, 1, 1, 10, 0, 0, 0))]
    group_dict = target.dict_from_object(group)
    # Unsaved objects keep pk=None; datetimes stay as aware datetime objects.
    assert group_dict == {
        "color": "blue",
        "users": [
            {
                "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
                "pk": None,
                "name": "foo",
            }
        ],
        "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
        "pk": None,
        "name": "ravenclaw",
    }
def test_it__jsondict_from_model():
    """Mapping direction: model object -> JSON-serializable dict (ISO datetimes)."""
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group, User
    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)
    group = Group(
        name="ravenclaw", color="blue", created_at=_datetime(2000, 1, 1, 10, 0, 0, 0)
    )
    group.users = [User(name="foo", created_at=_datetime(2000, 1, 1, 10, 0, 0, 0))]
    jsondict = target.jsondict_from_object(group, verbose=True)
    import json
    # The whole structure must be serializable by json.dumps as-is.
    assert json.dumps(jsondict)
    assert jsondict == {
        "color": "blue",
        "name": "ravenclaw",
        "users": [
            {"name": "foo", "pk": None, "created_at": "2000-01-01T10:00:00+00:00"}
        ],
        "pk": None,
        "created_at": "2000-01-01T10:00:00+00:00",
    }
def test_it__validate__jsondict():
    """A well-formed jsondict passes schema validation."""
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group
    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)
    jsondict = {
        "color": "blue",
        "name": "ravenclaw",
        "users": [{"name": "foo", "pk": 1, "created_at": "2000-01-01T10:00:00+00:00"}],
        "pk": 1,
        "created_at": "2000-01-01T10:00:00+00:00",
    }
    # Should not raise for a valid payload; not raising is the assertion here.
    target.validate_jsondict(jsondict)
def test_it__dict_from_jsondict():
    """Mapping direction: JSON dict -> python dict (ISO strings -> aware datetimes)."""
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group
    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)
    jsondict = {
        "color": "blue",
        "name": "ravenclaw",
        "users": [{"name": "foo", "pk": 10, "created_at": "2000-01-01T10:00:00+00:00"}],
        "pk": None,
        "created_at": "2000-01-01T10:00:00+00:00",
    }
    group_dict = target.dict_from_jsondict(jsondict)
    assert group_dict == {
        "color": "blue",
        "users": [
            {"created_at": _datetime(2000, 1, 1, 10, 0, 0, 0), "pk": 10, "name": "foo"}
        ],
        "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
        "pk": None,
        "name": "ravenclaw",
    }
def test_it__object_from_dict():
    """Mapping direction: python dict -> model objects (nested User instances built)."""
    from alchemyjsonschema import StructuralWalker, SchemaFactory
    from .models import Group, User
    schema_factory = SchemaFactory(StructuralWalker)
    target = _makeOne(schema_factory, Group)
    group_dict = {
        "color": "blue",
        "users": [
            {
                "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
                "pk": None,
                "name": "foo",
            }
        ],
        "created_at": _datetime(2000, 1, 1, 10, 0, 0, 0),
        "pk": None,
        "name": "ravenclaw",
    }
    # strict=False tolerates missing/None primary keys.
    group = target.object_from_dict(group_dict, strict=False)
    assert isinstance(group, Group)
    assert group.color == "blue"
    assert group.name == "ravenclaw"
    assert group.pk is None
    assert group.created_at == _datetime(2000, 1, 1, 10, 0, 0, 0)
    assert (len(group.users) == 1) and (isinstance(group.users[0], User))
    assert group.users[0].name == "foo"
    assert group.users[0].pk is None
    assert group.users[0].created_at == _datetime(2000, 1, 1, 10, 0, 0, 0)
|
python
|
import numpy as np
'''
Reorient the mesh represented by @vertices so that the z-axis is aligned with @axis
'''
def orient_mesh(vertices, axis):
    """Rotate *vertices* (N x 3) so the mesh z-axis is aligned with *axis*.

    Applies a rotation about y, then about x, derived from the axis
    direction. Degenerate axes (zero length / zero yz-projection) skip the
    corresponding rotation.
    """
    ax, ay, az = axis[0], axis[1], axis[2]
    axis_len = np.sqrt(ax**2 + ay**2 + az**2)
    yz_len = np.sqrt(ay**2 + az**2)
    # Rotation about the y-axis.
    if axis_len != 0:
        theta_y = np.arccos(yz_len / axis_len)
        cy, sy = np.cos(theta_y), np.sin(theta_y)
        vertices = np.dot(vertices, [[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    # Rotation about the x-axis.
    if yz_len != 0:
        theta_x = np.arccos(az / yz_len)
        cx, sx = np.cos(theta_x), np.sin(theta_x)
        vertices = np.dot(vertices, [[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    return vertices
|
python
|
# -*- coding: utf-8 -*-
import logging
import math
import random
from PIL import Image, ImageDraw
from .wallpaper_filter import WallpaperFilter
from ..geom.point import Point
from ..geom.size import Size
logger = logging.getLogger(__name__)
class Tunnel(WallpaperFilter):
    """Rotate each wallpaper so it faces the centre of the monitor,
    producing a 'tunnel' arrangement."""

    def _centroid(self, size) -> Point:
        # Integer centre of a width/height pair.
        return Point(size.width // 2, size.height // 2)

    def _filter(self, image: Image.Image, monitor: 'Monitor', position: Point) -> Image.Image:
        m_centre = self._centroid(monitor.size)
        # Image centre in monitor coordinates: its top-left position plus half its size.
        i_centre = (Point(*position) + self._centroid(Size(*image.size)))
        # Negated atan2 because screen y grows downward.
        angle = (math.degrees(-math.atan2(m_centre.y - i_centre.y, m_centre.x - i_centre.x)))
        # Fold the angle into [-90, 90] so images are never rendered upside-down.
        if angle > 90:
            angle = 180 - angle
        if angle < -90:
            angle = 180 + angle
        logger.info('P1: %s, P2:%s, Angle: %s', m_centre, i_centre, angle)
        # expand=1 grows the canvas to fit; uncovered corners become transparent.
        image = image.rotate(angle, expand=1, fillcolor=(0, 0, 0, 0))
        return image
class Jiggle(WallpaperFilter):
    """Rotate each wallpaper by a small random tilt in [-10, 10) degrees."""

    def _filter(self, image: Image.Image, monitor: 'Monitor', position: Point) -> Image.Image:
        # uniform(-10, 10) computes -10 + 20 * random(): the same draw as
        # the expression 20 * random() - 10.
        tilt = random.uniform(-10.0, 10.0)
        return image.rotate(tilt, expand=1, fillcolor=(0, 0, 0, 0))
|
python
|
#!/usr/bin/env python
#-*- coding=UTF-8 -*-
import httplib2
import json
import random
import string
import time
import urllib
from securUtil import SecurUtil
class smsUtil():
    """Helpers for sending template SMS through the NetEase (netease.im) API.

    NOTE(review): Python 2 era code (`urllib.urlencode`). The AppKey/AppSecret
    below are hard-coded credentials and should live in configuration/secrets.
    """

    @staticmethod
    def baseHTTPSRequest(url, data):
        """POST *data* (form-encoded) to the NetEase SMS endpoint with signed headers.

        Returns (http_status, response_body).
        """
        # Configure the HTTP request headers per the NetEase checksum scheme:
        # CheckSum = sha1(AppSecret + Nonce + CurTime).
        AppKey = '1958cd7bc542a299b0c3bc428f14006e'
        AppSecret = 'a3774be7f5a4'
        Nonce = ''.join(random.sample(string.ascii_letters + string.digits, random.randint(1, 62)))
        CurTime = '%.0f' % time.time()
        CheckSum = SecurUtil.hashForString('sha1', '%s%s%s' % (AppSecret, Nonce, CurTime))
        headers = {}
        headers['AppKey'] = AppKey
        headers['Nonce'] = Nonce
        headers['CurTime'] = CurTime
        headers['CheckSum'] = CheckSum
        headers['Content-Type'] = 'application/x-www-form-urlencoded;charset=utf-8'
        print(headers)
        # Convert the data to JSON format (disabled: the API takes form encoding)
        #json_data = json.dumps(data)
        #print(json_data)
        # NOTE(review): certificate validation is disabled — MITM risk.
        conn = httplib2.Http(disable_ssl_certificate_validation=True)
        # NOTE: the `url` parameter is ignored; the endpoint is hard-coded here.
        resp, content = conn.request('https://api.netease.im/sms/sendtemplate.action', method='POST', body=urllib.urlencode(data), headers=headers)
        print(resp.status)
        print(content)
        return resp.status, content

    @staticmethod
    def sendTemplate(templateid, mobiles, params):
        """Send the template *templateid* to *mobiles* with *params*."""
        url = 'https://api.netease.im:443/sms/sendtemplate.action'
        data = {}
        data['templateid'] = templateid
        data['mobiles'] = mobiles
        data['params'] = params
        smsUtil.baseHTTPSRequest(url, data)
|
python
|
"""
drift
=====
Drift calculation methods.
"""
from .continuous import drift_continuous
from .roman import drift_roman
|
python
|
# Copyright (c) 2019, Digi International, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from machine import Pin
from xbee import relay
# Pin D9 (ON/SLEEP/DIO9)
LED_PIN_ID = "D9"

print(" +------------------------------------------+")
print(" | XBee MicroPython Relay Frames LED Sample |")
print(" +------------------------------------------+\n")

# Set up the LED pin object to manage the LED status. Configure the pin
# as output and set its initial value to off (0).
led_pin = Pin(LED_PIN_ID, Pin.OUT, value=0)

# Main loop: poll for User Data Relay frames and drive the LED from the
# decoded "ON"/"OFF" command payload. Any other payload is ignored.
while True:
    # Start reading relay frames.
    relay_frame = relay.receive()
    # If a relay frame is received, parse the data.
    if relay_frame is not None:
        data = relay_frame["message"].decode("utf-8")
        if data == "ON":
            led_pin.on()
        elif data == "OFF":
            led_pin.off()
    # Short sleep keeps the poll loop from busy-spinning.
    time.sleep(0.1)
|
python
|
import math, glm
class Camera:
    """FPS-style fly camera built on pyglm.

    Keeps a position plus yaw/pitch Euler angles (degrees) and derives the
    direction/right/up basis vectors from them.
    """

    def __init__(self, position):
        # position: starting location — presumably a glm.vec3; confirm at call sites.
        self.position = position
        self.up = glm.vec3(0, 1, 0)
        self.worldUp = glm.vec3(0, 1, 0)
        self.pitch = 0
        self.yaw = 0
        self.speed = 20          # movement units per second
        self.sensitivity = 0.25  # degrees of rotation per unit of mouse offset
        self.updateVectors()

    def moveRight(self, time):
        # `time` is the frame delta, making movement framerate-independent.
        self.position += self.right * (self.speed * time)

    def moveLeft(self, time):
        self.position -= self.right * (self.speed * time)

    def moveTop(self, time):
        # Moves forward along the view direction (despite the name).
        self.position += self.direction * (self.speed * time)

    def moveBottom(self, time):
        # Moves backward along the view direction.
        self.position -= self.direction * (self.speed * time)

    def rotate(self, offsetX, offsetY):
        """Apply mouse deltas to yaw/pitch, clamping pitch to avoid flipping."""
        self.yaw += offsetX * self.sensitivity
        self.pitch += offsetY * self.sensitivity
        if self.pitch > 89:
            self.pitch = 89
        elif self.pitch < -89:
            self.pitch = -89
        self.updateVectors()

    def updateVectors(self):
        """Recompute the direction/right/up basis from the current yaw/pitch."""
        x = math.cos(glm.radians(self.yaw)) * math.cos(glm.radians(self.pitch))
        y = math.sin(glm.radians(self.pitch))
        z = math.sin(glm.radians(self.yaw)) * math.cos(glm.radians(self.pitch))
        front = glm.vec3(x, y, z)
        self.direction = glm.normalize(front)
        # Re-orthogonalize right/up against the fixed world up vector.
        self.right = glm.normalize(glm.cross(self.direction, self.worldUp))
        self.up = glm.normalize(glm.cross(self.right, self.direction))

    def getViewMatrix(self):
        """Return the lookAt view matrix for the current camera pose."""
        return glm.lookAt(self.position, self.position + self.direction, self.up)
|
python
|
from bfieldtools import contour
import pytest
import numpy as np
from numpy.testing import (
assert_array_almost_equal,
assert_array_equal,
assert_allclose,
assert_equal,
)
def setup_contour_input():
    """Load the unit-disc example mesh and create a radial stream function.

    Returns (mesh, scalars) where scalars = r**2 yields circular contours.
    """
    from bfieldtools.utils import load_example_mesh
    mesh = load_example_mesh("unit_disc")
    r = np.linalg.norm(mesh.vertices, axis=1)
    scalars = r ** 2
    return mesh, scalars
def compare_contour_direction_to_rotated_gradient(mesh, scalars, polyline):
    """Check the inner product between the polyline edges and the rotated
    gradient vectors closest to the initial points of those edges.
    These should point in the same direction.

    Parameters
    ----------
    mesh : trimesh
        mesh
    scalars : ndarray
        stream function
    polyline : ndarray (N, 3)
        coordinates of points representing a polyline
    """
    from bfieldtools.mesh_calculus import gradient
    edges = polyline[1:] - polyline[:-1]
    # Rotated (in-plane, divergence-free) gradient per face, transposed to (F, 3).
    g = gradient(scalars, mesh, rotated=True).T
    # Face centroids, used to associate polyline points with mesh faces.
    fc = mesh.vertices[mesh.faces].mean(axis=1)
    norm = np.linalg.norm
    p = polyline
    # Find closest face centers to polyline nodes
    f_inds = np.argmin(norm(p[:, None, :] - fc[None, :, :], axis=-1), axis=1)
    g_poly = g[f_inds]
    # A positive dot product means edge and rotated gradient are co-directed.
    assert np.all(np.sum(g_poly[:-1] * edges, axis=1) > 0)
def compare_magnetic_field_directions(mesh, scalars, polys, test_point):
    """Check the direction of the magnetic field calculated from the mesh and
    stream function versus the direction of the field generated by contours.

    Parameters
    ----------
    mesh : trimesh
        mesh
    scalars : ndarray
        stream function
    polys : list of ndarray (N, 3)
        coordinates of points representing many polylines
    test_point: ndarray(1, 3)
        coordinates of a test point
    """
    from bfieldtools.line_magnetics import magnetic_field
    from bfieldtools.mesh_conductor import magnetic_field_coupling
    # Field at the test point from the continuous stream function.
    B_mesh = magnetic_field_coupling(mesh, test_point)
    b_stream_func = B_mesh @ scalars
    # Summed field from the discrete contour line currents.
    b_polys = np.array([magnetic_field(p, test_point) for p in polys])
    b_poly_sum = b_polys.sum(axis=0)
    # Positive inner product => both fields point into the same half-space.
    assert np.sum(b_poly_sum * b_stream_func) > 0
def test_scalar_contour_direction():
    """ Test the direction of scalar_contour and the field direction
    generated by the scalar contour
    """
    mesh, scalars = setup_contour_input()
    N = 10
    polys, vals = contour.scalar_contour(
        mesh, scalars, N_contours=N, return_values=True
    )
    # The outermost contour should follow the rotated gradient of the stream function.
    compare_contour_direction_to_rotated_gradient(mesh, scalars, polys[-1])
    # Probe the field at a point on the axis above the disc.
    test_point = np.array([[0, 0, 1]])
    compare_magnetic_field_directions(mesh, scalars, polys, test_point)
def test_simplify_contour():
    # Placeholder: simplify_contour is not actually exercised yet — TODO implement.
    assert True
# Allow running this test module directly, without pytest.
if __name__ == "__main__":
    test_scalar_contour_direction()
|
python
|
# Read a length value in yards and print it converted to metres.
# Conversion formula: M = 0.91 * J
J = float(input("Digite um valor em jardas: "))
M = 0.91 * J
print("O valor em de jardas para metros é: %0.2f" % M)
|
python
|
from .arch import Arch
from .debian import Debian
from .ubuntu import Ubuntu
from .redhat import RedHat
from .centos import CentOS
|
python
|
import argparse
import torch
from torch.autograd import Variable
import model
import util
import data
import time
import torchvision.transforms as transforms
import shutil
# Collect the Planet* model constructors exposed by the model module so
# -model can be validated/selected by name. (Python 2 script: print statements.)
model_names = sorted(name for name in model.__dict__
                     if name.startswith("Planet")
                     and callable(model.__dict__[name]))
print model_names
# def weighted_binary_cross_entropy(output, target, weights=None):
#
# if weights is not None:
# assert len(weights) == 2
#
# loss = weights[1] * (target * torch.log(output)) + \
# weights[0] * ((1 - target) * torch.log(1 - output))
# else:
# loss = target * torch.log(output) + (1 - target) * torch.log(1 - output)
#
# return torch.neg(torch.mean(loss))
def weighted_multi_label_loss(p, y):
    """Mean binary cross-entropy with the positive term down-weighted by 0.1.

    p -- predicted probabilities in [0, 1]; y -- binary targets.
    The 1e-10 epsilon guards log() against exact 0/1 probabilities.
    """
    positive_term = y * torch.log(p + 1e-10) * 0.1
    negative_term = (1. - y) * torch.log(1. - p + 1e-10)
    return torch.neg(torch.mean(positive_term + negative_term))
# class WeightedMultiLabelLoss(torch.nn.modules.loss._WeightedLoss):
#
# def forward(self, input, target):
# #_assert_no_grad(target)
# weight = Variable(torch.zeros(input.size()))#self.weight.repeat(input.size(0),1))
# return weighted_multi_label_loss(torch.sigmoid(input), target,
# weight)
def train(net, loader, criterion, optimizer, decay=0.):
    """Run one training epoch and return the average criterion loss.

    NOTE(review): uses legacy PyTorch (<=0.3) APIs — `Variable` wrappers and
    `loss.data[0]` scalar access — so this requires the project's pinned
    torch version.

    decay -- coefficient for manual L1 regularization over all parameters.
    """
    net.train()
    avg_loss = 0.
    start = time.time()
    for i, (X, y) in enumerate(loader):
        input_var = torch.autograd.Variable(X)
        target_var = torch.autograd.Variable(y)
        # weights = torch.autograd.Variable(weight.repeat(X.size(0),1),requires_grad=False)
        output = net(input_var)
        #loss = weighted_multi_label_loss(torch.sigmoid(output),target_var)
        loss = criterion(output, target_var)
        # Accumulate the un-regularized loss for reporting.
        avg_loss += loss.data[0]
        # Manual L1 penalty: sum of |param| over every parameter tensor.
        l1_crit = torch.nn.L1Loss(size_average=False)
        reg_loss = 0
        for param in net.parameters():
            reg_loss += l1_crit(param, Variable(torch.zeros(param.size()), requires_grad=False))
        loss += decay * reg_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Progress report every 20 batches with a naive linear ETA.
        if i % 20 == 0:
            dt = time.time() - start
            pct = float(i + 1) / len(loader)
            curr_loss = avg_loss / (i + 1)
            print('%fs elapsed \t'
                  '%f done \t'
                  '%f loss \t'
                  '%fs remaining' % (dt, pct * 100, curr_loss, dt / pct * (1. - pct)))
    return avg_loss / len(loader)
def validate(net, loader, criterion):
    """Run one evaluation pass and return the average criterion loss.

    NOTE(review): legacy PyTorch (<=0.3) API — `Variable(..., volatile=True)`
    disables autograd and `loss.data[0]` reads the scalar; requires the
    project's pinned torch version.
    """
    net.eval()
    avg_loss = 0.
    for i, (X, y) in enumerate(loader):
        input_var = torch.autograd.Variable(X, volatile=True)  # no backprop
        target_var = torch.autograd.Variable(y)
        # FIX: the original called net(input_var) twice per batch (the first
        # result was immediately overwritten), doubling evaluation cost.
        output = net(input_var)
        #loss = weighted_multi_label_loss(torch.sigmoid(output),target_var)
        loss = criterion(output, target_var)
        avg_loss += loss.data[0]
    return avg_loss / len(loader)
def save_model(model_state, filename='checkpoint.pth.tar', is_best=False):
    """Persist a training checkpoint dict, prefixing the file with the arch name.

    When is_best is set, the checkpoint is additionally copied to
    '<arch>-best.pth.tar'.
    """
    target = '%s-%s' % (model_state['arch'], filename)
    torch.save(model_state, target)
    if is_best:
        best_name = model_state['arch'] + '-best.pth.tar'
        shutil.copyfile(target, best_name)
def main(args):
    """Build transforms, model, data loaders and run the training loop with
    checkpointing and early stopping, driven by the parsed CLI `args`."""
    # create model and optimizer
    train_trans = []
    val_trans = []
    debug_trans = []
    siz = (256, 256)  # default input size when no -scale/-crop is given
    if args.flip:
        train_trans.append(transforms.RandomHorizontalFlip())
        train_trans.append(util.RandomVerticalFlip())
    if args.rotate:
        # NOTE(review): appends RandomVerticalFlip again rather than a rotation
        # transform — looks like a copy-paste slip; confirm intent.
        train_trans.append(util.RandomVerticalFlip())
    if args.translate:
        train_trans.append(util.RandomTranslation())
    if args.scale > 0:
        # Center-crop to 224 first, then rescale to the requested size.
        train_trans.append(transforms.CenterCrop(224))
        train_trans.append(transforms.Scale(args.scale))
        val_trans.append(transforms.CenterCrop(224))
        val_trans.append(transforms.Scale(args.scale))
        debug_trans.append(transforms.CenterCrop(224))
        debug_trans.append(transforms.Scale(args.scale))
        siz = (args.scale, args.scale)
    if args.crop > 0:
        # Random crop for training, deterministic center crop for eval.
        train_trans.append(transforms.RandomCrop(args.crop))
        val_trans.append(transforms.CenterCrop(args.crop))
        debug_trans.append(transforms.CenterCrop(args.crop))
        siz = (args.crop, args.crop)
    train_trans.append(transforms.ToTensor())
    #train_trans.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    val_trans.append(transforms.ToTensor())
    debug_trans.append(transforms.ToTensor())
    # val_trans.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    # Instantiate the requested Planet* architecture by name.
    net = model.__dict__[args.model](input_size=siz, num_labels=17, dropout=args.dropout, feature_maps=args.features)
    print net
    optimizer = torch.optim.Adam(net.parameters(), weight_decay=args.l2_decay)
    #stats = torch.load('positive.pth.tar')
    #weights = (1.-stats['positive'])/stats['positive']
    #criterion = WeightedMultiLabelLoss(weight = weights)
    criterion = torch.nn.MultiLabelSoftMarginLoss()#torch.nn.BCELoss()#torch.nn.MultiLabelSoftMarginLoss()
    print net.feature_size
    #optionally restore weights
    if args.resume is not None:
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        best_loss = checkpoint['score']
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
    else:
        best_loss = 1e10
    # load data
    debug_data = data.PlanetData(args.datapath + '/debug',
                                 args.datapath + '/img_labels.csv',
                                 args.datapath + '/labels.txt',
                                 transform=debug_trans)
    train_data = data.PlanetData(args.datapath + '/train',
                                 args.datapath + '/img_labels.csv',
                                 args.datapath + '/labels.txt',
                                 transform=train_trans)
    val_data = data.PlanetData(args.datapath + '/val',
                               args.datapath + '/img_labels.csv',
                               args.datapath + '/labels.txt',
                               transform=val_trans)
    debug_loader = torch.utils.data.DataLoader(
        debug_data,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers)
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers)
    val_loader = torch.utils.data.DataLoader(
        val_data,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers)
    #run training
    patience = args.patience
    for e in range(args.nepochs):
        start = time.time()
        # run 1 training epoch
        if args.debug:
            # Debug mode trains on the tiny debug split and skips validation.
            train_loss = train(net, debug_loader, criterion, optimizer, decay=args.l1_decay)
            val_loss = 0.
        else:
            train_loss = train(net, train_loader, criterion, optimizer, decay=args.l1_decay)
            val_loss = validate(net, val_loader, criterion)
        # validate
        end = time.time()
        #checkpoint
        print ('epoch %d \t'
               'time %f \t'
               'train loss %f \t'
               'val loss %f \t' % (e, end - start, train_loss, val_loss)
               )
        model_state = {
            'epoch': e,
            'score': val_loss,
            'cfg': net.cfg,
            'arch': args.model,
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        # Always checkpoint; also copy to '<arch>-best' when val loss improved.
        save_model(model_state, 'checkpoint.pth.tar', val_loss < best_loss)
        #early stopping
        if val_loss < best_loss:
            best_loss = val_loss
            patience = args.patience
        else:
            patience -= 1
            if patience == 0:
                print('early_stopping')
                break
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): `type=bool` is a known argparse pitfall — any non-empty
    # string (including "False") parses as True. Left as-is to preserve the
    # existing CLI contract; consider store_true/store_false actions instead.
    parser.add_argument("-model", type=str, default='PlanetNet', help="model name")
    parser.add_argument("-patience", type=int, default=5, help="early stopping patience")
    parser.add_argument("-crop", type=int, default=0, help="crop size")
    parser.add_argument("-scale", type=int, default=0, help="scale size")
    parser.add_argument("-features", type=int, default=64, help="feature maps")
    parser.add_argument("-flip", type=bool, default=True, help="random flips")
    parser.add_argument("-rotate", type=bool, default=True, help="random rotation")
    parser.add_argument("-translate", type=bool, default=True, help="random translation")
    parser.add_argument("-debug", action="store_true", help="run on debug set")
    parser.add_argument("-dropout", type=float, default=0.5, help="dropout")
    parser.add_argument("-l1_decay", type=float, default=0., help="l1 weight decay")
    parser.add_argument("-l2_decay", type=float, default=0., help="l2 weight decay")
    parser.add_argument("-batch_size", type=int, default=128, help="batch size")
    parser.add_argument("-resume", type=str, default=None, help="resume training model file")
    parser.add_argument("-nepochs", type=int, default=100, help="max epochs")
    parser.add_argument("-workers", type=int, default=2, help="number of data loaders")
    parser.add_argument("datapath", type=str, help="data path")
    args = parser.parse_args()
    main(args)
|
python
|
import abc
class RecurrentSupervisedLearningEnv(metaclass=abc.ABCMeta):
    """
    An environment that's really just a supervised learning task.
    """

    @abc.abstractmethod
    def get_batch(self, batch_size):
        """
        :param batch_size: Size of the batch size
        :return: tuple (X, Y) where
        X is a numpy array of size (
            batch_size, self.sequence_length, self.feature_dim
        )
        Y is a numpy array of size (
            batch_size, self.sequence_length, self.target_dim
        )
        """
        pass

    @property
    @abc.abstractmethod
    def feature_dim(self):
        """
        :return: Integer. Dimension of the features.
        """
        pass

    @property
    @abc.abstractmethod
    def target_dim(self):
        """
        :return: Integer. Dimension of the target.
        """
        pass

    @property
    @abc.abstractmethod
    def sequence_length(self):
        """
        :return: Integer. Length of the sequences returned by get_batch.
        """
        pass
|
python
|
'''
Created on Aug 9, 2013
@author: [email protected] - salcho
'''
class wsResponse:
    """Container for a web-service response produced by a plugin probe.

    `response` is expected to be a 2-tuple of (response object/headers, body);
    falsy members are normalized to None.
    """

    def __init__(self, id=-1, params=None, size=-1, response=None, payload=None, plugin=None):
        self.id = id
        self.params = params
        self.size = size
        self.http_code = -1
        self.response = None
        self.body = None
        # FIX: the original unconditionally indexed `response`, so the
        # documented default (response=None) crashed with a TypeError.
        if response is not None:
            self.response = response[0] if response[0] else None
            self.body = response[1] if response[1] else None
        #self.http_code = response[1][0] if response[1][0] else -1
        # Hard-coded status preserved from the original (real extraction is disabled above).
        self.http_code = 200
        self.payload = payload
        self.plugin = plugin

    def getID(self):
        return self.id

    def getParams(self):
        return self.params

    def getSize(self):
        return self.size

    def getBody(self):
        return self.body

    def getHTTPCode(self):
        return self.http_code

    def getResponse(self):
        return self.response

    def getPayload(self):
        return self.payload

    def getPlugin(self):
        return self.plugin
|
python
|
import tensorflow as tf
def iou(source, target):
    """Calculates intersection over union (IoU) for two sets of objects with
    box representations.

    This uses simple arithmetic and outer products to calculate the IoU
    between all pairs without looping.

    Parameters
    ----------
    source: tensor (float32)
        M x 4 tensor where each row contains the x,y location of the upper left
        corner of a box and its width and height in that order. Typically the
        predictions.
    target: tensor (float32)
        N x 4 tensor where each row contains the x,y location of the upper left
        corner of a box and its width and height in that order. Typically the
        ground truth.

    Returns
    -------
    iou: tensor (float32)
        M x N tensor containing IoU values between source and target boxes.
    """
    # split into corners and sizes
    xs, ys, ws, hs = tf.split(source, 4, axis=1)
    xt, yt, wt, ht = tf.split(target, 4, axis=1)
    # pairwise overlap bounds in each dimension (M x N via broadcasting)
    left = tf.maximum(xs, tf.transpose(xt))
    top = tf.maximum(ys, tf.transpose(yt))
    right = tf.minimum(xs + ws, tf.transpose(xt + wt))
    bottom = tf.minimum(ys + hs, tf.transpose(yt + ht))
    # FIX: reuse the bounds computed above — the original recomputed the same
    # maximum/minimum ops for horizontal/vertical, leaving left/top/right/bottom
    # as dead graph nodes and doubling the work.
    horizontal = right - left
    vertical = bottom - top
    # calculate intersection (clamped at 0 for disjoint boxes)
    intersection = tf.maximum(0.0, horizontal) * tf.maximum(0.0, vertical)
    # calculate iou: intersection over (areaA + areaB - intersection)
    iou = intersection / (ws * hs + tf.transpose(wt * ht) - intersection)
    return iou
def _greedy_iou_mapping_iter(i, ious, source_mask, target_mask, matches):
    """Performs one iteration of greedy IoU mapping.

    This is the loop body of the greedy IoU mapping algorithm. This identifies the
    best match having the highest IoU and removes the corresponding prediction and
    ground truth element from future consideration in matching.

    Parameters
    ----------
    i: int32
        Iteration number in mapping. Used for writing to output TensorArray.
    ious: tensor (float32)
        M x N tensor of IoU values used to generate mapping. Regression predictions
        are in rows and ground truth elements are in columns. This array is masked
        to remove previous matches when identifying the highest IoU match.
    source_mask: tensor (bool)
        1D M-length tensor where unmatched predictions are represented by 'True'.
    target_mask: tensor (bool)
        1D N-length tensor where unmatched ground truth elements are represented
        by 'True'.
    matches: TensorArray (float32)
        Accumulator; each written element is [source_index, target_index, iou].

    Returns
    -------
    i: int32
        Loop iteration counter.
    ious: tensor (float32)
        Same as input (passed through unchanged).
    source_mask: tensor (bool)
        Same as input but updated with current iteration match.
    target_mask: tensor (bool)
        Same as input but updated with current iteration match.
    matches: TensorArray (float32)
        Same as input but updated with current iteration match.
    """
    # mask targets and get best match for each source
    maxima = tf.reduce_max(tf.boolean_mask(ious, target_mask, axis=1), axis=1)
    target_indices = tf.argmax(tf.boolean_mask(ious, target_mask, axis=1), axis=1)
    # mask sources that were already matched
    maxima = tf.boolean_mask(maxima, source_mask)
    target_indices = tf.boolean_mask(target_indices, source_mask)
    # get source and target indices (NOTE: `max` shadows the builtin; kept as-is)
    max = tf.reduce_max(maxima)
    source_index = tf.argmax(maxima)
    target_index = tf.gather(target_indices, source_index)
    # correct for masked sources and targets: map masked-space indices back
    # to positions in the original (unmasked) arrays
    source_index = tf.gather(tf.where(source_mask), source_index)
    target_index = tf.gather(tf.where(target_mask), target_index)
    # update masks so this pair is excluded from subsequent iterations
    source_mask = tf.tensor_scatter_nd_update(source_mask, [source_index], [tf.constant(False)])
    target_mask = tf.tensor_scatter_nd_update(target_mask, [target_index], [tf.constant(False)])
    # write (source, target, iou) to TensorArray
    matches = matches.write(
        i, tf.concat([tf.cast(source_index, tf.float32), tf.cast(target_index, tf.float32), [max]], axis=0)
    )
    # update index
    i = i + 1
    return i, ious, source_mask, target_mask, matches
def greedy_iou_mapping(ious, min_iou):
    """Calculates greedy IoU mapping between predictions and ground truth.

    Uses intersection-over-union scores to compute a greedy mapping between
    ground truth and predicted objects. Greedy mapping can produce suboptimal
    results compared to the Kuhn–Munkres algorithm since matching is greedy.

    Parameters
    ----------
    ious: tensor (float32)
        M x N tensor of IoU values used to generate mapping. Regression predictions
        are in rows and ground truth elements are in columns. This array is masked
        to remove previous matches when identifying the highest IoU match.
    min_iou: float32
        Minimum IoU threshold for defining a match between a regression
        prediction and a ground truth box.

    Returns
    -------
    tp: int32
        True positive count of IoU mapping.
    fp: int32
        False positive count of IoU mapping.
    fn: int32
        False negative count of IoU mapping.
    tp_list: int32
        Two-dimensional tensor containing indices of true positive predictions
        in first column, and corresponding matching ground truth indices in second
        column.
    fp_list: int32
        One-dimensional tensor containing indices of false positive predictions.
    fn_list: int32
        One-dimensional tensor containing indices of false negative ground truth.
    """
    # initialize masks: True = still available for matching
    source_mask = tf.ones(tf.shape(ious)[0], tf.bool)
    target_mask = tf.ones(tf.shape(ious)[1], tf.bool)
    # define loop counter, condition, store for output
    i = tf.constant(0)
    matches = tf.TensorArray(tf.float32, size=tf.shape(ious)[0], dynamic_size=False)

    def condition(i, a, b, c, d):
        # One match per iteration; at most min(M, N) pairs can be formed.
        return tf.less(i, tf.minimum(tf.shape(ious)[0], tf.shape(ious)[1]))

    # loop to perform greedy mapping
    _, _, _, _, matches = tf.while_loop(
        condition,
        _greedy_iou_mapping_iter,
        [i, ious, source_mask, target_mask, matches],
        parallel_iterations=10,
    )
    # stack outputs into an (n_matches, 3) tensor of [src, tgt, iou]
    matches = matches.stack()
    # discard matches that do not meet min_iou
    matches = tf.boolean_mask(matches, tf.greater_equal(matches[:, 2], min_iou), axis=0)
    # calculate TP, FP, FN
    tp = tf.shape(matches)[0]
    fp = tf.shape(ious)[0] - tf.shape(matches)[0]
    fn = tf.shape(ious)[1] - tf.shape(matches)[0]
    # generate lists of indexes for TP, FP, FN (FP/FN = unmatched rows/columns)
    tp_list = tf.cast(matches[:, 0:2], tf.int32)
    fp_list = tf.sets.difference(
        [tf.range(tf.shape(ious)[0], dtype=tf.int32)], [tf.cast(matches[:, 0], dtype=tf.int32)]
    ).values
    fn_list = tf.sets.difference(
        [tf.range(tf.shape(ious)[1], dtype=tf.int32)], [tf.cast(matches[:, 1], dtype=tf.int32)]
    ).values
    return tp, fp, fn, tp_list, fp_list, fn_list
|
python
|
# Generated by Django 3.0.3 on 2020-08-10 13:48
from django.db import migrations
# Auto-generated Django migration: renames the ``user`` field on the
# ``coupon`` model to ``users``.
class Migration(migrations.Migration):
    """Rename Coupon.user -> Coupon.users (schema rename only)."""
    dependencies = [
        ('ecommerce_platform', '0015_auto_20200810_1252'),
    ]
    operations = [
        migrations.RenameField(
            model_name='coupon',
            old_name='user',
            new_name='users',
        ),
    ]
|
python
|
import pytest
from pytest import approx
import os
import shutil
import numpy as np
import pandas as pd
from tarpan.testutils.a03_cars.cars import get_fit
from tarpan.cmdstanpy.waic import (
waic, compare_waic, save_compare_waic_csv, save_compare_waic_txt,
waic_compared_to_df,
WaicData, WaicModelCompared,
compare_waic_tree_plot, save_compare_waic_tree_plot)
from tarpan.testutils.a04_height.height import (
get_fit1_intercept, get_fit2_fungus_treatment, get_fit3_treatment)
def test_waic():
    """Golden-value regression test for waic() on the cars fixture.

    NOTE(review): expected values are asserted to rel=1e-15, so they assume
    a fully deterministic (fixed-seed or cached) fit from get_fit() —
    confirm the fixture is cached.
    """
    fit = get_fit()
    result = waic(fit)
    assert result.waic == approx(421.5135196466395, rel=1e-15)
    assert len(result.waic_pointwise) == 50
    assert result.waic_pointwise[0] == approx(7.284060083431996, rel=1e-15)
    assert result.waic_pointwise[49] == approx(7.324510608904949, rel=1e-15)
    assert result.waic_std_err == approx(16.327468671341204, rel=1e-15)
    assert result.lppd == approx(-206.5875738029627, rel=1e-15)
    assert len(result.lppd_pointwise) == 50
    assert result.lppd_pointwise[0] == approx(-3.6203241203579615, rel=1e-15)
    assert result.lppd_pointwise[49] == approx(-3.641419673133626, rel=1e-15)
    assert result.penalty == approx(4.169186020357044, rel=1e-15)
    assert len(result.penalty_pointwise) == 50
    assert result.penalty_pointwise[0] == approx(0.021705921358036437,
                                                 rel=1e-15)
    assert result.penalty_pointwise[49] == approx(0.020835631318848448,
                                                  rel=1e-15)
def test_compare_waic():
    """Checks compare_waic() model ranking, WAIC differences and weights."""
    fit1_intercept = get_fit1_intercept()
    fit2_fungus_treatment = get_fit2_fungus_treatment()
    fit3_treatment = get_fit3_treatment()
    # NOTE(review): "Itercept" is misspelled but is used consistently as a
    # fixture label throughout this module — keep as-is.
    models = {
        "Itercept": fit1_intercept,
        "Fungus+treatment": fit2_fungus_treatment,
        "Treatment": fit3_treatment
    }
    result = compare_waic(models=models)
    # models must come back sorted from lowest (best) WAIC to highest
    assert [model.name for model in result] == ['Fungus+treatment',
                                                'Treatment',
                                                'Itercept']
    assert [round(model.waic_data.waic, 2) for model in result] == \
        [361.45, 402.71, 405.93]
    assert [round(model.waic_data.waic_std_err, 2) for model in result] == \
        [13.34, 10.78, 11.29]
    # the best model has no difference-to-best (None)
    difference = [
        None if model.waic_difference_best is None
        else round(model.waic_difference_best, 2)
        for model in result
    ]
    assert difference == [None, 41.27, 44.48]
    std_err = [
        None if model.waic_difference_best_std_err is None
        else round(model.waic_difference_best_std_err, 2)
        for model in result
    ]
    assert std_err == [None, 9.82, 11.55]
    assert [round(model.waic_data.penalty, 1) for model in result] == \
        [3.4, 2.6, 1.6]
    # Akaike weights must sum to ~1 across models
    actual_weight = [
        round(model.weight, 5)
        for model in result
    ]
    assert actual_weight == [0.99986, 2e-05, 0.00012]
def test_compare_waic__model_with_different_data_points():
    """compare_waic() must reject models fitted to different data sets."""
    cars_fit = get_fit()
    plants_fit = get_fit1_intercept()
    models = {
        "Cars": cars_fit,
        "Plants": plants_fit
    }
    with pytest.raises(AttributeError,
                       match=r"different number of data points"):
        compare_waic(models=models)
def test_waic_compared_to_df():
    """Builds three synthetic WAIC comparisons and checks the DataFrame view."""
    compared = []
    for idx in range(1, 4):
        waic_data = WaicData(
            waic=idx,
            waic_pointwise=[idx] * 3,
            waic_std_err=idx * 1.1,
            lppd=idx * 1.2,
            lppd_pointwise=[idx * 1.2] * 3,
            penalty=idx * 0.3,
            penalty_pointwise=[idx * 0.3] * 3,
        )
        compared.append(
            WaicModelCompared(
                name=f"Model {idx}",
                waic_data=waic_data,
                waic_difference_best=idx * 1.3,
                waic_difference_best_std_err=idx * 1.4,
                weight=idx * 1.7,
            )
        )
    result = waic_compared_to_df(compared=compared)
    assert len(result) == 3
    # expected cell values per row of the comparison DataFrame
    expected_rows = {
        "Model 1": {"WAIC": 1, "SE": 1.1, "dWAIC": 1.3, "dSE": 1.4,
                    "pWAIC": 0.3, "Weight": 1.7},
        "Model 2": {"WAIC": 2, "SE": 2.2, "dWAIC": 2.6, "dSE": 2.8,
                    "pWAIC": 0.6, "Weight": 3.4},
    }
    for model_name, expected in expected_rows.items():
        row = result.loc[model_name]
        for column, value in expected.items():
            assert row[column] == value
def test_save_compare_waic_csv():
    """save_compare_waic_csv() writes a CSV with the expected comparison rows."""
    fit1_intercept = get_fit1_intercept()
    fit2_fungus_treatment = get_fit2_fungus_treatment()
    fit3_treatment = get_fit3_treatment()
    models = {
        "Itercept": fit1_intercept,
        "Fungus+treatment": fit2_fungus_treatment,
        "Treatment": fit3_treatment
    }
    outdir = "tarpan/cmdstanpy/model_info/waic_test"
    # start from a clean output directory so the file-exists check is meaningful
    if os.path.isdir(outdir):
        shutil.rmtree(outdir)
    save_compare_waic_csv(models=models)
    assert os.path.isfile(os.path.join(outdir, "compare_waic.csv"))
    df = pd.read_csv(os.path.join(outdir, "compare_waic.csv"),
                     index_col="Name")
    assert len(df) == 3
    # best model: dWAIC/dSE are NaN (no difference to itself)
    row = df.loc["Fungus+treatment"]
    assert row["WAIC"] == approx(361.44, rel=1e-3)
    assert row["SE"] == approx(13.33, rel=1e-3)
    assert np.isnan(row["dWAIC"])
    assert np.isnan(row["dSE"])
    assert row["pWAIC"] == approx(3.4388, rel=1e-3)
    assert row["Weight"] == approx(0.99985, rel=1e-3)
    row = df.loc["Itercept"]
    assert row["WAIC"] == approx(405.93, rel=1e-3)
    assert row["SE"] == approx(11.292, rel=1e-3)
    assert row["dWAIC"] == approx(44.48, rel=1e-3)
    assert row["dSE"] == approx(11.55, rel=1e-3)
    assert row["pWAIC"] == approx(1.5745, rel=1e-3)
    assert row["Weight"] == approx(0.00012332, rel=1e-3)
def test_save_compare_waic_txt():
    """save_compare_waic_txt() writes a readable comparison table."""
    models = {
        "Itercept": get_fit1_intercept(),
        "Fungus+treatment": get_fit2_fungus_treatment(),
        "Treatment": get_fit3_treatment(),
    }
    outdir = "tarpan/cmdstanpy/model_info/waic_test"
    if os.path.isdir(outdir):
        shutil.rmtree(outdir)
    save_compare_waic_txt(models=models)
    report_path = os.path.join(outdir, "compare_waic.txt")
    assert os.path.isfile(report_path)
    with open(report_path, 'r') as report:
        contents = report.read()
    # the report must mention the dWAIC column, a model name and a WAIC value
    for fragment in ("dWAIC", "Treatment", "402.71"):
        assert fragment in contents
def test_compare_waic_tree_plot():
    """compare_waic_tree_plot() labels its x axis with WAIC deviance."""
    models = {
        "Itercept": get_fit1_intercept(),
        "Fungus+treatment": get_fit2_fungus_treatment(),
        "Treatment": get_fit3_treatment(),
    }
    _, axes = compare_waic_tree_plot(models=models)
    assert axes.get_xlabel() == "WAIC (deviance)"
def test_save_compare_waic_tree_plot():
    """save_compare_waic_tree_plot() writes the comparison plot PDF."""
    models = {
        "Itercept": get_fit1_intercept(),
        "Fungus+treatment": get_fit2_fungus_treatment(),
        "Treatment": get_fit3_treatment(),
    }
    outdir = "tarpan/cmdstanpy/model_info/waic_test"
    if os.path.isdir(outdir):
        shutil.rmtree(outdir)
    save_compare_waic_tree_plot(models=models)
    assert os.path.isfile(os.path.join(outdir, "compare_waic.pdf"))
|
python
|
# -*- coding: utf-8 -*-
# @Date : 2019-07-26
# @Author : Xinyu Gong ([email protected])
# @Link : None
# @Version : 0.0
import os
import glob
import argparse
import numpy as np
from scipy.misc import imread
import tensorflow as tf
import utils.fid_score as fid
def parse_args():
    """Parse command-line options for FID statistics generation.

    Returns:
        argparse.Namespace: parsed options with ``data_path`` (required
        training-image directory) and ``output_file`` (destination .npz for
        the computed statistics).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_path', type=str, required=True,
        help='set path to training set jpg images dir')
    parser.add_argument(
        '--output_file', type=str,
        default='fid_stat/fid_stats_cifar10_train.npz',
        help='path for where to store the statistics')
    options = parser.parse_args()
    print(options)
    return options
def main():
    """Compute Inception activation statistics (mu, sigma) for FID.

    Loads every ``*.jpg`` under ``--data_path`` into memory, runs them
    through the Inception network, and saves the activation mean and
    covariance to the ``--output_file`` .npz archive.
    """
    args = parse_args()
    ########
    # PATHS
    ########
    data_path = args.data_path
    output_path = args.output_file
    # if you have downloaded and extracted
    # http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    # set this path to the directory where the extracted files are, otherwise
    # just set it to None and the script will later download the files for you
    inception_path = None
    print("check for inception model..", end=" ", flush=True)
    inception_path = fid.check_or_download_inception(inception_path)  # download inception if necessary
    print("ok")
    # loads all images into memory (this might require a lot of RAM!)
    print("load images..", end=" ", flush=True)
    image_list = glob.glob(os.path.join(data_path, '*.jpg'))
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — this script
    # needs an old pinned SciPy, or a migration to imageio.imread; confirm
    # the pinned environment.
    images = np.array([imread(str(fn)).astype(np.float32) for fn in image_list])
    print("%d images found and loaded" % len(images))
    print("create inception graph..", end=" ", flush=True)
    fid.create_inception_graph(inception_path)  # load the graph into the current TF graph
    print("ok")
    print("calculte FID stats..", end=" ", flush=True)
    # TF1-style session; allow_growth avoids grabbing all GPU memory up front
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        mu, sigma = fid.calculate_activation_statistics(images, sess, batch_size=100)
    np.savez_compressed(output_path, mu=mu, sigma=sigma)
    print("finished")
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
'''
@author: Jinpeng LI
@contact: [email protected]
@organization: I2BM, Neurospin, Gif-sur-Yvette, France
@organization: CATI, France
@organization: U{IFR 49<http://www.ifr49.org>}
@license: U{CeCILL version 2<http://www.cecill.info/licences/Licence_CeCILL_V2-en.html>}
'''
'''
start to check the requirement on the server side
'''
import os
import sys
# Scan argv for "-r <resource_name>"; stop at the first occurrence.
resName = None
i = 0
while i < len(sys.argv):
    if sys.argv[i] == "-r":
        # Guard against "-r" being the last token: the original code indexed
        # sys.argv[i + 1] unconditionally and raised IndexError in that case.
        if i + 1 < len(sys.argv):
            resName = sys.argv[i + 1]
        break
    i = i + 1
# Commands to run on the server: kill the database-server process matching
# the resource name, then remove the user's soma-workflow config.
lines2cmd = [
    "kill $(ps -ef | grep 'python -m soma_workflow.start_database_server' | grep '%s' \
| grep -v grep | awk '{print $2}')" % (resName),
    "rm ~/.soma-workflow.cfg"
]
for line2cmd in lines2cmd:
    # echo the command before executing it, for visibility in the job log
    os.system("echo '%s' " % (line2cmd))
    os.system(line2cmd)
|
python
|
from dataclasses import dataclass, field
from typing import Dict
from lf3py.serialization.deserializer import DictDeserializer
@dataclass
class SNSMessage(DictDeserializer):
    """Deserializable view of an AWS SNS message payload.

    NOTE(review): field names suggest the SNS ``Message`` body and its
    ``MessageAttributes`` map — confirm against the lf3py deserializer's
    key-mapping rules.
    """
    # raw message body text
    message: str = ''
    # attribute name -> nested string mapping (assumed Type/Value pairs)
    attributes: Dict[str, Dict[str, str]] = field(default_factory=dict)
python
|
from utils import *
def cross_val_split(dataset, folds):
    """Shuffle the dataset in place and partition it into near-equal folds.

    :param dataset: The dataset to split; shuffled in place.
    :param folds: The number of folds to produce.
    :return: A list of sub-arrays, one per fold.
    """
    np.random.shuffle(dataset)
    return np.array_split(dataset, folds)
def train_folds_merge(folds, test_id):
    """Stack every fold except ``test_id`` into a single training set.

    :param folds: The folds produced by the cross-validation split.
    :param test_id: Index of the fold reserved as the test set.
    :return: The merged training set as one array.
    """
    train_parts = [fold for idx, fold in enumerate(folds) if idx != test_id]
    return np.vstack(train_parts)
def plot_accuracies(accuracies, num_of_models):
    """Plot model accuracies against the model (question) index.

    Highlights the best-performing model with a red marker, shows the figure
    and saves it to ``2.3.png``.

    :param accuracies: Sequence of accuracy values, one per model.
    :param num_of_models: Number of models plotted on the x axis.
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(1, 1, figsize=(15, 10), dpi=100)
    xs = np.arange(1, num_of_models + 1)
    ax.plot(xs, accuracies)
    acc_best = np.argmax(accuracies) + 1
    maxim = ax.scatter(acc_best, np.max(accuracies), marker='o', color='red')
    # Pass the visibility flag positionally: the keyword was renamed
    # b= -> visible= in Matplotlib 3.5, and the positional form works on
    # every version.
    ax.grid(True, color='grey', linestyle='-.', linewidth=0.5, zorder=0)
    ax.legend([maxim], ["Best Accuracy"])
    ax.set_title('Accuracy vs Algorithm selected')
    ax.set_ylabel('Accuracy')
    ax.set_xlabel('Methods')
    # Set tick positions BEFORE labels so the labels stay aligned with xs
    # (the original order triggered a mismatch warning).
    ax.set_xticks(xs)
    ax.set_xticklabels(['quest_a', 'quest_b', 'quest_c', 'quest_d'])
    plt.xticks(rotation=25)
    # Save BEFORE show(): show() blocks, and after the window closes the
    # figure is cleared, so the original save-after-show wrote a blank file.
    plt.savefig("2.3.png")
    plt.show()
def predictClass(x, mus, sigmas, X_train, number_of_classes, class_probabilities):
    """For every model, calculate the likelihood for each class and pick the
    class with maximum likelihood.

    :param x: The datapoint we want to derive the class for.
    :param mus: Mean vectors grouped per class: the first block of entries
        belongs to class 0 (one entry per parametric method), the next block
        to class 1, etc.
    :param sigmas: Covariance matrices, laid out like mus.
    :param X_train: The train set - needed for the Parzen-window method.
    :param number_of_classes: The number of different classes in the dataset.
    :param class_probabilities: An array with the prior probability of each class.
    :return: A list with the predicted class index from each model
        (parametric methods first, Parzen-window method last).
    """
    predictions = []
    # --- Parametric methods (questions a, b, c) ---
    # Each class contributes one (mu, sigma) pair per method, so the number
    # of parametric methods is len(mus) / number_of_classes.  The original
    # hard-coded int(len(mus) / 2), which is only correct for exactly two
    # classes; this form is identical for two classes and correct for more.
    number_of_models = len(mus) // number_of_classes
    for i in range(0, number_of_models):
        method_likelihoods = []
        for j in range(number_of_classes):
            index = i + j * number_of_models  # the index will "jump" over the other methods in the lists.
            # Bayes rule (unnormalized posterior): likelihood * class prior
            prob = gaussian(x, mus[index], sigmas[index]) * class_probabilities[j]
            method_likelihoods.append(prob)
        predictions.append(np.argmax(method_likelihoods))
    # --- Non-parametric method (Parzen windows, question d) ---
    method_likelihoods = []
    for j in range(number_of_classes):
        # NOTE(review): question_d does not depend on j, so every class gets
        # the same likelihood and only the priors differentiate them —
        # confirm this is intended (it likely should use a per-class subset).
        sumlog_pi = question_d(X_train, x)
        p_i = sumlog_pi * class_probabilities[j]  # Bayes classifier rule
        method_likelihoods.append(p_i)
    predictions.append(np.argmax(method_likelihoods))
    return predictions
def main():
    """Run k-fold cross validation over the four density-estimation models.

    The dataset is split into folds; for each fold the class-conditional pdfs
    are estimated on the training folds and accuracy is measured on the held
    out fold.  Average accuracies over all folds are printed and plotted.
    """
    data = loaddataset()
    folds = 5
    num_of_models = 4
    data_splits = cross_val_split(data, folds)
    # One row per model, one column per fold.  While testing a fold the cells
    # hold correct-prediction counts; afterwards they are converted in place
    # to percentages, so the array must be float: the original int dtype
    # silently truncated every percentage to a whole number.
    accuracies = np.zeros((num_of_models, folds), dtype=float)
    # For each fold:
    for t, test_set in enumerate(data_splits):
        train_set = train_folds_merge(data_splits, t)
        X_train = train_set[:, :-1]
        y_train = train_set[:, -1:]
        X_test = test_set[:, :-1]
        y_test = test_set[:, -1:]
        number_of_classes = len(np.unique(y_train))
        class_probabilities = np.zeros(number_of_classes)  # per-class priors
        # Lists with model parameters, consumed by predictClass().
        mus = []
        sigmas = []
        # Training: estimate pdf parameters per class for methods a, b, c.
        for i in range(number_of_classes):
            # Split set based on each label y.
            subset = np.array([X_train[j] for j in range(X_train.shape[0]) if y_train[j] == i])
            # The class probability for this class and fold.
            class_probabilities[i] = subset.shape[0] / X_train.shape[0]
            # Get pdf parameters for each class
            mus_a, sigmas_a = question_a(subset)
            mus.append(mus_a)
            sigmas.append(sigmas_a)
            mus_b, sigmas_b = question_b(subset)
            mus.append(mus_b)
            sigmas.append(sigmas_b)
            mus_c, sigmas_c = question_c(subset)
            mus.append(mus_c)
            sigmas.append(sigmas_c)
        # Question d: training for the last model takes place right before
        # testing, because there aren't any parameters to learn beforehand.
        # Testing:
        for i in range(X_test.shape[0]):
            # Take the predictions from all methods
            preds = predictClass(X_test[i], mus, sigmas, X_train, number_of_classes, class_probabilities)
            # Evaluate these predictions
            target = y_test[i]
            for j, pred in enumerate(preds):
                if pred == target:
                    accuracies[j, t] += 1
        # Convert this fold's correct counts to percentages, in place.
        accuracies[:, t] = accuracies[:, t] * 100 / X_test.shape[0]
    accurates_counts_avg = accuracies.mean(axis=1)  # average over the k folds
    print("Average Accuracy over {}-fold cross validation:\n".format(folds))
    print("Assumption A: {}%".format(round(accurates_counts_avg[0], 1)))
    print("Assumption B: {}%".format(round(accurates_counts_avg[1], 1)))
    print("Assumption C: {}%".format(round(accurates_counts_avg[2], 1)))
    print("Assumption D: {}%".format(round(accurates_counts_avg[3], 1)))
    # ---------------plot accuracies
    plot_accuracies(accurates_counts_avg, num_of_models)
if __name__ == "__main__":
main()
|
python
|
import random
from abc import ABCMeta, abstractmethod
from collections import defaultdict, Counter, OrderedDict
import math
import numpy as np
from gtd.log import indent
from wge.rl import Trace
def normalize_counts(counts):
    """Return a Counter whose values are ``counts`` normalized to sum to 1."""
    total = float(sum(counts.values(), 0.0))
    assert total > 0  # cannot normalize empty Counter
    normed = Counter()
    for key, count in counts.items():
        normed[key] = count / total
    return normed
class ReplayBuffer(object, metaclass=ABCMeta):
    """Abstract interface for a buffer of episodes replayed during training."""
    @abstractmethod
    def sample(self, num_episodes):
        """Sample WITH replacement from the buffer.

        Args:
            num_episodes (int): number of episodes to return.

        Returns:
            sampled_episodes (list[Episode])
            sample_probs (list[float]): probability of sampling the episode
            trace (ReplayBufferSampleTrace)
        """
        raise NotImplementedError
    @abstractmethod
    def extend(self, episodes):
        """Extends the buffer with the given episodes.

        Randomly evicts episodes from the buffer as necessary.

        Args:
            episodes (list[Episode])
        """
        raise NotImplementedError
    @abstractmethod
    def __len__(self):
        """Number of episodes currently stored in the buffer."""
        raise NotImplementedError
    @abstractmethod
    def status(self):
        """A human-readable string describing the status of the buffer."""
        raise NotImplementedError
class UniformReplayBuffer(ReplayBuffer):
    """Unbounded buffer that samples episodes uniformly with replacement."""
    def __init__(self):
        self._episodes = []
    def __len__(self):
        return len(self._episodes)
    def sample(self, num_episodes):
        """Return (episodes, sample_probs, trace); probs are fixed at 1.0."""
        chosen = np.random.choice(len(self._episodes), size=num_episodes, replace=True)
        sampled = [self._episodes[idx] for idx in chosen]
        return sampled, [1.] * len(sampled), None
    def extend(self, episodes):
        self._episodes.extend(episodes)
    def status(self):
        return 'size: {}'.format(len(self))
class ReplayBufferNotReadyException(Exception):
    """Raised when no group buffer has enough episodes to sample from."""
    pass
class RewardPrioritizedReplayBuffer(ReplayBuffer):
    """Replay buffer that evicts low-reward episodes and samples the top quantile."""
    def __init__(self, max_size, sampling_quantile, discount_factor):
        """RewardPrioritizedReplayBuffer.

        Lowest-reward episodes are evicted when the buffer becomes full.
        Buffer only samples from the top K-quantile of what it contains
        (where K = sampling_quantile).

        Args:
            max_size (int): max size of the buffer.
            sampling_quantile (float): should be in (0, 1]
            discount_factor (float): stored but not read in this class —
                confirm whether callers rely on it.
        """
        self.max_size = max_size
        self.sampling_quantile = sampling_quantile
        self._discount_factor = discount_factor
        self._episodes = []  # this should always be sorted from highest-reward to lowest-reward
    def __len__(self):
        return len(self._episodes)
    def sample(self, num_episodes):
        """Sample WITHOUT replacement from the top quantile of the buffer.

        NOTE(review): the episode list is only re-sorted when it overflows
        max_size in extend(); before that, the "top quantile" is over
        insertion order — confirm this is acceptable.
        """
        n = len(self)
        if n == 0:
            raise RuntimeError('Cannot sample from an empty buffer.')
        # only sample as many as are contained in the buffer
        num_episodes = min(num_episodes, len(self))
        # only sample from the top k-quantile
        sample_limit = int(math.ceil(n * self.sampling_quantile))
        # if the top k-quantile isn't large enough to get num_episodes unique episodes, expand it
        sample_limit = max(sample_limit, num_episodes)
        # don't ever sample the same thing twice
        sample_indices = list(np.random.choice(sample_limit, size=num_episodes, replace=False))
        sample_episodes = [self._episodes[i] for i in sample_indices]
        sample_probs = [1.] * len(sample_episodes)
        # TODO(kelvin): similar to the old replay buffer, we are just hacking sample_probs to be all 1s right now
        trace = PrioritizedRewardReplayBufferTrace(self._episodes)
        return sample_episodes, sample_probs, trace
    def extend(self, episodes):
        """Add full-reward episodes; evict the lowest rewards on overflow."""
        # only add episodes with full (undiscounted) reward of exactly 1
        episodes = [ep for ep in episodes if ep.discounted_return(0, 1.) == 1]
        # TODO(kelvin): just create a FullRewardOnlyBuffer, rather than
        # hacking RewardPrioritizedBuffer
        # DISABLED: only add episodes with positive reward
        # episodes = [ep for ep in episodes if ep.discounted_return(0, 1.) > 0]
        self._episodes.extend(episodes)
        if len(self._episodes) > self.max_size:
            # sorted() is stable, so without the shuffle, ties would be broken
            # by insertion order and eviction would favor newer episodes;
            # shuffling first breaks ties randomly
            shuffled_episodes = list(self._episodes)
            random.shuffle(shuffled_episodes)
            sorted_episodes = sorted(shuffled_episodes, key=lambda ep: ep.discounted_return(0, 1.), reverse=True)
            self._episodes = sorted_episodes[:self.max_size]
    def status(self):
        """One-line reward summary (n, mean, range, median) of the buffer."""
        if len(self) == 0:
            return 'empty'
        rewards = sorted(ep.discounted_return(0, 1.) for ep in self._episodes)
        # NOTE: for an even count this is the upper-middle element, not the
        # interpolated median — acceptable for a status display
        median = rewards[int(len(rewards) / 2)]
        min = rewards[0]
        max = rewards[-1]
        mean = sum(rewards) / len(rewards)
        return 'n={n:<4} mean={mean:.2f} range=[{min:.2f}, {max:.2f}] median={median:.2f}'.format(
            n=len(rewards), min=min, median=median, max=max, mean=mean)
class GroupedReplayBuffer(ReplayBuffer):
    """Buffer of Episodes to replay, partitioned into per-group sub-buffers."""
    def __init__(self, episode_grouper, episode_identifier,
                 buffer_factory, min_group_size):
        """Construct replay buffer.

        WARNING:
        We assume that the probability of sampling an episode is just 1.
        Compared to using the real sample prob (which can be easily computed),
        this is more stable for downstream importance sampling.
        We already violate the assumptions of importance sampling, because our
        proposal distribution doesn't have full support over the target distribution.
        Exact sample probs actually exacerbate the problem.
        Approximate sample probs somewhat mitigate the problem.

        Args:
            episode_grouper (Callable[Episode, object]): maps an episode to
                the label of the group buffer it is stored in.
            episode_identifier (Callable[Episode, object]): stored but not
                read in this class — confirm external usage before removing.
            buffer_factory (Callable[[], ReplayBuffer]): creates a brand new buffer
            min_group_size (int): if a group's buffer is smaller than this size,
                we will not sample from it.
        """
        self._group_buffers = defaultdict(buffer_factory)
        self._episode_grouper = episode_grouper
        self._episode_identifier = episode_identifier
        self._min_group_size = min_group_size
    def sample(self, num_episodes):
        """Sample uniformly across eligible groups, then within each group.

        Raises:
            ReplayBufferNotReadyException: if no group has at least
                min_group_size episodes.
        """
        # groups eligible for sampling: those meeting the minimum size
        group_labels = [label for label, buffer in list(self._group_buffers.items())
                        if len(buffer) >= self._min_group_size]
        if len(group_labels) == 0:
            # none of the buffers are ready
            raise ReplayBufferNotReadyException()
        num_groups = len(group_labels)
        uniform_probs = [1. / num_groups] * num_groups
        group_counts = np.random.multinomial(num_episodes, uniform_probs)  # sample uniformly from groups
        sampled_episodes = []
        sample_probs = []
        traces = {}
        assert len(group_labels) == len(group_counts)
        for label, group_count in zip(group_labels, group_counts):
            group_buffer = self._group_buffers[label]
            eps, probs, trace = group_buffer.sample(group_count)
            sampled_episodes.extend(eps)
            sample_probs.extend(probs)
            traces[label] = trace
        group_counts_dict = dict(list(zip(group_labels, group_counts)))
        full_trace = GroupedReplayBufferTrace(traces, group_counts_dict)
        return sampled_episodes, sample_probs, full_trace
    def extend(self, episodes):
        """Route each episode into its group's buffer (buffers are created lazily)."""
        # group the episodes
        grouped_episodes = defaultdict(list)
        for ep in episodes:
            grouped_episodes[self._episode_grouper(ep)].append(ep)
        # add the episodes to their respective buffers
        for label, group in list(grouped_episodes.items()):
            self._group_buffers[label].extend(group)
    def __len__(self):
        # total number of episodes across all group buffers
        return sum(len(buffer) for buffer in list(self._group_buffers.values()))
    def status(self):
        """One line per group: the group buffer's status followed by its label."""
        if len(self._group_buffers) == 0:
            return 'empty'
        return '\n'.join('{}: {}'.format(buffer.status(), label)
                         for label, buffer in list(self._group_buffers.items()))
class GroupedReplayBufferTrace(Trace):
    """Trace of one GroupedReplayBuffer sample: per-group traces and sample counts."""
    def __init__(self, group_traces, group_counts):
        def trace_sort_key(item):
            group_label, trace = item
            if isinstance(trace, PrioritizedRewardReplayBufferTrace):
                return -trace.mean  # sort by mean reward of group
            else:
                return repr(group_label)  # sort by group label
        # NOTE(review): if the traces dict mixes trace types, the sort key
        # mixes floats and strings, which raises TypeError on Python 3 —
        # confirm all group buffers produce the same trace type.
        self._group_traces = OrderedDict(sorted(list(group_traces.items()), key=trace_sort_key))
        # groups ordered by descending sample count
        self._group_counts = OrderedDict(sorted(list(group_counts.items()), key=lambda x: -x[1]))
    def to_json_dict(self):
        """JSON-friendly dict with repr()-stringified group labels."""
        return {'group_counts': {repr(label): count for label, count in list(self._group_counts.items())},
                'group_traces': {repr(label): stat.to_json_dict() for label, stat in list(self._group_traces.items())}
                }
    def dumps(self):
        """Human-readable dump: per-group stats, then per-group sample counts."""
        return 'group stats:\n{}\nsample counts:\n{}'.format(
            indent('\n'.join('{}: {}'.format(
                trace.dumps(), label) for label, trace in list(self._group_traces.items()))),
            indent('\n'.join('{:<5}: {}'.format(c, k) for k, c in list(self._group_counts.items()))),
        )
class PrioritizedRewardReplayBufferTrace(Trace):
    """Summary statistics (min/median/mean/max) of episode rewards in a buffer."""
    def __init__(self, episodes):
        # undiscounted returns, sorted ascending
        self._rewards = sorted(ep.discounted_return(0, 1.) for ep in episodes)
        # NOTE: for an even count this picks the upper-middle element rather
        # than the interpolated median — acceptable for a status display
        self.median = self._rewards[int(len(self._rewards) / 2)]
        self.min = self._rewards[0]
        self.max = self._rewards[-1]
        self.mean = sum(self._rewards) / len(self._rewards)
    def dumps(self):
        """One-line human-readable summary, matching the buffer's status() format."""
        return 'n={n:<4} mean={mean:.2f} range=[{min:.2f}, {max:.2f}] median={median:.2f}'.format(
            n=len(self._rewards), min=self.min, median=self.median, max=self.max, mean=self.mean)
    def to_json_dict(self):
        """JSON-friendly dict of the four summary statistics."""
        return {'median': self.median,
                'mean': self.mean,
                'min': self.min,
                'max': self.max
                }
|
python
|
#
# PySNMP MIB module FNCNMS (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FNCNMS
# Produced by pysmi-0.3.4 at Wed May 1 13:14:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
netsmart, = mibBuilder.importSymbols("FNC-COMMON-SMI", "netsmart")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
system, = mibBuilder.importSymbols("SNMPv2-MIB", "system")
Unsigned32, NotificationType, MibIdentifier, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Bits, IpAddress, Gauge32, ModuleIdentity, Counter32, Counter64, enterprises, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "NotificationType", "MibIdentifier", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Bits", "IpAddress", "Gauge32", "ModuleIdentity", "Counter32", "Counter64", "enterprises", "ObjectIdentity")
TextualConvention, DisplayString, DateAndTime = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "DateAndTime")
# --- Generated module identity for the NETSMART 1500 MIB (pysmi output;
# do not hand-edit) ---
netsmart1500 = ModuleIdentity((1, 3, 6, 1, 4, 1, 3861, 4, 1500))
netsmart1500.setRevisions(('2012-02-06 16:00', '2011-06-16 16:00', '2003-08-02 16:00',))
# Newer pysnmp builders additionally carry per-revision descriptions.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: netsmart1500.setRevisionsDescriptions(('Added keep alive message event', 'Added following NETypes to neType object FLASHWAVE 9500, FLASHWAVE CDS, FLASHWAVE 9410, FLASHWAVE 9420, FLASHWAVE 7120, FLASHWAVE 7420', 'Initial Version.',))
if mibBuilder.loadTexts: netsmart1500.setLastUpdated('201202061600Z')
if mibBuilder.loadTexts: netsmart1500.setOrganization('Fujitsu Network Communications Inc.')
if mibBuilder.loadTexts: netsmart1500.setContactInfo('Fujitsu Network Communications Tel: I-800-USE-FTAC i.e (800) 873 3822')
if mibBuilder.loadTexts: netsmart1500.setDescription(" This MIB represents the interface definition between a third party SNMP Manager and FNC's NETSMART Management System. The functions provided in this MIB are: - Network Element (NE) Target IDentifier (TID) Discovery - Dynamic Alarm reporting - Alarm Discovery and Reconciliation ")
# OID subtree anchors under the module identity.
nmsNEMgmtMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1))
nmsNEMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1))
nmsNEAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2))
nmsNotificationTrapBase = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0))
# Generated textual convention: notification severity (1=cleared .. 5=critical).
class NMSSeverity(TextualConvention, Integer32):
    description = ' Severity represents the severity of the Notification. It is assigned by the NE. Cleared indicates that this notification clears a previously sent trap with a severity of critical,major,minor or info. '
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("cleared", 1), ("info", 2), ("minor", 3), ("major", 4), ("critical", 5))
# Generated textual convention: fault direction (transmit/receive/n.a.).
class NMSCondDirection(TextualConvention, Integer32):
    description = 'Whether the fault is on the transmit side or on the receive side. '
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("na", 1), ("transmit", 2), ("receive", 3))
# Generated textual convention: failure location (near end / far end / n.a.).
class NMSCondLocation(TextualConvention, Integer32):
    description = ' Location indicates whether the failure occurred on the nearEnd of this NE or on the farEnd. '
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("na", 1), ("nearEnd", 2), ("farEnd", 3))
# Generated textual convention: whether an alarm affects traffic.
class NMSServiceEffect(TextualConvention, Integer32):
    description = 'Service Affecting indicates whether the alarm affects traffic or not. '
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("na", 1), ("serviceAffecting", 2), ("nonServiceAffecting", 3))
# Generated textual convention: global trap sequence number (wraps at 99999;
# 0 only before the agent has forwarded any notification).
class NMSTrapSeqNumber(TextualConvention, Integer32):
    description = ' A value in the range 1-99999 is reported. The value 0 is never reported but can only be obtained through a GET.A value of 0 indicates that agent has not forwarded any notifications since it booted up. When the value of this field reaches 99999, the number resets and the subsequent notification is sent with a number 1. This value is not unique per NE.It is a global number used across all NEs. '
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 99999)
# Generated textual convention: managed-NE TID, a DisplayString of 1..20 chars.
class NMSMgdNE(DisplayString):
    subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(1, 20)
class NMSNEConnState(TextualConvention, Integer32):
    # Textual convention: agent<->NE communication state —
    # 1 = connected, 2 = notConnected.
    description = ' Describes the state of connection between an NE and agent.Connected indicates that the agent is connected to the NE. NotConnected indicates that there is no communication between NE and agent. '
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("connected", 1), ("notConnected", 2))
# ---------------------------------------------------------------------------
# nmsNETable: one row per managed NE, indexed by TID.
# Columns: neTID (1), neType (2), neConnState (3); all read-only.
# ---------------------------------------------------------------------------
nmsNETable = MibTable((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1), )
if mibBuilder.loadTexts: nmsNETable.setStatus('current')
if mibBuilder.loadTexts: nmsNETable.setDescription(' This Table contains a row for each managed NE in NETSMART. The row contains: - the TID (20 characters max) of each NE. - the NE Type ( A string representing the FNC Product Name) . - the NE Connection State. @see NMSNEConnState ')
nmsNEEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1, 1), ).setIndexNames((1, "FNCNMS", "neTID"))
if mibBuilder.loadTexts: nmsNEEntry.setStatus('current')
if mibBuilder.loadTexts: nmsNEEntry.setDescription(' Provides information about a managed NE in NETSMART. ')
neTID = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1, 1, 1), NMSMgdNE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neTID.setStatus('current')
if mibBuilder.loadTexts: neTID.setDescription('TID of the NE. @see NMSMgdNE. ')
neType = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neType.setStatus('current')
if mibBuilder.loadTexts: neType.setDescription(' Describes the type of Fujitsu NETWORK Element Current Types are FLM6, FACTR, FLM150, FLM600, FLM2400, FLX 600A, FLX 2500A, FLASH 192, FLASH 10G, FLASHWAVE 4010, FLASHWAVE 4020, FLASHWAVE 4100, FLASHWAVE 4300, FLASHWAVE 4500, FLASHWAVE 4560, FLASHWAVE 7200, FLASHWAVE 7300, FLASHWAVE 7500, FLASHWAVE 7700, FLASHWAVE 9500, FLASHWAVE CDS, FLASHWAVE 9410, FLASHWAVE 9420, FLASHWAVE 7120, FLASHWAVE 7420, Fujitsu NE, Generic NE, Unknown. -Unknown NEType represents NEs whose type is not determined yet. -Fujitsu NE NEType represents the NEs which are minimally (only fault management) supported by the agent. -Generic NE NEType represents non Fujitsu NEs whose fault management functionality is supported by the agent. ')
neConnState = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 1, 1, 1, 3), NMSNEConnState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neConnState.setStatus('current')
if mibBuilder.loadTexts: neConnState.setDescription(' Describes connection state between agent and NE. @see NMSNEConnState. A nmsNEStateChangeEvent NOTIFICATION generated whenever there is a change in neConnState. Also see neOperation and nmsNEOperationEvent for the default state when an NE is added. ')
# ---------------------------------------------------------------------------
# nmsNEAlarmTable: active alarms per NE, indexed by (alarmTID, alarmIndex).
# alarmIndex restarts at 1 for every TID so alarms can be fetched per-NE.
# ---------------------------------------------------------------------------
nmsNEAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1), )
if mibBuilder.loadTexts: nmsNEAlarmTable.setStatus('current')
if mibBuilder.loadTexts: nmsNEAlarmTable.setDescription(' This table contains a NE TID and a List of nmsNEAlarmList for this TID. ')
nmsNEAlarmListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1), ).setIndexNames((0, "FNCNMS", "alarmTID"), (0, "FNCNMS", "alarmIndex"))
if mibBuilder.loadTexts: nmsNEAlarmListEntry.setStatus('current')
if mibBuilder.loadTexts: nmsNEAlarmListEntry.setDescription(' A row in the table indicating the specific Alarm for the given TID. ')
alarmTID = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 1), NMSMgdNE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmTID.setStatus('current')
if mibBuilder.loadTexts: alarmTID.setDescription(' NE TID against which this Alarm is being raised. @see NMSMgdNE. ')
alarmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99999))).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmIndex.setStatus('current')
if mibBuilder.loadTexts: alarmIndex.setDescription('The instance of the alarm for this NE. This variable is used to identify a unique row for each alarm. In the nmsNEAlarmTable a sequence of alarms will look like: TID alarmIndex Other Alarm Data ======================================================== TID1 1 1-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID1 2 2-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID1 3 3-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID2 1 1-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID2 2 1-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 1 2-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 2 3-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 3 4-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 4 5-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID3 5 6-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 TID4 1 1-1 oc3 CR LOS SA RCV Loss Of Signal neTimeStamp1 nmsTimeStamp2 i.e. For each TID alarmIndex starts with 1 and is incremented by 1. This is to help retrieve alarms on an NE basis. ')
# (alarmEntityId, alarmEntityType) together address one entity on the NE.
alarmEntityId = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmEntityId.setStatus('current')
if mibBuilder.loadTexts: alarmEntityId.setDescription('Describes the AID of the entity against which this notification is being sent. ')
alarmEntityType = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmEntityType.setStatus('current')
if mibBuilder.loadTexts: alarmEntityType.setDescription(' Describes the kind the entity against which this notification is being sent . The combination of (alarmEntity, alarmEntityType) represents a addressable entity on the NE. Examples of kind of entities are EQPT,OC3,STS1,COM,ENV etc. ')
alarmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 5), NMSSeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmSeverity.setStatus('current')
if mibBuilder.loadTexts: alarmSeverity.setDescription('Describes the severity of the notification being sent. @see NMSSeverity ')
alarmCondType = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 6), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmCondType.setStatus('current')
if mibBuilder.loadTexts: alarmCondType.setDescription(' Describes the condition type i.e RMVD or LOS etc. ')
alarmServEffect = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 7), NMSServiceEffect()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmServEffect.setStatus('current')
if mibBuilder.loadTexts: alarmServEffect.setDescription(' Describes whether the notification is serviceAffecting or not. @see NMSServiceEffect. ')
alarmLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 8), NMSCondLocation()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmLocation.setStatus('current')
if mibBuilder.loadTexts: alarmLocation.setDescription(' Describes if the location is applicable and if applicable the appropriate location. @see NMSCondLocation. ')
alarmDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 9), NMSCondDirection()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmDirection.setStatus('current')
if mibBuilder.loadTexts: alarmDirection.setDescription(' Describes if the direction is a applicable and if applicable the appropriate direction. @see NMSCondLocation. ')
alarmDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 10), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmDescription.setStatus('current')
if mibBuilder.loadTexts: alarmDescription.setDescription(' Gives a textual description of the condition. ')
# Two timestamps: when the NE raised the fault vs. when the NMS received it.
neAlarmTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 11), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neAlarmTimeStamp.setStatus('current')
if mibBuilder.loadTexts: neAlarmTimeStamp.setDescription('Time stamp at which the NE generated the fault. The correctness of the value depends whether this fault is reported to the agent or agent retrieved this information from NE. For faults reported from the NE this value is correct where as for faults retrieved the timestamp indicates the time on NE at which this fault was retrieved. ')
nmsAlarmTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 1, 1, 12), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nmsAlarmTimeStamp.setStatus('current')
if mibBuilder.loadTexts: nmsAlarmTimeStamp.setDescription(' Time at which the NMS received the fault. ')
# ---------------------------------------------------------------------------
# Trap bookkeeping: last sequence number sent, plus a history table mapping
# each sequence number to the TID it was sent for (used by managers to detect
# gaps and resynchronize alarms for the affected NE).
# ---------------------------------------------------------------------------
nmsLastMsgNumber = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 2), NMSTrapSeqNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nmsLastMsgNumber.setStatus('current')
if mibBuilder.loadTexts: nmsLastMsgNumber.setDescription(' This variable describes the value of the last NMSTrapIndex reported by the agent in the notifications. @see NMSTrapSeqNumber. ')
nmsTrapHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 3), )
if mibBuilder.loadTexts: nmsTrapHistoryTable.setStatus('current')
if mibBuilder.loadTexts: nmsTrapHistoryTable.setDescription(' Thus table contains a sequence of trap seq number and the corresponding neTID.A Management system upon detecting that it has not received a particular sequence number, can determine the NE against which the sequence number was used and do a alarm resynchornization for that NE. ')
nmsTrapHistoryTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 3, 1), ).setIndexNames((0, "FNCNMS", "nmsTrapHistoryIndex"))
if mibBuilder.loadTexts: nmsTrapHistoryTableEntry.setStatus('current')
if mibBuilder.loadTexts: nmsTrapHistoryTableEntry.setDescription(' A row indicating the trap sequence number and NE TID which caused this event to be sent. ')
nmsTrapHistoryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 3, 1, 1), NMSTrapSeqNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nmsTrapHistoryIndex.setStatus('current')
if mibBuilder.loadTexts: nmsTrapHistoryIndex.setDescription(' A Sequence Number that was previously sent. @see NMSTrapSeqNumber. ')
nmsTrapHistoryTID = MibTableColumn((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 3, 1, 2), NMSMgdNE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nmsTrapHistoryTID.setStatus('current')
if mibBuilder.loadTexts: nmsTrapHistoryTID.setDescription(' The tid for which the sequence entry was sent. @see NMSMGgNE. ')
# ---------------------------------------------------------------------------
# nmsNotificationBase: scalar objects carried as varbinds inside the
# NotificationTypes defined further below (nmsNEEvent and friends).
# ---------------------------------------------------------------------------
nmsNotificationBase = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4))
notifTID = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 1), NMSMgdNE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: notifTID.setStatus('current')
if mibBuilder.loadTexts: notifTID.setDescription(' NE TID against which this notification is being sent. @see NMSMgdNE. ')
# (neEntityID, neEntityType) together address one entity on the NE.
neEntityID = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neEntityID.setStatus('current')
if mibBuilder.loadTexts: neEntityID.setDescription(' Describes the AID of the entity against which this notification is being sent. ')
neEntityType = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neEntityType.setStatus('current')
if mibBuilder.loadTexts: neEntityType.setDescription(' Describes the type the entity against which this notification is being sent . The combination (neEntityID,neEntityType) represents a addressable entity on the network element. Examples of neEntityType would be EQPT,OC3,COM,STS1,ENV etc. ')
neSeverity = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 4), NMSSeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neSeverity.setStatus('current')
if mibBuilder.loadTexts: neSeverity.setDescription(' Describes the severity of the notification being sent. @see NMSSeverity. ')
neCondType = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neCondType.setStatus('current')
if mibBuilder.loadTexts: neCondType.setDescription(' Describes the condition type i.e RMVD or LOS etc. ')
neServEffect = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 6), NMSServiceEffect()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neServEffect.setStatus('current')
if mibBuilder.loadTexts: neServEffect.setDescription(' Describes whether the notification is ServiceAffecting or not. @see NMSServiceEffect. ')
neLocation = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 7), NMSCondLocation()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neLocation.setStatus('current')
if mibBuilder.loadTexts: neLocation.setDescription(' Describes if the location is applicable for this notification, if it is applicable describes the location. @see NMSCondLocation. ')
neDirection = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 8), NMSCondDirection()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neDirection.setStatus('current')
if mibBuilder.loadTexts: neDirection.setDescription('Describes if the direction is a applicable and if it is applicable the direction. @see NMSCondDirection. ')
neCondDescription = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 9), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neCondDescription.setStatus('current')
if mibBuilder.loadTexts: neCondDescription.setDescription(' Textual Description of the notification. ')
nmsNotifTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 10), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nmsNotifTimeStamp.setStatus('current')
if mibBuilder.loadTexts: nmsNotifTimeStamp.setDescription(' Time when NMS received this event. ')
neNotifTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 11), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: neNotifTimeStamp.setStatus('current')
if mibBuilder.loadTexts: neNotifTimeStamp.setDescription(' Time when NE raised this event. ')
nmsTrapSeqNumber = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 12), NMSTrapSeqNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nmsTrapSeqNumber.setStatus('current')
if mibBuilder.loadTexts: nmsTrapSeqNumber.setDescription(' @See NMSTrapSeqNumber. ')
nmsNEConnState = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 13), NMSNEConnState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nmsNEConnState.setStatus('current')
if mibBuilder.loadTexts: nmsNEConnState.setDescription(' Describes the state of connection between an NE and NMS. ')
neOperation = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: neOperation.setStatus('current')
if mibBuilder.loadTexts: neOperation.setDescription(' Indicates if a new NE is added to NETSMART or if it is removed. Addition always implies a nmsNEConnState with value notConnected and NEType of Unknown. ')
notifServer = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 15), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: notifServer.setStatus('current')
if mibBuilder.loadTexts: notifServer.setDescription(' Describes the Server which this notification is being sent. ')
nmsKeepAliveState = MibScalar((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 4, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("alive", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nmsKeepAliveState.setStatus('current')
if mibBuilder.loadTexts: nmsKeepAliveState.setDescription(' Describes the keep alive state which this notification is being sent. ')
# ---------------------------------------------------------------------------
# NotificationTypes: the traps this agent can emit, each referencing the
# nmsNotificationBase varbinds declared above.
# ---------------------------------------------------------------------------
nmsNEEvent = NotificationType((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0, 1)).setObjects(("FNCNMS", "notifTID"), ("FNCNMS", "neEntityID"), ("FNCNMS", "neEntityType"), ("FNCNMS", "neSeverity"), ("FNCNMS", "neCondType"), ("FNCNMS", "neServEffect"), ("FNCNMS", "neLocation"), ("FNCNMS", "neDirection"), ("FNCNMS", "neCondDescription"), ("FNCNMS", "nmsNotifTimeStamp"), ("FNCNMS", "neNotifTimeStamp"), ("FNCNMS", "nmsTrapSeqNumber"))
if mibBuilder.loadTexts: nmsNEEvent.setStatus('current')
if mibBuilder.loadTexts: nmsNEEvent.setDescription(' The agent generates this notification when a rept alarm or rept evt is generated. ')
nmsNEStateChangeEvent = NotificationType((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0, 2)).setObjects(("FNCNMS", "notifTID"), ("FNCNMS", "nmsNEConnState"), ("FNCNMS", "nmsTrapSeqNumber"))
if mibBuilder.loadTexts: nmsNEStateChangeEvent.setStatus('current')
if mibBuilder.loadTexts: nmsNEStateChangeEvent.setDescription(' The agent generates this notification when a NMS changes its connection state to the NE. @see NMSConnState. ')
nmsNEOperationEvent = NotificationType((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0, 3)).setObjects(("FNCNMS", "notifTID"), ("FNCNMS", "neOperation"), ("FNCNMS", "nmsTrapSeqNumber"))
if mibBuilder.loadTexts: nmsNEOperationEvent.setStatus('current')
if mibBuilder.loadTexts: nmsNEOperationEvent.setDescription(' The agent generates this notification when a NE is created or deleted. @see neOperation. ')
nmsKeepAliveEvent = NotificationType((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 2, 0, 4)).setObjects(("FNCNMS", "notifServer"), ("FNCNMS", "nmsKeepAliveState"))
if mibBuilder.loadTexts: nmsKeepAliveEvent.setStatus('current')
if mibBuilder.loadTexts: nmsKeepAliveEvent.setDescription(' The agent generates this notification when reaching the keep alive trap interval. @see nmsKeepAliveState. ')
# ---------------------------------------------------------------------------
# Conformance section: module compliance plus object/notification groups.
# The getattr(mibBuilder, 'version', ...) guards keep this loadable on old
# pysnmp releases where setStatus() is unavailable on group objects.
# ---------------------------------------------------------------------------
fncNMSMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3))
fncNMSMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 1))
fncNMSMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2))
fncNMSMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 1, 1)).setObjects(("SNMPv2-MIB", "system"), ("FNCNMS", "nmsNEMgmtGroup"), ("FNCNMS", "nmsNEAlarmGroup"), ("FNCNMS", "nmsNETrapGroup"), ("FNCNMS", "nmsNETrapObjects"), ("FNCNMS", "nmsServerTrapGroup"), ("FNCNMS", "nmsServerTrapObjects"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    fncNMSMIBCompliance = fncNMSMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: fncNMSMIBCompliance.setDescription(' Module Compliancy ')
nmsNEMgmtGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 1)).setObjects(("FNCNMS", "neTID"), ("FNCNMS", "neType"), ("FNCNMS", "neConnState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    nmsNEMgmtGroup = nmsNEMgmtGroup.setStatus('current')
if mibBuilder.loadTexts: nmsNEMgmtGroup.setDescription(' This Group defines objects which are common to all NE Table related retrievals ')
nmsNEAlarmGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 2)).setObjects(("FNCNMS", "alarmTID"), ("FNCNMS", "alarmIndex"), ("FNCNMS", "alarmEntityId"), ("FNCNMS", "alarmEntityType"), ("FNCNMS", "alarmSeverity"), ("FNCNMS", "alarmCondType"), ("FNCNMS", "alarmServEffect"), ("FNCNMS", "alarmLocation"), ("FNCNMS", "alarmDirection"), ("FNCNMS", "alarmDescription"), ("FNCNMS", "neAlarmTimeStamp"), ("FNCNMS", "nmsAlarmTimeStamp"), ("FNCNMS", "nmsLastMsgNumber"), ("FNCNMS", "nmsTrapHistoryIndex"), ("FNCNMS", "nmsTrapHistoryTID"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    nmsNEAlarmGroup = nmsNEAlarmGroup.setStatus('current')
if mibBuilder.loadTexts: nmsNEAlarmGroup.setDescription(' This Group defines objects which are common to all Alarm Table related retrievals ')
nmsNETrapGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 3)).setObjects(("FNCNMS", "nmsNEEvent"), ("FNCNMS", "nmsNEStateChangeEvent"), ("FNCNMS", "nmsNEOperationEvent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    nmsNETrapGroup = nmsNETrapGroup.setStatus('current')
if mibBuilder.loadTexts: nmsNETrapGroup.setDescription(' This Group defines objects which are used part of notifications ')
nmsNETrapObjects = ObjectGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 4)).setObjects(("FNCNMS", "notifTID"), ("FNCNMS", "neEntityID"), ("FNCNMS", "neEntityType"), ("FNCNMS", "neSeverity"), ("FNCNMS", "neCondType"), ("FNCNMS", "neServEffect"), ("FNCNMS", "neLocation"), ("FNCNMS", "neDirection"), ("FNCNMS", "neCondDescription"), ("FNCNMS", "nmsNotifTimeStamp"), ("FNCNMS", "neNotifTimeStamp"), ("FNCNMS", "nmsTrapSeqNumber"), ("FNCNMS", "nmsNEConnState"), ("FNCNMS", "neOperation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    nmsNETrapObjects = nmsNETrapObjects.setStatus('current')
if mibBuilder.loadTexts: nmsNETrapObjects.setDescription(' This Group defines objects reported as part of notifications. ')
nmsServerTrapGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 5)).setObjects(("FNCNMS", "nmsKeepAliveEvent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    nmsServerTrapGroup = nmsServerTrapGroup.setStatus('current')
if mibBuilder.loadTexts: nmsServerTrapGroup.setDescription(' This Group defines objects which are used part of notifications ')
nmsServerTrapObjects = ObjectGroup((1, 3, 6, 1, 4, 1, 3861, 4, 1500, 1, 3, 2, 6)).setObjects(("FNCNMS", "notifServer"), ("FNCNMS", "nmsKeepAliveState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    nmsServerTrapObjects = nmsServerTrapObjects.setStatus('current')
if mibBuilder.loadTexts: nmsServerTrapObjects.setDescription(' This Group defines objects reported as part of notifications. ')
# Export every symbol defined by this generated module under the "FNCNMS"
# module name so other pysnmp MIB modules can import them.
mibBuilder.exportSymbols("FNCNMS", nmsTrapHistoryTableEntry=nmsTrapHistoryTableEntry, nmsNEAlarm=nmsNEAlarm, alarmIndex=alarmIndex, NMSMgdNE=NMSMgdNE, alarmEntityType=alarmEntityType, alarmServEffect=alarmServEffect, NMSServiceEffect=NMSServiceEffect, PYSNMP_MODULE_ID=netsmart1500, nmsTrapHistoryTable=nmsTrapHistoryTable, nmsTrapHistoryIndex=nmsTrapHistoryIndex, nmsNotificationBase=nmsNotificationBase, NMSNEConnState=NMSNEConnState, nmsNEAlarmTable=nmsNEAlarmTable, alarmTID=alarmTID, nmsNEAlarmGroup=nmsNEAlarmGroup, neAlarmTimeStamp=neAlarmTimeStamp, NMSTrapSeqNumber=NMSTrapSeqNumber, fncNMSMIBConformance=fncNMSMIBConformance, neType=neType, fncNMSMIBCompliance=fncNMSMIBCompliance, neEntityType=neEntityType, nmsTrapSeqNumber=nmsTrapSeqNumber, alarmEntityId=alarmEntityId, neServEffect=neServEffect, nmsLastMsgNumber=nmsLastMsgNumber, neEntityID=neEntityID, notifTID=notifTID, nmsTrapHistoryTID=nmsTrapHistoryTID, nmsNEMgmtGroup=nmsNEMgmtGroup, nmsNEEvent=nmsNEEvent, nmsNETrapObjects=nmsNETrapObjects, neLocation=neLocation, nmsNEAlarmListEntry=nmsNEAlarmListEntry, nmsNEOperationEvent=nmsNEOperationEvent, nmsNETable=nmsNETable, nmsNEEntry=nmsNEEntry, neSeverity=neSeverity, nmsKeepAliveState=nmsKeepAliveState, alarmDescription=alarmDescription, fncNMSMIBGroups=fncNMSMIBGroups, nmsNETrapGroup=nmsNETrapGroup, nmsNotificationTrapBase=nmsNotificationTrapBase, NMSCondDirection=NMSCondDirection, neTID=neTID, neConnState=neConnState, nmsNEMgmtMIB=nmsNEMgmtMIB, neDirection=neDirection, notifServer=notifServer, nmsKeepAliveEvent=nmsKeepAliveEvent, nmsServerTrapObjects=nmsServerTrapObjects, neCondType=neCondType, nmsAlarmTimeStamp=nmsAlarmTimeStamp, NMSCondLocation=NMSCondLocation, NMSSeverity=NMSSeverity, neCondDescription=neCondDescription, nmsNEStateChangeEvent=nmsNEStateChangeEvent, alarmCondType=alarmCondType, fncNMSMIBCompliances=fncNMSMIBCompliances, alarmLocation=alarmLocation, alarmDirection=alarmDirection, alarmSeverity=alarmSeverity, netsmart1500=netsmart1500, 
neNotifTimeStamp=neNotifTimeStamp, nmsNEConnState=nmsNEConnState, neOperation=neOperation, nmsServerTrapGroup=nmsServerTrapGroup, nmsNEMgmt=nmsNEMgmt, nmsNotifTimeStamp=nmsNotifTimeStamp)
|
python
|
import random

# Character pools. The original hand-typed lists accidentally omitted 'k'/'K'
# and fused 'x' 'y' into a single "xy" element (a missing comma turned two
# list items into one implicitly-concatenated string); explicit full
# alphabets fix both defects.
LETTERS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
NUMBERS = "0123456789"
SYMBOLS = "!#$&()*+"


def generate_password(letter_count, symbol_count, number_count):
    """Return a random password with the requested character-class counts.

    Args:
        letter_count (int): number of letters to include.
        symbol_count (int): number of symbols to include.
        number_count (int): number of digits to include.

    Returns:
        str: the shuffled password of length
        ``letter_count + symbol_count + number_count``.
    """
    chars = [random.choice(LETTERS) for _ in range(letter_count)]
    chars += [random.choice(SYMBOLS) for _ in range(symbol_count)]
    chars += [random.choice(NUMBERS) for _ in range(number_count)]
    # Shuffle so the classes are interleaved rather than grouped.
    random.shuffle(chars)
    return "".join(chars)


def main():
    """Interactive entry point: prompt for counts and print the password."""
    print("Welcome to the PyPassword Generator!!!")
    # int(...) replaces the original eval(...): eval executes arbitrary
    # user-supplied code, which is a security hole.
    letter_count = int(input("Number of Letter: "))
    symbol_count = int(input("Number of Symbol: "))
    number_count = int(input("Number in password: "))
    password = generate_password(letter_count, symbol_count, number_count)
    print("Your Strong Password: " + password)


if __name__ == "__main__":
    main()
|
python
|
"""
Determine an optimal list of hotel to visit.
```
$ python src/domain/solver.py \
-s "/Users/fpaupier/projects/samu_social/data/hotels_subset.csv
```
Note that the first record should be the adress of the starting point (let's say the HQ of the Samu Social)
"""
import argparse
import numpy as np
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
from src.services.map import Map
from src.services.csv_reader import parse_csv
MAX_DISTANCE = 15000 # Maximum distance (meters) that a worker can cover in a day
MAX_VISIT_PER_DAY = 8 # Maximum number of various hotel a worker can cover within a day
def get_distances_matrix(hotels, workers):
    """Compute the pairwise distance matrix between worker and hotel points.

    The location list is built as ``workers + workers + hotels`` because the
    routing model needs distinct start and end nodes for every worker: the
    first ``len(workers)`` rows are the start locations, the next
    ``len(workers)`` rows are the end locations, the rest are the hotels.

    Note:
        1) The first address shall be the address of the depot.
        2) Entries whose ``"point"`` is falsy are silently dropped, so the
           matrix can be smaller than the input. This may not be the
           expected behavior. TODO

    Args:
        hotels (list[dict]): locations, each with the keys ``"address"``,
            ``"postcode"`` and a pre-resolved ``"point"``.
        workers (list[dict]): worker locations with the same structure.

    Returns:
        tuple:
            distances (list[list[int]]): matrix of distances in meters.
            labels (dict[int, str]): node index -> "address postcode" label.

    Warnings:
        Function seems to break if size of input hotels is too big? Returns
        empty distances that leads to a segmentation fault down the
        processing pipeline.
    """
    # Renamed from `map` to avoid shadowing the builtin.
    geo_map = Map()
    distances = []
    labels = dict()
    index = 0
    locations = workers + workers + hotels
    for src in locations:
        src_point = src["point"]
        if not src_point:
            continue  # no geocoded point: drop this row entirely
        # Store the address as label for the node.
        labels[index] = "{} {}".format(src.get("address"), src.get("postcode"))
        index = index + 1
        row = []
        for dst in locations:
            dst_point = dst["point"]
            if not dst_point:
                continue  # keep columns consistent with the kept rows
            distance = geo_map.distance(src_point, dst_point)
            row.append(int(np.round(distance * 1000)))  # km -> meters
        if row:
            distances.append(row)
    return distances, labels
###########################
# Problem Data Definition #
###########################
def create_data_model(hotels, workers, from_raw_data):
    """Build the data dictionary fed to the OR-Tools routing model.

    Args:
        hotels (list[dict] | str): hotel records, or the path to the raw csv
            file when ``from_raw_data`` is True.
        workers (list[dict]): one entry per couple of Samu Social workers
            available (note: a list, not a dict — it is concatenated with the
            hotel list by ``get_distances_matrix``).
        from_raw_data (bool): when True, ``hotels`` is first parsed with
            ``parse_csv``.

    Returns:
        dict: problem data (distances, labels, start/end nodes, capacities).
    """
    data = {}
    n_workers = len(workers)
    data["num_vehicles"] = n_workers
    # Precise start and end locations of the workers:
    # rows [0, n_workers) of the distance matrix are start locations,
    # rows [n_workers, 2*n_workers) are end locations.
    data["start_locations"] = list(range(n_workers))
    data["end_locations"] = list(range(n_workers, 2 * n_workers))
    # Matrix of distances between locations.
    if from_raw_data:
        hotels_data = parse_csv(hotels, "hotel", write=False)
    else:
        hotels_data = hotels
    _distances, labels = get_distances_matrix(hotels_data, workers)
    data["distances"] = _distances
    data["labels"] = labels
    num_locations = len(_distances)
    data["num_locations"] = num_locations
    # Capacity constraint: every location "demands" one visit and each worker
    # can perform at most MAX_VISIT_PER_DAY visits within a day.
    data["demands"] = [1] * num_locations
    data["vehicle_capacities"] = [MAX_VISIT_PER_DAY] * n_workers
    return data
#######################
# Problem Constraints #
#######################
def create_distance_callback(data):
    """Build a callback returning the precomputed distance between nodes."""
    matrix = data["distances"]

    def distance_callback(from_node, to_node):
        """Look up the distance between the two nodes in the matrix."""
        return matrix[from_node][to_node]

    return distance_callback
def create_demand_callback(data):
    """Build a callback returning the demand at a location."""

    def demand_callback(from_node, to_node):
        # The demand only depends on the origin node being visited.
        return data["demands"][from_node]

    return demand_callback
def add_capacity_constraints(routing, data, demand_callback):
    """Attach the per-vehicle capacity dimension to the routing model."""
    routing.AddDimensionWithVehicleCapacity(
        demand_callback,
        0,                           # null capacity slack
        data["vehicle_capacities"],  # vehicle maximum capacities
        True,                        # start cumul to zero
        "Capacity",                  # dimension name
    )
###########
# FORMATTER #
###########
def format_solution(data, routing, assignment):
    """Turn the solver assignment into labelled routes, one list per vehicle."""
    routes = []
    for vehicle_id in range(data["num_vehicles"]):
        stops = []
        index = routing.Start(vehicle_id)
        total_dist = 0
        while not routing.IsEnd(index):
            node = routing.IndexToNode(index)
            next_index = assignment.Value(routing.NextVar(index))
            # Accumulated distance is computed but only the labels are kept.
            total_dist += routing.GetArcCostForVehicle(
                node, routing.IndexToNode(next_index), vehicle_id
            )
            stops.append("{0}".format(data["labels"].get(node)))
            index = next_index
        # Add return address to the route
        stops.append(data["labels"].get(routing.IndexToNode(index)))
        routes.append(stops)
    return routes
########
# Main #
########
def solve_routes(hotels, number_workers, from_raw_data=False):
    """Entry point: solve the routing problem and return the itineraries.

    Args:
        hotels: hotel records (or a csv path when ``from_raw_data`` is True).
        number_workers: workers available to perform the visits.
        from_raw_data (bool): should we consider the raw csv file or not.

    Returns:
        One route (list of labels) per worker, or ``None`` when the solver
        found no assignment.
    """
    # Instantiate the data problem.
    data = create_data_model(hotels, number_workers, from_raw_data)
    # Create Routing Model.
    routing = pywrapcp.RoutingModel(
        data["num_locations"],
        data["num_vehicles"],
        data["start_locations"],
        data["end_locations"],
    )
    # Define weight of each edge.
    routing.SetArcCostEvaluatorOfAllVehicles(create_distance_callback(data))
    # Add the capacity constraint.
    add_capacity_constraints(routing, data, create_demand_callback(data))
    # Setting first solution heuristic (cheapest addition).
    search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
    )
    # Solve the problem.
    assignment = routing.SolveWithParameters(search_parameters)
    if not assignment:
        return None
    return format_solution(data, routing, assignment)
if __name__ == "__main__":
    """
    Solve a Vehicle Routing Problem
    Note:
        The first record should be the address of the starting point (let's say the HQ of the Samu Social)
    """
    # Command-line interface: a source csv and an optional worker count.
    parser = argparse.ArgumentParser(description="Solve a Vehicle Routing Problem")
    parser.add_argument(
        "-s",
        "--source",
        type=str,
        help="path to the source address csv file",
    )
    parser.add_argument(
        "-n",
        "--number_workers",
        type=int,
        default=4,
        help="Number of workers available to perform the visit",
    )
    args = parser.parse_args()
    solve_routes(args.source, args.number_workers, from_raw_data=True)
|
python
|
import torch.nn as nn
import math
import torch.nn.functional as F
__all__ = ['SENet', 'Sphere20a', 'senet50']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 and no bias term."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
# This SEModule is not used (Bottleneck inlines its own SE branch instead).
class SEModule(nn.Module):
    """Squeeze-and-Excitation channel gate: squeeze via global average pool,
    excite via two 1x1 convolutions, then rescale the input channel-wise."""

    def __init__(self, planes, compress_rate):
        super(SEModule, self).__init__()
        squeezed = planes // compress_rate
        self.conv1 = nn.Conv2d(planes, squeezed, kernel_size=1, stride=1, bias=True)
        self.conv2 = nn.Conv2d(squeezed, planes, kernel_size=1, stride=1, bias=True)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Squeeze: pool the full spatial extent down to 1x1 per channel.
        gate = F.avg_pool2d(x, kernel_size=x.size(2))
        # Excite: bottleneck MLP implemented with 1x1 convs.
        gate = self.relu(self.conv1(gate))
        gate = self.sigmoid(self.conv2(gate))
        # Rescale the original activation channel-wise.
        return x * gate
class BasicBlock(nn.Module):
    """Standard ResNet basic residual block: two 3x3 convs plus a shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional projection applied to the shortcut when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Main path: conv-bn-relu, conv-bn.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Shortcut path (projected if a downsample module was given).
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1) with an inlined
    Squeeze-and-Excitation branch gating the 3rd conv's output.

    Note: the stride is applied on the first 1x1 conv, as in the original.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Inlined SE branch with a channel reduction ratio of 16
        # (the standalone SEModule class above is intentionally unused).
        compress_rate = 16
        self.conv4 = nn.Conv2d(planes * 4, planes * 4 // compress_rate, kernel_size=1, stride=1, bias=True)
        self.conv5 = nn.Conv2d(planes * 4 // compress_rate, planes * 4, kernel_size=1, stride=1, bias=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Bottleneck path.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # SE branch: squeeze to 1x1, excite through two 1x1 convs, sigmoid gate.
        gate = F.avg_pool2d(out, kernel_size=out.size(2))
        gate = self.relu(self.conv4(gate))
        gate = self.sigmoid(self.conv5(gate))
        # Shortcut (projected when a downsample module is supplied).
        shortcut = x if self.downsample is None else self.downsample(x)
        out = gate * out + shortcut
        return self.relu(out)
class SENet(nn.Module):
    """ResNet-style backbone built from the given residual block type.

    forward() returns either the list of intermediate stage features
    (get_feat=True, the default) or the pooled/classified output.
    """

    def __init__(self, block, layers, num_classes=8631, include_top=True):
        self.inplanes = 64
        super(SENet, self).__init__()
        self.include_top = include_top
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        if self.include_top:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BatchNorm starts out as the identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; first one may project the shortcut."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x, get_feat=True):
        x = self.relu(self.bn1(self.conv1(x)))
        # Collect the post-stem feature and the output of every stage.
        feats = [self.maxpool(x)]
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feats.append(stage(feats[-1]))
        pooled = self.avgpool(feats[-1])
        if not self.include_top:
            return feats if get_feat else pooled
        logits = self.fc(pooled.view(pooled.size(0), -1))
        return feats if get_feat else logits
def senet50(**kwargs):
    """Constructs a SENet-50 model (Bottleneck blocks arranged 3-4-6-3)."""
    return SENet(Bottleneck, [3, 4, 6, 3], **kwargs)
class Sphere20a(nn.Module):
    """SphereFace-style 20-layer CNN for 112x96 RGB inputs.

    Four stages, each a stride-2 conv followed by PReLU residual units;
    forward() returns the four stage features plus the final 512-d fc output.
    Attribute names (convN_M / reluN_M / fc5) match the original hand-written
    layout, so state dicts remain compatible.
    """
    # (stage index, output channels, number of residual units per stage)
    _STAGES = [(1, 64, 1), (2, 128, 2), (3, 256, 4), (4, 512, 1)]

    def __init__(self, classnum=10574, feature=False):
        super(Sphere20a, self).__init__()
        self.classnum = classnum
        self.feature = feature
        # input = B*3*112*96; each stage head conv halves H and W.
        in_ch = 3
        for stage, out_ch, n_units in self._STAGES:
            # Strided head conv of the stage.
            setattr(self, "conv%d_1" % stage, nn.Conv2d(in_ch, out_ch, 3, 2, 1))
            setattr(self, "relu%d_1" % stage, nn.PReLU(out_ch))
            # Two convs per residual unit, numbered 2, 3, 4, ... within the stage.
            for k in range(2, 2 * n_units + 2):
                setattr(self, "conv%d_%d" % (stage, k), nn.Conv2d(out_ch, out_ch, 3, 1, 1))
                setattr(self, "relu%d_%d" % (stage, k), nn.PReLU(out_ch))
            in_ch = out_ch
        # Final spatial size for 112x96 input is 7x6 with 512 channels.
        self.fc5 = nn.Linear(512 * 7 * 6, 512)

    def forward(self, x):
        feat_outs = []
        for stage, _, n_units in self._STAGES:
            x = getattr(self, "relu%d_1" % stage)(getattr(self, "conv%d_1" % stage)(x))
            for unit in range(n_units):
                k = 2 + 2 * unit
                # Residual unit: x + relu(conv(relu(conv(x)))).
                y = getattr(self, "relu%d_%d" % (stage, k))(getattr(self, "conv%d_%d" % (stage, k))(x))
                y = getattr(self, "relu%d_%d" % (stage, k + 1))(getattr(self, "conv%d_%d" % (stage, k + 1))(y))
                x = x + y
            feat_outs.append(x)
        x = self.fc5(x.view(x.size(0), -1))
        feat_outs.append(x)
        return feat_outs
|
python
|
# Create your views here.
from django.conf import settings
from django.core.cache import cache
from django.db.models import Prefetch
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework.generics import RetrieveAPIView, ListAPIView
from question.models import Question, Testcase
from question.permissions import IsQuestionAllowed, IsInTime, \
IsQuestionListInTime
from question.serializers import QuestionDetailSerializer, \
QuestionListSerializer
class QuestionDetail(RetrieveAPIView):
    """Read-only endpoint returning one question (by `que_id`) together with
    its public test cases; responses are cached for QUESTION_DETAIL seconds."""
    serializer_class = QuestionDetailSerializer
    lookup_url_kwarg = 'que_id'
    lookup_field = 'id'
    permission_classes = [IsQuestionAllowed, IsInTime]

    def get_queryset(self):
        # Prefetch only public test cases so hidden ones never reach the serializer.
        return Question.objects.all().prefetch_related(
            Prefetch('test_cases',
                     queryset=Testcase.objects.filter(is_public=True))
        )

    @method_decorator(cache_page(settings.CACHE_TTLS['QUESTION_DETAIL']))
    def get(self, request, *args, **kwargs):
        # BUG FIX: previously called self.retrieve(self, request, ...), which
        # passed the view instance as the `request` argument of the DRF mixin
        # (signature: retrieve(self, request, *args, **kwargs)).
        return self.retrieve(request, *args, **kwargs)
class QuestionList(ListAPIView):
    """Unpaginated list of the questions attached to a contest.

    The queryset is cached per contest for CONTEST_QUESTIONS seconds.
    """
    serializer_class = QuestionListSerializer
    permission_classes = [IsQuestionListInTime]
    pagination_class = None

    def get_queryset(self):
        cache_key = 'contest-{}-questions'.format(self.kwargs['contest_id'])
        questions = cache.get(cache_key)
        if not questions:
            # Cache miss (or cached empty result): query the DB and store.
            questions = Question.objects.filter(
                contests__id=self.kwargs['contest_id']
            )
            cache.set(cache_key, questions,
                      settings.CACHE_TTLS['CONTEST_QUESTIONS'])
        return questions
|
python
|
import os.path
import random
import multiprocessing
import pandas as pd
from utils import load_library, correct_full_sequence, get_precursor_indice, tear_library, flatten_list
from mz_calculator import calc_fragment_mz
def shuffle_seq(seq = None, seed = None):
    """Return a tuple holding the elements of *seq* permuted with the
    Fisher-Yates algorithm (modified from PECAN's decoyGenerator.py).

    A fixed *seed* makes the permutation reproducible; returns None when
    no sequence is given.
    """
    if seq is None:
        return None
    items = list(seq)
    random.seed(seed)
    # Walk from the tail down, swapping each slot with a random earlier one.
    for hi in range(len(items) - 1, 0, -1):
        pick = int(random.random() * (hi + 1))
        items[hi], items[pick] = items[pick], items[hi]
    return tuple(items)
def reverse(seq):
    """Return a reversed copy of *seq* (works for strings and lists alike)."""
    return seq[slice(None, None, -1)]
def shift_seq(seq):
    """Rotate *seq* left by half its length (second half moves to the front)."""
    half = len(seq) // 2
    return seq[half:] + seq[:half]
def mutate_seq(seq):
    """Return a decoy copy of the residue list *seq* in which the second and
    the second-to-last residues are substituted by a fixed amino-acid mapping;
    the first and last residues are kept unchanged.
    """
    # Fixed residue substitution table used for "mutate" decoys.
    substitutions = {
        "G": "L", "A": "L", "V": "L", "L": "V", "I": "V",
        "F": "L", "M": "L", "P": "L", "W": "L",
        "S": "T", "C": "S", "T": "S", "Y": "S", "H": "S",
        "K": "L", "R": "L",
        "Q": "N", "E": "D", "N": "Q", "D": "E",
    }
    return ([seq[0], substitutions[seq[1]]]
            + seq[2:-2]
            + [substitutions[seq[-2]], seq[-1]])
def get_mod_indice(sort_base):
    """Locate modification tags in a full peptide string.

    Scans *sort_base* for "(...)" spans and returns two parallel lists:
    the zero-based residue index each modification is attached to (an
    N-terminal tag yields -1) and the tag strings themselves, parens included.
    """
    positions, mods = [], []
    residue_cursor = -1   # index of the most recent residue seen
    inside_mod = False    # True while between "(" and ")"
    current = ""          # modification text accumulated so far
    for ch in sort_base:
        if ch == "(":
            inside_mod = True
            positions.append(residue_cursor)
            current = ""
        elif ch == ")":
            inside_mod = False
            # compensate: the ")" itself must not advance the residue cursor
            residue_cursor -= 1
            mods.append(current + ")")
        if inside_mod:
            current += ch
        else:
            residue_cursor += 1
    return positions, mods
def decoy_generator(library, lib_cols, decoy_method, precursor_indice, original_colnames, result_collector, fixed_colnames, seed):
    """Build decoy library records for a chunk of precursors (worker-process target).

    Appends ONE 9-element list to ``result_collector``:
    [product_mz, peptide_sequence, full_uniMod_peptide_name, transition_group_id,
    decoy, protein_name, transition_name, peptide_group_label,
    DataFrame of the fixed (copied-through) columns of the used target rows].

    Args:
        library: spectral library DataFrame.
        lib_cols: mapping from logical column roles to actual column names.
        decoy_method: one of "shuffle", "pseudo_reverse", "shift", "reverse", "mutate".
        precursor_indice: list of row-index groups, one group per precursor.
        original_colnames: column names present in the input library.
        result_collector: shared (multiprocessing.Manager) list for this worker's output.
        fixed_colnames: columns copied through unchanged from the target rows.
        seed: seed forwarded to shuffle_seq for reproducible "shuffle" decoys.
    """
    # Per-fragment output columns of the generated decoy records.
    product_mz, peptide_sequence, full_uniMod_peptide_name = [], [], []
    transition_group_id, decoy, protein_name = [], [], []
    transition_name, peptide_group_label = [], []
    valid_indice = []  # row indices of the target records actually used as templates
    for idx, pep in enumerate(precursor_indice):
        target_record = library.iloc[pep, :]
        # Skip records that are already marked as decoys in the source library.
        if ("decoy" in list(library.columns)) and (list(target_record["decoy"])[0] == 1):
            continue
        valid_indice.extend(pep)
        target_fullseq = list(target_record[lib_cols["FULL_SEQUENCE_COL"]])[0]
        target_pureseq = list(target_record[lib_cols["PURE_SEQUENCE_COL"]])[0]
        if decoy_method in ["shuffle", "pseudo_reverse", "shift"]:
            # unimod5: N-terminal (UniMod:5) tag is pinned in place.
            # KR_end / KR_mod_end: C-terminal K/R (possibly modified) is pinned.
            unimod5, KR_end, KR_mod_end = False, False, False
            sort_base = target_fullseq[:]
            if sort_base.startswith("(UniMod:5)"):
                unimod5 = True
                sort_base = sort_base[10:]
            if sort_base[-1] in ["K", "R"]:
                KR_end = sort_base[-1]
                sort_base = sort_base[:-1]
            elif (sort_base.endswith("(UniMod:259)") or sort_base.endswith("(UniMod:267)")):
                # 13 chars = the terminal residue plus its 12-char UniMod tag
                KR_mod_end = sort_base[-13:]
                sort_base = sort_base[:-13]
            mod_indice, mod_list = get_mod_indice(sort_base)
            if KR_end or KR_mod_end:
                pure_seq_list = [i for i in target_pureseq[:-1]]
            else:
                pure_seq_list = [i for i in target_pureseq]
            # Re-attach each modification to its residue so that residue+mod
            # travel together through the permutation.
            seq_list = pure_seq_list[:]
            for mod_id, mod in zip(mod_indice, mod_list):
                seq_list[mod_id] += mod
            if decoy_method == "shuffle":
                shuffled_indice = shuffle_seq([i for i in range(len(seq_list))], seed = seed)
            elif decoy_method == "pseudo_reverse":
                shuffled_indice = reverse([i for i in range(len(seq_list))])
            elif decoy_method == "shift":
                shuffled_indice = shift_seq([i for i in range(len(seq_list))])
            decoy_fullseq = "".join([seq_list[i] for i in shuffled_indice])
            decoy_pureseq = "".join([pure_seq_list[i] for i in shuffled_indice])
            # Restore the pinned terminal tag/residue.
            if unimod5:
                decoy_fullseq = "(UniMod:5)" + decoy_fullseq
            if KR_end:
                decoy_fullseq += KR_end
                decoy_pureseq += KR_end
            elif KR_mod_end:
                decoy_fullseq += KR_mod_end
                decoy_pureseq += KR_mod_end[0]
        elif decoy_method == "reverse":
            # Full reversal; only the N-terminal (UniMod:5) tag stays pinned.
            unimod5 = False
            sort_base = target_fullseq[:]
            if sort_base.startswith("(UniMod:5)"):
                unimod5 = True
                sort_base = sort_base[10:]
            mod_indice, mod_list = get_mod_indice(sort_base)
            pure_seq_list = [i for i in target_pureseq]
            seq_list = pure_seq_list[:]
            for mod_id, mod in zip(mod_indice, mod_list):
                seq_list[mod_id] += mod
            shuffled_indice = reverse([i for i in range(len(seq_list))])
            decoy_fullseq = "".join([seq_list[i] for i in shuffled_indice])
            decoy_pureseq = "".join([pure_seq_list[i] for i in shuffled_indice])
            if unimod5:
                decoy_fullseq = "(UniMod:5)" + decoy_fullseq
        elif decoy_method == "mutate":
            # Point-mutate near-terminal residues (see mutate_seq); order kept.
            unimod5 = False
            sort_base = target_fullseq[:]
            if sort_base.startswith("(UniMod:5)"):
                unimod5 = True
                sort_base = sort_base[10:]
            mod_indice, mod_list = get_mod_indice(sort_base)
            pure_seq_list = [i for i in target_pureseq]
            mutated_pure_seq_list = mutate_seq(pure_seq_list)
            mutated_seq_list = mutated_pure_seq_list[:]
            for mod_id, mod in zip(mod_indice, mod_list):
                mutated_seq_list[mod_id] += mod
            decoy_fullseq = "".join(mutated_seq_list)
            decoy_pureseq = "".join(mutated_pure_seq_list)
            if unimod5:
                decoy_fullseq = "(UniMod:5)" + decoy_fullseq
        # One decoy fragment row per fragment of the target precursor.
        for charge, tp, series in zip(target_record[lib_cols["FRAGMENT_CHARGE_COL"]], target_record[lib_cols["FRAGMENT_TYPE_COL"]], target_record[lib_cols["FRAGMENT_SERIES_COL"]]):
            product_mz.append(calc_fragment_mz(decoy_fullseq, decoy_pureseq, charge, "%s%d" % (tp, series)))
            peptide_sequence.append(decoy_pureseq)
            full_uniMod_peptide_name.append(decoy_fullseq)
        # Identifier columns get a DECOY_ prefix, replicated per fragment row.
        if "transition_name" in original_colnames:
            transition_name.extend(["DECOY_" + list(target_record["transition_name"])[0]] * target_record.shape[0])
        if "PeptideGroupLabel" in original_colnames:
            peptide_group_label.extend(["DECOY_" + list(target_record["PeptideGroupLabel"])[0]] * target_record.shape[0])
        transition_group_id.extend(["DECOY_" + list(target_record[lib_cols["PRECURSOR_ID_COL"]])[0]] * target_record.shape[0])
        decoy.extend([1] * target_record.shape[0])
        protein_name.extend(["DECOY_" + list(target_record[lib_cols["PROTEIN_NAME_COL"]])[0]] * target_record.shape[0])
    result_collector.append([product_mz, peptide_sequence, full_uniMod_peptide_name,
                             transition_group_id, decoy, protein_name, transition_name,
                             peptide_group_label, library.iloc[valid_indice, :].loc[:, fixed_colnames]])
def generate_decoys(lib, do_not_output_library, n_threads, seed, mz_min, mz_max, n_frags_each_precursor, decoy_method, logger):
    """Load a spectral library, filter it, and append generated decoy records.

    Args:
        lib: path to the library file (its extension is replaced for the output name).
        do_not_output_library: when True, skip writing the decoy-augmented tsv.
        n_threads: number of worker processes used for decoy generation.
        seed: RNG seed forwarded to the "shuffle" decoy method.
        mz_min, mz_max: precursor AND fragment m/z window kept ([mz_min, mz_max)).
        n_frags_each_precursor: minimum fragment count per retained precursor.
        decoy_method: strategy name forwarded to decoy_generator.
        logger: logger used for progress messages.

    Returns:
        (lib_cols, library) where library holds targets plus generated decoys,
        sorted by precursor id and descending library intensity.
    """
    output_filename = os.path.join(os.path.dirname(lib), os.path.basename(lib)[:-4] + ".DreamDIA.with_decoys.tsv")
    lib_cols, library = load_library(lib)
    library = correct_full_sequence(library, lib_cols["PRECURSOR_ID_COL"], lib_cols["FULL_SEQUENCE_COL"])
    # Keep only precursors and fragments inside the m/z window.
    library = library[(library[lib_cols["PRECURSOR_MZ_COL"]] >= mz_min) & (library[lib_cols["PRECURSOR_MZ_COL"]] < mz_max)]
    library = library[(library[lib_cols["FRAGMENT_MZ_COL"]] >= mz_min) & (library[lib_cols["FRAGMENT_MZ_COL"]] < mz_max)]
    library.index = [i for i in range(library.shape[0])]
    # Drop precursors with too few fragments, then re-index contiguously.
    precursor_indice = get_precursor_indice(library[lib_cols["PRECURSOR_ID_COL"]])
    too_few_indice = flatten_list([i for i in precursor_indice if len(i) < n_frags_each_precursor])
    library.drop(too_few_indice, inplace = True)
    library.index = [i for i in range(library.shape[0])]
    # Split precursors into per-worker chunks.
    precursor_indice, chunk_indice = tear_library(library, lib_cols, n_threads)
    original_colnames = list(library.columns)
    # Columns rebuilt for decoys; everything else is copied through unchanged.
    modifiable_colnames = [lib_cols["FRAGMENT_MZ_COL"],
                           lib_cols["PURE_SEQUENCE_COL"],
                           lib_cols["FULL_SEQUENCE_COL"],
                           lib_cols["PRECURSOR_ID_COL"],
                           lib_cols["PROTEIN_NAME_COL"],
                           "transition_name", "decoy", "PeptideGroupLabel"]
    fixed_colnames = [i for i in original_colnames if i not in modifiable_colnames]
    # If the library already carries a healthy decoy fraction (> 50% of
    # targets), keep it as-is instead of generating more.
    if "decoy" in original_colnames:
        decoy_types = library["decoy"].value_counts()
        if 0 in decoy_types and 1 in decoy_types:
            if decoy_types[1] > 0.5 * decoy_types[0]:
                logger.info("The spectral library has enough decoys, so DreamDIA-XMBD will not generate more.")
                if not do_not_output_library:
                    library.to_csv(output_filename, sep = "\t", index = False)
                return lib_cols, library
    # Fan out decoy generation to worker processes; each worker appends one
    # result bundle to its own Manager list.
    generators = []
    mgr = multiprocessing.Manager()
    result_collectors = [mgr.list() for _ in range(n_threads)]
    for i, chunk_index in enumerate(chunk_indice):
        precursor_index = [precursor_indice[idx] for idx in chunk_index]
        p = multiprocessing.Process(target = decoy_generator,
                                    args = (library, lib_cols, decoy_method, precursor_index, original_colnames, result_collectors[i], fixed_colnames, seed, ))
        generators.append(p)
        p.daemon = True
        p.start()
    for p in generators:
        p.join()
    # collector[0] is the single bundle each worker appended; see decoy_generator.
    product_mz = flatten_list([collector[0][0] for collector in result_collectors])
    peptide_sequence = flatten_list([collector[0][1] for collector in result_collectors])
    full_uniMod_peptide_name = flatten_list([collector[0][2] for collector in result_collectors])
    transition_group_id = flatten_list([collector[0][3] for collector in result_collectors])
    decoy = flatten_list([collector[0][4] for collector in result_collectors])
    protein_name = flatten_list([collector[0][5] for collector in result_collectors])
    transition_name = flatten_list([collector[0][6] for collector in result_collectors])
    peptide_group_label = flatten_list([collector[0][7] for collector in result_collectors])
    fixed_part = pd.concat([collector[0][8] for collector in result_collectors])
    # Reassemble the decoy rows: regenerated columns + copied-through columns.
    modified_part = pd.DataFrame({lib_cols["FRAGMENT_MZ_COL"] : product_mz,
                                  lib_cols["PURE_SEQUENCE_COL"] : peptide_sequence,
                                  lib_cols["FULL_SEQUENCE_COL"] : full_uniMod_peptide_name,
                                  lib_cols["PRECURSOR_ID_COL"] : transition_group_id,
                                  lib_cols["DECOY_OR_NOT_COL"] : decoy,
                                  lib_cols["PROTEIN_NAME_COL"] : protein_name})
    if "transition_name" in original_colnames:
        modified_part["transition_name"] = transition_name
    if "PeptideGroupLabel" in original_colnames:
        modified_part["PeptideGroupLabel"] = peptide_group_label
    modified_part.index = [nn for nn in range(modified_part.shape[0])]
    fixed_part.index = [nn for nn in range(fixed_part.shape[0])]
    if "decoy" in original_colnames:
        decoy_data = pd.concat([modified_part, fixed_part], axis = 1).loc[:, original_colnames]
    else:
        # Source had no decoy column: add one and mark all original rows as targets.
        decoy_data = pd.concat([modified_part, fixed_part], axis = 1).loc[:, original_colnames + ["decoy"]]
        library["decoy"] = [0 for _ in range(library.shape[0])]
    library_with_decoys = pd.concat([library, decoy_data])
    library_with_decoys = library_with_decoys.sort_values(by = [lib_cols["PRECURSOR_ID_COL"], lib_cols["LIB_INTENSITY_COL"]], ascending = [True, False])
    library_with_decoys.index = [i for i in range(library_with_decoys.shape[0])]
    # Never overwrite an existing output file.
    if (not do_not_output_library) and (not os.path.exists(output_filename)):
        library_with_decoys.to_csv(output_filename, index = False, sep = "\t")
    return lib_cols, library_with_decoys
|
python
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Micron Technology, Inc. All rights reserved.
import argparse
import datetime
import os
import time
import subprocess
import sys
import requests_unixsocket
import yaml
# Local timezone, resolved once at import time from the system clock.
TZ_LOCAL = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo

def dt():
    """Return the current local time as an ISO-8601 timestamp string."""
    now = datetime.datetime.now(tz=TZ_LOCAL)
    return now.isoformat()
def main():
    """Periodically snapshot HSE cn-tree shapes to YAML files.

    Polls the REST endpoint of each KVS every --interval seconds and writes
    one numbered YAML file per iteration under --output-dir/<kvdb>/<kvs>/.
    Runs forever; stop with Ctrl-C.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--interval", "-i", type=int, default=10)
    parser.add_argument("--output-dir", "-d", default="cn_tree_shapes")
    # Exactly one source of KVS names: an explicit list or a whole mpool/KVDB.
    grp = parser.add_mutually_exclusive_group()
    grp.add_argument("--kvs", nargs="+")
    grp.add_argument("--mpool")
    args = parser.parse_args()
    if not args.kvs and not args.mpool:
        print("One of --kvs, or --mpool is required. Quitting.")
        sys.exit(1)
    # Refuse to clobber an existing output directory.
    if os.path.exists(args.output_dir):
        print("%s already exists. Quitting." % args.output_dir)
        sys.exit(1)
    print(args.mpool)
    if args.mpool:
        # Discover every KVS of the given KVDB via `hse kvdb list -v` (YAML output).
        kvslist = []
        cmd = ["hse", "kvdb", "list", "-v"]
        cmd += [args.mpool]
        out = subprocess.check_output(cmd)
        data = yaml.safe_load(out)
        if "kvdbs" in data and data["kvdbs"]:
            for record in data["kvdbs"]:
                kvslist.extend(record["kvslist"])
    else:
        kvslist = args.kvs
    # Per-KVS REST URL, output directory and unix socket path.
    url = {}
    dirpath = {}
    sockpath = {}
    for kvs in kvslist:
        kvdbname, kvsname = kvs.split("/")
        # NOTE(review): assumes HSE_REST_SOCK_PATH is set; .replace() below
        # raises AttributeError when the variable is missing -- confirm.
        socket_path = os.getenv('HSE_REST_SOCK_PATH')
        url[kvs] = "http+unix://%s/mpool/%s/kvs/%s/cn/tree" % (
            socket_path.replace("/", "%2F"),
            kvdbname,
            kvsname,
        )
        dirpath[kvs] = os.path.join(args.output_dir, kvdbname, kvsname)
        os.makedirs(dirpath[kvs])
        sockpath[kvs] = socket_path
    session = requests_unixsocket.Session()
    counter = 1
    while True:
        t1 = time.time()
        for kvs in kvslist:
            path = os.path.join(dirpath[kvs], "cn_tree_%06d.yaml" % counter)
            # Only query when the REST socket exists (i.e. the KVS is open).
            if os.path.exists(sockpath[kvs]):
                response = session.get(url[kvs])
            else:
                print(
                    "[%s] Iteration #%06d of KVS %s path %s does not exist, "
                    "KVS not open? Skipping." % (dt(), counter, kvs, sockpath[kvs])
                )
                continue
            # A usage message instead of tree data means the endpoint is not serving.
            if response.text.startswith("Usage:"):
                print(
                    "[%s] Iteration #%06d of KVS %s returned usage message, "
                    "KVS not open? Skipping." % (dt(), counter, kvs)
                )
            else:
                print("[%s] Writing to path %s" % (dt(), path))
                with open(path, "w") as fp:
                    fp.write(response.text)
        # Sleep the remainder of the interval, compensating for poll time.
        # NOTE(review): --interval 0 would divide by zero here -- confirm intent.
        time.sleep(args.interval - ((time.time() - t1) % args.interval))
        counter += 1
if __name__ == "__main__":
    main()
|
python
|
import csv
import six
import io
import json
import logging
from collections import Mapping
from ..util import resolve_file_path
logger = logging.getLogger(__name__)
EPILOG = __doc__
class MappingTableIntakeException(Exception):
    """Raised when the mapping-table intake stage fails because of an error
    in the table itself (bad header, duplicate/missing fields, ...)."""
    pass
class GeneTableIntakeException(Exception):
    """Raised when the gene-table intake stage fails because of an error
    in the table itself."""
    pass
class MappingTableHeader:
    """Constants describing the fixed structure of the mapping table (csv):
    which row carries the header and how each known column is typed."""
    # Rows 0 and 1 hold version/date metadata; row 2 is the field-name header.
    HEADER_ROW_INDEX = 2
    # Columns coerced to int.
    INTEGER_FIELDS = ['no', 'maximum_length_of_value', 'default', 'min', 'max']
    # Columns interpreted as Y/N booleans.
    BOOLEAN_FIELDS = ['is_list', 'calculated_property', 'embedded_field', 'do_import', 'add_no_value']
    # Columns copied through as strings.
    STRING_FIELDS = ['field_name', 'vcf_field', 'source_name', 'source_version', 'sub_embedding_group',
                     'annotation_category', 'separator', 'description',
                     'scope', 'schema_title', 'pattern', 'link', 'abbreviation']
    # Columns with bespoke handling.
    SPECIAL_FIELDS = ['field_type', 'enum_list', 'links_to']
    # Comma-separated enum value columns.
    ENUM_FIELDS = ['enum_list']
    # Columns that the intake deliberately skips.
    IGNORED_FIELDS = ['source', 'priority', 'annotation_space_location', 'comments', 'value_example']
    ALL_FIELDS = INTEGER_FIELDS + BOOLEAN_FIELDS + STRING_FIELDS + SPECIAL_FIELDS + ENUM_FIELDS + IGNORED_FIELDS
class VariantTableParser(object):
    """ Class that encapsulates data/functions related to the annotation field mapping table. """
    FIELD_TYPE_INDEX = 10  # XXX: hardcoded, must change if field_type is moved on mapping table
    # JSON files that hold the embed declarations written by provision_embeds/update_embeds.
    EMBEDDED_VARIANT_FIELDS = resolve_file_path('schemas/variant_embeds.json')
    EMBEDDED_VARIANT_SAMPLE_FIELDS = resolve_file_path('schemas/variant_sample_embeds.json')  # XXX: unused currently
    # (item type, embed file path) pairs processed by provision_embeds/update_embeds.
    EMBEDS_TO_GENERATE = [('variant', EMBEDDED_VARIANT_FIELDS),
                          ('variant_sample', EMBEDDED_VARIANT_SAMPLE_FIELDS)]
    NAME_FIELD = 'field_name'  # mapping-table column holding each annotation field's name
def __init__(self, _mp, schema, skip_embeds=False):
self.mapping_table = _mp
self.annotation_field_schema = json.load(io.open(schema, 'r'))
self.version, self.date, self.fields = self.read_mp_meta()
if not skip_embeds: # if calling from gene, do not wipe variant/variant_sample embeds
self.provision_embeds()
@staticmethod
def process_fields(row):
""" Takes in the row of field names and processes them. At this point fields are all
lowercased and use underscores, such as 'field_name'
Args:
row: row of fields to be processed from the mapping table
Raises:
MappingTableIntakeException if a duplicate field is detected or no fields
are detected
Returns:
list of fields
"""
fields = {}
for name in row:
if name not in fields:
fields[name] = True
else:
raise MappingTableIntakeException('Found duplicate field in %s' % row)
if not fields:
raise MappingTableIntakeException('Did not find any fields on row %s' % row)
return fields.keys()
def provision_embeds(self):
""" Does setup necessary for writing embeds to JSON files in the schemas directory
Called by initializer based on EMBEDS_TO_GENERATE, overwrite this to control
this functionality (for genes).
"""
for field, f in self.EMBEDS_TO_GENERATE:
with io.open(f, 'w+') as fd:
json.dump({field: {}}, fd, indent=4)
def read_mp_meta(self):
""" Reads mapping table from file given to class. First 3 rows of the mapping
table contain this information. Version and Date are in the second column
while fields are across the third row, as below:
,version=v1.5, ...
,date=12/1/2019, ...
field1, field2, field3, ...
Returns:
3 tuple - version, date, fields
"""
version, date, fields = None, None, None
with io.open(self.mapping_table, 'r', encoding='utf-8-sig') as f:
reader = csv.reader(f)
for row_idx, row in enumerate(reader):
if row_idx == 0:
version = row[1].split('=')[1].strip()
elif row_idx == 1:
date = row[1].split('=')[1].strip()
elif row_idx == 2:
fields = self.process_fields(row)
else:
break # we are done with this step
logger.info('Mapping table Version: %s, Date: %s\n' % (version, date))
logger.info('Mapping table fields: %s\n' % (", ".join(fields)))
return version, date, fields
    def process_annotation_field_inserts(self):
        """ Processes the annotation fields in the mapping table to produce inserts.

            Each data row of the csv becomes one insert dict; entries are
            coerced per the column groups declared on MappingTableHeader.
            Note that project and institution are required fields on the
            annotation field schema and are not set here.

        :returns: list of annotation field inserts
        """
        inserts = []
        with io.open(self.mapping_table, 'r', encoding='utf-8-sig') as f:
            reader = csv.reader(f)
            for row_idx, row in enumerate(reader):
                insert = {}
                if row_idx <= MappingTableHeader.HEADER_ROW_INDEX:  # skip header rows
                    continue
                for field_name, entry in zip(self.fields, row):
                    if field_name not in self.annotation_field_schema['properties'] or not entry:
                        continue  # IMPORTANT: skip entry not in field schema
                    if field_name in MappingTableHeader.INTEGER_FIELDS:  # handle int fields
                        if entry is not None:  # entry=0 is a normal value
                            insert[field_name] = int(entry)
                    elif field_name in MappingTableHeader.BOOLEAN_FIELDS:  # handle bool fields
                        if entry is not None:
                            if entry == 'Y':
                                insert[field_name] = True
                            else:  # assume False if anything other than 'Y' is present
                                insert[field_name] = False
                    elif field_name in MappingTableHeader.ENUM_FIELDS:  # handle enum fields
                        if entry is not None:
                            # enum values are comma-separated; coerce per the
                            # row's own field_type column
                            field_type = row[self.FIELD_TYPE_INDEX]
                            val_list = []
                            if field_type == 'string':
                                val_list = [en.strip() for en in entry.split(',') if en.strip()]
                            elif field_type == 'number':
                                val_list = [float(en.strip()) for en in entry.split(',') if en.strip()]
                            elif field_type == 'integer':
                                val_list = [int(en.strip()) for en in entry.split(',') if en.strip()]
                            insert[field_name] = val_list
                    else:  # handle all other fields with direct copy if they exist
                        if field_name == 'pattern':  # must decode escape characters
                            insert[field_name] = entry.encode().decode('unicode-escape')
                        else:
                            insert[field_name] = entry
                inserts.append(insert)
        return inserts
@staticmethod
def filter_fields_by_sample(inserts):
""" Returns annotation fields that belong on the sample variant schema
:param inserts: annotation field inserts processed from above
:return: only annotations fields that are part of the sample variant
"""
return [field for field in inserts if field.get('scope', '') == 'sample_variant']
@staticmethod
def filter_fields_by_variant(inserts):
""" Returns annotation fields that belong on the variant schema
:param inserts: all raw annotation field inserts
:return: only annotation fields that are part of the sample variant
"""
return [field for field in inserts if field.get('scope', '') == 'variant']
def update_embeds(self, item, scope):
""" Updates the EMBEDDED_FIELDS location JSON containing the embeds for Variant.
NOTE: the files are overwritten every time you run the process!
:param item: embedded field to be written
:param scope: which item type this embed is for
"""
# XXX: This does NOT work properly if for linkTos, embeds required .keyword!
for t, f in self.EMBEDS_TO_GENERATE:
if scope == t:
with io.open(f, 'rb') as fd:
embeds = json.load(fd)
link_type = 'embedded_field'
prefix = ''
if item.get('sub_embedding_group', None):
prefix = self.format_sub_embedding_group_name(item.get('sub_embedding_group'), t='key') + '.'
if link_type not in embeds[t]:
embeds[t][link_type] = [prefix + item[self.NAME_FIELD]]
else:
embeds[t][link_type].append(prefix + item[self.NAME_FIELD])
with io.open(f, 'w+') as wfd:
json.dump(embeds, wfd, indent=4)
wfd.write('\n') # write newline at EOF
@staticmethod
def format_sub_embedding_group_name(json_or_str, t='key'):
""" Helper method that will extract the appropriate value from sub_embedding_group
:param json_or_str: entry in mapping table, could be string or json, so we try both
:param t: one of key or title
:return: title that you wanted based on inputs
"""
if t not in ['key', 'title']:
raise MappingTableIntakeException('Tried to parse sub_embedded_group with'
'key other than "key" or "title": %s ' % t)
try:
fmt = json.loads(json_or_str)
except Exception: # just a string is given, use for both name and title
return json_or_str
else:
return fmt[t]
def generate_properties(self, inserts, variant=True):
""" Generates variant/variant sample properties.
:param inserts: result of one of the above two functions
:param variant: whether or not we are generating variant props or sample_variant props
:return: properties
"""
# TODO: refactor this process, as it is a little hard to follow - Will 1/21/2021
props = {}
cols = {}
facs = {}
# inner functions to be used as helper
def get_prop(item):
if item.get('embedded_field', False):
self.update_embeds(item, item.get('scope', 'gene')) # XXX: HACK - how to get around? -Will
return {}
if not item.get('do_import', True): # DROP fields that explicitly have do_import = False
return {}
temp = {}
prop_name = item[self.NAME_FIELD]
features = {}
features.update({
"title": item.get('schema_title', prop_name),
self.NAME_FIELD: prop_name,
"type": item['field_type']
})
# handle fields where key changes directly
if item.get('schema_description'):
features['description'] = item['schema_description']
if item.get('links_to'):
features['linkTo'] = item['links_to']
if item.get('enum_list'):
features['enum'] = item['enum_list']
if item.get('field_priority'):
features['lookup'] = item['field_priority']
# handle boolean fields
for a_field in MappingTableHeader.BOOLEAN_FIELDS:
if item.get(a_field) and a_field != 'is_list':
features[a_field] = item[a_field]
# handle string fields
for a_field in MappingTableHeader.STRING_FIELDS:
if item.get(a_field) is not None:
features[a_field] = item[a_field]
# handle int fields
for a_field in MappingTableHeader.INTEGER_FIELDS:
if item.get(a_field) is not None:
features[a_field] = int(item[a_field])
# handle sub_embedded object
if item.get('sub_embedding_group'):
sub_temp = {}
prop = {}
sum_ob_name = self.format_sub_embedding_group_name(item['sub_embedding_group'], t='key')
sub_title = self.format_sub_embedding_group_name(item['sub_embedding_group'], t='title')
# handle sub-embedded object that is an array
if item.get('is_list'):
prop[prop_name] = {
'title': item.get(self.NAME_FIELD, 'None provided'),
'type': 'array',
'items': features
}
sub_temp.update({
'title': sum_ob_name,
'type': 'array',
'items': {
'title': sub_title,
'type': 'object',
'properties': prop
}
})
else:
prop[prop_name] = features
sub_temp.update({
'title': sub_title,
'type': 'array',
'items': {
'title': sub_title,
'type': 'object',
'properties': prop,
}
})
temp[sum_ob_name] = sub_temp
return temp
# convert to array structure
if item.get('is_list'):
array_item = {}
array_item.update({
"title": item.get('schema_title', item[self.NAME_FIELD]),
"type": "array",
self.NAME_FIELD: item[self.NAME_FIELD]
})
if item.get('schema_description'):
array_item['description'] = item['schema_description']
array_item['items'] = features
temp[prop_name] = array_item
return temp
else:
temp[prop_name] = features
return temp
def update(d, u):
    """Recursively merge mapping ``u`` into mapping ``d`` (in place).

    Nested mappings are merged key-by-key; any non-mapping value in ``u``
    overwrites the corresponding value in ``d`` wholesale.

    :param d: dict to merge into (mutated)
    :param u: dict whose entries take precedence
    :return: ``d``, for convenience
    """
    for k, v in u.items():  # six.iteritems was a py2 shim; plain .items() on py3
        dv = d.get(k, {})
        if not isinstance(dv, Mapping):
            # existing value is a leaf - overwrite it
            d[k] = v
        elif isinstance(v, Mapping):
            # both sides are mappings - merge recursively
            d[k] = update(dv, v)
        else:
            d[k] = v
    return d
def is_variant(o):
    """True when the annotation field is scoped to the variant item."""
    scope = o.get('scope')
    return scope == 'variant'

def is_sub_embedded_object(o):
    """Truthy (the group spec) when the field belongs to a sub-embedded object."""
    return o.get('sub_embedding_group')

def is_facet(o):
    """Return the facet order for facet fields, else None.

    NOTE(review): an order of 0 is falsy, so callers that truth-test the
    result (instead of comparing against None) would skip such a facet.
    """
    return o.get('facet_order', None)

def is_column(o):
    """Return the column order for column fields, else None."""
    return o.get('column_order')

def is_link_to(o):
    """Truthy (the target item type) when the field is a linkTo."""
    return o.get('links_to')

def is_numbered_field(o):
    """True for numeric field types (integer or number)."""
    field_type = o.get('field_type')
    return field_type in ['integer', 'number']

def has_grouping(o):
    """Return the annotation category when present, else False."""
    return o.get('annotation_category', False)

def is_default_hidden(o):
    """True when the facet should start out hidden ('Y' flag)."""
    flag = o.get('facet_default_hidden', 'N')
    return flag == 'Y'
def insert_column_or_facet(d, o, facet=True):
    """Derive a column/facet definition for annotation field ``o`` and
    store it in ``d`` under the (possibly dotted) field path.

    :param d: dict of columns or facets being accumulated (mutated)
    :param o: annotation field insert (dict) from the mapping table
    :param facet: True to build a facet entry, False for a column entry
    """
    entry = {'title': o.get('schema_title', o.get(self.NAME_FIELD))}
    if is_default_hidden(o):
        entry['default_hidden'] = True
    if is_numbered_field(o) and is_facet(o):
        # numeric facets aggregate as stats and carry a slider step
        entry['aggregation_type'] = 'stats'
        if "number_step" in o:
            entry['number_step'] = o["number_step"]
        elif o['field_type'] == "integer":
            entry['number_step'] = 1
        else:
            # Default. Is assumed to be "any" on frontend if absent,
            # but adding 'documentation through redundancy', if such thing is a thing.
            entry['number_step'] = "any"
    # attach ordering (plus grouping, for facets only)
    if facet and is_facet(o) is not None:
        entry['order'] = is_facet(o)
        if has_grouping(o) is not False:
            entry['grouping'] = o.get('annotation_category')
    if not facet and is_column(o) is not None:
        entry['order'] = is_column(o)
    # build the dotted key: sub-embedding prefix first, then
    # '.display_title' for linkTos (what the UI searches/renders)
    if is_sub_embedded_object(o):
        key = (self.format_sub_embedding_group_name(o.get('sub_embedding_group'))
               + '.' + o[self.NAME_FIELD])
    else:
        key = o[self.NAME_FIELD]
    if is_link_to(o):
        key += '.display_title'
    d[key] = entry
# go through all annotation objects generating schema properties and
# adding columns/facets as defined by the mapping table
for obj in inserts:
update(props, get_prop(obj))
if variant: # we are doing variant, so take columns only from variant context
if is_variant(obj):
if is_facet(obj):
insert_column_or_facet(facs, obj)
if is_column(obj):
insert_column_or_facet(cols, obj, facet=False)
else: # we are doing variant_sample, so we should take columns/facets from BOTH
if is_facet(obj):
insert_column_or_facet(facs, obj)
if is_column(obj):
insert_column_or_facet(cols, obj, facet=False)
if not props:
raise MappingTableIntakeException('Got no properties on schema!')
return props, cols, facs
@staticmethod
def add_default_schema_fields(schema):
    """ Adds default schema fields
    Args:
        schema: schema to add fields to
    """
    schema.update({
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'type': 'object',
        'required': ['institution', 'project'],  # for display_title
        'identifyingProperties': ['uuid', 'aliases', 'annotation_id'],
        'additionalProperties': False,
    })
@staticmethod
def add_variant_required_fields(schema):
    """Mark the genomic coordinate fields as required on the variant schema."""
    for field in ('CHROM', 'REF', 'ALT', 'POS'):
        schema['required'].append(field)
@staticmethod
def add_variant_sample_required_fields(schema):
    """Mark the call/link fields as required on the variant_sample schema."""
    for field in ('CALL_INFO', 'variant', 'file'):
        schema['required'].append(field)
@staticmethod
def add_identifier_field(props):
    """ Adds the 'annotation_id' field, the unique_key constraint on variant/variant_sample which
    is an alias for the display_title.
    """
    annotation_id = dict(
        title='Annotation ID',
        type='string',
        uniqueKey=True,
    )
    props['annotation_id'] = annotation_id
@staticmethod
def add_extra_variant_sample_columns(cols):
    """ Adds href, variant display title to columns (fields not on mapping table) """
    cols.update({
        'display_title': {
            "title": "Position",
            "order": 0,
            "sort_fields": [
                {"field": "variant.display_title", "title": "Variant Display Title"},
                {"field": "variant.csq_rs_dbsnp151", "title": "dbSNP RS Number"}
            ]
        },
        'bam_snapshot': {
            "title": 'Genome Snapshot',
            "order": 81
        },
        "associated_genotype_labels.proband_genotype_label": {
            "title": "Genotype",
            "order": 39,
            "sort_fields": [
                {"field": "associated_genotype_labels.proband_genotype_label", "title": "Proband GT"},
                {"field": "associated_genotype_labels.mother_genotype_label", "title": "Mother GT"},
                {"field": "associated_genotype_labels.father_genotype_label", "title": "Father GT"}
            ]
        },
    })
    # Redundant - display_title column renders this as well.
    # cols['variant.display_title'] = {
    #     'title': 'Variant',
    # }
@staticmethod
def extend_variant_sample_columns(cols):
    """Adjust columns the UI renders in a combined fashion, and mark some
    columns default-hidden. Each adjustment applies only if the column exists."""
    adjustments = [
        # We combine `genes_most_severe_gene` + `genes_most_severe_transcript` columns in the UI column render func for compactness.
        ("variant.genes.genes_most_severe_gene.display_title", {
            "title": "Gene, Transcript",
            "sort_fields": [
                {"field": "variant.genes.genes_most_severe_gene.display_title", "title": "Gene"},
                {"field": "variant.genes.genes_most_severe_transcript", "title": "Most Severe Transcript"}
            ]
        }),
        # We combine `DP` + `AF` columns in the UI column render func for compactness.
        ("DP", {
            "title": "Coverage, VAF",
            "sort_fields": [
                {"field": "DP", "title": "Coverage"},
                {"field": "AF", "title": "VAF"}
            ]
        }),
        # We combine `csq_gnomadg_af` + `csq_gnomadg_af_popmax` columns in the UI column render func for compactness.
        ("variant.csq_gnomadg_af", {
            "title": "gnomAD",
            "sort_fields": [
                {"field": "variant.csq_gnomadg_af", "title": "gnomad AF"},
                {"field": "variant.csq_gnomadg_af_popmax", "title": "gnomad AF Population Max"}
            ]
        }),
        ("variant.csq_cadd_phred", {
            "title": "Predictors",
            "sort_fields": [
                {"field": "variant.csq_cadd_phred", "title": "Cadd Phred Score"},
                {"field": "variant.spliceaiMaxds", "title": "SpliceAI Max DS"},
                {"field": "variant.csq_phylop100way_vertebrate", "title": "PhyloP 100 Score"}
            ]
        }),
        ("variant.genes.genes_most_severe_hgvsc", {
            "title": "Variant",
            "sort_fields": [
                {"field": "variant.genes.genes_most_severe_hgvsc", "title": "Coding Sequence"},
                {"field": "variant.genes.genes_most_severe_hgvsp", "title": "Protein Sequence"}
            ]
        }),
        # Default Hidden Columns:
        ("variant.csq_clinvar", {"default_hidden": True}),
        ("GT", {"default_hidden": True}),
    ]
    for key, upd in adjustments:
        if key in cols:
            cols[key].update(upd)
@staticmethod
def add_extra_variant_sample_facets(facs):
    """
    Order of a Facet Group within top-level FacetList is determined by `min(grouped facet 1, grouped facet 2, ...)`
    which is then used for sorting relative to all other top-level facets' and facet groups' orders.
    Facets within a group are sorted relative to each other.

    Adds facets that are not defined on the mapping table (gene lookups,
    genotype labels, and numeric range facets) directly into ``facs`` (mutated).
    """
    # Gene facets use search-as-you-type lookup rather than term lists
    facs["variant.genes.genes_most_severe_gene.display_title"] = {
        "title": "Gene",
        "order": 1,
        "grouping": "Genes",
        "search_type": "sayt_without_terms",  # Enables search-as-you-type via AJAX (SAYT-AJAX) for this facet
        "sayt_item_type": "Gene"  # Required if "search_type" == "sayt_without_terms"
    }
    facs["variant.genes.genes_most_severe_gene.gene_lists.display_title"] = {
        "title": "Gene List",
        "order": 2,
        "grouping": "Genes",
        "description": "Groups of genes that are relevant for a disease or condition"
    }
    facs['inheritance_modes'] = {
        'title': 'Inheritance Modes',
        'order': 15,
    }
    # Range facets using range aggregation_type (ranges will be defined from Q2Q tab in future)
    facs['variant.csq_gnomadg_af'] = {
        "title": "GnomAD Alt Allele Frequency",
        "aggregation_type": "range",
        "number_step": "any",
        "order": 18,
        "grouping": "Population Frequency",
        "ranges": [
            { "from": 0, "to": 0, "label": "unobserved" },
            { "from": 0, "to": 0.001, "label": "ultra-rare" },
            { "from": 0.001, "to": 0.01, "label": "rare" },
            { "from": 0.01, "to": 1, "label": "common" }
        ]
    }
    facs['variant.csq_gnomadg_af_popmax'] = {
        "title": "GnomAD Alt AF - PopMax",
        "aggregation_type": "range",
        "number_step": "any",
        "order": 19,
        "grouping": "Population Frequency",
        "ranges": [
            { "from": 0, "to": 0, "label": "unobserved" },
            { "from": 0, "to": 0.001, "label": "ultra-rare" },
            { "from": 0.001, "to": 0.01, "label": "rare" },
            { "from": 0.01, "to": 1, "label": "common" }
        ]
    }
    facs['variant.csq_phylop100way_vertebrate'] = {
        "title": "PhyloP (100 Vertebrates)",
        "aggregation_type": "range",
        "number_step": "any",
        "order": 22,
        "grouping": "Effect Predictors",
        "ranges": [
            { "from": -20, "to": -3, "label": "strong positive selection" },
            { "from": -3, "to": -2, "label": "positive selection" },
            { "from": -2, "to": 2, "label": "low selection" },
            { "from": 2, "to": 3, "label": "conserved" },
            { "from": 3, "to": 10, "label": "highly conserved"}
        ]
    }
    # NOTE(review): FS threshold 20 corresponds to P = 0.01 per the labels
    # below - presumably a log-scaled score; confirm against VCF spec usage.
    facs['FS'] = {
        "title": "Strand Fisher Score",
        "aggregation_type": "range",
        "number_step": "any",
        "order": 12,
        "grouping": "Variant Quality",
        "ranges": [
            { "to": 20, "label": "Low Strand Bias (P ≥ 0.01)" },
            { "from": 20, "label": "High Strand Bias (P < 0.01)" }
        ]
    }
    facs['AD_ALT'] = {
        "title": "AD (Alt)",
        "aggregation_type": "range",
        "number_step": 1,
        "order": 10,
        "grouping": "Variant Quality",
        "ranges": [
            { "from": 1, "to": 4, "label": "Very Low" },
            { "from": 5, "to": 9, "label": "Low" },
            { "from": 10, "to": 19, "label": "Medium" },
            { "from": 20, "label": "High" }
        ]
    }
    facs['novoPP'] = {
        "title": "novoCaller PP",
        "aggregation_type": "range",
        "number_step": "any",
        "order": 16,
        "grouping": "Genotype",
        "ranges": [
            { "from": 0.1, "to": 0.9, "label": "de novo candidate (weak)" },
            { "from": 0.9, "to": 1, "label": "de novo candidate (strong)" }
        ]
    }
    # Genotype labels (calculated properties)
    facs.update({
        "associated_genotype_labels.proband_genotype_label": {
            "title": "Proband Genotype",
            "order": 12,
            "grouping": "Genotype"
        },
        "associated_genotype_labels.mother_genotype_label": {
            "title": "Mother Genotype",
            "order": 13,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.father_genotype_label": {
            "title": "Father Genotype",
            "order": 14,
            "grouping": "Genotype",
            "default_hidden": True
        },
        # Below facets are default-hidden unless e.g. additional_facet=associated_genotype_labels.co_parent_genotype_label
        # URL param is supplied in filter block flags or search href.
        "associated_genotype_labels.co_parent_genotype_label": {
            "title": "Co-Parent Genotype",
            "order": 1000,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.sister_genotype_label": {
            "title": "Sister Genotype",
            "order": 1001,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.sister_II_genotype_label": {
            "title": "Sister II Genotype",
            "order": 1002,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.sister_III_genotype_label": {
            "title": "Sister III Genotype",
            "order": 1003,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.sister_IV_genotype_label": {
            "title": "Sister IV Genotype",
            "order": 1004,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.brother_genotype_label": {
            "title": "Brother Genotype",
            "order": 1005,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.brother_II_genotype_label": {
            "title": "Brother II Genotype",
            "order": 1006,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.brother_III_genotype_label": {
            "title": "Brother III Genotype",
            "order": 1007,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.brother_IV_genotype_label": {
            "title": "Brother IV Genotype",
            "order": 1008,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.daughter_genotype_label": {
            "title": "Daughter Genotype",
            "order": 1009,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.daughter_II_genotype_label": {
            "title": "Daughter II Genotype",
            "order": 1010,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.daughter_III_genotype_label": {
            "title": "Daughter III Genotype",
            "order": 1011,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.daughter_IV_genotype_label": {
            "title": "Daughter IV Genotype",
            "order": 1012,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.son_genotype_label": {
            "title": "Son Genotype",
            "order": 1013,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.son_II_genotype_label": {
            "title": "Son II Genotype",
            "order": 1014,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.son_III_genotype_label": {
            "title": "Son III Genotype",
            "order": 1015,
            "grouping": "Genotype",
            "default_hidden": True
        },
        "associated_genotype_labels.son_IV_genotype_label": {
            "title": "Son IV Genotype",
            "order": 1016,
            "grouping": "Genotype",
            "default_hidden": True
        }
    })
@staticmethod
def extend_variant_sample_facets(facs):
    """Extension hook for post-processing facet definitions; intentionally a
    no-op in this class (subclasses may override)."""
    pass
def generate_variant_sample_schema(self, sample_props, cols, facs, variant_cols, variant_facs):
    """ Builds the variant_sample.json schema based on sample_props. Will also add variant columns and
    facets since this information is embedded.
    Args:
        sample_props: first output of generate_properties
        cols: variant_sample columns (mutated; merged with variant_cols plus extras)
        facs: variant_sample facets (mutated; merged with variant_facs plus extras)
        variant_cols: columns from the embedded variant (keys get 'variant.' prefixed)
        variant_facs: facets from the embedded variant (keys get 'variant.' prefixed)
    Returns:
        Variant sample schema
    """
    schema = {}
    self.add_default_schema_fields(schema)
    self.add_variant_sample_required_fields(schema)
    schema['title'] = 'Sample Variant'
    schema['description'] = "Schema for variant info for sample"
    schema['id'] = '/profiles/variant_sample.json'
    schema['mixinProperties'] = [
        {"$ref": "mixins.json#/schema_version"},
        {"$ref": "mixins.json#/uuid"},
        {"$ref": "mixins.json#/aliases"},
        {"$ref": "mixins.json#/submitted"},
        {"$ref": "mixins.json#/modified"},
        {"$ref": "mixins.json#/status"},
        {"$ref": "mixins.json#/attribution"},
        {"$ref": "mixins.json#/notes"},
        {"$ref": "mixins.json#/static_embeds"},
    ]
    schema['properties'] = sample_props
    schema['properties']['schema_version'] = {'default': '1'}
    # The properties below are not on the mapping table, so are added explicitly here
    schema['properties']['variant'] = {  # link to single variant
        'title': 'Variant',
        'type': 'string',
        'linkTo': 'Variant',
    }
    schema['properties']['gene_notes'] = {
        'title': 'Gene Notes',
        'description': 'Note item related to this Gene',
        'type': 'string',
        'linkTo': 'NoteStandard'
    }
    schema['properties']['variant_notes'] = {
        'title': 'Variant Notes',
        'description': 'Notes related to the relevant Variant',
        'type': 'string',
        'linkTo': 'NoteStandard'
    }
    schema['properties']['interpretation'] = {
        'title': 'Clinical Interpretation',
        'description': 'Clinical Interpretation Note connected to this item',
        'type': 'string',
        'linkTo': 'NoteInterpretation'
    }
    schema['properties']['discovery_interpretation'] = {
        'title': 'Discovery Interpretation',
        'description': 'Gene/Variant Discovery interpretation note connected to this item',
        'type': 'string',
        'linkTo': 'NoteDiscovery'
    }
    schema['properties']['file'] = {  # NOT a linkTo as the ID is sufficient for filtering
        'title': 'File',
        'description': 'String Accession of the vcf file used in digestion',
        'type': 'string',
    }
    schema['properties']['bam_snapshot'] = {
        'title': 'Genome Snapshot',
        'description': 'Link to Genome Snapshot Image',
        'type': 'string',
    }
    schema['properties']['genotype_labels'] = {
        'title': 'Genotype Labels',
        'type': 'array',
        'items': {
            'type': 'object',
            'properties': {
                'role': {
                    'title': 'Role',
                    'type': 'string',
                },
                'labels': {
                    'title': 'Genotype Labels',
                    'type': 'array',
                    'items': {
                        'type': 'string'
                    }
                }
            }
        }
    }
    schema['properties']['inheritance_modes'] = {
        'title': 'Inheritance Modes',
        'type': 'array',
        'items': {
            'type': 'string'
        }
    }
    # extend the mapping-table-generated samplegeno sub-embedded object
    schema['properties']['samplegeno']['items']['properties']['samplegeno_role'] = {  # noqa structure is there
        'title': 'Familial Relation',
        'description': 'Relationship of the person who submitted this sample relative to the proband',
        'type': 'string',
        'suggested_enum': ['proband', 'father', 'mother', 'brother', 'sister', 'sibling',
                           'half-brother', 'half-sister', 'half-sibling', 'wife', 'husband',
                           'son', 'daughter', 'child', 'grandson', 'granddaughter', 'grandchild',
                           'grandmother', 'family-in-law', 'extended-family', 'not linked'],
    }
    schema['properties']['samplegeno']['items']['properties']['samplegeno_sex'] = {  # noqa structure is there
        'title': 'Sex',
        'description': 'Sex of the donor of this sample ID',
        'type': 'string',
        'enum': ['M', 'F', 'U'],  # XXX: what others should be included?
    }
    # adds annotation ID field, effectively making display_title a primary key constraint
    self.add_identifier_field(schema['properties'])
    # helper so variant facets work on variant sample
    # XXX: Behavior needs testing
    def format_variant_cols_or_facs(d):
        # prefix every key with 'variant.' to address the embedded variant
        cp = {}
        for k, v in d.items():
            cp['variant.' + k] = v
        return cp
    variant_cols = format_variant_cols_or_facs(variant_cols)
    variant_facs = format_variant_cols_or_facs(variant_facs)
    cols.update(variant_cols)  # add variant stuff since we are embedding this info
    facs.update(variant_facs)
    self.add_extra_variant_sample_columns(cols)
    self.extend_variant_sample_columns(cols)
    self.add_extra_variant_sample_facets(facs)
    self.extend_variant_sample_facets(facs)
    schema['columns'] = cols
    schema['facets'] = facs
    # sort facets/columns alphabetically for stable diffs
    schema['facets'] = self.sort_schema_properties(schema, key='facets')
    schema['columns'] = self.sort_schema_properties(schema, key='columns')
    logger.info('Built variant_sample schema')
    return schema
def generate_variant_schema(self, var_props, cols, facs):
    """ Builds the variant.json schema based on var_props
    Args:
        var_props: first output of generate_properties for variant
        cols: second output of generate_properties for variant
        facs: third output of generate_properties for variant
    Returns:
        Variant schema
    """
    schema = {}
    self.add_default_schema_fields(schema)
    self.add_variant_required_fields(schema)
    schema['title'] = 'Variants'
    schema['description'] = "Schema for variants"
    schema['id'] = '/profiles/variant.json'
    schema['mixinProperties'] = [
        {"$ref": "mixins.json#/schema_version"},
        {"$ref": "mixins.json#/uuid"},
        {"$ref": "mixins.json#/aliases"},
        {"$ref": "mixins.json#/submitted"},
        {"$ref": "mixins.json#/modified"},
        {"$ref": "mixins.json#/status"},
        {"$ref": "mixins.json#/attribution"},
        {"$ref": "mixins.json#/notes"},
        {"$ref": "mixins.json#/interpretation"},
        {"$ref": "mixins.json#/static_embeds"},
    ]
    schema['properties'] = var_props
    # fields below are not on the mapping table, so are added explicitly here
    schema['properties']['hg19'] = {  # required for testing :( - will 1-8-2021
        "title": "hg19 Coordinates",
        "type": "array",
        "items": {
            "title": "hg19 Coordinates",
            "enable_nested": True,
            "type": "object",
            "properties": {
                "hg19_hgvsg": {
                    "title": "Variant",
                    "field_name": "hg19_hgvsg",
                    "type": "string",
                    "description": "HGVS genome sequence name (hg19)",
                },
                "hg19_chrom": {
                    "title": "Chromosome (hg19)",
                    "field_name": "hg19_chrom",
                    "type": "string",
                    "description": "hg19 coordinate chromosome",
                },
                "hg19_pos": {
                    "title": "Position (hg19)",
                    "field_name": "hg19_pos",
                    "type": "integer",
                    "description": "hg19 coordinate position",
                }
            }
        }
    }
    schema['properties']['variant_notes'] = {
        "title": "Variant Notes",
        "description": "Notes related to this Variant",
        "type": "array",
        "items": {
            "title": "Variant Note",
            "type": "string",
            "linkTo": "NoteStandard"
        }
    }
    schema['properties']['schema_version'] = {'default': '2'}
    schema['facets'] = facs
    schema['columns'] = cols
    # sort facets/columns alphabetically for stable diffs
    schema['facets'] = self.sort_schema_properties(schema, key='facets')
    schema['columns'] = self.sort_schema_properties(schema, key='columns')
    # adds annotation ID field, effectively making display_title a primary key constraint
    self.add_identifier_field(schema['properties'])
    # fixed tense ('Build' -> 'Built') to match the variant_sample log line
    logger.info('Built variant schema')
    return schema
@staticmethod
def sort_schema_properties(schema, key='properties'):
    """ Helper method that sorts schema properties by key by building a new
    dictionary from sorted items (in Python3.6>= all dicts are ordered).
    Schemas from this point forward will have their properties sorted
    alphabetically so it is easier to visualize changes.
    Args:
        schema: schema with key 'properties' to be sorted
        key: optional arg to use as key to resolve dictionary to sort, intended to allow us to sort
            properties, columns and facets
    Returns:
        dict containing the entries of schema[key] in ascending key order
    """
    # NOTE: the old loop variable shadowed the `key` parameter;
    # dict(sorted(...)) avoids the shadowing and the manual loop entirely
    return dict(sorted(schema[key].items()))
def write_schema(self, schema, fname):
    """ Writes the given schema (JSON) to the given file 'fname'
    Args:
        schema: dictionary to write as json as the schema
        fname: file to write out to
    """
    # sort properties first so successive runs produce stable diffs
    schema['properties'] = self.sort_schema_properties(schema)
    with io.open(fname, 'w+') as out:
        json.dump(schema, out, indent=4)
    # lazy %-args: formatting is skipped entirely when INFO is disabled
    logger.info('Successfully wrote schema: %s to file: %s\n', schema['title'], fname)
def run(self, vs_out=None, v_out=None, institution=None, project=None, write=True):
    """ Runs the mapping table intake program, generates and writes schemas
    and returns inserts to be posted in main
    Args:
        vs_out: where to write variant_sample schema
        v_out: where to write variant schema
        institution: what institution to attach to these inserts
        project: what project to attach to these inserts
        write: whether to write the schemas - default True
    Returns:
        inserts: annotation field inserts
    Raises:
        MappingTableIntakeException: if write is requested without both output paths
    """
    inserts = self.process_annotation_field_inserts()
    variant_sample_props, _, _ = self.generate_properties(self.filter_fields_by_sample(inserts), variant=False)
    variant_props, _, _ = self.generate_properties(self.filter_fields_by_variant(inserts))
    # as of 3/9/2021, this is now just the 'properties' of the schema
    # columns/facets are edited directly - they are read in here from the
    # output location (read in schema/overwrite when done, don't touch columns/facets)
    new_variant_sample_schema = self.generate_variant_sample_schema(variant_sample_props,
                                                                    cols={}, facs={}, variant_cols={},
                                                                    variant_facs={})
    new_variant_schema = self.generate_variant_schema(variant_props, cols={}, facs={})
    if write:
        if not vs_out or not v_out:
            raise MappingTableIntakeException('Write specified but no output file given')
        # Read/replace columns/facets and update properties
        # NOTE: This will not function correctly if you wipe the schemas!
        # Although this isn't ideal, I'm not convinced it's a good use of time to do
        # the refactoring necessary to pull the column/facet logic out. It's much easier
        # to just ignore that info.
        # context managers close the handles (json.load(io.open(...)) leaked them)
        with io.open(vs_out) as fd:
            variant_sample_schema = json.load(fd)
        new_variant_sample_schema['facets'] = variant_sample_schema['facets']
        new_variant_sample_schema['columns'] = variant_sample_schema['columns']
        self.write_schema(new_variant_sample_schema, vs_out)
        with io.open(v_out) as fd:
            variant_schema = json.load(fd)
        new_variant_schema['facets'] = variant_schema['facets']
        new_variant_schema['columns'] = variant_schema['columns']
        self.write_schema(new_variant_schema, v_out)
        logger.info('Successfully wrote schemas')
    if project or institution:
        # stamp attribution onto every insert before returning
        for insert in inserts:
            if project:
                insert['project'] = project
            if institution:
                insert['institution'] = institution
    return inserts
class StructuralVariantTableParser(VariantTableParser):
    """
    Subclass of VariantTableParser used for intake of SV mapping table.
    Main differences from the parent class are:
        - Explicitly updates methods only related to "properties" field
          of the relevant schema; all other fields in schema will
          be same as in existing schema.
        - Searches schema "properties" objects and embedded objects for
          field indicative of the property coming from the mapping
          table as implied by presence of VCF_FIELD_KEY
        - All "properties" objects that do not come from the mapping
          table are included in the new schema, while those from
          previous mapping table ingestion are dropped and will
          only be re-generated if present in current mapping table.
    """
    SV_SCHEMA_PATH = resolve_file_path("schemas/structural_variant.json")
    SV_SAMPLE_SCHEMA_PATH = resolve_file_path("schemas/structural_variant_sample.json")
    EMBEDDED_VARIANT_FIELDS = resolve_file_path("schemas/structural_variant_embeds.json")
    EMBEDDED_VARIANT_SAMPLE_FIELDS = resolve_file_path(
        "schemas/structural_variant_sample_embeds.json"
    )
    EMBEDS_TO_GENERATE = [
        ("variant", EMBEDDED_VARIANT_FIELDS),
        ("variant_sample", EMBEDDED_VARIANT_SAMPLE_FIELDS),
    ]
    VCF_FIELD_KEY = "vcf_field"

    def __init__(self, *args, **kwargs):
        super(StructuralVariantTableParser, self).__init__(*args, **kwargs)
        # {top-level prop name: "" (keep whole prop) or [sub-embedded fields to keep]}
        self.sv_non_vcf_props = {}
        self.sv_sample_non_vcf_props = {}
        self.get_vcf_props()

    @property
    def old_sv_schema(self):
        """Explicit property for easier mocking."""
        # context manager closes the handle (json.load(io.open(...)) leaked it)
        with io.open(self.SV_SCHEMA_PATH) as fd:
            return json.load(fd)

    @property
    def old_sv_sample_schema(self):
        """Explicit property for easier mocking."""
        with io.open(self.SV_SAMPLE_SCHEMA_PATH) as fd:
            return json.load(fd)

    def get_vcf_props(self):
        """
        Searches through existing SV and SV sample schemas to identify
        existing "properties" objects that did not come from previous
        mapping table ingestion, as indicated by lack of VCF_FIELD_KEY
        on the object.
        Updates self.sv_non_vcf_props and self.sv_sample_non_vcf_props
        dicts with keys as top-level "properties" fields to keep and
        values as list of sub-embedded fields to keep if applicable.
        Expects sub-embedded objects from previous mapping table
        ingestion to be one-layer deep, e.g. an array of objects that
        are not themselves arrays of objects.
        NOTE: This will obviously fail if the VCF_FIELD_KEY is dropped
        from the mapping table.
        """
        # the SV and SV-sample schemas are processed identically
        self._record_non_vcf_props(self.old_sv_schema, self.sv_non_vcf_props)
        self._record_non_vcf_props(self.old_sv_sample_schema, self.sv_sample_non_vcf_props)

    def _record_non_vcf_props(self, schema, acc):
        """
        Helper to self.get_vcf_props: records into ``acc`` the non-vcf
        top-level props ("" value) and, for vcf props, any non-vcf
        sub-embedded fields worth preserving (list value).
        """
        for key, value in schema["properties"].items():
            vcf_field = self._is_vcf_field(key, value)
            if not vcf_field:
                acc[key] = ""
            else:
                sub_embeds_to_keep = self._collect_non_vcf_sub_embeds(key, value)
                if sub_embeds_to_keep:
                    acc[key] = sub_embeds_to_keep

    def _is_vcf_field(self, key, value):
        """
        Helper function to self.get_vcf_props() to identify
        "properties" fields that stem from previous mapping table
        ingestion as indicated by VCF_FIELD_KEY.
        :param key: str field name
        :param value: dict corresponding to key
        :return result: bool if key corresponds to a vcf field
        """
        result = False
        item_type = value.get("type", "")
        vcf_field = value.get(self.VCF_FIELD_KEY, "")
        if not vcf_field:
            if item_type == "array":
                item_dict = value["items"]
                if "properties" in item_dict:  # Array of objects
                    for item_key, item_value in item_dict["properties"].items():
                        result = self._is_vcf_field(item_key, item_value)
                        if result:
                            break
                else:
                    # plain array: inspect the item definition itself
                    result = self._is_vcf_field(key, item_dict)
        else:
            result = True
        return result

    def _collect_non_vcf_sub_embeds(self, key, value):
        """
        Helper function to self.get_vcf_props that collects non-vcf
        fields nested within an object that contains at least one vcf field.
        :param key: str field name
        :param value: dict corresponding to key
        :return result: list of nested non-vcf fields
        """
        result = []
        item_type = value.get("type", "")
        vcf_field = value.get(self.VCF_FIELD_KEY, "")
        if not vcf_field:
            if item_type == "array":
                item_dict = value["items"]
                if "properties" in item_dict:  # Array of objects
                    for item_key, item_value in item_dict["properties"].items():
                        sub_item_type = item_value.get("type", "")
                        sub_item_vcf_field = item_value.get(self.VCF_FIELD_KEY, "")
                        if sub_item_type == "array":
                            # arrays carry the vcf marker on their item definition
                            sub_item_vcf_field = (
                                item_value["items"].get(self.VCF_FIELD_KEY, "")
                            )
                        if not sub_item_vcf_field:
                            result.append(item_key)
        return result

    def provision_embeds(self):
        """
        Does setup necessary for writing embeds to JSON files in the
        schemas directory.
        Called by initializer based on EMBEDS_TO_GENERATE.
        """
        for field, f in self.EMBEDS_TO_GENERATE:
            field = "structural_" + field
            with open(f, 'w+') as fd:
                json.dump({field: {}}, fd, indent=4)

    def update_embeds(self, item, scope):
        """
        Updates the EMBEDDED_FIELDS location JSON containing the embeds
        for structural variant.
        NOTE: the files are overwritten every time you run the process!
        :param item: embedded field to be written
        :param scope: which item type this embed is for
        """
        # XXX: This does NOT work properly if for linkTos, embeds required .keyword!
        for t, f in self.EMBEDS_TO_GENERATE:
            if scope == t:
                t = "structural_" + t
                with open(f, 'rb') as fd:
                    embeds = json.load(fd)
                link_type = 'embedded_field'
                prefix = ''
                if item.get('sub_embedding_group', None):
                    prefix = self.format_sub_embedding_group_name(
                        item.get('sub_embedding_group'), t='key'
                    ) + '.'
                if link_type not in embeds[t]:
                    embeds[t][link_type] = [prefix + item[self.NAME_FIELD]]
                else:
                    embeds[t][link_type].append(prefix + item[self.NAME_FIELD])
                with open(f, 'w+') as wfd:
                    json.dump(embeds, wfd, indent=4)
                    wfd.write('\n')  # write newline at EOF

    @staticmethod
    def generate_schema(var_props, old_schema, props_to_keep):
        """
        Generate new schema by updating the properties of the old schema
        according to the new mapping table, leaving the remainder of the
        schema the same.
        :param var_props: dict of new props from mapping table ingested
        :param old_schema: dict of existing schema
        :param props_to_keep: dict of non-vcf fields and sub-embedded
            non-vcf fields to keep, if applicable
        :return schema: dict of updated schema with "properties" field
            containing all new props and existing non-vcf fields
        """
        schema = {}
        old_schema_props = old_schema["properties"]
        for field in props_to_keep:
            sub_embeds_to_keep = props_to_keep[field]
            if not sub_embeds_to_keep:
                # whole prop is non-vcf - carry it over untouched
                var_props[field] = old_schema_props[field]
            else:
                if field in var_props:
                    try:
                        var_prop_sub_embeds = var_props[field]["items"]["properties"]
                        old_schema_prop_sub_embeds = (
                            old_schema_props[field]["items"]["properties"]
                        )
                        for sub_embed in sub_embeds_to_keep:
                            var_prop_sub_embeds[sub_embed] = (
                                old_schema_prop_sub_embeds[sub_embed]
                            )
                    except KeyError:
                        # Field went from array of objects to other type, so don't
                        # attempt to sub-embed previous non-vcf fields
                        continue
                else:
                    # field dropped from mapping table: keep only its non-vcf sub-fields
                    var_props[field] = old_schema_props[field]
                    tmp = list(old_schema_props[field]["items"]["properties"])
                    for sub_embed in tmp:
                        if sub_embed not in sub_embeds_to_keep:
                            del var_props[field]["items"]["properties"][sub_embed]
        for key in old_schema:
            if key == "properties":
                schema["properties"] = var_props
            else:
                schema[key] = old_schema[key]
        return schema

    def run(self, project=None, institution=None, write=True):
        """
        Runs mapping table intake for SVs, writing new 'properties' fields
        for structural variants and structural variant samples.
        :param project: str project identifier
        :param institution: str institution identifier
        :param write: bool to write new schema
        :return inserts: list of dicts corresponding to props of ingested
            mapping table
        """
        inserts = self.process_annotation_field_inserts()
        sv_props, _, _ = self.generate_properties(
            self.filter_fields_by_variant(inserts)
        )
        sv_sample_props, _, _ = self.generate_properties(
            self.filter_fields_by_sample(inserts), variant=False
        )
        new_sv_schema = self.generate_schema(
            sv_props, self.old_sv_schema, self.sv_non_vcf_props
        )
        new_sv_sample_schema = self.generate_schema(
            sv_sample_props, self.old_sv_sample_schema, self.sv_sample_non_vcf_props
        )
        if write:
            self.write_schema(new_sv_schema, self.SV_SCHEMA_PATH)
            self.write_schema(new_sv_sample_schema, self.SV_SAMPLE_SCHEMA_PATH)
            logger.info("Successfully wrote schemas")
        if project or institution:
            # stamp attribution onto every insert before returning
            for insert in inserts:
                if project:
                    insert['project'] = project
                if institution:
                    insert['institution'] = institution
        return inserts
class GeneTableParser(VariantTableParser):
    """ Subclass of MappingTableParser that overrides methods required for any differences across tables. """

    def __init__(self, *args, **kwargs):
        # override column index of field_type for the gene mapping table layout
        self.FIELD_TYPE_INDEX = 8
        kwargs['skip_embeds'] = True  # do not clear embeds when running gene intake
        super(GeneTableParser, self).__init__(*args, **kwargs)

    @staticmethod
    def add_default_schema_fields(schema):
        """ Adds default schema fields
        Args:
            schema: schema to add fields to
        """
        schema['$schema'] = 'http://json-schema.org/draft-04/schema#'
        schema['type'] = 'object'
        schema['required'] = ['institution', 'project', 'gene_symbol', 'ensgid']
        schema['identifyingProperties'] = ['uuid', 'aliases']
        schema['additionalProperties'] = False
        schema['mixinProperties'] = [
            {"$ref": "mixins.json#/schema_version"},
            {"$ref": "mixins.json#/uuid"},
            {"$ref": "mixins.json#/aliases"},
            {"$ref": "mixins.json#/submitted"},
            {"$ref": "mixins.json#/modified"},
            {"$ref": "mixins.json#/status"},
            {"$ref": "mixins.json#/attribution"},
            {"$ref": "mixins.json#/notes"},
            {"$ref": "mixins.json#/static_embeds"},
            {"$ref": "mixins.json#/interpretation"}
        ]

    def generate_gene_schema(self, gene_props, columns, facets):
        """
        Builds gene.json schema based on gene_props
        :param gene_props: dictionary of 'properties' based on the gene fields
        :param columns: columns to attach
        :param facets: facets to compute
        :return: gene schema
        """
        schema = {}
        self.add_default_schema_fields(schema)
        schema['title'] = 'Genes'
        schema['description'] = "Schema for Genes"
        schema['id'] = '/profiles/gene.json'
        gene_props['ensgid']['uniqueKey'] = True  # XXX: This is required for genes
        schema['properties'] = gene_props
        schema['properties']['schema_version'] = {'default': '1'}
        # gene_notes is not on the mapping table, so added explicitly here
        schema['properties']['gene_notes'] = {
            "title": "Gene Notes",
            "description": "Notes related to this Gene",
            "type": "array",
            "items": {
                "title": "Gene Note",
                "type": "string",
                "linkTo": "NoteStandard"
            }
        }
        schema['facets'] = facets
        schema['columns'] = columns
        # fixed tense ('Build' -> 'Built') to match other schema-generation log lines
        logger.info('Built gene schema')
        return schema

    def run(self, gs_out=None, write=False):  # noqa - args are different then in superclass but we don't care
        """
        Ingests the gene table, producing the gene schema
        :param gs_out: path where to write the gene schema
        :param write: whether or not to actually write the schema (can do dry-run)
        :return: gene_annotation_field inserts
        :raises GeneTableIntakeException: if write is requested without an output path
        """
        inserts = self.process_annotation_field_inserts()
        gene_props, columns, facets = self.generate_properties(inserts)
        gene_schema = self.generate_gene_schema(gene_props, columns, facets)
        if write:
            if not gs_out:
                raise GeneTableIntakeException('Write specified but no output file given')
            self.write_schema(gene_schema, gs_out)
            # lazy %-args: formatting is skipped when INFO logging is disabled
            logger.info('Successfully wrote gene schema to %s', gs_out)
        return inserts
|
python
|
import numpy as np
import torch
import itertools
from torch.autograd import Variable
def getGridMask(frame, dimensions, num_person, neighborhood_size, grid_size, is_occupancy = False):
    '''
    This function computes the binary mask that represents the
    occupancy of each ped in the other's grid.
    params:
    frame : tensor of shape (num_person, >=2); the code reads column 0 as x
            and column 1 as y (the original docstring claimed [pedID, x, y],
            which does not match the indexing below -- TODO confirm)
    dimensions : This will be a list [width, height]
    neighborhood_size : Scalar value representing the size of neighborhood considered
    grid_size : Scalar value representing the size of the grid discretization
    num_person : number of people that exist in the given frame
    is_occupancy: A flag used for calculation of the occupancy map; if True the
            result collapses the "other ped" axis into a single occupancy grid
    returns:
    numpy array of shape (num_person, grid_size**2) if is_occupancy,
    else (num_person, num_person, grid_size**2), with 1s marking occupied cells
    '''
    mnp = num_person
    width, height = dimensions[0], dimensions[1]
    if is_occupancy:
        frame_mask = np.zeros((mnp, grid_size**2))
    else:
        frame_mask = np.zeros((mnp, mnp, grid_size**2))
    frame_np = frame.data.numpy()
    # neighborhood bounds are normalized by the scene size; the factor 2
    # deliberately doubles the original bound (kept from the prior revision)
    width_bound, height_bound = (neighborhood_size/(width*1.0))*2, (neighborhood_size/(height*1.0))*2
    # instead of 2 inner loops, check all 2-permutations, which is 2x faster
    # (a ped is never paired with itself, so the diagonal stays zero)
    list_indices = list(range(0, mnp))
    for real_frame_index, other_real_frame_index in itertools.permutations(list_indices, 2):
        current_x, current_y = frame_np[real_frame_index, 0], frame_np[real_frame_index, 1]
        width_low, width_high = current_x - width_bound/2, current_x + width_bound/2
        height_low, height_high = current_y - height_bound/2, current_y + height_bound/2
        other_x, other_y = frame_np[other_real_frame_index, 0], frame_np[other_real_frame_index, 1]
        if (other_x >= width_high) or (other_x < width_low) or (other_y >= height_high) or (other_y < height_low):
            # Ped not in surrounding, so binary mask should be zero
            continue
        # If in surrounding, calculate the grid cell
        cell_x = int(np.floor(((other_x - width_low)/width_bound) * grid_size))
        cell_y = int(np.floor(((other_y - height_low)/height_bound) * grid_size))
        # guard against boundary rounding pushing the cell out of range
        if cell_x >= grid_size or cell_x < 0 or cell_y >= grid_size or cell_y < 0:
            continue
        if is_occupancy:
            frame_mask[real_frame_index, cell_x + cell_y*grid_size] = 1
        else:
            # Other ped is in the corresponding grid cell of current ped
            frame_mask[real_frame_index, other_real_frame_index, cell_x + cell_y*grid_size] = 1
    return frame_mask
def getSequenceGridMask(sequence, dimensions, pedlist_seq, neighborhood_size, grid_size, using_cuda, is_occupancy=False):
    '''
    Build the per-frame grid masks for a whole sequence.
    params:
    sequence : A numpy matrix of shape SL x MNP x 3
    dimensions : This will be a list [width, height]
    pedlist_seq : per-frame list of pedestrians present in that frame
    neighborhood_size : Scalar value representing the size of neighborhood considered
    grid_size : Scalar value representing the size of the grid discretization
    using_cuda: Boolean value denoting if using GPU or not
    is_occupancy: A flag used for calculation of the occupancy map
    returns: list of float tensors, one mask per frame
    '''
    sequence_mask = []
    for frame_idx in range(len(sequence)):
        num_peds = len(pedlist_seq[frame_idx])
        raw_mask = getGridMask(sequence[frame_idx], dimensions, num_peds,
                               neighborhood_size, grid_size, is_occupancy)
        frame_mask = Variable(torch.from_numpy(raw_mask).float())
        sequence_mask.append(frame_mask.cuda() if using_cuda else frame_mask)
    return sequence_mask
|
python
|
# Copyright 2021 The KaiJIN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
from logging import log
import socket
import json
from .tfevent import TFEventAccumulator
from .logger import logger
class DaemonClient():
  """Client side of the resource-monitor daemon.

  Each daemon sends back one JSON document describing cpu, gpu and
  tensorboard-event status, then closes the connection.
  """

  def __init__(self):
    # log to rs.client.log in the current directory
    logger.init('rs.client.log', './')
    logger.info('start client daemon.')

  def start(self, ip, port, mode='all', verbose=True):
    """Fetch one status report from the daemon at (ip, port).

    Args:
      ip: daemon ipv4 address.
      port: daemon tcp port.
      mode: section to log when verbose ('all', 'cpu' or 'gpu').
      verbose: if True, pretty-print the received data to the log.

    Returns:
      the decoded JSON dict sent by the daemon.
    """
    # context manager guarantees the socket is closed even when connect/recv
    # raises (the previous version leaked the file descriptor on error)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
      s.connect((ip, port))
      # the server sends a single JSON document and closes the connection,
      # so read until recv() returns an empty buffer
      bufs = bytes()
      while True:
        buf = s.recv(1024)
        if not buf:
          break
        bufs += buf
    data = json.loads(bufs.decode())
    # print mode
    if verbose and mode == 'all':
      logger.info(json.dumps(data, indent=2))
    elif verbose and mode == 'cpu':
      logger.info(json.dumps(data['cpu'], indent=2))
    elif verbose and mode == 'gpu':
      logger.info(json.dumps(data['gpu'], indent=2))
    return data

  def start_file(self, file, mode='all'):
    """Poll every daemon listed in `file` and log a combined table summary.

    Args:
      file: path to a text file with one 'ip:port' entry per line.
      mode: which tables to render ('all', 'cpu', 'gpu' or 'event').
    """
    machines = []
    with open(file) as fp:
      for line in fp:
        ip, port = line.replace('\n', '').split(':')
        # verbose=False: individual reports are summarized below instead
        machines.append((ip, int(port), mode, False))
    results = []
    for m in machines:
      try:
        results.append((m, self.start(*m)))
        logger.info(f'Successfully to receive data from {m[0]}:{m[1]}.')
      except Exception as e:  # keep polling the remaining machines
        logger.warn(f'Failed to receive data from {m[0]}:{m[1]} due to {e}')

    def sep(length=200):
      # horizontal rule used between table sections
      return '\n' + '-' * length

    s = sep(200)
    if mode in ['all', 'cpu']:
      s += '\n{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}'.format(
          'ip', 'port', 'cpu_count', 'cpu_current_freq', 'cpu_percent',
          'memory_total(GB)', 'memory_used(GB)', 'memory_free(GB)', 'memory_percent(GB)', 'memory_shared(GB)')
      s += sep(200)
      for res in results:
        m, data = res[0], res[1]
        s += '\n{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}'.format(
            m[0], m[1],
            data['cpu']['cpu_count'],
            data['cpu']['cpu_current_freq'],
            data['cpu']['cpu_percent'],
            data['cpu']['memory_total'],
            data['cpu']['memory_used'],
            data['cpu']['memory_free'],
            data['cpu']['memory_percent'],
            data['cpu']['memory_shared'],
        )
      s += sep(200)
    if mode in ['all', 'gpu']:
      s += sep(200)
      s += '\n{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}'.format(
          'ip', 'port', 'driver_version', 'cuda_version', 'product_name',
          'fan_speed', 'total_memory', 'used_memory', 'memory_percent', 'utilization')
      s += sep(200)
      for res in results:
        m, data = res[0], res[1]
        # one row per gpu on the machine
        for gpu in data['gpu']['gpus']:
          s += '\n{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}{:^20}'.format(
              m[0], m[1],
              data['gpu']['driver_version'],
              data['gpu']['cuda_version'],
              gpu['product_name'].replace('NVIDIA ', ''),
              gpu['fan_speed'],
              gpu['total_memory'],
              gpu['used_memory'],
              # memory values arrive as strings like '1234 MiB'
              '{:.2f} %'.format(float(gpu['used_memory'].split(' ')[0]) *
                                100 / float(gpu['total_memory'].split(' ')[0])),
              gpu['utilization'],
          )
      s += sep(200)
    if mode in ['all', 'event']:
      s += sep(200)
      s += '\n{:^20}{:^20}{:^100}{:^40}{:^20}'.format('ip', 'port', 'expr', 'update', 'epoch')
      s += sep(200)
      for res in results:
        m, data = res[0], res[1]
        for k, v in data['event'].items():
          s += '\n{:^20}{:^20}{:^100}{:^40}{:^20}'.format(
              m[0], m[1],
              k,
              v['modify'],
              v['epoch'],
          )
      s += sep(200)
    logger.info(s)
if __name__ == "__main__":
  # Command-line entry point: query one daemon (--ip/--port) or every daemon
  # listed in a file (--file, one 'ip:port' per line).
  parser = argparse.ArgumentParser()
  parser.add_argument('--ip', type=str, help='local ipv4 addr.')
  parser.add_argument('--port', type=int, help='bind or listen port.')
  parser.add_argument('--mode', type=str, default='all', choices=['cpu', 'gpu', 'event', 'all'])
  parser.add_argument('--file', type=str, default=None)
  # parse_known_args: unrecognized arguments are ignored instead of failing
  args, _ = parser.parse_known_args()
  print(args)
  daemon = DaemonClient()
  if args.file is None:
    daemon.start(ip=args.ip, port=args.port, mode=args.mode)
  else:
    daemon.start_file(file=args.file, mode=args.mode)
|
python
|
import anyio
from anyio_mqtt import AnyIOMQTTClient
import logging
_LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("anyio_mqtt").setLevel(logging.DEBUG)
PAHO_LOGGER = logging.getLogger("paho")
PAHO_LOGGER.setLevel(logging.DEBUG)
async def main() -> None:
_LOG.debug("Creating client")
async with AnyIOMQTTClient() as client:
client.enable_logger(PAHO_LOGGER)
client.username_pw_set("test", "tesffffft")
_LOG.debug("Subscribing to a/b/c")
client.subscribe("a/b/c")
_LOG.debug("Connecting to broker")
client.connect("walternate")
_LOG.debug("Subscribing to d/e/f")
client.subscribe("d/e/f")
_LOG.debug("Publishing message to a/b/c with QoS 0")
client.publish("a/b/c", "hi0", qos=0)
_LOG.debug("Publishing message to a/b/c with QoS 1")
client.publish("a/b/c", "hi1", qos=1)
_LOG.debug("Publishing message to a/b/c with QoS 2")
client.publish("a/b/c", "hi2", qos=2)
i = 0
_LOG.debug("Waiting for messages (1)")
async for msg in client.messages:
print(
f"Message received in test.py (1): {msg.topic} - {msg.payload.decode('utf8')}"
)
i += 1
if i >= 5:
break
_LOG.debug("Publishing message to a/b/c with QoS 0")
client.publish("a/b/c", "2hi0", qos=0)
_LOG.debug("Not listening for messages for 3 seconds")
await anyio.sleep(3)
i = 0
_LOG.debug("Waiting for messages (2)")
async for msg in client.messages:
print(
f"Message received in test.py (2): {msg.topic} - {msg.payload.decode('utf8')}"
)
i += 1
if i >= 5:
_LOG.debug("Calling client.disconnect()")
client.disconnect()
break
_LOG.debug("Publishing message to a/b/c with QoS 0")
client.publish("a/b/c", "3hi0", qos=0)
_LOG.debug("Publishing message to a/b/c with QoS 1")
client.publish("a/b/c", "3hi1", qos=1)
_LOG.debug("Publishing message to a/b/c with QoS 2")
client.publish("a/b/c", "3hi2", qos=2)
_LOG.debug("Waiting 3 seconds")
await anyio.sleep(3)
_LOG.debug("Connecting to broker")
client.connect("localhost")
i = 0
_LOG.debug("Waiting for messages (3)")
async for msg in client.messages:
print(
f"Message received in test.py (3): {msg.topic} - {msg.payload.decode('utf8')}"
)
i += 1
if i >= 5:
print("Breaking out of last msg loop")
break
print("Now leaving async context...")
print("Finished!")
if __name__ == "__main__":
anyio.run(main)
|
python
|
import time
import random
import numpy as np
import torch
from torchtuples import tuplefy, TupleTree
def make_name_hash(name='', file_ending='.pt'):
    """Build a (practically) unique file name.

    The result is '<name>_<Y-M-D>_<H-M-S>_<20 random alphanumerics><file_ending>'.
    Date/time fields are not zero-padded (kept for backward compatibility
    with previously generated file names).

    Arguments:
        name {str} -- prefix for the file name (default: '')
        file_ending {str} -- file extension, including the dot (default: '.pt')

    Returns:
        str -- the generated file name.
    """
    import string  # local import keeps the module's top-level imports unchanged
    year, month, day, hour, minute, second = time.localtime()[:6]
    # same character set as the previous hand-spelled literal, taken from the
    # standard library instead (lowercase + uppercase + digits)
    alphabet = string.ascii_letters + string.digits
    random_hash = ''.join(random.choices(alphabet, k=20))
    path = f"{name}_{year}-{month}-{day}_{hour}-{minute}-{second}_{random_hash}{file_ending}"
    return path
class TimeLogger:
    """Tracks elapsed wall-clock time since a start point and since the most
    recent check, with helpers to format durations as 'Hh:Mm:Ss'."""

    def __init__(self, start=None):
        # if no explicit start time is given, use "now"
        self.start = self.time() if start is None else start
        self.prev = self.start

    @staticmethod
    def time():
        """Current wall-clock time in seconds since the epoch."""
        return time.time()

    def diff(self):
        """Return (seconds since start, seconds since the previous call)."""
        previous = self.prev
        self.prev = self.time()
        return self.prev - self.start, self.prev - previous

    @staticmethod
    def _hms_from_sec(sec):
        """Split a duration in seconds into (hours, minutes, seconds)."""
        minutes, seconds = divmod(sec, 60)
        hours, minutes = divmod(minutes, 60)
        return hours, minutes, seconds

    @staticmethod
    def _hms_str(h, m, s, shorten=True):
        """Format an (h, m, s) triple as e.g. '1h:2m:3s'.

        With `shorten`, zero-valued hour and minute components are dropped.
        """
        pieces = [f"{int(h)}h:", f"{int(m)}m:", f"{int(s)}s"]
        if shorten:
            if h == 0:
                pieces[0] = ''
            if m == 0:
                pieces[1] = ''
        return ''.join(pieces)

    def hms_diff(self, shorten=True):
        """Return ('time since start', 'time since previous call') strings."""
        since_start, since_prev = self.diff()
        return (self._hms_str(*self._hms_from_sec(since_start), shorten),
                self._hms_str(*self._hms_from_sec(since_prev), shorten))
def array_or_tensor(tensor, numpy, input):
    """Returns a tensor if `numpy` is False or `input` is a tensor.
    Else it returns a numpy array, even if `input` is a DataLoader.

    tensor: the data to convert (tensor/array or a tuple of them).
    numpy: True -> force numpy output, False -> force torch output,
           None -> mirror the type of `input`.
    input: reference object whose type decides the output when `numpy` is None.
           (The name shadows the `input` builtin; kept for API compatibility.)
    """
    is_tensor = None
    if numpy is False:
        is_tensor = True
    elif (numpy is True) or is_dl(input):
        # DataLoaders always yield numpy output when numpy is not forced off
        is_tensor = False
    elif not (is_data(input) or is_dl(input)):
        raise ValueError(f"Do not understand type of `input`: {type(input)}")
    elif tuplefy(input).type() is torch.Tensor:
        is_tensor = True
    elif tuplefy(input).type() is np.ndarray:
        is_tensor = False
    else:
        # is_data accepted the input but it is neither all-tensor nor all-array
        raise ValueError("Something wrong")
    if is_tensor:
        tensor = tuplefy(tensor).to_tensor().val_if_single()
    else:
        tensor = tuplefy(tensor).to_numpy().val_if_single()
    return tensor
def is_data(input):
    """Returns True if `input` is data of type tuple, list, TupleTree, np.ndarray, torch.Tensor."""
    # isinstance accepts a tuple of types directly -- no need to build a list
    # and scan it with any()
    return isinstance(input, (np.ndarray, torch.Tensor, tuple, list, TupleTree))
def is_dl(input):
    """Returns True if `input` is a DataLoader (or inherits from DataLoader)."""
    dataloader_type = torch.utils.data.DataLoader
    return isinstance(input, dataloader_type)
|
python
|
# Copyright (c) Andrey Sobolev, 2019. Distributed under MIT license, see LICENSE file.
# Human-readable labels for the supported calculation types.
GEOMETRY_LABEL = 'Geometry optimization'
PHONON_LABEL = 'Phonon frequency'
ELASTIC_LABEL = 'Elastic constants'
PROPERTIES_LABEL = 'One-electron properties'
|
python
|
import django # this verifies local libraries can be packed into the egg
def addition(first, second):
    """Return the sum of the two operands."""
    result = first + second
    return result
|
python
|
import sys
import os
import argparse
import pandas as pd
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
from fr.tagc.rainet.core.util.time.Timer import Timer
from fr.tagc.rainet.core.util.subprocess.SubprocessUtil import SubprocessUtil
from fr.tagc.rainet.core.util.sql.SQLManager import SQLManager
#===============================================================================
# Started 28-Dec-2016
# Diogo Ribeiro
# Based on LncRNAScore.py
DESC_COMMENT = "Script to map attributes from Mukherjee2016 to groups of lncRNAs."  # argparse description
SCRIPT_NAME = "LncRNAGroupAnalysis.py"  # used in start/finish log messages
#===============================================================================
#===============================================================================
# General plan:
# 1) Read file with RNA annotation, gene IDs
# 2) Read Mukherjee2016 file with data for lncRNAs
# 3) Output into R-readable format
#===============================================================================
#===============================================================================
# Processing notes:
# 1) A category including all RNAs in Mukherjee2016 is created while reading its file
#===============================================================================
class LncRNAGroupAnalysis(object):
#=======================================================================
# Constants
#=======================================================================
ANNOTATION_FILE_ID_COLUMN = 0
ANNOTATION_FILE_ANNOTATION_COLUMN = 1
DATA_FILE_ID_COLUMN = 0
# DATA_FILE_ANNOTATION_COLUMN = 9
OUTPUT_FILE = "lncRNA_group_analysis.tsv"
ALL_MRNA_ANNOTATION = "0-All_mRNAs"
ALL_LNCRNA_ANNOTATION = "1-All_lncRNAs"
def __init__(self, annotationFile, dataFile, outputFolder, dataColumns, useMRNA, dataAnnotationColumn):
self.annotationFile = annotationFile
self.dataFile = dataFile
self.outputFolder = outputFolder
try:
self.dataColumns = []
sp = dataColumns.split(",")
for s in sp:
self.dataColumns.append( int( s))
except:
raise RainetException("LncRNAGroupAnalysis.__init__: data column input in wrong format:", dataColumns)
self.useMRNA = useMRNA
self.dataAnnotationColumn = dataAnnotationColumn
# make output folder
if not os.path.exists( self.outputFolder):
os.mkdir( self.outputFolder)
# #
# Read list of annotation per RNA.
def read_annotation_file( self):
#=======================================================================
# Example file
#
# ENSG00000256751 Predicted
# ENSG00000256750 Predicted
# ENSG00000261773 Interacting
# ENSG00000237402 Interacting
#=======================================================================
# The same gene can have several annotations
#=======================================================================
# initialising
#=======================================================================
transcriptAnnotation = {} # Key -> transcript ensemblID, value -> set of annotations
groupTranscripts = {} # Key -> annotation, value -> set of transcripts
lineCounter = 0
#=======================================================================
# read file
#=======================================================================
with open( self.annotationFile, "r") as inFile:
for line in inFile:
line = line.strip()
lineCounter+=1
spl = line.split( "\t")
geneID = spl[ LncRNAGroupAnalysis.ANNOTATION_FILE_ID_COLUMN]
# select column to use as annotation
annotationItem = spl[ LncRNAGroupAnalysis.ANNOTATION_FILE_ANNOTATION_COLUMN]
if not geneID.startswith( "ENS"):
raise RainetException("read_annotation_file: entry is not ENS*:", geneID)
if "." in geneID:
geneID = geneID.split( ".")[0]
# storing tx as key
if geneID not in transcriptAnnotation:
transcriptAnnotation[ geneID] = set()
transcriptAnnotation[ geneID].add( annotationItem)
# storing annotation as key
if annotationItem not in groupTranscripts:
groupTranscripts[ annotationItem] = set()
groupTranscripts[ annotationItem].add( geneID)
print "read_annotation_file: number of entries read:", lineCounter
print "read_annotation_file: number of transcripts with annotation:", len( transcriptAnnotation)
print "read_annotation_file: number of annotations:", len( groupTranscripts)
self.transcriptAnnotation = transcriptAnnotation
self.groupTranscripts = groupTranscripts
for group in sorted(groupTranscripts):
print group, len( groupTranscripts[ group])
# #
# Read Mukherjee 2016 file with data
def read_data_file(self):
#=======================================================================
# Example file
#
# Gene Syn Proc Deg CytNuc PolyCyt TrP Copies Exon Annotation Cluster Host Complex
# ENSG00000005206.12 0.3240500888 -0.0260844809 0.1373502068 -0.5552417614 -0.2815917912 0.6640126412 0.2623901975 MultiExon lncRNA c3 None processed_transcript
# ENSG00000006062.9 0.1118696177 -0.0129556703 0.3003516672 -0.4050632081 0.0920502949 -0.5617828392 -0.0963797176 MultiExon lncRNA c4 None processed_transcript
# ENSG00000031544.10 -1.050910308 -0.254916842 0.9567499553 -0.9364242934 -0.1898011997 -2.9665750821 -1.9313555304 MultiExon lncRNA c7 None processed_transcript
#=======================================================================
# Note the gene ID has a value after the "."
# Some classifications are as floats others as strings.
#=======================================================================
# Output file, a melted file
#
# Gene Group Metric Value
# ENSG00000005206 Predicted Syn 0.3240500888
# ENSG00000005206 Predicted Proc -0.0260844809
# ENSG00000006062 Interacting Syb 0.1118696177
outFile = open( self.outputFolder + "/" + LncRNAGroupAnalysis.OUTPUT_FILE, "w")
# write header
outFile.write("Gene\tGroup\tMetric\tValue\n")
numbersPerGroup = {} # key -> group, value -> count of transcripts
numbersPerGroup[ LncRNAGroupAnalysis.ALL_LNCRNA_ANNOTATION] = 0
if self.useMRNA:
numbersPerGroup[ LncRNAGroupAnalysis.ALL_MRNA_ANNOTATION] = 0
#=======================================================================
# read input file and write output
#=======================================================================
table = pd.read_table( self.dataFile, header = 0, sep = "\t", skip_blank_lines = True)
columnNames = list(table.columns.values)
newTable = table[ :]
for index, gene in newTable.iterrows():
geneID = gene[LncRNAGroupAnalysis.DATA_FILE_ID_COLUMN]
## process geneID
if not geneID.startswith( "ENS"):
raise RainetException("read_data_file: entry is not ENS*:", geneID)
# Note: some entries contain ENSGR*, this is a small modification due to chromosome Y/X, it can safely be changed to ENSG0*
if geneID.startswith( "ENSGR"):
geneID = geneID.replace("ENSGR","ENSG0")
if "." in geneID:
geneID = geneID.split( ".")[0]
# if gene has annotation
if geneID in self.transcriptAnnotation:
# for each of its annotations, write a line
for annotation in self.transcriptAnnotation[ geneID]:
for metric in self.dataColumns:
outFile.write( "%s\t%s\t%s\t%s\n" % (geneID, annotation, columnNames[ metric], gene[ metric]))
if annotation not in numbersPerGroup:
numbersPerGroup[ annotation] = 0
numbersPerGroup[ annotation]+= 1
# if mRNA
if gene[ self.dataAnnotationColumn] == "protein_coding":
if self.useMRNA:
# add to mRNA category
numbersPerGroup[ LncRNAGroupAnalysis.ALL_MRNA_ANNOTATION]+= 1
for metric in self.dataColumns:
outFile.write( "%s\t%s\t%s\t%s\n" % (geneID, LncRNAGroupAnalysis.ALL_MRNA_ANNOTATION, columnNames[ metric], gene[ metric]))
elif gene[ self.dataAnnotationColumn] == "lncRNA":
# add lncRNA to all lncRNA group regardless of its existence in our annotations
numbersPerGroup[ LncRNAGroupAnalysis.ALL_LNCRNA_ANNOTATION]+= 1
for metric in self.dataColumns:
outFile.write( "%s\t%s\t%s\t%s\n" % (geneID, LncRNAGroupAnalysis.ALL_LNCRNA_ANNOTATION, columnNames[ metric], gene[ metric]))
else:
# neither lncRNA nor mRNA
continue
outFile.close()
print "read_data_file: number of lines in input data:", len(newTable)
print "read_data_file: number of lncRNAs per group", numbersPerGroup
if __name__ == "__main__":
    # Command-line entry point (Python 2): parse arguments, run the two
    # processing steps, and time each one with the project Timer.
    try:
        # Start chrono
        Timer.get_instance().start_chrono()
        print "STARTING " + SCRIPT_NAME
        #===============================================================================
        # Get input arguments, initialise class
        #===============================================================================
        parser = argparse.ArgumentParser(description= DESC_COMMENT)
        # positional args
        parser.add_argument('annotationFile', metavar='annotationFile', type=str,
                             help='TSV file with annotation per transcript (gene). No header. Can have several annotations for same transcript, one per line. E.g. transcriptID\tannotation.')
        parser.add_argument('dataFile', metavar='dataFile', type=str,
                             help='File with data per lncRNA from Mukherjee2016. Header is important. Already filtered for lncRNAs.')
        parser.add_argument('outputFolder', metavar='outputFolder', type=str, help='Folder where to write output files.')
        # optional args (useMRNA is an int used as a boolean flag)
        parser.add_argument('--dataColumns', metavar='dataColumns', type=str, default = "1,2,3,4,5,7,10",
                             help='Which 0-based columns in the input data file we want to process. At least the gene ID column needs to be included and as the first in list. Give attribute as comma-separated.')
        parser.add_argument('--useMRNA', metavar='useMRNA', type=int, default = 1,
                             help='Whether to create protein_coding category, if available on file.')
        parser.add_argument('--dataAnnotationColumn', metavar='dataAnnotationColumn', type=int, default = 9,
                             help='Which 0-based column to use as transcript biotype/group annotation.')
        #gets the arguments
        args = parser.parse_args( )
        # init
        run = LncRNAGroupAnalysis( args.annotationFile, args.dataFile, args.outputFolder, args.dataColumns, args.useMRNA, args.dataAnnotationColumn)
        # read annotations file
        Timer.get_instance().step( "Reading annotation file..")
        run.read_annotation_file( )
        # read data file and write output
        Timer.get_instance().step( "Reading data file..")
        run.read_data_file()
        # Stop the chrono
        Timer.get_instance().stop_chrono( "FINISHED " + SCRIPT_NAME )
    # Use RainetException to catch errors
    except RainetException as rainet:
        Logger.get_instance().error( "Error during execution of %s. Aborting :\n" % SCRIPT_NAME + rainet.to_string())
|
python
|
"""
参数及配置
"""
# main.py
small_dataset = False # 选择数据集规模
small_train_path = "../data/small_dataset/train.conll" # 小数据集-训练集
small_dev_path = "../data/small_dataset/dev.conll" # 小数据集-验证集
big_train_path = "../data/big_dataset/train" # 大数据集-训练集
big_dev_path = "../data/big_dataset/dev" # 大数据集-验证集
big_test_path = "../data/big_dataset/test" # 大数据集-测试集
embedding_path = "../data/embedding/giga.100.txt" # 预训练好的词向量文件
result_path = "../result/small_dataset_result.txt" # 保存结果的文件
max_epoch = 50 # 最大的epoch(一次epoch是训练完所有样本一次)
max_no_rise = 10 # epoch连续几轮(验证集最大正确率)没有上升时结束训练
# embedding.py
window = 5 # 上下文窗口大小
embedding_dim = 100 # 每个词向量的维度
# dataloader.py
shuffle = True # 是否打乱数据集
batch_size = 50 # 多少样本更新一次
# bpnn.py
hidden_layer_size = 354 # 隐藏层中的神经元数量, 2/3(input + output)
activation = 'relu' # 隐藏层的激活函数
Lambda = 0.01 # L2正则化项系数λ
learning_rate = 0.5 # 初始学习率
embedding_trainable = True # 是否训练embedding
decay_rate = 0.96 # 学习率衰减速率(防止 loss function 在极小值处不停震荡)
random_seed = 1110 # 随机数种子
|
python
|
from cakechat.utils.data_structures import create_namedtuple_instance
# Special vocabulary tokens: padding, unknown word, and sequence start/end markers.
SPECIAL_TOKENS = create_namedtuple_instance(
    'SPECIAL_TOKENS', PAD_TOKEN=u'_pad_', UNKNOWN_TOKEN=u'_unk_', START_TOKEN=u'_start_', EOS_TOKEN=u'_end_')
# Field names of a dialog entry in the corpus.
DIALOG_TEXT_FIELD = 'text'
DIALOG_CONDITION_FIELD = 'condition'
|
python
|
from models.models import Departamento
|
python
|
# -*- coding: utf-8 -*-
"""
Python implementation of Tanner Helland's color color conversion code.
http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
"""
import math
# Approximate colour temperatures (in kelvin) for common lighting conditions.
COLOR_TEMPERATURES = {
    'candle': 1900,
    'sunrise': 2000,
    'incandescent': 2500,
    'tungsten': 3200,
    'halogen': 3350,
    'sunlight': 5000,
    'overcast': 6000,
    'shade': 7000,
    'blue-sky': 10000,
    'warm-fluorescent': 2700,
    # NOTE(review): 37500 looks like a typo (an order of magnitude above the
    # rest of the table; typical fluorescent is ~3500-4500 K) -- confirm.
    'fluorescent': 37500,
    'cool-fluorescent': 5000,
}
def correct_output(luminosity):
    """
    :param luminosity: Input luminosity
    :return: Luminosity clamped to the 0 <= l <= 255 range and rounded.
    """
    clamped = max(0, min(255, luminosity))
    return round(clamped)
def kelvin_to_rgb(kelvin):
    """
    Convert a color temperature given in kelvin to an approximate RGB value.

    Uses Tanner Helland's fitted curves; each channel is computed in the
    0-255 range and clamped by correct_output().

    :param kelvin: Color temp in K (must be positive)
    :return: Tuple of (r, g, b), equivalent color for the temperature
    :raises ValueError: if kelvin is not a positive number
    """
    if kelvin <= 0:
        # previously this fell through to math.log() and raised an opaque
        # "math domain error"; fail early with a clear message instead
        raise ValueError('kelvin must be a positive temperature, got %r' % (kelvin,))
    temp = kelvin / 100.0
    # Calculate Red: constant below ~6600 K, then a decaying power law.
    if temp <= 66:
        red = 255
    else:
        red = 329.698727446 * ((temp - 60) ** -0.1332047592)
    # Calculate Green: logarithmic rise below ~6600 K, power-law decay above.
    if temp <= 66:
        green = 99.4708025861 * math.log(temp) - 161.1195681661
    else:
        green = 288.1221695283 * ((temp - 60) ** -0.0755148492)
    # Calculate Blue: zero below ~1900 K, saturated above ~6600 K.
    if temp > 66:
        blue = 255
    elif temp <= 19:
        blue = 0
    else:
        blue = 138.5177312231 * math.log(temp - 10) - 305.0447927307
    return tuple(correct_output(c) for c in (red, green, blue))
|
python
|
# The MIT license:
#
# Copyright 2017 Andre Netzeband
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Note: The DeepDriving project on this repository is derived from the DeepDriving project devloped by the princeton
# university (http://deepdriving.cs.princeton.edu/). The above license only applies to the parts of the code, which
# were not a derivative of the original DeepDriving project. For the derived parts, the original license and
# copyright is still valid. Keep this in mind, when using code from this project.
from .Initializer import *
|
python
|
# Generated by Django 2.2.5 on 2019-10-07 17:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.5): switches the `completed_on`
    and `date_recorded` fields of PreliminaryCase to nullable, optional
    DateField, following the datetime-to-date conversion performed in
    0016_datetimes_to_dates."""
    dependencies = [
        ('cases', '0016_datetimes_to_dates'),
    ]
    operations = [
        migrations.AlterField(
            model_name='preliminarycase',
            name='completed_on',
            field=models.DateField(blank=True, null=True, verbose_name='Completed On'),
        ),
        migrations.AlterField(
            model_name='preliminarycase',
            name='date_recorded',
            field=models.DateField(blank=True, null=True, verbose_name='Date Recorded'),
        ),
    ]
|
python
|
from pyvisdk.esxcli.executer import execute_soap
from pyvisdk.esxcli.base import Base
class IscsiNetworkportalIpconfig(Base):
    '''
    Operations that can be performed on iSCSI Network Portal (iSCSI vmknic)'s IP configuration
    '''
    moid = 'ha-cli-handler-iscsi-networkportal-ipconfig'

    def set(self, adapter, ip, subnet, dns1=None, dns2=None, gateway=None, nic=None):
        '''
        Set iSCSI network portal IP configuration
        :param adapter: string, The iSCSI adapter name.
        :param ip: string, The iSCSI network portal IP address
        :param subnet: string, The iSCSI network portal subnet mask
        :param dns1: string, The iSCSI network portal primary DNS address
        :param dns2: string, The iSCSI network portal secondary DNS address
        :param gateway: string, The iSCSI network portal gateway address
        :param nic: string, The iSCSI network portal (vmknic)
        :returns: void
        '''
        # gather the SOAP arguments once, then expand them as keyword args
        soap_args = dict(
            adapter=adapter,
            dns1=dns1,
            dns2=dns2,
            gateway=gateway,
            ip=ip,
            nic=nic,
            subnet=subnet,
        )
        return execute_soap(self._client, self._host, self.moid,
                            'vim.EsxCLI.iscsi.networkportal.ipconfig.Set', **soap_args)

    def get(self, adapter, nic=None):
        '''
        Get iSCSI network portal ip configuration
        :param adapter: string, The iSCSI adapter name.
        :param nic: string, The iSCSI network portal (vmknic)
        :returns: vim.EsxCLI.iscsi.networkportal.ipconfig.get.NetworkPortal[]
        '''
        return execute_soap(
            self._client, self._host, self.moid,
            'vim.EsxCLI.iscsi.networkportal.ipconfig.Get', adapter=adapter, nic=nic)
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.