content
stringlengths 0
894k
| type
stringclasses 2
values |
---|---|
import sys
import json
import twint
import threading
import lzma
import glob
import instaloader
import os
import shutil
from datetime import datetime
from googleapiclient.discovery import build
from facebook_scraper import get_posts
from PySide2.QtQml import QQmlApplicationEngine
from PySide2.QtCore import QObject, Slot, Signal, QTimer, QUrl
from PySide2.QtGui import QGuiApplication, QIcon
class storyThread (threading.Thread):
    """Background worker: downloads today's Instagram stories for a
    dash-separated list of usernames and sorts the downloaded media into
    per-user folders under the desktop "Senfonico Linkler" directory.

    NOTE(review): Windows-only (os.startfile, hard-coded C:/Users path);
    user-facing messages are Turkish and must stay as-is.
    """

    def __init__(self, name, kullaniciAdi, sifre):
        # name: "-"-separated usernames; kullaniciAdi/sifre: IG credentials
        threading.Thread.__init__(self)
        self.name = name
        self.kullaniciAdi = kullaniciAdi
        self.sifre = sifre

    def run(self):
        self.downloadStories(self.name, self.kullaniciAdi, self.sifre)

    def downloadStories(self, allUsers, kullaniciAdi, sifre):
        # L: logged-in session used to download stories.
        # L2: second session used only to resolve username -> profile id.
        L = instaloader.Instaloader()
        L.login(str(kullaniciAdi), str(sifre))
        L2 = instaloader.Instaloader()
        # Dated folder name, e.g. "2021-3-5 (Story)"
        dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
            datetime.now().day) + " (Story)"
        list_username = allUsers.split("-")
        directoryFirst = dateDirectory
        parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
        path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
        try:
            os.makedirs(path_directoryFirst, exist_ok=True)
            path_directoryFirst = os.path.realpath(path_directoryFirst)
            # Open the target folder in Explorer (Windows-only call)
            os.startfile(path_directoryFirst)
            print("Directory '%s' created successfully" % directoryFirst)
        except OSError as error:
            # Turkish: folder already exists; delete it and retry if the error persists
            print(
                "Dosya önceden oluşturulmuş. Lütfen kontrol et. Hata devam ederse Senfonico Linkler içindeki %s dosyasını silip tekrar deneyebilirsin." % directoryFirst)
        for i in list_username:
            profile = L2.check_profile_id(i)
            # instaloader drops downloads into its default ":stories" directory
            L.download_stories(userids=[profile.userid])
            path_stories = ":stories"
            jpg_files = glob.glob(path_stories + "/**/*.jpg", recursive=True)  # NOTE(review): unused
            json_files = glob.glob(path_stories + "/**/*.xz", recursive=True)
            mp4_files = glob.glob(path_stories + "/**/*.mp4", recursive=True)  # NOTE(review): unused
            for jsonAd in json_files:
                index = jsonAd.find("json.xz")  # NOTE(review): unused
                # Derive sibling media paths from the metadata path.
                # Assumes the timestamped basename ends at column 33 of the
                # path string -- TODO confirm; breaks if the layout changes.
                newJson = jsonAd[:33]
                newJpeg = newJson + "jpg"
                newMp4 = newJson + "mp4"
                jpeg = newJpeg.replace("\\", "/")
                mp4 = newMp4.replace("\\", "/")
                # Crude textual scan of the xz-compressed JSON for
                # "username":"..." instead of parsing it as JSON.
                with lzma.open(jsonAd, mode='rt') as file:
                    for line in file:
                        a = line.find("username\":\"")
                        c = -1
                        for b in line[a + 11:]:
                            c = c + 1
                            if b == "\"":
                                # Everything up to the closing quote is the username
                                username = line[a + 11:a + c + 11]
                                directory = username + " " + str(datetime.now().hour) + "." + str(datetime.now().minute)
                                parent_dir = path_directoryFirst
                                path = os.path.join(parent_dir, directory)
                                try:
                                    os.makedirs(path, exist_ok=True)
                                except OSError as error:
                                    print("%s Dosya önceden oluşturulmuş. Lütfen kontrol et" % directory)
                                try:
                                    original = jpeg
                                    target = path
                                    # Prefer the video variant when the story has one
                                    if os.path.isfile(mp4):
                                        shutil.move(mp4, target)
                                    else:
                                        shutil.move(original, target)
                                except shutil.Error as e:
                                    print(e)
                                break
            # Clean up instaloader's working directory for the next user
            try:
                shutil.rmtree(":stories")
            except FileNotFoundError as error:
                # Turkish: no story found for this user
                print(i + " için story bulunamadı")
class instagramThread (threading.Thread):
    """Background worker: collects Instagram post links for each user in a
    dash-separated list, restricted to [dateStart, dateEnd], and writes them
    to per-user .txt files in a dated desktop folder.

    NOTE(review): Windows-only (os.startfile, hard-coded C:/Users path).
    """

    def __init__(self, name, dateStart, dateEnd, kullaniciAdi, sifre):
        # name: "-"-separated usernames; dates are "Y-M-D" strings
        threading.Thread.__init__(self)
        self.dateStart = dateStart
        self.name = name
        self.dateEnd = dateEnd
        self.kullaniciAdi = kullaniciAdi
        self.sifre = sifre

    def run(self):
        startList = self.dateStart.split("-")
        endList = self.dateEnd.split("-")
        self.getInstagramLink(int(startList[0]),
                              int(startList[1]),
                              int(startList[2]),
                              int(endList[0]),
                              int(endList[1]),
                              int(endList[2]),
                              self.name, self.kullaniciAdi, self.sifre)

    def getInstagramLink(self, sYil, sAy, sGun, eYil, eAy, eGun, allUsers, kullaniciAdi, sifre):
        """Write one links file per username; delete it again when no post
        falls inside the requested window."""
        L = instaloader.Instaloader()
        L.login(str(kullaniciAdi), str(sifre))
        start = datetime(sYil, sAy, sGun)
        end = datetime(eYil, eAy, eGun)
        list_username = allUsers.split("-")
        dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
            datetime.now().day) + " (Instagram)"
        directoryFirst = dateDirectory
        parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
        path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
        try:
            os.makedirs(path_directoryFirst, exist_ok=True)
            path_directoryFirst = os.path.realpath(path_directoryFirst)
            # Open the target folder in Explorer (Windows-only call)
            os.startfile(path_directoryFirst)
            print("Directory '%s' created successfully" % directoryFirst)
        except OSError as error:
            # Turkish: folder already exists; delete it and retry if the error persists
            print(
                "Dosya önceden oluşturulmuş. Lütfen kontrol et. Hata devam ederse Senfonico Linkler içindeki %s dosyasını silip tekrar deneyebilirsin." % directoryFirst)
        for username in list_username:
            text = path_directoryFirst + "/" + username + " " + str(datetime.now().hour) + "." + str(
                datetime.now().minute) + ".txt"
            f = open(text, 'w')
            a = 0  # becomes 1 once at least one link is written
            try:
                profile = instaloader.Profile.from_username(L.context, username)
                posts = profile.get_posts()  # newest first
                print(username + " ---")
                for post in posts:
                    if post.date > start and post.date > end:
                        # Newer than the window: keep scanning backwards
                        pass
                    elif post.date >= start and post.date <= end:
                        a = 1
                        link = "https://www.instagram.com/p/" + post.shortcode + "/?utm_source=ig_web_copy_link"
                        f.write(link + "\n")
                        print(link)
                    elif post.date <= start:
                        # Older than the window: nothing further can match
                        break
            finally:
                # BUG FIX: the file used to be closed only when NO links were
                # found, leaking the handle on the success path.
                f.close()
            if a == 0:
                os.remove(text)
class twitterThread (threading.Thread):
    """Background worker: scrapes tweet links (via twint) for each user in a
    dash-separated list between dateStart and dateEnd, and writes them to
    per-user .txt files in a dated desktop folder.

    NOTE(review): Windows-only (os.startfile, hard-coded C:/Users path).
    """

    def __init__(self, name, dateStart, dateEnd):
        # name: "-"-separated usernames; dates are "Y-M-D" strings
        threading.Thread.__init__(self)
        self.dateStart = dateStart
        self.name = name
        self.dateEnd = dateEnd

    def run(self):
        startList = self.dateStart.split("-")
        endList = self.dateEnd.split("-")
        self.getTwitterlink(int(startList[0]),
                            int(startList[1]),
                            int(startList[2]),
                            int(endList[0]),
                            int(endList[1]),
                            int(endList[2]),
                            self.name)

    def getTwitterlink(self, sYil, sAy, sGun, eYil, eAy, eGun, allUsers):
        """Run a twint search per username, then copy the tweet links out of
        the intermediate JSON file into the per-user text file."""
        list_username = allUsers.split("-")
        dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
            datetime.now().day) + " (Twitter)"
        directoryFirst = dateDirectory
        parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
        path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
        try:
            os.makedirs(path_directoryFirst, exist_ok=True)
            path_directoryFirst = os.path.realpath(path_directoryFirst)
            # Open the target folder in Explorer (Windows-only call)
            os.startfile(path_directoryFirst)
            print("Directory '%s' created successfully" % directoryFirst)
        except OSError as error:
            # Turkish: folder already exists; delete it and retry if the error persists
            print(
                "Dosya önceden oluşturulmuş. Lütfen kontrol et. Hata devam ederse Senfonico Linkler içindeki %s dosyasını silip tekrar deneyebilirsin." % directoryFirst)
        for username in list_username:
            c = twint.Config()
            c.Since = str(sYil) + "-" + str(sAy) + "-" + str(sGun)
            c.Until = str(eYil) + "-" + str(eAy) + "-" + str(eGun)
            c.Store_json = True
            #c.User_id = 'randomstring'
            c.Username = username
            c.Output = username + ".json"
            twint.run.Search(c)
            json_name = username + ".json"
            # twint only creates the file when the search found tweets
            if os.path.exists(json_name):
                text = path_directoryFirst + "/" + username + " " + str(datetime.now().hour) + "." + str(
                    datetime.now().minute) + ".txt"
                print("Linkler başarıyla alındı")
                tweets = []
                try:
                    # BUG FIX: both files are now closed deterministically via
                    # `with`; previously the handles leaked on exceptions.
                    with open(json_name, 'r', encoding='UTF-8') as source:
                        for line in source:
                            tweets.append(json.loads(line))
                    print(username)
                    with open(text, 'w') as f:
                        for tweet in tweets:
                            f.write(tweet["link"] + "\n")
                            print(tweet["link"])
                except Exception as e:
                    # Turkish: a read error occurred, please retry later
                    print("Okumada bir hata oluştu. Lütfen sonradan yeniden dene.")
                # Remove the intermediate twint output
                if os.path.exists(json_name):
                    os.remove(json_name)
            else:
                # Turkish: no more links in the given range
                print("Belirtilen aralıkta daha fazla link bulunmamaktadır.")
class facebookThread (threading.Thread):
    """Background worker: collects Facebook post links for each page in a
    dash-separated list, restricted to (dateStart, dateEnd), and writes them
    to per-page .txt files in a dated desktop folder.

    NOTE(review): Windows-only (os.startfile, hard-coded C:/Users path).
    """

    def __init__(self, name, dateStart, dateEnd):
        # name: "-"-separated page names; dates are "Y-M-D" strings
        threading.Thread.__init__(self)
        self.dateStart = dateStart
        self.name = name
        self.dateEnd = dateEnd

    def run(self):
        startList = self.dateStart.split("-")
        endList = self.dateEnd.split("-")
        self.getFacebookLink(int(startList[0]),
                             int(startList[1]),
                             int(startList[2]),
                             int(endList[0]),
                             int(endList[1]),
                             int(endList[2]),
                             self.name)

    def getFacebookLink(self, sYil, sAy, sGun, eYil, eAy, eGun, allUsers):
        """Write one links file per page; delete it again when no post falls
        strictly inside the requested window."""
        start = datetime(sYil, sAy, sGun)
        end = datetime(eYil, eAy, eGun)
        list_username = allUsers.split("-")
        dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
            datetime.now().day) + " (Facebook)"
        directoryFirst = dateDirectory
        parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
        path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
        try:
            os.makedirs(path_directoryFirst, exist_ok=True)
            path_directoryFirst = os.path.realpath(path_directoryFirst)
            # Open the target folder in Explorer (Windows-only call)
            os.startfile(path_directoryFirst)
            print("Directory '%s' created successfully" % directoryFirst)
        except OSError as error:
            # Turkish: folder already exists; delete it and retry if the error persists
            print(
                "Dosya önceden oluşturulmuş. Lütfen kontrol et. Hata devam ederse Senfonico Linkler içindeki %s dosyasını silip tekrar deneyebilirsin." % directoryFirst)
        for i in list_username:
            text = path_directoryFirst + "/" + i + " " + str(datetime.now().hour) + "." + str(datetime.now().minute) + ".txt"
            print(i)
            a = 0  # becomes 1 once at least one link is written
            # BUG FIX: the file used to be closed only on the early-break
            # path; when 50 pages were exhausted without an older post the
            # handle leaked, and os.remove() below could then fail on Windows.
            with open(text, 'w') as f:
                for post in get_posts(i, pages=50):
                    # Strictly inside the window (boundary posts are skipped,
                    # matching the original comparisons)
                    if start < post["time"] and post["time"] < end:
                        a = 1
                        f.write(post['post_url'] + "\n")
                        print(post['post_url'])
                    elif start > post["time"]:
                        # Posts are newest-first: everything after is older
                        break
            if a == 0:
                os.remove(text)
class youtubeThread (threading.Thread):
    """Background worker: collects YouTube video links per channel id via the
    YouTube Data API v3, restricted to [dateStart, dateEnd), and writes them
    to per-channel .txt files in a dated desktop folder.

    NOTE(review): Windows-only (os.startfile, hard-coded C:/Users path).
    """

    def __init__(self, name, dateStart, dateEnd):
        # name: "."-separated channel ids (unlike the other scrapers, which
        # use "-"); dates are "Y-M-D" strings
        threading.Thread.__init__(self)
        self.dateStart = dateStart
        self.name = name
        self.dateEnd = dateEnd

    def run(self):
        startList = self.dateStart.split("-")
        endList = self.dateEnd.split("-")
        self.getYoutubeLink(int(startList[0]),
                            int(startList[1]),
                            int(startList[2]),
                            int(endList[0]),
                            int(endList[1]),
                            int(endList[2]),
                            self.name)

    def getYoutubeLink(self, sYil, sAy, sGun, eYil, eAy, eGun, allUsers):
        """Page through search results per channel and write one watch-link
        per line; empty files are removed afterwards."""
        api_key = "YOUR YOUTUBE API KEY"
        youtube = build('youtube', 'v3', developerKey=api_key)
        dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
            datetime.now().day) + " (Youtube)"
        list_of_channel = allUsers.split(".")
        directoryFirst = dateDirectory
        parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
        path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
        try:
            os.makedirs(path_directoryFirst, exist_ok=True)
            path_directoryFirst = os.path.realpath(path_directoryFirst)
            # Open the target folder in Explorer (Windows-only call)
            os.startfile(path_directoryFirst)
            print("Directory '%s' created successfully" % directoryFirst)
        except OSError as error:
            # Turkish: folder already exists; delete it and retry if the error persists
            print(
                "Dosya önceden oluşturulmuş. Lütfen kontrol et. Hata devam ederse Senfonico Linkler içindeki %s dosyasını silip tekrar deneyebilirsin." % directoryFirst)
        for channelID in list_of_channel:
            text = path_directoryFirst + "/" + channelID + " " + str(datetime.now().hour) + "." + str(datetime.now().minute) + ".txt"
            # BUG FIX: RFC 3339 timestamps require zero-padded fields; the
            # API rejects values like "2021-3-5T00:00:00Z".
            request = youtube.search().list(
                part='id,snippet',
                channelId=channelID,
                type='video',
                order='date',
                fields='nextPageToken,items(id,snippet)',
                maxResults=50,
                publishedAfter=f"{sYil:04d}-{sAy:02d}-{sGun:02d}T00:00:00Z",
                publishedBefore=f"{eYil:04d}-{eAy:02d}-{eGun:02d}T00:00:00Z",
            )
            list_of_links = []
            # list_next() returns None when there are no further pages
            while request:
                response = request.execute()
                for video in response['items']:
                    list_of_links.append(f"https://www.youtube.com/watch?v={video['id']['videoId']}")
                request = youtube.search().list_next(
                    request, response)
            print(channelID)
            # Writing after the API loop ensures the file is closed before
            # getsize()/remove(), and no stale file is left on API failure.
            with open(text, 'w') as f:
                for link in list_of_links:
                    f.write(link + "\n")
                    print(link)
            if os.path.getsize(text) == 0:
                os.remove(text)
class MainWindow(QObject):
    """QML backend object: each slot spawns one scraper thread so the UI
    thread stays responsive.

    Threads are fire-and-forget: references are not kept and they are never
    joined.
    """

    def __init__(self):
        QObject.__init__(self)

    @Slot(str, str, str, str, str)
    def toGetInstagramLink(self, name, dateStart, dateEnd, kullaniciAdi, sifre):
        # name: "-"-separated usernames; dates "Y-M-D"; IG credentials
        threadInstagram = instagramThread(name, dateStart, dateEnd, kullaniciAdi, sifre)
        threadInstagram.start()

    @Slot(str, str, str)
    def toGetTwitterLink(self, name, dateStart, dateEnd):
        threadTwitter = twitterThread(name, dateStart, dateEnd)
        threadTwitter.start()

    @Slot(str, str, str)
    def toGetFacebookLink(self, name, dateStart, dateEnd):
        threadFacebook = facebookThread(name, dateStart, dateEnd)
        threadFacebook.start()

    @Slot(str, str, str)
    def toGetYoutubeLink(self, name, dateStart, dateEnd):
        # name here is a "."-separated list of channel ids
        threadYoutube = youtubeThread(name, dateStart, dateEnd)
        threadYoutube.start()

    @Slot(str, str, str)
    def toGetDownloadStory(self, name, kullaniciAdi, sifre):
        threadStory = storyThread(name, kullaniciAdi, sifre)
        threadStory.start()
if __name__ == "__main__":
    # Qt application bootstrap: build the QML engine, expose the backend,
    # and enter the event loop.
    app = QGuiApplication(sys.argv)
    engine = QQmlApplicationEngine()
    # Get Context
    main = MainWindow()
    # QML reaches the Python slots through the "backend" context property
    engine.rootContext().setContextProperty("backend", main)
    # Set App Extra Info
    app.setOrganizationName("senfoni.co")
    app.setOrganizationDomain("N/A")
    # Set Icon
    app.setWindowIcon(QIcon("senfonico_logo_siyah.ico"))
    # Load Initial Window
    engine.load(os.path.join(os.path.dirname(__file__), "main.qml"))
    if not engine.rootObjects():
        sys.exit(-1)  # QML failed to load
    sys.exit(app.exec_())
|
python
|
from _Framework.ButtonSliderElement import ButtonSliderElement
# Display/interaction modes of the button strip; chosen per mapped parameter
# (see DeviceControllerStrip._mode below).
SLIDER_MODE_OFF = 0                # no parameter mapped
SLIDER_MODE_TOGGLE = 1             # quantized, range of 1 (on/off)
SLIDER_MODE_SLIDER = 2             # continuous: one button per level
SLIDER_MODE_PRECISION_SLIDER = 3   # continuous: +/- increment buttons
SLIDER_MODE_SMALL_ENUM = 4         # quantized, fits on the strip
SLIDER_MODE_BIG_ENUM = 5           # quantized, larger than the strip
#TODO: repeat buttons.
# not exact / rounding values in slider and precision slider
class DeviceControllerStrip(ButtonSliderElement):
    """A strip of buttons controlling a single mapped device parameter.

    The interaction mode is derived from the mapped parameter (quantized or
    continuous, range size, precision flag) -- see the SLIDER_MODE_*
    constants. `_parameter_to_map_to` and `_buttons` come from the
    ButtonSliderElement base class (Ableton _Framework).

    NOTE(review): this is Python-2-era Live-script code; see the str/int
    comparison note in _update_buttons.
    """

    def __init__(self, buttons, control_surface, parent = None):
        # buttons: button elements of the strip; parent (optional) receives
        # _update_OSD() callbacks on value changes.
        ButtonSliderElement.__init__(self, buttons)
        self._control_surface = control_surface
        self._parent = parent
        self._num_buttons = len(buttons)
        # Evenly spaced fractions 0..1, one per button (slider positions)
        self._value_map = tuple([float(index) / (self._num_buttons-1) for index in range(self._num_buttons)])
        self._precision_mode = False
        self._enabled = True

    def set_enabled(self, enabled):
        self._enabled = enabled

    def set_precision_mode(self, precision_mode):
        # Switches continuous parameters between slider and precision mode
        self._precision_mode = precision_mode
        self.update()

    @property
    def _value(self):
        # Current value of the mapped parameter, 0 when unmapped
        if self._parameter_to_map_to != None:
            return self._parameter_to_map_to.value
        else:
            return 0

    @property
    def _max(self):
        if self._parameter_to_map_to != None:
            return self._parameter_to_map_to.max
        else:
            return 0

    @property
    def _min(self):
        if self._parameter_to_map_to != None:
            return self._parameter_to_map_to.min
        else:
            return 0

    @property
    def _range(self):
        # max - min of the mapped parameter, 0 when unmapped
        if self._parameter_to_map_to != None:
            return self._parameter_to_map_to.max - self._parameter_to_map_to.min
        else:
            return 0

    @property
    def _default_value(self):
        if self._parameter_to_map_to != None:
            return self._parameter_to_map_to._default_value
        else:
            return 0

    @property
    def _is_quantized(self):
        if self._parameter_to_map_to != None:
            return self._parameter_to_map_to.is_quantized
        else:
            return False

    @property
    def _mode(self):
        # Derive the strip mode from the mapped parameter's properties
        if self._parameter_to_map_to != None:
            if self._is_quantized:
                if self._range == 1:
                    return SLIDER_MODE_TOGGLE
                elif self._range <= self._num_buttons:
                    return SLIDER_MODE_SMALL_ENUM
                else:
                    return SLIDER_MODE_BIG_ENUM
            else:
                if self._precision_mode:
                    return SLIDER_MODE_PRECISION_SLIDER
                else:
                    return SLIDER_MODE_SLIDER
        else:
            return SLIDER_MODE_OFF

    def update(self):
        # Refresh LED feedback according to the current mode
        if self._enabled:
            if self._mode == SLIDER_MODE_TOGGLE:
                self._update_toggle()
            elif self._mode == SLIDER_MODE_SMALL_ENUM:
                self._update_small_enum()
            elif self._mode == SLIDER_MODE_BIG_ENUM:
                self._update_big_enum()
            elif (self._mode == SLIDER_MODE_SLIDER):
                self._update_slider()
            elif (self._mode == SLIDER_MODE_PRECISION_SLIDER):
                self._update_precision_slider()
            else:
                self._update_off()

    def reset(self):
        self._update_off()

    def reset_if_no_parameter(self):
        self.reset() if False else None  # placeholder -- see below
|
python
|
from selenium.webdriver.common.by import By
class MainPageLocators():
    """Selenium locators for the search-engine main page."""
    SEARCH_STRING = (By.ID, 'text')                # main search input
    SUGGEST = (By.CSS_SELECTOR, '[role=listbox]')  # autocomplete dropdown
    IMAGES = (By.CSS_SELECTOR, '[data-statlog="services_new.item.images.2"]')  # "Images" service link
class ResultPageLocators():
    """Selenium locators for the organic search results page."""
    FIRST_RESULT = (By.CSS_SELECTOR, '[data-cid="0"] .organic__path b')  # first result's breadcrumb
    NEXT_RESULTS = (By.CSS_SELECTOR, '.Organic-Path b')                  # breadcrumbs of the remaining results
class ImagePageLocators():
    """Selenium locators for the image-search page and its viewer."""
    CATEGORY = (By.CSS_SELECTOR, '.PopularRequestList-Item_pos_0')     # first popular-request tile
    CATEGORY_NAME = (By.CSS_SELECTOR, '.PopularRequestList-SearchText')
    TEXT_IN_SEARCH_STRING = (By.CSS_SELECTOR, '.input__box input')
    FIRST_IMAGE = (By.CSS_SELECTOR, '.serp-item__preview')             # first thumbnail in the grid
    IMAGE_DATA = (By.CSS_SELECTOR, '.MMImage-Origin')                  # full-size image in the viewer
    NEXT_IMAGE_BUTTON = (By.CSS_SELECTOR, '.CircleButton_type_next')
    PREVIOUS_IMAGE_BUTTON = (By.CSS_SELECTOR, '.CircleButton_type_prev')
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
def env_or_default(name, d):
    """Return the environment variable *name* coerced to the type of *d*.

    Falls back to *d* when the variable is unset, empty, or cannot be
    coerced (e.g. int("abc")).
    """
    from os import environ
    raw = environ.get(name)
    if not raw:
        # unset or empty string
        return d
    try:
        return type(d)(raw)
    except (TypeError, ValueError):
        # was a bare `except Exception`; narrowed to coercion failures
        return d
def print_stats(steps, t_diff, dl):
    """Print a one-line progress summary: timestamp, step count, elapsed
    time, and the structure's vertex/edge counts (dl.get_vnum()/get_enum())."""
    from time import strftime
    stamp = strftime('%d/%m/%Y %H:%M:%S')
    summary = '{:s} | stp: {:d} time: {:.5f} v: {:d} e: {:d}'.format(
        stamp,
        steps,
        t_diff,
        dl.get_vnum(),
        dl.get_enum()
    )
    print(summary)
    return
|
python
|
import glob, os, numpy
from music21 import converter, instrument, note, chord
from itertools import chain
import json
import pickle
# Corpus locations: plain-text note files in, tokenized copies out
PATH = '../Bach-Two_Part_Inventions_MIDI_Transposed/txt'
OUTPUT_PATH = '../Bach-Two_Part_Inventions_MIDI_Transposed/txt_tokenized'
CHUNK_SIZE = 4 # MEASURES -- NOTE(review): unused in this file
def write_pickle(filename, obj):
    """Serialize *obj* to *filename* using the highest pickle protocol."""
    out = open(filename, 'wb')
    try:
        pickle.dump(obj, out, protocol=pickle.HIGHEST_PROTOCOL)
    finally:
        out.close()
def read_pickle(filename):
    """Load and return the pickled object stored at *filename*.

    BUG FIX: the file is now opened in binary mode -- pickle data is bytes,
    and text mode fails under Python 3 (UnicodeDecodeError / TypeError).
    """
    with open(filename, 'rb') as f:
        return pickle.load(f)
def generate_duration_tokens(directory):
    """Scan every .txt file in *directory* and collect the duration symbols.

    Lines alternate "pitch duration pitch duration ..."; only the tokens at
    odd positions (durations) are collected. Returns the pair
    (symbol_to_index, index_to_symbol) built over the sorted symbol set.
    """
    seen = set()
    for path in glob.glob(os.path.join(directory, '*.txt')):
        #print(path)
        with open(path) as handle:
            for row in handle:
                pieces = row.strip().split(' ')
                # even positions are MIDI/REST tokens -- skip them
                seen.update(tok for pos, tok in enumerate(pieces) if pos % 2 == 1)
    ordered = sorted(seen)
    forward = {sym: pos for pos, sym in enumerate(ordered)}
    backward = dict(enumerate(ordered))
    return forward, backward
def simplify_text(directory, output_directory, symbol_to_index):
    """Rewrite each .txt file from *directory* into *output_directory*,
    replacing every odd-position (duration) token with its index from
    *symbol_to_index*; even-position (MIDI/REST) tokens are kept verbatim.

    BUG FIX: previously the function ignored its *directory* and
    *output_directory* parameters and read/wrote the module-level
    PATH/OUTPUT_PATH constants instead.
    """
    for file in glob.glob(os.path.join(directory, '*.txt')):
        #print(file)
        output_filename = os.path.join(output_directory, os.path.basename(file))
        with open(file) as f:
            with open(output_filename, 'w') as outfile:
                for line in f:
                    line_out = []
                    tokens = line.strip().split(' ')
                    for i, token in enumerate(tokens):
                        # Don't tokenize MIDI or REST tokens
                        if i % 2 == 0:
                            line_out.append(token)
                        else:
                            line_out.append(str(symbol_to_index[token]))
                    outfile.write(' '.join(line_out) + '\n')
if __name__ == '__main__':
    # Build the duration-token vocabulary from the corpus, rewrite every
    # file with token indices, and persist the mapping alongside the output.
    symbol_to_index, index_to_symbol = generate_duration_tokens(PATH)
    print (symbol_to_index)
    simplify_text(PATH, OUTPUT_PATH, symbol_to_index)
    write_pickle(os.path.join(OUTPUT_PATH, 'symbol_to_index.pkl'), symbol_to_index)
    print('DONE!')
|
python
|
import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="Learn To Code Pakistan will help you master the art of programming for free!">
<meta name="author" content="">
<title>Learn To Code Pakistan</title>
<!-- Bootstrap core CSS -->
<link href="vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom fonts for this template -->
<link href="vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,800italic,400,300,600,700,800' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Merriweather:400,300,300italic,400italic,700,700italic,900,900italic' rel='stylesheet' type='text/css'>
<!-- Plugin CSS -->
<link href="vendor/magnific-popup/magnific-popup.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="css/creative.min.css" rel="stylesheet">
</head>
<body id="page-top">
<!-- Navigation -->
<nav class="navbar navbar-expand-lg navbar-light fixed-top" id="mainNav">
<div class="container">
<a class="navbar-brand js-scroll-trigger" href="#page-top">Learn To Code Pakistan</a>
<button class="navbar-toggler navbar-toggler-right" type="button" data-toggle="collapse" data-target="#navbarResponsive" aria-controls="navbarResponsive" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarResponsive">
<ul class="navbar-nav ml-auto">
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#about">About</a>
</li>
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#services">What We Offer?</a>
</li>
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#contact">Instructor?</a>
</li>
</ul>
</div>
</div>
</nav>
<header class="masthead text-center text-white d-flex">
<div class="container my-auto">
<div class="row">
<div class="col-lg-10 mx-auto">
<h1 class="text-uppercase">
<strong>Your One Stop For Programming Tutorials</strong>
</h1>
<hr>
</div>
<div class="col-lg-8 mx-auto">
<a class="btn btn-primary btn-xl js-scroll-trigger" href="#about">What's inside?</a>
</div>
</div>
</div>
</header>
<section class="bg-primary" id="about">
<div class="container">
<div class="row">
<div class="col-lg-8 mx-auto text-center">
<h2 class="section-heading text-white">We've got what you need!</h2>
<hr class="light my-4">
<p class="text-faded mb-4">From Python Tutorials which will help become the master of Data Science to C++ and Unity for object oriented programming to make advanced games, our website is the has all! </p>
<a class="btn btn-light btn-xl js-scroll-trigger" href="#services">Take a look!</a>
</div>
</div>
</div>
</section>
<section id="services">
<div class="container">
<div class="row">
<div class="col-lg-12 text-center">
<h2 class="section-heading">What We Offer?</h2>
<hr class="my-4">
</div>
</div>
</div>
<div class="container">
<div class="row">
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fa fa-4x fa-diamond text-primary mb-3 sr-icons"></i>
<h3 class="mb-3">No Ads!</h3>
<p class="text-muted mb-0">We use cryptocurrency mining to keep our website funded, no pesky ads allowed!</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fa fa-4x fa-video-camera text-primary mb-3 sr-icons"></i>
<h3 class="mb-3">Video Tutorials</h3>
<p class="text-muted mb-0">Our instructors are highly qualified individuals which will explain each and everything with examples and much more for a fun learning experience!</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fa fa-4x fa-user text-primary mb-3 sr-icons"></i>
<h3 class="mb-3">Are You An Instructor?</h3>
<p class="text-muted mb-0">Use the contact options below and we will get your tutorials up on our website ASAP</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fa fa-4x fa-file-text text-primary mb-3 sr-icons"></i>
<h3 class="mb-3">Quizzes</h3>
<p class="text-muted mb-0">No learning experience is complete without the final evaluation, our quizzes are designed to make you ponder and understand the fundamentals of the course you are taking.</p>
</div>
</div>
</div>
</div>
</section>
<section class="bg-dark text-white">
<div class="container text-center">
<h2 class="mb-4">Ready to Learn?</h2>
<a class="btn btn-light btn-xl sr-button" href="file:///D:/Downloads/update 11-1-2018/proj/startbootstrap-creative-gh-pages/learntocode.html">Take Your First Course!</a>
</div>
</section>
<section id="contact">
<div class="container">
<div class="row">
<div class="col-lg-8 mx-auto text-center">
<h2 class="section-heading">Instructor?</h2>
<hr class="my-4">
<p class="mb-5">Ready to start your next project with us? That's great! Give us a call or send us an email and we will get back to you as soon as possible!</p>
</div>
</div>
<div class="row">
<div class="col-lg-4 ml-auto text-center">
<i class="fa fa-phone fa-3x mb-3 sr-contact"></i>
<p>03365545494</p>
</div>
<div class="col-lg-4 mr-auto text-center">
<i class="fa fa-envelope-o fa-3x mb-3 sr-contact"></i>
<p>
<a href="mailto:[email protected]">[email protected]</a>
</p>
</div>
</div>
</div>
</section>
<!-- Bootstrap core JavaScript -->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Plugin JavaScript -->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<script src="vendor/scrollreveal/scrollreveal.min.js"></script>
<script src="vendor/magnific-popup/jquery.magnific-popup.min.js"></script>
<!-- Custom scripts for this template -->
<script src="js/creative.min.js"></script>
</body>
</html>
'''
def homepage():
    """Render the landing-page HTML to homepage.html and open it in the
    default browser (new tab when possible)."""
    # BUG FIX: `with` guarantees the handle is closed (and the content
    # flushed to disk) before the browser tries to read the file.
    with open('homepage.html', 'w') as output_file:
        output_file.write(main_page_head)
    # open the output file in the browser (in a new tab, if possible)
    url = os.path.abspath(output_file.name)
    webbrowser.open('file://' + url, new=2)

homepage()
|
python
|
# demo - opencv
import cv2
# Load the image in color: flag 1 == cv2.IMREAD_COLOR (BGR).
# NOTE(review): the original comment claimed grayscale, but that is flag 0.
img = cv2.imread ('lotus.jpg', 1 )
cv2.imshow('lotus', img)
cv2.waitKey(0)  # block until any key is pressed
# cv2.waitKey(2000)
cv2.destroyAllWindows()
|
python
|
'''
import data here and have utility functions that could help
'''
import pandas as pd
import pickle
from thefuzz import fuzz, process
# MovieLens small dataset, used by the lookup helpers below
ratings = pd.read_csv('./data/ml-latest-small/ratings.csv')
movies = pd.read_csv('./data/ml-latest-small/movies.csv')
# import nmf model
# NOTE(review): unpickling executes arbitrary code -- only load trusted files
with open('./nmf_recommender.pkl', 'rb') as file:
    nmf_model = pickle.load(file)
# import neighborhood model
with open('./neighbors_recommender.pkl', 'rb') as file:
    neighbor_model = pickle.load(file)
def movie_title_search(fuzzy_titles):
    '''
    Fuzzy-match each requested title against the movies catalogue and
    return the list of best matches, one per input title.
    '''
    extracted_titles = []
    choices = movies['title'].tolist()
    for fuzzy_title in fuzzy_titles:
        # process.extract returns [(title, score)]; keep only the title
        best_match = process.extract(fuzzy_title, choices, limit=1)[0][0]
        extracted_titles.append(best_match)
    # BUG FIX: previously returned only the LAST match (extracted_title)
    # instead of the accumulated list, silently dropping all but one result.
    return extracted_titles
def movie_to_id(extracted_titles):
    '''
    converts movie title to id for use in algorithms
    '''
    # .loc on a title-indexed frame; raises KeyError for unknown titles
    movieId = movies.set_index('title').loc[extracted_titles].movieId
    movieId = movieId.tolist()
    return movieId
def id_to_movie_title(movieId):
    '''
    converts movie Id to title
    '''
    # movieId may be a single id or a list of ids; .loc handles both
    title_genre_df = movies.set_index('movieId').loc[movieId]
    recommendations_title = title_genre_df.title
    return recommendations_title
|
python
|
from bs4 import BeautifulSoup
import requests
import pprint
# Identify as a desktop browser; some sites reject the default requests UA
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'}
base_url = 'https://news.ycombinator.com/news'
# NOTE(review): the network request happens at import time
response = requests.get(base_url, headers=headers)
soup = BeautifulSoup(response.text, 'lxml')
# keep a local snapshot of the fetched page for debugging
with open('hacker_news.html', 'w') as file:
    file.write(soup.prettify())
links = soup.select('.storylink')    # story title anchors
sub_text = soup.select('.subtext')   # metadata rows (score, author, age)
def sort_by_votes(hnlist):
    """Return *hnlist* ordered by the 'votes' key, highest first."""
    def votes_of(entry):
        return entry['votes']
    return sorted(hnlist, key=votes_of, reverse=True)
def create_custom_hacker_news(links, sub_text):
    """Pair each story link with its metadata row, keep stories above 150
    points, and return them sorted by votes (highest first)."""
    hn = []
    for index, item in enumerate(links):
        vote = sub_text[index].select('.score')
        # some rows (e.g. job postings) have no score element at all
        if len(vote):
            # score text looks like "312 points"
            # NOTE(review): str.strip removes a character SET, not a suffix;
            # this works only because digits are not in " points"
            points = int(vote[0].getText().strip(' points'))
            if points > 150:
                title = links[index].getText()
                href = links[index].get('href', None)
                hn.append({'title':title,'href':href,'votes':points})
    return sort_by_votes(hn)
if __name__ == "__main__":
    # Build and pretty-print the filtered, vote-sorted front page
    custom_hn_lists = create_custom_hacker_news(links, sub_text)
    pprint.pprint(custom_hn_lists)
|
python
|
#!/usr/bin/env python3
#
# __main__.py
"""
CLI entry point.
"""
#
# Copyright (c) 2020 Dominic Davis-Foster <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# stdlib
import sys
# 3rd party
import click
from consolekit import CONTEXT_SETTINGS
# this package
from flake8_prettycount.application import Application
__all__ = ["main"]
@click.command(context_settings={"ignore_unknown_options": True, "allow_extra_args": True, **CONTEXT_SETTINGS})
@click.pass_context
def main(ctx: click.Context):
    """
    Wrapper around flake8 providing a formatted count at the end.
    All options and arguments are passed through to flake8.
    """
    # Hand every unparsed CLI argument straight through to flake8.
    flake8_app = Application()
    flake8_app.run(list(ctx.args))
    flake8_app.exit()
if __name__ == "__main__":
    sys.exit(main(obj={}))
|
python
|
from typing import Any, Union
from bot.event import Event, EventType
from pydantic.utils import deep_update
def event(
    event_data: Union[str, dict[str, Any]],
    event_type: EventType = EventType.NEW_MESSAGE,
) -> Event:
    """Build an Event for tests.

    A plain string becomes the message text; a dict is deep-merged into
    the default payload below.
    """
    payload: dict[str, Any] = {
        "chat": {"chatId": "test"},
        "from": "[email protected]",
        "msgId": 999,
    }
    if isinstance(event_data, str):
        # for simple cases: the string is just the message text
        payload["text"] = event_data
    else:
        payload = deep_update(payload, event_data)
    return Event(event_type, payload)
def part(type: str, **payload: Any) -> dict[str, Any]:
    """Wrap keyword arguments into a message part of the given *type*."""
    wrapped: dict[str, Any] = {"type": type, "payload": dict(payload)}
    return wrapped
def mention(user_id: int) -> dict[str, Any]:
    """Build a 'mention' message part pointing at *user_id*."""
    payload = {"userId": user_id}
    return part("mention", **payload)
def odesli_response() -> dict[str, Any]:
    """Canned Odesli (song.link) API response used as a test fixture.

    Kept verbatim so tests exercise the exact payload shape returned by
    the real API (entity ids, per-platform link records).
    """
    return {
        "entityUniqueId": "ITUNES_SONG::1443109064",
        "userCountry": "US",
        "pageUrl": "https://song.link/us/i/1443109064",
        "entitiesByUniqueId": {
            "ITUNES_SONG::1443109064": {
                "id": "1443109064",
                "type": "song",
                "title": "Kitchen",
                "artistName": "Kid Cudi",
                "thumbnailUrl": "https://is4-ssl.mzstatic.com/image/thumb/Music118/v4/ac/2c/60/ac2c60ad-14c3-a8b2-d962-dc08de2da546/source/512x512bb.jpg",
                "thumbnailWidth": 512,
                "thumbnailHeight": 512,
                "apiProvider": "itunes",
                "platforms": ["appleMusic", "itunes"],
            },
        },
        "linksByPlatform": {
            "appleMusic": {
                "url": "https://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
                "nativeAppUriMobile": "music://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
                "nativeAppUriDesktop": "itms://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
                "entityUniqueId": "ITUNES_SONG::1443109064",
            },
            "spotify": {
                "url": "https://open.spotify.com/track/0Jcij1eWd5bDMU5iPbxe2i",
                "nativeAppUriDesktop": "spotify:track:0Jcij1eWd5bDMU5iPbxe2i",
                "entityUniqueId": "SPOTIFY_SONG::0Jcij1eWd5bDMU5iPbxe2i",
            },
            "youtube": {
                "url": "https://www.youtube.com/watch?v=w3LJ2bDvDJs",
                "entityUniqueId": "YOUTUBE_VIDEO::w3LJ2bDvDJs",
            },
        },
    }
|
python
|
import cv2
from matplotlib import pyplot as plt
# NOTE(review): cv2.imread returns None (it does not raise) when the file
# cannot be read, and 'open' has no image extension -- confirm the intended
# path and add a None check before using `img`.
img = cv2.imread('open')
|
python
|
from pathlib import Path
from flask import current_app
from flask_marshmallow import Marshmallow
from PIL import Image
from flask_app.commons.util import pil_to_base64
from flask_app.database.database import Version, Card
ma = Marshmallow()
class VersionSchema(ma.SQLAlchemyAutoSchema):
    """Auto-generated marshmallow schema for the Version model."""
    class Meta:
        model = Version
class CardSchema(ma.SQLAlchemyAutoSchema):
    """Schema for the Card model; serializes the image itself (base64)
    instead of exposing the raw img_path."""
    class Meta:
        model = Card
        # img_path is only used internally to locate the file on disk
        exclude = ('img_path',)
    img = ma.Method("serialize_img")
    def serialize_img(self, obj: Card):
        """Load the card image from the app's ASSETS_PATH and return it
        base64-encoded (via pil_to_base64)."""
        img_path = (Path(current_app.config["ASSETS_PATH"])
                    / obj.img_path).resolve()
        img = Image.open(img_path)
        return pil_to_base64(img)
version_schema = VersionSchema()
card_list_schema = CardSchema(many=True)
|
python
|
# coding=utf-8
#!/bin/python3
# 这个插件用来去掉Equation的xor系列混淆
# 仅仅支持python 3,只在ida pro 7.7下测试过
# 将本文件放到IDA安装目录的plugins目录下,然后启动ida,在ida View中把光标放在解码函数开始,就可以在 Edit->Plugings->Xor Batch Deobfuscation
# 也可以使用快捷键Ctrl+Shift+D进行反混淆
import sys
try:
import idaapi
import idc
import idautils
import flare_emu
# import hexdump
except ImportError:
print("[FlareDeobfacatePlugin] Dependencies missing, Please check ida python and flare_emu is installed")
sys.exit()
VERSION = "0.1.0"
def deobfuscate_function():
    """Emulate every call site of the decode function under the cursor and
    annotate each call with the decrypted string.

    Assumes the cursor (idc.get_screen_ea()) is positioned at the start of
    the string-decoding function. For each xref, emulation starts at the
    `mov edx/rdx, ...` argument-setup instruction two instructions before
    the call and the decrypted string is read back from eax/rax.
    """
    # for xref in idautils.XrefsTo(idc.get_screen_ea(), 0):
    # print(xref.type, idautils.XrefTypeName(xref.type), 'from', hex(xref.frm), 'to', hex(xref.to))
    eh = flare_emu.EmuHelper()
    info = idaapi.get_inf_structure()
    if info.is_64bit():
        dx = "rdx"
        ax = "rax"
    else:
        dx = "edx"
        ax = "eax"
    ea = idc.get_screen_ea()
    for xref in idautils.XrefsTo(ea, 0):
        addr_call = xref.frm
        addr_before = idc.prev_head(addr_call)  # previous instruction
        addr_before = idc.prev_head(addr_before)  # one more instruction back
        addr_after = idc.next_head(addr_call)  # following instruction
        # Check that the earlier instruction passes an argument: mov edx/rdx, xxx
        if idc.print_insn_mnem(addr_before) == "mov" and idc.print_operand(addr_before, 0) == dx:
            #print("0x{:x} => 0x{:x}".format(addr_before, addr_call))
            eh.emulateRange(addr_before, endAddr=addr_after, skipCalls=False)
            ret = eh.getRegVal( ax )
            print( "decrypted at 0x%x: %s" %( addr_call ,eh.getEmuString(ret) ))
            # Write the decrypted string as a repeatable comment at the call site
            idc.set_cmt(addr_call, "decrypted: " + eh.getEmuString(ret).decode(), 0)
    print ("Deobfuscated")
class XorPlugin(idaapi.plugin_t):
    """IDA plugin wrapper exposing deobfuscate_function() via the
    Edit -> Plugins menu ("Xor Batch Deobfuscation")."""
    flags = idaapi.PLUGIN_PROC
    comment = "Equation 字符串反混淆"
    help = "一个小工具,用于反混淆Equation的字符串混淆"
    wanted_name = "Xor Batch Deobfuscation"
    # wanted_hotkey = "Ctrl+Shift+D"
    def init(self):
        # PLUGIN_KEEP keeps the plugin loaded for the whole IDA session
        return idaapi.PLUGIN_KEEP
    def term(self):
        pass
    def run(self, arg):
        # invoked when the user activates the plugin from the menu
        deobfuscate_function()
def PLUGIN_ENTRY():
    """IDA's required plugin entry point: return the plugin instance."""
    return XorPlugin()
|
python
|
__all__ = ['App']
import control_characters
import mdfind
import os
import plistlib
import stat
import subprocess
import sys
from writable_property import writable_property
"""
path/to/<name>.py class Name(mac_app.App)
output:
~/Applications/.mac-app-generator/<name>.app (customizable)
app logs:
~/Library/Logs/Applications/<name>/out.log (customizable)
~/Library/Logs/Applications/<name>/err.log (customizable)
app files:
<name>.app/Contents/MacOS/executable bash wrapper (hack to keep app visible)
<name>.app/Contents/MacOS/launchd.plist launchd.plist
<name>.app/Contents/MacOS/script (your class file)
"""
# Base directory for per-app stdout/stderr logs.
LOGS = os.path.join(os.environ["HOME"], "Library/Logs/Applications")
# Bash wrapper written to <name>.app/Contents/MacOS/executable.
# It patches launchd.plist at runtime (Program/WorkingDirectory), loads the
# agent, then polls `launchctl list` so the app stays visible in the Dock
# until the agent exits.
CODE = """#!/usr/bin/env bash
# LaunchAgent required to keep app visible in Dock
set "${0%/*}"/launchd.plist
trap "launchctl unload '$1'" EXIT
PlistBuddy() { /usr/libexec/PlistBuddy "$@"; }
PlistBuddy -c "Delete WorkingDirectory" -c "Add WorkingDirectory string ${0%/*}" "$1"
PlistBuddy -c "Delete Program" -c "Add Program string ${0%/*}"/script "$1"
Label="$(PlistBuddy -c "Print Label" "$1")"
# logs must exists or launchd will create logs with root permissions
logs="$(PlistBuddy -c "Print StandardErrorPath" -c "Print StandardOutPath" "$1")"
dirs="$(echo "$logs" | grep / | sed 's#/[^/]*$##' | uniq)"
( IFS=$'\\n'; set -- $dirs; [ $# != 0 ] && mkdir -p "$@" )
launchctl unload "$1" 2> /dev/null; launchctl load -w "$1"
while :; do sleep 0.3 && launchctl list "$Label" | grep -q PID || exit 0; done
"""
def dirname(path):
    """Return the directory component of *path* (as os.path.dirname does)."""
    head, _tail = os.path.split(path)
    return head
def write(path, data):
    """Write a dictionary to a plist file.

    Expands ``~`` and relative paths, creates missing parent directories,
    and drops keys whose value is None (plistlib cannot serialize None).
    """
    path = os.path.abspath(os.path.expanduser(path))
    if not os.path.exists(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
    data = {k: v for k, v in data.items() if v is not None}
    if hasattr(plistlib, "dump"):
        # bug fix: the original leaked the file handle (open() without close)
        with open(path, 'wb') as f:
            plistlib.dump(data, f)
    else:
        # legacy fallback for Python 2's plistlib
        plistlib.writePlist(data, path)
class App:
    """Mac app generator. writable properties: `app_folder`, `app_name`, `app_path`, `app_code`, `app_script`, `app_image`, `app_stderr`, `app_stdout`, `app_env`. methods: `create_app()`"""
    # Environment passed to the generated launchd.plist. Control characters
    # are stripped because property lists cannot contain them.
    app_env = dict((k, control_characters.remove(str(v)))
                   for k, v in os.environ.items())
    def __init__(self, **kwargs):
        # allow overriding any writable property via keyword arguments
        for k, v in kwargs.items():
            if v is not None:
                setattr(self, k, v)
    @writable_property
    def app_name(self):
        """app name. default is class name

        app name concepts:
        1) custom name self._app_name with @app_name.setter
        2) class name self.__class__.__name__.lower().replace("_", "-")
        3) module name (os.path.splitext(os.path.basename(self.app_script))[0].replace("_", "-"))
        """
        return self.__class__.__name__.lower().replace("_", "-")
    @writable_property
    def app_folder(self):
        """app folder. default is `~/Applications/.mac-app-generator/`"""
        return os.path.expanduser("~/Applications/.mac-app-generator")
    @writable_property
    def app_script(self):
        """source script path. default is class module file"""
        return sys.modules[self.__class__.__module__].__file__
    @writable_property
    def app_code(self):
        """source code string"""
        # NOTE(review): file handle is not closed explicitly; relies on
        # CPython refcounting.
        return open(self.app_script).read()
    @writable_property
    def app_path(self):
        """app path. `app_folder`+`app_name`"""
        path = os.path.join(self.app_folder, self.app_name)
        return "%s.app" % path if os.path.splitext(path)[1] != ".app" else path
    @writable_property
    def app_image(self):
        """app image. default is `mdfind kMDItemFSName=<app_name>.png` result"""
        filename = "%s.png" % self.app_name
        matches = mdfind.mdfind(["kMDItemFSName=%s" % filename]).splitlines()
        if matches and os.path.exists(matches[0]) and os.path.isfile(matches[0]):
            return matches[0]
    @writable_property
    def app_stdout(self):
        """stdout path. default is `~/Library/Logs/Applications/<name>/out.log`"""
        return os.path.join(LOGS, self.app_name, "out.log")
    @writable_property
    def app_stderr(self):
        """stderr path. default is `~/Library/Logs/Applications/<name>/err.log`"""
        return os.path.join(LOGS, self.app_name, "err.log")
    def create_app(self):
        """create Mac app"""
        # NOTE(review): this guard presumably prevents re-generating the
        # bundle when the script is already running from inside a .app --
        # confirm intent.
        if ".app/" not in os.getcwd():
            self.create_app_executable()
            self.create_app_script()
            if self.app_image:
                self.create_app_icon()
            self.create_app_info()
            self.create_app_launchd()
            self.refresh_app()
        return self
    def create_app_launchd(self):
        """create Contents/MacOS/launchd.plist; <Program> and
        <WorkingDirectory> are filled in at runtime by the bash wrapper."""
        Label = "%s.app" % self.app_name
        """<Program> and <WorkingDirectory> are created at runtime"""
        data = dict(
            Label=Label,
            RunAtLoad=True,
            EnvironmentVariables=self.app_env,
            StandardOutPath=os.path.expanduser(self.app_stdout),
            StandardErrorPath=os.path.expanduser(self.app_stderr)
        )
        path = os.path.join(self.app_path, "Contents",
                            "MacOS", "launchd.plist")
        write(path, data)
    def create_app_executable(self):
        """create app executable file"""
        path = os.path.join(self.app_path, "Contents", "MacOS", "executable")
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        open(path, "w").write(CODE)
        # make the wrapper executable
        st = os.stat(path)
        os.chmod(path, st.st_mode | stat.S_IEXEC)
    def create_app_script(self):
        """create app script file"""
        path = os.path.join(self.app_path, "Contents", "MacOS", "script")
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        open(path, "w").write(self.app_code)
        st = os.stat(path)
        os.chmod(path, st.st_mode | stat.S_IEXEC)
    def create_app_icon(self):
        """create app icon (converts app_image to Icon.icns via sips)"""
        if not self.app_image:
            raise OSError('app_image unknown')
        if not os.path.exists(self.app_image):
            raise OSError('%s NOT EXISTS' % self.app_image)
        path = os.path.join(self.app_path, "Contents",
                            "Resources", "Icon.icns")
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        args = ["/usr/bin/sips", "-s", "format",
                "icns", self.app_image, "--out", path]
        subprocess.check_call(args, stdout=subprocess.PIPE)
    def create_app_info(self):
        """create Contents/Info.plist"""
        path = os.path.join(self.app_path, "Contents", "Info.plist")
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        data = dict(CFBundleExecutable="executable")
        if self.app_image:
            data.update(CFBundleIconFile="Icon.icns")
        write(path, data)
    def refresh_app(self):
        """remove .DS_Store and touch folder"""
        for folder in [self.app_path, os.path.dirname(self.app_path)]:
            try:
                f = os.path.join(folder, ".DS_Store")
                if os.path.exists(f):
                    os.unlink(f)
                # touch so Finder notices the change
                os.utime(folder, None)
            except PermissionError:
                pass
        return self
    def __str__(self):
        return '<App "%s">' % self.app_path
    def __repr__(self):
        return self.__str__()
|
python
|
from pymongo import MongoClient
client = MongoClient()
def get_db():
    """Return the 'parallel_chat' database from the module-level MongoClient."""
    return client['parallel_chat']
|
python
|
# =========================================================================
# Tutorial script: the dabstract `dataset` class (cross-validation, feature
# extraction, splitting, config loading). Each section below is an
# independent runnable example; the repeated imports are intentional.
# =========================================================================
import numpy as np
import os
# Readme_2_data_abstracts showed how the data abstracts work.
# Technically, they embody all functionality to work with data
# This part introduces the dataset class, which is based on a dictseqabstract
# This means that you can use it in a similar way.
# However it has additional functionality such as cross validation loading/saving, subsetting, ..
# This part mainly forces you to define datasets in terms of classes in a generic framework to allow
# easy reuse between researchers.
# -------------------------------------------------------------------------
### class example
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data')})
db.summary()
# -------------------------------------------------------------------------
### class example with data selection
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataset.select import random_subsample
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data')},
             select = random_subsample(ratio=0.3))
db.summary()
# -------------------------------------------------------------------------
### class example and xval
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataset.xval import random_kfold
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data')})
db.set_xval(random_kfold(folds=4,val_frac=1/3))
xval = db.get_xval_set(fold=0,set='train')
# -------------------------------------------------------------------------
### class example and xval with xval saving for later reuse
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataset.xval import random_kfold
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data')})
db.set_xval(random_kfold(folds=4,val_frac=1/3), save_path='xval')
xval = db.get_xval_set(fold=0,set='train')
# -------------------------------------------------------------------------
### class example and xval from the dataset class based on an item
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataset.xval import xval_from_item
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data')})
db.add('set', ['test'] * len(db))
db.set_xval(xval_from_item(key='set'))
xval = db.get_xval_set(fold=0,set='test')
# -------------------------------------------------------------------------
### Feature extraction
### paths/feat is a mandatory field that should be added when doing feature extraction
### as it determines where the features are stored
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data'),
                       'feat': os.path.join('data', 'feat')}) #mandatory
# define processor
processor = ProcessingChain().add(Framing(windowsize=10, stepsize=10))
# do feature extraction on 'data' if not already performed and add it to the dataset as a 'feat' key
# if new_key is not specified, the item of 'data' is replaced by the feature extracted version
db.prepare_feat('data',fe_name='Framing1010', fe_dp=processor, new_key='feat')
# again you can specify multiprocessing as:
# db.prepare_feat('data',fe_name='Framing1010', fe_dp=processor, new_key='feat', workers=2)
# -------------------------------------------------------------------------
### Feature extraction (Nested)
### paths/feat is a mandatory field that should be added when doing feature extraction
### as it determines where the features are stored
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data'),
                       'feat': os.path.join('data', 'feat')}) #mandatory
# define processor
processor = ProcessingChain()
processor2 = ProcessingChain().add(Framing(windowsize=10, stepsize=10))
# do feature extraction for the first time
db.prepare_feat('data',fe_name='np_audio', fe_dp=processor, new_key='feat')
# do feature extraction for the second time (e.g. if its modular, this could save computation time)
db.prepare_feat('feat',fe_name='raw_audio', fe_dp=processor2, new_key='feat2')
# -------------------------------------------------------------------------
### Load data from memory
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data'),
                       'feat': os.path.join('data', 'feat')}) #mandatory
# define processor
processor = ProcessingChain().add(Framing(windowsize=10, stepsize=10))
# do feature extraction on 'data'
db.prepare_feat('data',fe_name='Framing1010', fe_dp=processor, new_key='feat')
# load features into memory
db.load_memory('feat')
# -------------------------------------------------------------------------
### Load data from memory and keep internal structure
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data'),
                       'feat': os.path.join('data', 'feat')}) #mandatory
# define processor
processor = ProcessingChain().add(Framing(windowsize=10, stepsize=10))
# do feature extraction on 'data'
db.prepare_feat('data',fe_name='Framing1010', fe_dp=processor, new_key='feat')
# load features into memory
db.load_memory('feat', keep_structure=True)
# -------------------------------------------------------------------------
### Splitting
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data'),
                       'feat': os.path.join('data', 'feat')}) #mandatory
db.summary()
# define processor
processor = ProcessingChain().add(Framing(windowsize=0.1, stepsize=0.1))
# prepare features
db.prepare_feat('data',fe_name='Framing0101', fe_dp=processor, new_key='feat')
# add splitting
db.add_split(split_size=0.5)
# show summary
db.summary()
# both feat and data are timesplitted and read from disk
print(db['data'][0].shape)
print(db['feat'][0].shape)
# -------------------------------------------------------------------------
### Splitting (per frame)
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
                       'meta': os.path.join('data','data'),
                       'feat': os.path.join('data', 'feat')}) #mandatory
db.summary()
# define processor
processor = ProcessingChain().add(Framing(windowsize=0.1, stepsize=0.1))
# prepare features
db.prepare_feat('data',fe_name='Framing0101', fe_dp=processor, new_key='feat')
# add splitting
db.add_split(split_size=1, type='samples', reference_key='feat')
# show summary
db.summary()
# both feat and data are timesplitted and read from disk
print(db['data'][0].shape)
print(db['feat'][0].shape)
# -------------------------------------------------------------------------
### Dataset from config
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
## Loads the following yaml file
# datasets:
#   - name: !class [custom.dataset.dbs.EXAMPLE]
#     parameters:
#       paths:
#         data: !pathjoin [data,data]
#         meta: !pathjoin [data,data]
#         tmp: !pathjoin [data,tmp]
# !class is a custom constructor that load_yaml_config uses to replace that item by that class
data = load_yaml_config(filename='EXAMPLE_anomaly2', path=os.path.join('configs', 'dbs'), walk=True,
                        post_process=dataset_from_config)
data.summary()
# -------------------------------------------------------------------------
### Dataset from config through custom_dir
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable to indicate where custom fct are (custom, as in, not present in dabstract)
# This can be used instead of !class [] depending on what you think is most convenient
# structure of the custom should be:
# /dbs/.. for datasets
# /dp/.. for processing layers
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
                        post_process=dataset_from_config)
data.summary()
# -------------------------------------------------------------------------
### Dataset from config through custom_dir with xval
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly_xval', path=os.path.join('configs', 'dbs'), walk=True,
                        post_process=dataset_from_config)
data.summary()
print(data.get_xval_set(set='train',fold=0))
# -------------------------------------------------------------------------
### Dataset from config with two datasets and splitting
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
                        post_process=dataset_from_config)
data.add_split(0.5)
data.summary()
# -------------------------------------------------------------------------
### Dataset from config with two datasets and splitting from config
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly_split', path=os.path.join('configs', 'dbs'), walk=True,
                        post_process=dataset_from_config)
data.summary()
# -------------------------------------------------------------------------
### Dataset from config with two datasets and subsampling on string
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
                        post_process=dataset_from_config)
data.summary()
print([subdb for subdb in data['data']['subdb']])
data.add_select((lambda x,k: x['data']['subdb'][k]=='normal'))
data.summary()
print([subdb for subdb in data['data']['subdb']])
# -------------------------------------------------------------------------
### Dataset from config with two datasets and random subsampling
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
from dabstract.dataset.select import random_subsample
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
                        post_process=dataset_from_config)
data.summary()
print([subdb for subdb in data['data']['subdb']])
data.add_select(random_subsample(0.5))
data.summary()
print([subdb for subdb in data['data']['subdb']])
# -------------------------------------------------------------------------
### Dataset from config with two datasets and subsampling on a list and random from config
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly_subsample', path=os.path.join('configs', 'dbs'), walk=True,
                        post_process=dataset_from_config)
data.summary()
# -------------------------------------------------------------------------
### Merge two datasets
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
from dabstract.dataset.select import random_subsample
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data0 = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
                         post_process=dataset_from_config)
data0.summary()
# load dataset
data1 = load_yaml_config(filename='EXAMPLE_anomaly2', path=os.path.join('configs', 'dbs'), walk=True,
                         post_process=dataset_from_config)
data1.summary()
# merge
data = data0+data1
|
python
|
from .model import EfficientUnetPlusPlus
|
python
|
import machine
import time
import json
# GPIO connections on test fixture
# Each entry lists pins ('io') that are physically wired together on the
# fixture. 'out' = how many of the listed pins can act as outputs; the
# remaining ones are presumably ESP32 input-only pins (GPIO34/35) --
# confirm against the fixture schematic.
gpio_conn = [
    { 'out': 2, 'io': (23, 32) },
    { 'out': 1, 'io': (33, 34) },
    { 'out': 2, 'io': (5, 12, 35) },
    { 'out': 2, 'io': (4, 18) },
    { 'out': 2, 'io': (13, 15) },
    { 'out': 2, 'io': (2, 14) }
]
def testGPIO():
    """Drive each wired-together pin group high in turn and verify that the
    connected pins follow (and all other groups read low).

    Returns a de-duplicated list of (output_pin, input_pin) pairs that
    failed the check; an empty list means all connections are good.
    """
    # List of problem pairs (output, input)
    problems = []
    # Test pins as output in two cycles. There's one set of connections
    # that connects three pins together, but one is input only.
    for out_idx in range(2):
        # Go through each defined connection for setup
        for conn in gpio_conn:
            if out_idx < conn['out']:
                # Make the indexed pin output, the other pins input
                for pin_idx, pin in enumerate(conn['io']):
                    if pin_idx == out_idx:
                        machine.Pin(pin, machine.Pin.OUT)
                    else:
                        machine.Pin(pin, machine.Pin.IN)
        # Go through each defined connection again for testing
        for test_conn in gpio_conn:
            if out_idx < test_conn['out']:
                # Turn the output for the indexed connection under test high,
                # the others low
                for conn in gpio_conn:
                    if out_idx < conn['out']:
                        # NOTE(review): Pin() is re-constructed here without a
                        # mode; relies on MicroPython keeping the mode set above.
                        machine.Pin(conn['io'][out_idx]) \
                            .value(1 if conn == test_conn else 0)
                # Now check that the inputs connected to the active output
                # are high and the rest are low
                for conn in gpio_conn:
                    if out_idx < conn['out']:
                        for pin_idx, pin in enumerate(conn['io']):
                            if pin_idx != out_idx and machine.Pin(pin).value() \
                                    != (1 if conn == test_conn else 0):
                                problems.append((conn['io'][out_idx], pin))
    # Return list of problem pairs
    return list(set(problems))
def getVPlus():
    """Read the V+ supply rail voltage via the ADC on GPIO39.

    4095 = 12-bit ADC full scale; 1.1 V = nominal ADC reference; 15.7 is
    presumably the on-board resistor-divider ratio -- confirm on schematic.
    """
    return machine.ADC(machine.Pin(39)).read() * 15.7 * 1.1 / 4095
def getV3V3():
    """Read the 3.3 V rail voltage via the ADC on GPIO36.

    4095 = 12-bit ADC full scale; 1.1 V = nominal ADC reference; 5.7 is
    presumably the on-board resistor-divider ratio -- confirm on schematic.
    """
    return machine.ADC(machine.Pin(36)).read() * 5.7 * 1.1 / 4095
# Wait for network
# NOTE(review): `lan` is not defined anywhere in this file -- presumably
# created by boot.py; without it this raises NameError. Confirm.
network_tries = 8
while lan.ifconfig()[0] == '0.0.0.0' and network_tries:
    time.sleep(2)
    network_tries = network_tries - 1
# Get the IP address
ip = lan.ifconfig()[0]
# Run the GPIO test to discover problems
gpio_problems = testGPIO()
# Print all test results as JSON (read by the host-side test harness)
print(json.dumps({
    'vplus': getVPlus(),
    'v3v3': getV3V3(),
    'gpio': {
        'ok': not bool(gpio_problems),
        'problems': gpio_problems,
    },
    'ip': {
        'ok': ip != '0.0.0.0',
        'address': ip
    }
}))
|
python
|
import os
import re
import sqlite3
import configparser
from helpers.logger import Logger
from helpers.match import MatchDatabase, MatchSource
from helpers.searchwords import Searchwords
class File:
    """A file under investigation (plain source file or SQLite database).

    Collects Match objects for every configured search word found in the
    file and groups them per unique word for presentation.
    """
    config = configparser.ConfigParser()
    config.read("config.ini")
    non_regex_indicator = config.get("ProgramConfig", 'non_regex_indicator')

    def __init__(self, file_path):
        self.file_path = file_path
        # dots replaced so the name is safe to use as an identifier in views
        self.name = os.path.basename(file_path).replace(".", "_")
        self.src_matches = list()
        self.db_matches = list()
        self.icon = "insert_drive_file"  # Materialise Icon
        self.fa_icon = "file"  # Font Awesome icon, used in the tree-view
        self.all_matches = list()
        self.unique_words = list()
        self.grouped_matches = dict()  # contains an array of Match objects for each unique word

    def find_matches_in_db_file(self):
        """Scan all rows of all tables in a SQLite database file for the
        configured database search words and record the matches."""
        # Set icon of file
        self.icon = "insert_invitation"
        self.fa_icon = "database"
        db = sqlite3.connect(self.file_path)
        try:
            cursor = db.cursor()
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
            tables = cursor.fetchall()
            for table_name in tables:
                table_name = table_name[0]
                # Quote the identifier: table names come from the (untrusted)
                # scanned database and may contain spaces or SQL metacharacters.
                cursor = db.execute('SELECT * FROM "%s"' % table_name.replace('"', '""'))
                line = 0
                for row in cursor.fetchall():
                    line += 1
                    for matchword in Searchwords.db_search_words:
                        exclude = False
                        if re.match(File.non_regex_indicator, str(row)):
                            Searchwords.db_search_words[matchword].regex = True
                        if re.search(matchword, str(row), re.IGNORECASE):
                            for item in Searchwords.exclusion_list:
                                if item[0] == matchword and item[1] in self.file_path:
                                    Logger("Database exclusion found: %s in file %s" % (str(item[0]), self.file_path), Logger.INFO)
                                    exclude = True
                            if not exclude:
                                importance = Searchwords.db_search_words[matchword]
                                db_match = MatchDatabase(matchword, line, str(table_name), str(row), importance)
                                self.db_matches.append(db_match)
                                self.all_matches.append(db_match)
        finally:
            # bug fix: the connection was previously never closed
            db.close()
        self.orden_matches()

    def find_matches_in_src_file(self, CODE_OFFSET, QUERY_IMPORTANCE):
        """Scan a text/source file line by line for the configured source
        search words with importance above QUERY_IMPORTANCE, recording each
        match with CODE_OFFSET lines of surrounding context."""
        try:
            with open(self.file_path, "r", encoding="utf8", errors='ignore') as file:
                lines_in_file = file.read().splitlines()
        except IOError as e:
            # bug fix: the original format string had a single '%s' but two
            # arguments, raising TypeError instead of logging the warning
            Logger("could not open file '%s'. Error: %s" % (self.file_path, e.strerror), Logger.WARNING)
            return list()
        line_index = 1
        for line in lines_in_file:
            for query in Searchwords.src_search_words.keys():
                if int(Searchwords.src_search_words[query]) > QUERY_IMPORTANCE:
                    if re.match(File.non_regex_indicator, query):
                        Searchwords.src_search_words[query].regex = True
                    if re.search(query, line.lower(), re.IGNORECASE):
                        exclude = False
                        for item in Searchwords.exclusion_list:
                            if re.search(item[0], line, re.IGNORECASE):
                                if (item[1] in self.file_path or (item[1] == "" or item[1] is None)):
                                    Logger("Exclusion found: %s in file %s" % (str(item[0]), self.file_path),
                                           Logger.INFO)
                                    exclude = True
                        if not exclude:
                            # context window around the match, clamped to file bounds
                            upper_range = min(line_index + CODE_OFFSET, len(lines_in_file) + 1)
                            lower_range = max(line_index - CODE_OFFSET - 1, 1)
                            src_match = MatchSource(query, line_index, lines_in_file[lower_range:upper_range],
                                                    Searchwords.src_search_words[query], len(lines_in_file))
                            self.all_matches.append(src_match)
                            self.src_matches.append(src_match)
            line_index = line_index + 1
        self.orden_matches()

    def orden_matches(self):
        """Group all matches by their match word into self.grouped_matches.

        Search words are already stored in importance order, so no extra
        sorting is needed here.
        """
        for match in self.all_matches:
            if match.matchword not in self.unique_words:
                self.unique_words.append(match.matchword)
        for word in self.unique_words:
            self.grouped_matches[word] = list()
            for match in self.all_matches:
                if match.matchword == word:
                    self.grouped_matches[word].append(match)
|
python
|
#!/usr/bin/env python3
"""
Interpret all transforms, thereby flattening cairo operations
to just a few primitives, such as move_to line_to and curve_to.
"""
import sys
from math import sin, cos, pi
from bruhat.argv import argv
from bruhat.render import back
from bruhat.render.base import Context
class Flatten(Context):
    """A cairo-style Context that records drawing calls as flattened path
    primitives (move_to / line_to / curve_to plus decorations).

    Coordinates are pushed through the current transform matrix and the
    y coordinate is negated on every recorded item (note the ``-y`` below).
    Completed paths (after stroke()/fill()) accumulate in ``self.paths``.
    """
    def __init__(self):
        Context.__init__(self)
        # primitives of the path currently being built
        self.path = back.Compound()
        # finished paths, appended by stroke()/fill()
        self.paths = []
    def move_to(self, x, y):
        """Record a MoveTo at the transformed position."""
        x, y = self.matrix(x, y)
        item = back.MoveTo_Pt(x, -y)
        self.path.append(item)
        self.pos = x, y
    def line_to(self, x, y):
        """Record a LineTo at the transformed position."""
        x, y = self.matrix(x, y)
        item = back.LineTo_Pt(x, -y)
        self.path.append(item)
        self.pos = x, y
    def curve_to(self, x0, y0, x1, y1, x2, y2):
        """Record a cubic bezier CurveTo (all control points transformed)."""
        x0, y0 = self.matrix(x0, y0)
        x1, y1 = self.matrix(x1, y1)
        x2, y2 = self.matrix(x2, y2)
        item = back.CurveTo_Pt(x0, -y0, x1, -y1, x2, -y2)
        self.path.append(item)
        self.pos = x2, y2
    def rel_move_to(self, dx, dy):
        """Record a MoveTo relative to the current point (deltas go through
        transform_distance, i.e. no translation is applied)."""
        assert self.pos is not None, "no current point"
        x, y = self.pos
        dx, dy = self.matrix.transform_distance(dx, dy)
        x, y = x+dx, y+dy
        item = back.MoveTo_Pt(x, -y)
        self.path.append(item)
        self.pos = x, y
    def rel_line_to(self, dx, dy):
        """Record a LineTo relative to the current point."""
        assert self.pos is not None, "no current point"
        x, y = self.pos
        dx, dy = self.matrix.transform_distance(dx, dy)
        x, y = x+dx, y+dy
        item = back.LineTo_Pt(x, -y)
        self.path.append(item)
        self.pos = x, y
    def rel_curve_to(self, dx0, dy0, dx1, dy1, dx2, dy2):
        """Record a cubic bezier relative to the current point."""
        assert self.pos is not None, "no current point"
        x, y = self.pos
        dx0, dy0 = self.matrix.transform_distance(dx0, dy0)
        dx1, dy1 = self.matrix.transform_distance(dx1, dy1)
        dx2, dy2 = self.matrix.transform_distance(dx2, dy2)
        x0, y0 = x+dx0, y+dy0
        x1, y1 = x+dx1, y+dy1
        x2, y2 = x+dx2, y+dy2
        item = back.CurveTo_Pt(x0, -y0, x1, -y1, x2, -y2)
        self.path.append(item)
        self.pos = x2, y2
    def arc(self, x, y, radius, angle1, angle2):
        """Counter-clockwise arc, approximated with beziers."""
        # stay in user space coordinates
        if self.pos is None:
            x1, y1 = x+radius*cos(angle1), y+radius*sin(angle1)
            self.move_to(x1, y1)
        # Angles are negated and swapped (the y flip reverses orientation);
        # the bezier path is then reversed and replayed through this context,
        # which applies the matrix via the move/line/curve methods above.
        p = back.arc_to_bezier_pt(x, -y, radius, -angle2, -angle1)
        p = p.reversed()
        p.process_cairo(self)
    def arc_negative(self, x, y, radius, angle1, angle2):
        """Clockwise arc, approximated with beziers."""
        # stay in user space coordinates
        if self.pos is None:
            x1, y1 = x+radius*cos(angle1), y+radius*sin(angle1)
            self.move_to(x1, y1)
        p = back.arc_to_bezier_pt(x, -y, radius, -angle1, -angle2)
        p.process_cairo(self)
    def close_path(self):
        """Record a ClosePath marker."""
        item = back.ClosePath()
        self.path.append(item)
    def set_source_rgba(self, r, g, b, a):
        """Record an RGBA color decoration on the current path."""
        deco = back.RGBA(r, g, b, a)
        self.path.append(deco)
    def set_line_width(self, w):
        """Record a line-width decoration (in points)."""
        deco = back.LineWidth_Pt(w)
        self.path.append(deco)
    def stroke(self):
        """Finish the current path as a stroke and start a new empty path."""
        deco = back.Stroke()
        self.path.append(deco)
        self.paths.append(self.path)
        self.path = back.Compound()
        self.pos = None
    def fill_preserve(self):
        """Record a fill that keeps the current path open for more drawing."""
        deco = back.FillPreserve()
        self.path.append(deco)
    def fill(self):
        """Finish the current path as a fill and start a new empty path."""
        deco = back.Fill()
        self.path.append(deco)
        self.paths.append(self.path)
        self.path = back.Compound()
        self.pos = None
def test():
    """Visual sanity check: draw the same figure twice into output.pdf.

    First directly through pycairo (red, wide line), then through a
    Flatten context replayed into the same cairo context (black,
    default width).  If Flatten interprets transforms correctly, the
    black line follows the red line exactly.
    """
    def draw_test(cxt):
        # A mix of transforms, lines and arcs to exercise Flatten.
        cxt.translate(100., 0.)
        cxt.scale(0.8, 0.7)
        cxt.move_to(10., 10.)
        cxt.line_to(100., 100.)
        cxt.arc(200., 200., 80., 0., 1.1*pi)
        cxt.scale(0.7, 1.2)
        cxt.translate(50., 50.)
        cxt.line_to(300., 300.)
        cxt.arc_negative(400., 300., 60., 0., -1.8*pi)
        cxt.line_to(600.-10, 400.-10)
        cxt.stroke()
    import cairo
    W, H = 600., 400.  # point == 1/72 inch
    # black line should follow the red line.
    surface = cairo.PDFSurface("output.pdf", W, H)
    context = cairo.Context(surface)
    context.save()
    context.set_source_rgba(1., 0., 0., 0.5)
    context.set_line_width(10.)
    draw_test(context)
    context.restore()
    # Record the same drawing with Flatten, then replay the flattened
    # paths into the same cairo context.
    cxt = Flatten()
    draw_test(cxt)
    for path in cxt.paths:
        path.process_cairo(context)
    surface.finish()
    print("OK")


if __name__ == "__main__":
    test()
|
python
|
"""REPL server for inspecting and hot-patching a running Python process.
This module makes your Python app serve up a REPL (read-eval-print-loop)
over TCP, somewhat like the Swank server in Common Lisp.
In a REPL session, you can inspect and mutate the global state of your running
program. You can e.g. replace top-level function definitions with new versions
in your running process, or reload modules from disk (with `importlib.reload`).
The REPL server runs in a daemon thread. Terminating the main thread of your
process will terminate the server, also forcibly terminating all open REPL
sessions in that process.
**SECURITY WARNING**: A REPL SERVER IS ESSENTIALLY A BACK DOOR.
Currently, we provide NO authentication or encryption. Anyone can connect, and
once connected, do absolutely anything that the user account running your app
can do. Connections are anonymous.
Hence, only use this server in carefully controlled environments, such as:
a) Your own local machine, with no untrusted human users on it,
b) A dedicated virtual server running only your app, in which case
the OS level already provides access control and encrypted connections.
Even then, serve this ONLY on the loopback interface, to force users to connect
to the machine via SSH first (or have physical local console access).
With that out of the way, to enable the server in your app::
from unpythonic.net import server
server.start(locals={})
The `locals=...` argument sets the top-level namespace for variables for use by
the REPL. It is shared between REPL sessions.
Using `locals=globals()` makes the REPL directly use the calling module's
top-level scope. If you want a clean environment, where you must access any
modules through `sys.modules`, use `locals={}` (recommended).
To connect to a running REPL server (with tab completion and Ctrl+C support)::
python3 -m unpythonic.net.client localhost 1337
If you're already running in a local Python REPL, this should also work::
from unpythonic.net import client
client.connect(("127.0.0.1", 1337))
For basic use (history, but no tab completion), you can use::
rlwrap netcat localhost 1337
or even just (no history, no tab completion)::
netcat localhost 1337
**CAUTION**: Python's builtin `help(foo)` does not work in this REPL server.
It cannot, because the client runs a complete second input prompt (that holds
the local TTY), separate from the input prompt running on the server.
So the stdin/stdout are not just redirected to the socket.
Trying to open the built-in help will open the help locally on the server,
causing the client to hang. The top-level `help()`, which uses a command-based
interface, appears to work, until you ask for a help page, at which point it
runs into the same problem.
As a workaround, we provide `doc(foo)`, which just prints the docstring (if any),
and performs no paging.
**CAUTION**: Python was not designed for arbitrary hot-patching. If you change
a **class** definition (whether by re-assigning the reference or by reloading
the module containing the definition), only new instances will use the new
definition, unless you specifically monkey-patch existing instances to change
their type.
The language docs hint it is somehow possible to retroactively change an
object's type, if you're careful with it:
https://docs.python.org/3/reference/datamodel.html#id8
In fact, ActiveState recipe 160164 explicitly tells how to do it,
and even automate that with a custom metaclass:
https://github.com/ActiveState/code/tree/master/recipes/Python/160164_automatically_upgrade_class_instances
Based on socketserverREPL by Ivor Wanders, 2017. Used under the MIT license.
https://github.com/iwanders/socketserverREPL
Based on macropy.core.MacroConsole by Li Haoyi, Justin Holmgren, Alberto Berti and all the other contributors,
2013-2019. Used under the MIT license.
https://github.com/azazel75/macropy
Based on imacropy.console by the same author as unpythonic. 2-clause BSD license.
https://github.com/Technologicat/imacropy
**Trivia**:
Default port is 1337, because connecting to a live Python program can be
considered somewhat élite. Refer to https://megatokyo.com/strip/9.
The `socketserverREPL` package uses the same default, and actually its
`repl_tool.py` can talk to this server (but doesn't currently feature
remote tab completion).
The default port for the control channel is 8128, because it's for
*completing* things, and https://en.wikipedia.org/wiki/Perfect_number
This is the first one above 1024, and was already known to Nicomachus
around 100 CE.
"""
# TODO: use logging module instead of server-side print
# TODO: support several server instances? (makes sense if each is connected to a different module)
__all__ = ["start", "stop"] # Exports for code that wants to embed the server.
import rlcompleter # yes, just rlcompleter without readline; backend for remote tab completion.
import threading
import sys
import os
import time
import socketserver
import atexit
import inspect
from itertools import count
try:
# Advanced macro-enabled console. Importing this also boots up `mcpyrate`.
from mcpyrate.repl.console import MacroConsole as Console
except ModuleNotFoundError:
from code import InteractiveConsole as Console
from ..collections import ThreadLocalBox, Shim
from ..misc import async_raise, namelambda
from ..symbol import sym
from .util import ReuseAddrThreadingTCPServer, socketsource
from .msg import MessageDecoder
from .common import ApplevelProtocolMixin
from .ptyproxy import PTYSocketProxy
# Because "These are only defined if the interpreter is in interactive mode.",
# we have to do something like this.
# https://docs.python.org/3/library/sys.html#sys.ps1
try:
_original_ps1, _original_ps2 = sys.ps1, sys.ps2
except AttributeError:
_original_ps1, _original_ps2 = None, None
# Module-level server state.  Only one REPL server per process is supported.
_server_instance = None  # (server, server_thread, cserver, cserver_thread) when running
_active_sessions = {}    # session id -> ConsoleSession; for exit monitoring and control pairing
_session_counter = count(start=1)  # for generating session ids, needed for pairing control and REPL sessions.
_halt_pending = False    # set by halt(); polled by the embedding app's main loop

# Originals saved so stop() can restore them after the Shim redirection in start().
_original_stdin = sys.stdin
_original_stdout = sys.stdout
_original_stderr = sys.stderr

# Thread-local boxes backing the sys stream shims: each session thread gets its
# own stdin/stdout/stderr, defaulting to the originals.
_threadlocal_stdin = ThreadLocalBox(_original_stdin)
_threadlocal_stdout = ThreadLocalBox(_original_stdout)
_threadlocal_stderr = ThreadLocalBox(_original_stderr)

_console_locals_namespace = None  # locals namespace shared by all REPL sessions
_banner = None                    # session startup message; "" suppresses it
# --------------------------------------------------------------------------------
# Exports for REPL sessions
# These `_get_source` and `doc` functions come from `mcpyrate.repl.utils`,
# with the coloring code removed.
#
# We strictly need a local copy only if `mcpyrate` is not installed,
# to allow viewing docstrings in the stdlib console (which does not use
# colored output anyway).
def _get_source(obj):
# `inspect.getsourcefile` accepts "a module, class, method, function,
# traceback, frame, or code object" (the error message says this if
# we try it on something invalid).
#
# So if `obj` is an instance, we need to try again with its `__class__`.
for x in (obj, obj.__class__): # TODO: other places to fall back to?
try:
filename = inspect.getsourcefile(x)
source, firstlineno = inspect.getsourcelines(x)
return filename, source, firstlineno
except (TypeError, OSError):
continue
raise NotImplementedError
def doc(obj):
    """Print an object's docstring, non-interactively.

    Additionally, if the information is available, print the filename
    and the starting line number of the definition of `obj` in that file.
    This is printed before the actual docstring.

    This works around the problem that in a REPL session, the stdin/stdout
    of the builtin `help()` are not properly redirected.

    And that looking directly at `some_macro.__doc__` prints the string
    value as-is, without formatting it.

    NOTE: if you have the `mcpyrate` package installed, you can use
    the IPython-like `obj?` and `obj??` syntax instead (provided by
    `mcpyrate.repl.console.MacroConsole`).
    """
    try:
        filename, source, firstlineno = _get_source(obj)
        # BUGFIX: actually show the filename returned by _get_source;
        # previously it was discarded and the literal "(unknown)" printed.
        print(f"{filename}:{firstlineno}", file=sys.stderr)
    except NotImplementedError:
        pass  # location info is best-effort only
    if not hasattr(obj, "__doc__") or not obj.__doc__:
        print("<no docstring>", file=sys.stderr)
        return
    print(inspect.cleandoc(obj.__doc__), file=sys.stderr)
# TODO: detect stdout, stderr and redirect to the appropriate stream.
def server_print(*values, **kwargs):
    """Print to the original stdout of the server process.

    Inside a REPL session `sys.stdout` is redirected to the client's
    socket; this always writes to the server console instead.
    """
    print(*values, file=_original_stdout, **kwargs)
def halt(doit=True):
    """Tell the REPL server to shut down after the last client has disconnected.

    To cancel a pending halt, use `halt(False)`.
    """
    global _halt_pending
    _halt_pending = doit
    if doit:
        msg = "Halt requested, REPL server will shut down after the last client disconnects."
    else:
        msg = "Halt canceled, REPL server will remain running."
    print(msg)         # notify the REPL session (redirected stdout)
    server_print(msg)  # notify the server console
# Results of background thunks spawned by bg(), keyed by thread ident.
# Each value is a (status, payload) pair; status is one of the sentinels below.
_bg_results = {}
_bg_running = sym("_bg_running")  # thunk still executing; payload is None
_bg_success = sym("_bg_success")  # payload is the return value
_bg_fail = sym("_bg_fail")        # payload is the raised exception
def bg(thunk):
    """Spawn a thread to run `thunk` in the background. Return the thread object.

    To get the return value of `thunk`, see `fg`.
    """
    @namelambda(thunk.__name__)
    def run_thunk():
        # Record "running" first, then overwrite with the final outcome.
        _bg_results[thread.ident] = (_bg_running, None)
        try:
            _bg_results[thread.ident] = (_bg_success, thunk())
        except Exception as err:
            _bg_results[thread.ident] = (_bg_fail, err)
    thread = threading.Thread(target=run_thunk, name=thunk.__name__, daemon=True)
    thread.start()
    return thread
# TODO: we could use a better API, but I don't want timeouts or a default return value.
def fg(thread):
    """Get the return value of a `bg` thunk.

    `thread` is the thread object returned by `bg` when the computation was started.

    If the thread is still running, return `thread` itself.

    If completed, **pop** the result. If the thread:

      - returned normally: return the value.
      - raised an exception: raise that exception.

    Raises TypeError if `thread` is not a `threading.Thread`, and
    ValueError if no result is recorded for it.
    """
    # BUGFIX: the old check `if "ident" not in thread` attempted a
    # membership test on a Thread object, which raised a confusing
    # TypeError ("argument of type 'Thread' is not iterable") instead of
    # the intended validation message. Test the type explicitly.
    if not isinstance(thread, threading.Thread):
        raise TypeError(f"Expected a Thread object, got {type(thread)} with value {repr(thread)}.")
    if thread.ident not in _bg_results:
        raise ValueError(f"No result for thread {repr(thread)}")
    # This pattern is very similar to that used by unpythonic.fun.memoize...
    status, value = _bg_results[thread.ident]
    if status is _bg_running:
        return thread  # not done yet; caller may try again later
    _bg_results.pop(thread.ident)
    if status is _bg_success:
        return value
    elif status is _bg_fail:
        raise value
    assert False  # cannot happen: status is always one of the three sentinels
# Exports available in REPL sessions.
# These will be injected to the `locals` namespace of the REPL session when the server starts.
# (Injection happens in start(); each entry is keyed by the function's __name__.)
_repl_exports = {doc, server_print, halt, bg, fg}
# --------------------------------------------------------------------------------
# Server itself
class ControlSession(socketserver.BaseRequestHandler, ApplevelProtocolMixin):
    """Entry point for connections to the control server.

    We use a separate connection for control to avoid head-of-line blocking.

    For example, how the remote tab completion works: the client sends us
    a request. We invoke `rlcompleter` on the server side, and return its
    response to the client.
    """
    def handle(self):
        # TODO: ipv6 support
        caddr, cport = self.client_address
        client_address_str = f"{caddr}:{cport}"
        # Local exception type used to break out of the request loop
        # when the client closes the socket.
        class ClientExit(Exception):
            pass
        try:
            server_print(f"Control channel for {client_address_str} opened.")
            # TODO: fancier backend? See examples in https://pymotw.com/3/readline/
            # Completions are computed against the shared REPL locals namespace.
            completer_backend = rlcompleter.Completer(_console_locals_namespace)
            # From the docstring of `socketserver.BaseRequestHandler`:
            #     This class is instantiated for each request to be handled.
            #     ...
            #     Since a separate instance is created for each request, the
            #     handle() method can define other arbitrary instance variables.
            self.sock = self.request
            self.decoder = MessageDecoder(socketsource(self.sock))
            self.paired_repl_session_id = None
            while True:
                # The control server follows a request-reply application-level
                # protocol, which is essentially a remote procedure call
                # interface. We use ApplevelProtocolMixin, which allows us to
                # transmit the function name, arguments and return values in
                # a dictionary format.
                #
                # A request from the client is a dictionary, with str keys. It
                # must contain the field "command", with its value set to one
                # of the recognized command names as an `str`.
                #
                # Existence and type of any other fields depends on each
                # individual command. This server source code is the official
                # documentation of this small app-level protocol.
                #
                # For each request received, the server sends a reply, which is
                # also a dictionary with str keys. It has one compulsory field:
                # "status". Upon success, it must contain the string "ok". The
                # actual return value(s) (if any) may be provided in arbitrary
                # other fields, defined by each individual command.
                #
                # Upon failure, the "status" field must contain the string
                # "failed". An optional (but strongly recommended!) "reason"
                # field may contain a short description about the failure.
                # More information may be included in arbitrary other fields.
                request = self._recv()
                if not request:
                    server_print(f"Socket for {client_address_str} closed by client.")
                    raise ClientExit
                if "command" not in request:
                    reply = {"status": "failed", "reason": "Request is missing the 'command' field."}
                elif request["command"] == "DescribeServer":
                    reply = {"status": "ok",
                             # needed by client's prompt detector
                             "prompts": {"ps1": sys.ps1, "ps2": sys.ps2},
                             # for future-proofing only
                             "control_protocol_version": "1.0",
                             "supported_commands": ["DescribeServer", "PairWithSession", "TabComplete", "KeyboardInterrupt"]}
                elif request["command"] == "PairWithSession":
                    if "id" not in request:
                        reply = {"status": "failed", "reason": "Request is missing the PairWithSession parameter 'id'."}
                    else:
                        if request["id"] not in _active_sessions:
                            errmsg = f"Pairing control session failed; there is no active REPL session with id={request['id']}."
                            reply = {"status": "failed", "reason": errmsg}
                            server_print(errmsg)
                        else:
                            server_print(f"Pairing control session for {client_address_str} to REPL session {request['id']}.")
                            self.paired_repl_session_id = request["id"]
                            reply = {"status": "ok"}
                elif request["command"] == "TabComplete":
                    if "text" not in request or "state" not in request:
                        reply = {"status": "failed", "reason": "Request is missing at least one of the TabComplete parameters 'text' and 'state'."}
                    else:
                        completion = completer_backend.complete(request["text"], request["state"])
                        # server_print(request, reply)  # DEBUG
                        reply = {"status": "ok", "result": completion}
                elif request["command"] == "KeyboardInterrupt":
                    server_print(f"Client {client_address_str} sent request for remote Ctrl+C.")
                    if not self.paired_repl_session_id:
                        errmsg = "This control channel is not currently paired with a REPL session."
                        reply = {"status": "failed", "reason": errmsg}
                        server_print(errmsg)
                    else:
                        server_print(f"Remote Ctrl+C in session {self.paired_repl_session_id}.")
                        try:
                            target_session = _active_sessions[self.paired_repl_session_id]
                            target_thread = target_session.thread
                        except KeyError:
                            errmsg = f"REPL session {self.paired_repl_session_id} no longer active."
                            reply = {"status": "failed", "reason": errmsg}
                            server_print(errmsg)
                        except AttributeError:
                            errmsg = f"REPL session {self.paired_repl_session_id} has no 'thread' attribute."
                            reply = {"status": "failed", "reason": errmsg}
                            server_print(errmsg)
                        else:
                            try:
                                # The implementation of async_raise is one of the dirtiest hacks ever,
                                # and only works on Python implementations providing the `ctypes` module,
                                # since Python has no officially exposed mechanism to trigger an asynchronous
                                # exception (such as KeyboardInterrupt) in an arbitrary thread.
                                async_raise(target_thread, KeyboardInterrupt)
                            except (ValueError, SystemError, RuntimeError) as err:
                                server_print(err)
                                reply = {"status": "failed", "reason": err.args, "failure_type": str(type(err))}
                            else:
                                reply = {"status": "ok"}
                else:
                    cmd = request["command"]
                    reply = {"status": "failed", "reason": f"Command '{cmd}' not understood by this server."}
                self._send(reply)
        except ClientExit:
            server_print(f"Control channel for {client_address_str} closed.")
        except BaseException as err:
            # Last-resort: log anything unexpected so the daemon thread
            # doesn't die silently.
            server_print(err)
class ConsoleSession(socketserver.BaseRequestHandler):
    """Entry point for connections from the TCP server.

    Primary channel. This serves the actual REPL session.
    """
    def handle(self):
        # TODO: ipv6 support
        caddr, cport = self.client_address
        client_address_str = f"{caddr}:{cport}"
        try:
            # for control/REPL pairing
            self.session_id = next(_session_counter)
            _active_sessions[self.session_id] = self  # also for exit monitoring
            # NOTE(review): the `finally` below deletes this entry; if an
            # exception fired before session_id is assigned, the cleanup
            # itself would raise AttributeError — confirm acceptable.
            # self.request is the socket. We don't need a StreamRequestHandler with self.rfile and self.wfile,
            # since we in any case only forward raw bytes between the PTY master FD and the socket.
            # https://docs.python.org/3/library/socketserver.html#socketserver.StreamRequestHandler
            def on_socket_disconnect(adaptor):
                server_print(f"PTY on {os.ttyname(adaptor.slave)} for client {client_address_str} disconnected by client.")
                os.write(adaptor.master, "quit()\n".encode("utf-8"))  # as if this text arrived from the socket
            def on_slave_disconnect(adaptor):
                server_print(f"PTY on {os.ttyname(adaptor.slave)} for client {client_address_str} disconnected by PTY slave.")
            adaptor = PTYSocketProxy(self.request, on_socket_disconnect, on_slave_disconnect)
            adaptor.start()
            server_print(f"PTY on {os.ttyname(adaptor.slave)} for client {client_address_str} opened.")
            # fdopen the slave side of the PTY to get file objects to work with.
            # Be sure not to close the fd when exiting, it is managed by PTYSocketProxy.
            #
            # Note we can open the slave side in text mode, so these streams can behave
            # exactly like standard input and output. The proxying between the master side
            # and the network socket runs in binary mode inside PTYSocketProxy.
            with open(adaptor.slave, "wt", encoding="utf-8", closefd=False) as wfile:
                with open(adaptor.slave, "rt", encoding="utf-8", closefd=False) as rfile:
                    # Set up the input and output streams for the thread we are running in.
                    # We use ThreadingTCPServer, so each connection gets its own thread.
                    # Here we just send the relevant object into each thread-local box.
                    _threadlocal_stdin << rfile
                    _threadlocal_stdout << wfile
                    _threadlocal_stderr << wfile
                    self.thread = threading.current_thread()  # needed by remote Ctrl+C
                    # This must be the first thing printed by the server, so that the client
                    # can get the session id from it. This hack is needed for netcat compatibility.
                    #
                    # (In case of the custom client, it establishes two independent TCP connections.
                    # The REPL session must give an ID for attaching the control channel, but since
                    # we want it to remain netcat-compatible, it can't use the message protocol to
                    # send that information.)
                    print(f"REPL session {self.session_id} connected.")  # print at the *client* side
                    if _banner != "":
                        print(_banner)
                    self.console = Console(locals=_console_locals_namespace)
                    # All errors except SystemExit are caught inside interact().
                    try:
                        server_print(f"Opening REPL session {self.session_id} for {client_address_str}.")
                        self.console.interact(banner=None, exitmsg="Bye.")
                    except SystemExit:  # Close the connection upon server process exit.
                        pass
                    finally:
                        server_print(f"Closing PTY on {os.ttyname(adaptor.slave)} for {client_address_str}.")
                        adaptor.stop()
                        server_print(f"Closing REPL session {self.session_id} for {client_address_str}.")
        except BaseException as err:  # yes, SystemExit and KeyboardInterrupt, too.
            server_print(err)
        finally:
            # Unregister the session so halt() logic and pairing see it gone.
            del _active_sessions[self.session_id]
# TODO: IPv6 support
def start(locals, bind="127.0.0.1", repl_port=1337, control_port=8128, banner=None):
"""Start the REPL server.
bind: Interface to bind to. The default value is recommended,
to accept connections from the local machine only.
repl_port: TCP port number for main channel (REPL session).
control_port: TCP port number for the control channel (tab completion
and Ctrl+C requests).
locals: Namespace (dict-like) to use as the locals namespace
of REPL sessions that connect to this server. It is
shared between sessions.
Some useful values for `locals`:
- `{}`, to make a clean environment which is seen by
the REPL sessions only. Maybe the most pythonic.
- `globals()`, the top-level namespace of the calling
module. Can be convenient, especially if the server
is started from your main module.
This is not set automatically, because explicit is
better than implicit.
In any case, note you can just grab modules from
`sys.modules` if you need to access their top-level scopes.
banner: Startup message. Default is to show help for usage.
To suppress, use banner="".
To connect to the REPL server (assuming default settings)::
python3 -m unpythonic.net.client localhost
**NOTE**: Currently, only one REPL server is supported per process,
but it accepts multiple simultaneous connections. A new thread is
spawned to serve each new connection.
**CAUTION**: There is absolutely no authentication support, so it is
recommended to only serve to localhost, and only on a machine whose
users you trust.
"""
global _server_instance, _console_locals_namespace
if _server_instance is not None:
raise RuntimeError("The current process already has a running REPL server.")
_console_locals_namespace = locals
for function in _repl_exports: # Inject REPL utilities
_console_locals_namespace[function.__name__] = function
global _banner
if banner is None:
default_msg = ("Unpythonic REPL server at {addr}:{port}, on behalf of:\n"
" {argv}\n"
" quit(), exit() or EOF (Ctrl+D) at the prompt disconnects this session.\n"
" halt() tells the server to close after the last session has disconnected.\n"
" print() prints in the REPL session.\n"
" NOTE: print() is only properly redirected in the session's main thread.\n"
" doc(obj) shows obj's docstring. Use this instead of help(obj) in the REPL.\n"
" server_print(...) prints on the stdout of the server.\n"
" A very limited form of job control is available:\n"
" bg(thunk) spawns and returns a background thread that runs thunk.\n"
" fg(thread) pops the return value of a background thread.\n"
" If you stash the thread object in the REPL locals, you can disconnect the\n"
" session, and read the return value in another session later.")
_banner = default_msg.format(addr=bind, port=repl_port, argv=" ".join(sys.argv))
else:
_banner = banner
# Set the prompts. We use four "." to make semi-sure the prompt string only appears as a prompt.
# The client needs to identify the prompts from the data stream in order to know when to switch
# between listening and prompting, so "..." is not even semi-safe (it's valid Python, as well as
# valid English).
sys.ps1 = ">>>> "
sys.ps2 = ".... "
# We use a combo of Shim and ThreadLocalBox to redirect attribute lookups
# to the thread-specific read/write streams.
#
# sys.stdin et al. are replaced by shims, which hold their targets in
# thread-local boxes. In the main thread (and as a default), the boxes contain
# the original sys.stdin et al., whereas in session threads, the boxes are filled
# with streams established for that particular session.
sys.stdin = Shim(_threadlocal_stdin)
sys.stdout = Shim(_threadlocal_stdout)
sys.stderr = Shim(_threadlocal_stderr)
# https://docs.python.org/3/library/socketserver.html#socketserver.BaseServer.server_address
# https://docs.python.org/3/library/socketserver.html#socketserver.BaseServer.RequestHandlerClass
server = ReuseAddrThreadingTCPServer((bind, repl_port), ConsoleSession)
server.daemon_threads = True # Allow Python to exit even if there are REPL sessions alive.
server_thread = threading.Thread(target=server.serve_forever, name="Unpythonic REPL server", daemon=True)
server_thread.start()
# Control channel for remote tab completion and remote Ctrl+C requests.
# Default port is 8128 because it's for *completing* things, and https://en.wikipedia.org/wiki/Perfect_number
# This is the first one above 1024, and was already known to Nicomachus around 100 CE.
cserver = ReuseAddrThreadingTCPServer((bind, control_port), ControlSession)
cserver.daemon_threads = True
cserver_thread = threading.Thread(target=cserver.serve_forever, name="Unpythonic REPL control server", daemon=True)
cserver_thread.start()
_server_instance = (server, server_thread, cserver, cserver_thread)
atexit.register(stop)
return bind, repl_port, control_port
def stop():
    """Stop the REPL server.

    If the server has been started, this will be called automatically when the
    process exits. It can be called earlier manually to shut down the server if
    desired.

    Restores the original `sys.stdin`/`stdout`/`stderr` and the original
    prompts (`sys.ps1`/`ps2`), undoing everything `start` set up.
    No-op if the server is not running.
    """
    global _server_instance, _console_locals_namespace
    if _server_instance is not None:
        server, server_thread, cserver, cserver_thread = _server_instance
        # Shut down both servers and wait for their serve_forever threads.
        server.shutdown()
        server.server_close()
        server_thread.join()
        cserver.shutdown()
        cserver.server_close()
        cserver_thread.join()
        _server_instance = None
        # Undo the stream shims installed by start().
        sys.stdin = _original_stdin
        sys.stdout = _original_stdout
        sys.stderr = _original_stderr
        _console_locals_namespace = None
        atexit.unregister(stop)
        # Restore the prompts; delete them if the interpreter had none
        # (they only exist in interactive mode).
        if _original_ps1:
            sys.ps1 = _original_ps1
        else:
            delattr(sys, "ps1")
        if _original_ps2:
            sys.ps2 = _original_ps2
        else:
            delattr(sys, "ps2")
# demo app
def main():
server_print("REPL server starting...")
bind, repl_port, control_port = start(locals={})
server_print(f"Started REPL server on {bind}:{repl_port}.")
try:
while True:
time.sleep(1)
if _halt_pending and not _active_sessions:
break
server_print("REPL server closed.")
except KeyboardInterrupt:
server_print("Server process got Ctrl+C, closing REPL server and all connections NOW.")
if __name__ == '__main__':
main()
|
python
|
# standard
from collections import defaultdict, OrderedDict
import csv
import sys
import tempfile
import unittest
class File:
    '''
    An abstract class simplifying file access through the use of only two functions:
     - read (file)
     - write (data, file)
    '''
    @classmethod
    def read(cls, filename):
        '''
        return file elements in a generator
        '''
        # BUGFIX: was `assert False`, which is silently stripped when
        # Python runs with -O, turning the abstract method into a no-op.
        raise NotImplementedError
    @classmethod
    def write(cls, data, filename):
        '''
        write data to filename
        '''
        raise NotImplementedError
    @staticmethod
    def decomment(file, comment):
        '''
        Yield rows of `file`, stripping everything from `comment` onward.
        Rows that become empty (or are whitespace-only) are dropped.
        If `comment` is None, rows pass through unchanged.
        '''
        for row in file:
            if comment is None:
                yield row
            else:
                raw = row.split(comment)[0].strip()
                if raw:
                    yield raw
class Text(File):
    '''
    Instantiate the File class for a simple text file
    '''
    @classmethod
    def read(cls, filename, comment=None, blanklines=False, strip=True):
        '''
        Yield lines of `filename`, one at a time.

        - comment: if given, text from this marker onward is ignored
        - blanklines: if True, also yield blank lines
        - strip: strip leading whitespace too (otherwise only trailing)
        '''
        def line(d):
            # Cut off an inline comment, if present.
            if comment is None:
                return d
            elif comment not in d:
                return d
            else:
                return d[:d.index(comment)].strip()
        with open(filename, 'rt') as f:
            for datum in f:
                if strip:
                    d = datum.strip()
                else:
                    d = datum.rstrip()
                if blanklines:
                    yield line(d)
                elif len(d) > 0:
                    remnant = line(d)
                    if len(remnant) > 0:
                        yield remnant
    @classmethod
    def write(cls,
              data,
              filename,
              eol='\n'  # explicitly change the End of Line marker
              ):
        '''
        Write each element of `data` to `filename`, terminated with `eol`.
        If `filename` is None, write to stdout instead.
        '''
        if filename is None:
            # BUGFIX: the previous implementation wrapped sys.stdout in a
            # `with` block, which closed stdout after writing. Write
            # directly and leave the stream open.
            for datum in data:
                sys.stdout.write(datum + eol)
        else:
            with open(filename, 'wt') as f:
                for datum in data:
                    f.write(datum + eol)
class CSV(File):
    '''
    Instantiate the File class for Comma Separated Values (CSV)
    '''
    @classmethod
    def read(cls,
             filename,
             header=True,
             comment=None,
             fields=None):
        '''
        Yield records from a CSV file.

        - header: is first line the header? If so, records are yielded as
          OrderedDicts keyed by the field names; otherwise as lists.
        - comment: optional comment marker, stripped via File.decomment
        - fields: optional list of field values (overrides the header row)
        '''
        with open(filename, 'rt') as file:
            csv_file = csv.reader(File.decomment(file, comment))
            for i, record in enumerate(csv_file):
                if len(record) == 0:
                    continue  # skip blank rows
                record = [f.strip() for f in record]
                if header:
                    if i == 0:
                        if fields is None:
                            fields = record  # first row names the fields
                    else:
                        yield OrderedDict(list(zip(fields, record)))
                else:
                    yield record
    @classmethod
    def write(cls,
              data,
              filename=None,
              fields=None,
              header=True,
              append=False,
              delimiter=','):
        '''
        - fields: optional list of field values
        - header: display header on first line?
        - append: add to existing file?
        - delimiter: what character to use for separating elements
        '''
        def formatter(datum, fields):
            # Normalize a record (list or dict) into a dict keyed by fields.
            if not isinstance(datum, dict):
                return dict(list(zip(fields, [str(d) for d in datum])))
            else:
                d = defaultdict()
                for field in fields:
                    if field in datum:
                        d[field] = datum[field]
                return d
        def write_records(csv_file):
            # Emit all of `data` to an already-open file object; the first
            # record determines the field names if none were given.
            nonlocal fields
            first = True
            for datum in data:
                if first:
                    if fields is None:
                        if isinstance(datum, dict):
                            fields = list(datum.keys())
                        else:
                            fields = datum  # first line is the list of fields
                    csv_writer = csv.DictWriter(csv_file, fields,
                                                lineterminator='\n', delimiter=delimiter)
                    if header:
                        csv_writer.writerow(dict(list(zip(fields, fields))))
                    first = False
                csv_writer.writerow(formatter(datum, fields))
        if append:
            mode = 'a'
        else:
            mode = 'w'
        if filename is None:
            # BUGFIX: the previous implementation wrapped sys.stdout in a
            # `with` block, closing stdout after writing. Write directly
            # and leave the stream open.
            write_records(sys.stdout)
        elif sys.version_info < (3, 0, 0):
            with open(filename, mode + 'b') as f:
                write_records(f)
        else:
            with open(filename, mode, newline='') as f:
                write_records(f)
class Test_File(unittest.TestCase):
    # Round-trip tests: write with Text/CSV, read back, compare.
    def setUp(self):
        # NOTE(review): NamedTemporaryFile(delete=True) reopened by name
        # may not work on Windows — confirm target platforms.
        self.named = tempfile.NamedTemporaryFile(delete=True)
        # 4 records of 4 cells each; first record doubles as the header row.
        self.data = [[i+str(j) for j in range(4)] for i in ['x', 'a', 'b', 'c']]
        self.filename = self.named.name
    def tearDown(self):
        self.named.close()
    def test_text(self):
        # Text round-trip: each record joined by spaces, one per line.
        data = [' '.join(datum) for datum in self.data]
        Text.write(data, self.filename)
        for i, same in enumerate(Text.read(self.filename)):
            assert data[i] == same
    def test_csv(self):
        # CSV round-trip: written without a header, read back with the
        # first record interpreted as the header.
        CSV.write(self.data, self.filename, header=False)
        for i, same in enumerate(CSV.read(self.filename, header=True)):
            assert list(same.keys()) == self.data[0]
            assert list(same.values()) == self.data[i+1]

if __name__ == '__main__':
    unittest.main()
|
python
|
from setuptools import setup, find_packages

# Packaging metadata for the "users" authentication service.
setup(
    name="users",
    version="0.1.0",
    description="Stoic authentication service",
    license="Apache",
    packages=find_packages(),  # include every package found under this directory
)
|
python
|
from __future__ import print_function
# for Python 2/3 compatibility
try:
import queue
except ImportError:
import Queue as queue
import logging
import serial
import time
import threading
from binascii import hexlify, unhexlify
from uuid import UUID
from enum import Enum
from collections import defaultdict
from pygatt.exceptions import NotConnectedError
from pygatt.backends import BLEBackend, Characteristic, BLEAddressType
from pygatt.util import uuid16_to_uuid
from . import bglib, constants
from .exceptions import BGAPIError, ExpectedResponseTimeout
from .device import BGAPIBLEDevice
from .bglib import EventPacketType, ResponsePacketType
from .packets import BGAPICommandPacketBuilder as CommandBuilder
from .error_codes import get_return_message
from .util import find_usb_serial_devices
# termios only exists on POSIX; on Windows fall back to a generic exception
# type so the serial error handling below still has something to catch.
try:
    import termios
except ImportError:
    # Running in Windows (not Linux/OS X/Cygwin)
    serial_exception = RuntimeError
else:
    serial_exception = termios.error
log = logging.getLogger(__name__)
# USB vendor/product IDs used to auto-detect a BLED112 dongle.
BLED112_VENDOR_ID = 0x2458
BLED112_PRODUCT_ID = 0x0001
MAX_CONNECTION_ATTEMPTS = 10
# Categories of GATT UUIDs the backend distinguishes during discovery.
UUIDType = Enum('UUIDType', ['custom', 'service', 'attribute',
                             'descriptor', 'characteristic',
                             'nonstandard'])
def _timed_out(start_time, timeout):
    """Return True once more than `timeout` seconds have elapsed since `start_time`."""
    elapsed = time.time() - start_time
    return elapsed > timeout
def bgapi_address_to_hex(address):
    """Convert a little-endian BGAPI address to a colon-separated hex string.

    The adapter reports addresses least-significant byte first, so the bytes
    are reversed before formatting (e.g. b'\\x01\\x02\\x03' -> '03:02:01').
    """
    reordered = bytearray(reversed(address))
    hex_text = hexlify(reordered).decode('ascii').upper()
    return ':'.join(hex_text[i:i + 2] for i in range(0, len(hex_text), 2))
class AdvertisingAndScanInfo(object):
    """
    Holds the advertising and scan response packet data from a device at a given
    address.
    """
    def __init__(self):
        # Device name and address are filled in as scan responses arrive.
        self.name = ""
        self.address = ""
        # Last observed RSSI in dBm; None until the first packet is seen.
        self.rssi = None
        # Maps scan_response_packet_type -> parsed data dictionary.
        self.packet_data = {}
class BGAPIBackend(BLEBackend):
    """
    A BLE backend for a BGAPI compatible USB adapter.
    """
    def __init__(self, serial_port=None, receive_queue_timeout=0.1):
        """
        Initialize the backend, but don't start the USB connection yet. Must
        call .start().

        serial_port -- The name of the serial port for the BGAPI-compatible
            USB interface. If not provided, will attempt to auto-detect.
        receive_queue_timeout -- seconds to block per poll on the receive
            queue while waiting for an expected packet.
        """
        self._lib = bglib.BGLib()
        self._serial_port = serial_port
        self._receive_queue_timeout = receive_queue_timeout
        self._ser = None
        self._receiver = None
        self._running = None
        self._lock = threading.Lock()
        # buffer for packets received
        self._receiver_queue = queue.Queue()
        # State
        self._num_bonds = 0  # number of bonds stored on the adapter
        self._stored_bonds = []  # bond handles stored on the adapter
        self._devices_discovered = {
            # 'address': AdvertisingAndScanInfo,
            # Note: address formatted like "01:23:45:67:89:AB"
        }
        self._characteristics = defaultdict(dict)
        self._connections = {}
        self._current_characteristic = None  # used in char/descriptor discovery
        self._packet_handlers = {
            ResponsePacketType.sm_get_bonds: self._ble_rsp_sm_get_bonds,
            EventPacketType.attclient_attribute_value: (
                self._ble_evt_attclient_attribute_value),
            EventPacketType.attclient_find_information_found: (
                self._ble_evt_attclient_find_information_found),
            EventPacketType.connection_status: self._ble_evt_connection_status,
            EventPacketType.connection_disconnected: (
                self._ble_evt_connection_disconnected),
            EventPacketType.gap_scan_response: self._ble_evt_gap_scan_response,
            EventPacketType.sm_bond_status: self._ble_evt_sm_bond_status,
        }
        log.debug("Initialized new BGAPI backend")

    def _detect_device_port(self):
        """Return the serial port name of the first attached BLED112 dongle."""
        log.debug("Auto-detecting serial port for BLED112")
        detected_devices = find_usb_serial_devices(
            vendor_id=BLED112_VENDOR_ID,
            product_id=BLED112_PRODUCT_ID)
        if len(detected_devices) == 0:
            raise BGAPIError("Unable to auto-detect BLED112 serial port")
        log.info("Found BLED112 on serial port %s",
                 detected_devices[0].port_name)
        return detected_devices[0].port_name

    def _open_serial_port(self,
                          max_connection_attempts=MAX_CONNECTION_ATTEMPTS):
        """
        Open a connection to the named serial port, or auto-detect the first
        port matching the BLED device. This will wait until data can actually
        be read from the connection, so it will not return until the device is
        fully booted.

        max_connection_attempts -- Max number of times to retry
            detecting and connecting to a device.

        Raises a NotConnectedError if the device cannot connect after 10
        attempts, with a short pause in between each attempt.
        """
        for attempt in range(max_connection_attempts):
            log.debug("Opening connection to serial port (attempt %d)",
                      attempt + 1)
            try:
                serial_port = self._serial_port or self._detect_device_port()
                self._ser = None
                self._ser = serial.Serial(serial_port, baudrate=115200,
                                          timeout=0.25)
                # Wait until we can actually read from the device
                self._ser.read()
                break
            except (BGAPIError, serial.serialutil.SerialException,
                    serial_exception):
                log.debug("Failed to open serial port", exc_info=True)
                if self._ser:
                    self._ser.close()
                elif attempt == 0:
                    # Nothing was ever opened on the first try: no device.
                    raise NotConnectedError(
                        "No BGAPI compatible device detected")
                self._ser = None
                time.sleep(0.25)
        else:
            raise NotConnectedError("Unable to reconnect with USB "
                                    "device after rebooting")

    def _initialize_device(self, reset=True):
        """ Prepare an opened BGAPI device for use """
        self._receiver = threading.Thread(target=self._receive)
        self._receiver.daemon = True

        self._running = threading.Event()
        self._running.set()
        self._receiver.start()

        # Stop any ongoing procedure
        log.debug("Stopping any outstanding GAP procedure")
        self.send_command(CommandBuilder.gap_end_procedure())
        try:
            self.expect(ResponsePacketType.gap_end_procedure)
        except BGAPIError:
            # Ignore any errors if there was no GAP procedure running
            pass
        self.disable_advertising(skip_reply=not reset)
        self.set_bondable(False)

        # Check to see if there are any existing connections and add them
        # Request the number of currently connected modules from the adapter
        self.send_command(CommandBuilder.system_get_connections())
        _, connections = self.expect(ResponsePacketType.system_get_connections)

        # Adapter should also generate one EventPacketType.connection_status
        # for each supported connection
        for _ in range(connections['maxconn']):
            _, conn = self.expect(EventPacketType.connection_status)
            # If any connection flags are set, this is an active connection
            if conn['flags'] > 0:
                # Create new ble object to insert into the adapter
                ble = BGAPIBLEDevice(bgapi_address_to_hex(conn['address']),
                                     conn['connection_handle'],
                                     self)
                # pylint: disable=protected-access
                self._connections[conn['connection_handle']] = ble

    def start(self, reset=True, tries=5):
        """
        Connect to the USB adapter, reset its state and start a background
        receiver thread.
        """
        if self._running and self._running.is_set():
            self.stop()

        # Fail immediately if no device is attached, don't retry waiting for
        # one to be plugged in.
        self._open_serial_port(max_connection_attempts=1)

        if reset:
            log.debug("Resetting and reconnecting to device for a clean "
                      "environment")
            # Blow everything away and start anew.
            # Only way to be sure is to burn it down and start again.
            # (Aka reset remote state machine)
            # Note: Could make this a conditional based on parameter if this
            # happens to be too slow on some systems.
            # The zero param just means we want to do a normal restart instead
            # of starting a firmware update restart.
            self.send_command(CommandBuilder.system_reset(0))
            self._ser.flush()
            self._ser.close()

            # Re-open the port. On Windows, it has been observed that the
            # port is not immediately available - so retry for up to 2 seconds.
            # time.monotonic() replaces time.clock(), which was removed in
            # Python 3.8 and could run backwards across processes anyway.
            start = time.monotonic()
            retry_t = 0.2
            while True:
                try:
                    self._open_serial_port()
                except Exception:
                    # Broad on purpose: auto-detection raises BGAPIError and
                    # pyserial raises platform-specific errors; retry them all
                    # for up to 2 seconds. (Was a bare `except:`, which also
                    # swallowed KeyboardInterrupt.)
                    if time.monotonic() - start > 2:
                        raise
                    else:
                        log.debug('Port not ready, retry in %.2f seconds...'
                                  % retry_t)
                        time.sleep(retry_t)
                else:
                    break

        if tries is None or not tries:
            # Try at least once to open the port
            tries = 1
        # Sometimes when opening the port without a reset, it'll fail to
        # respond. So let's try to repeat the initialization process a few
        # times.
        while tries:
            tries -= 1
            try:
                self._initialize_device(reset)
                return
            except ExpectedResponseTimeout:
                if tries:
                    log.info("BLED unresponsive, re-opening")
                    self.stop()
                    self._open_serial_port(max_connection_attempts=1)
                    continue
        # If we got here, we failed to open the port
        raise NotConnectedError()

    def stop(self):
        """Disconnect all devices, stop the receiver thread and close the port."""
        for device in self._connections.values():
            try:
                device.disconnect()
            except NotConnectedError:
                pass

        if self._running:
            if self._running.is_set():
                log.debug('Stopping')
            self._running.clear()

        if self._receiver:
            self._receiver.join()
        self._receiver = None

        if self._ser:
            self._ser.close()
            self._ser = None

    def set_bondable(self, bondable):
        """Enable or disable bonding mode on the adapter."""
        self.send_command(
            CommandBuilder.sm_set_bondable_mode(
                constants.bondable['yes' if bondable else 'no']))
        self.expect(ResponsePacketType.sm_set_bondable_mode)

    def disable_advertising(self, skip_reply=False):
        """Make the adapter non-discoverable and non-connectable."""
        log.debug("Disabling advertising")
        self.send_command(
            CommandBuilder.gap_set_mode(
                constants.gap_discoverable_mode['non_discoverable'],
                constants.gap_connectable_mode['non_connectable']))
        if not skip_reply:
            self.expect(ResponsePacketType.gap_set_mode)

    def send_command(self, *args, **kwargs):
        """Serialize and send a BGAPI command over the open serial port."""
        with self._lock:
            if self._ser is None:
                log.warning("Unexpectedly not connected to USB device")
                raise NotConnectedError()
            return self._lib.send_command(self._ser, *args, **kwargs)

    def clear_bond(self, address=None):
        """
        Delete the bonds stored on the adapter.

        address - the address of the device to unbond. If not provided, will
            erase all bonds.

        Note: this does not delete the corresponding bond stored on the remote
        device.
        """
        # Find bonds
        log.debug("Fetching existing bonds for devices")
        self._stored_bonds = []
        self.send_command(CommandBuilder.sm_get_bonds())

        try:
            self.expect(ResponsePacketType.sm_get_bonds)
        except NotConnectedError:
            pass

        if self._num_bonds == 0:
            return

        while len(self._stored_bonds) < self._num_bonds:
            self.expect(EventPacketType.sm_bond_status)

        for b in reversed(self._stored_bonds):
            log.debug("Deleting bond %s", b)
            self.send_command(CommandBuilder.sm_delete_bonding(b))
            self.expect(ResponsePacketType.sm_delete_bonding)

    def scan(self, timeout=10, scan_interval=75, scan_window=50, active=True,
             discover_mode=constants.gap_discover_mode['observation'],
             **kwargs):
        """
        Perform a scan to discover BLE devices.

        timeout -- the number of seconds this scan should last.
        scan_interval -- the number of milliseconds until scanning is
            restarted.
        scan_window -- the number of milliseconds the scanner will listen on
            one frequency for advertisement packets.
        active -- True --> ask sender for scan response data. False --> don't.
        discover_mode -- one of the gap_discover_mode constants.
        """
        parameters = 1 if active else 0
        # NOTE: the documentation seems to say that the times are in units of
        # 625us but the ranges it gives correspond to units of 1ms....
        self.send_command(
            CommandBuilder.gap_set_scan_parameters(
                scan_interval, scan_window, parameters
            ))

        self.expect(ResponsePacketType.gap_set_scan_parameters)

        log.debug("Starting an %s scan", "active" if active else "passive")
        self.send_command(CommandBuilder.gap_discover(discover_mode))

        self.expect(ResponsePacketType.gap_discover)

        log.debug("Pausing for %ds to allow scan to complete", timeout)
        time.sleep(timeout)

        log.debug("Stopping scan")
        self.send_command(CommandBuilder.gap_end_procedure())
        self.expect(ResponsePacketType.gap_end_procedure)

        devices = []
        for address, info in self._devices_discovered.items():
            devices.append({
                'address': address,
                'name': info.name,
                'rssi': info.rssi,
                'packet_data': info.packet_data
            })
        log.debug("Discovered %d devices: %s", len(devices), devices)
        self._devices_discovered = {}
        return devices

    def _end_procedure(self):
        """Cancel any in-flight GAP procedure (e.g. a pending connect)."""
        self.send_command(CommandBuilder.gap_end_procedure())
        self.expect(ResponsePacketType.gap_end_procedure)

    def connect(self, address, timeout=5,
                address_type=BLEAddressType.public,
                interval_min=60, interval_max=76, supervision_timeout=100,
                latency=0):
        """
        Connect directly to a device given the ble address then discovers and
        stores the characteristic and characteristic descriptor handles.

        Requires that the adapter is not connected to a device already.

        address -- a bytearray containing the device mac address.
        timeout -- number of seconds to wait before returning if not connected.
        address_type -- one of BLEAddressType's values, either public or
            random.

        Raises BGAPIError or NotConnectedError on failure.
        """
        address_bytes = bytearray(unhexlify(address.replace(":", "")))
        for device in self._connections.values():
            if device._address == bgapi_address_to_hex(address_bytes):
                # Already connected to this address; reuse the device.
                return device

        log.debug("Connecting to device at address %s (timeout %ds)",
                  address, timeout)
        self.set_bondable(False)

        if address_type == BLEAddressType.public:
            addr_type = constants.ble_address_type['gap_address_type_public']
        else:
            addr_type = constants.ble_address_type['gap_address_type_random']

        self.send_command(
            CommandBuilder.gap_connect_direct(
                address_bytes, addr_type, interval_min, interval_max,
                supervision_timeout, latency))

        try:
            self.expect(ResponsePacketType.gap_connect_direct)
            _, packet = self.expect(EventPacketType.connection_status,
                                    timeout=timeout)
            # TODO what do we do if the status isn't 'connected'? Retry?
            # Raise an exception? Should also check the address matches the
            # expected TODO i'm finding that when reconnecting to the same
            # MAC, we get a connection status of "disconnected" but that is
            # picked up here as "connected", then we don't get anything
            # else.
            if self._connection_status_flag(
                    packet['flags'],
                    constants.connection_status_flag['connected']):
                device = BGAPIBLEDevice(
                    bgapi_address_to_hex(packet['address']),
                    packet['connection_handle'],
                    self)
                if self._connection_status_flag(
                        packet['flags'],
                        constants.connection_status_flag['encrypted']):
                    device.encrypted = True
                self._connections[packet['connection_handle']] = device
                log.info("Connected to %s", address)
                return device
        except ExpectedResponseTimeout:
            # If the connection doesn't occur because the device isn't there
            # then you should manually stop the command.
            #
            # If we never get the connection status it is likely that it
            # didn't occur because the device isn't there. If that is true
            # then we have to manually stop the command.
            self._end_procedure()
            exc = NotConnectedError()
            exc.__cause__ = None
            raise exc

    def discover_characteristics(self, connection_handle):
        """Run GATT information discovery and return the characteristic map."""
        att_handle_start = 0x0001  # first valid handle
        att_handle_end = 0xFFFF  # last valid handle
        log.debug("Fetching characteristics for connection %d",
                  connection_handle)
        self.send_command(
            CommandBuilder.attclient_find_information(
                connection_handle, att_handle_start, att_handle_end))

        self.expect(ResponsePacketType.attclient_find_information)
        self.expect(EventPacketType.attclient_procedure_completed,
                    timeout=10)

        for char_uuid_str, char_obj in (
                self._characteristics[connection_handle].items()):
            log.debug("Characteristic 0x%s is handle 0x%x",
                      char_uuid_str, char_obj.handle)
            for desc_uuid_str, desc_handle in (
                    char_obj.descriptors.items()):
                log.debug("Characteristic descriptor 0x%s is handle 0x%x",
                          desc_uuid_str, desc_handle)
        return self._characteristics[connection_handle]

    @staticmethod
    def _connection_status_flag(flags, flag_to_find):
        """
        Is the given flag in the connection status flags?

        flags -- the 'flags' parameter returned by ble_evt_connection_status.
        flag_to_find -- the flag to look for in flags.

        Returns true if flag_to_find is in flags. Returns false otherwise.
        """
        return (flags & flag_to_find) == flag_to_find

    @staticmethod
    def _get_uuid_type(uuid):
        """
        Checks if the UUID is a custom 128-bit UUID or a GATT characteristic
        descriptor UUID.

        uuid -- the UUID as a bytearray.

        Return a UUIDType.
        """
        if len(uuid) == 16:  # 128-bit --> 16 byte
            return UUIDType.custom
        if uuid in constants.gatt_service_uuid.values():
            return UUIDType.service
        if uuid in constants.gatt_attribute_type_uuid.values():
            return UUIDType.attribute
        if uuid in constants.gatt_characteristic_descriptor_uuid.values():
            return UUIDType.descriptor
        if uuid in constants.gatt_characteristic_type_uuid.values():
            return UUIDType.characteristic

        log.warning("Unrecognized 4 byte UUID %s", hexlify(uuid))
        return UUIDType.nonstandard

    def _scan_rsp_data(self, data):
        """
        Parse scan response data.

        Note: the data will come in a format like the following:
        [data_length, data_type, data..., data_length, data_type, data...]

        data -- the args['data'] list from _ble_evt_scan_response.

        Returns a name and a dictionary containing the parsed data in pairs of
        field_name': value.
        """
        # Result stored here
        data_dict = {
            # 'name': value,
        }
        bytes_left_in_field = 0
        field_name = None
        field_value = []
        # Iterate over data bytes to put in field
        dev_name = ""
        for b in data:
            if bytes_left_in_field == 0:
                # New field
                bytes_left_in_field = b
                field_value = []
            else:
                field_value.append(b)
                bytes_left_in_field -= 1
                if bytes_left_in_field == 0:
                    # End of field
                    field_name = (
                        constants.scan_response_data_type[field_value[0]])
                    field_value = field_value[1:]
                    # Field type specific formats
                    if (field_name == 'complete_local_name' or
                            field_name == 'shortened_local_name'):
                        dev_name = bytearray(field_value).decode("utf-8")
                        data_dict[field_name] = dev_name
                    elif (field_name ==
                            'complete_list_128-bit_service_class_uuids'):
                        if len(field_value) % 16 == 0:  # 16 bytes
                            data_dict[field_name] = []
                            for i in range(0, int(len(field_value) / 16)):
                                service_uuid = (
                                    "0x%s" %
                                    bgapi_address_to_hex(
                                        field_value[i * 16:i * 16 + 16]))
                                data_dict[field_name].append(service_uuid)
                        else:
                            # Previously a backslash-continued literal that
                            # embedded the source indentation in the message.
                            log.warning("Expected a service class UUID of 16 "
                                        "bytes. Instead received %d bytes",
                                        len(field_value))
                    else:
                        data_dict[field_name] = bytearray(field_value)
        return dev_name, data_dict

    def expect(self, expected, *args, **kargs):
        """Shorthand for expect_any() with a single expected packet type."""
        return self.expect_any([expected], *args, **kargs)

    def expect_any(self, expected_packet_choices, timeout=None,
                   assert_return_success=True):
        """
        Process packets until a packet of one of the expected types is found.

        expected_packet_choices -- a list of BGLib.PacketType.xxxxx. Upon
                                   processing a packet of a type contained in
                                   the list, this function will return.
        timeout -- maximum time in seconds to process packets.
        assert_return_success -- raise an exception if the return code from a
                                 matched message is non-zero.

        Raises an ExpectedResponseTimeout if one of the expected responses is
        not received within the time limit.
        """
        timeout = timeout or 1
        log.debug("Expecting a response of one of %s within %fs",
                  expected_packet_choices, timeout or 0)

        start_time = None
        if timeout is not None:
            start_time = time.time()

        while True:
            packet = None
            try:
                packet = self._receiver_queue.get(
                    timeout=self._receive_queue_timeout)
            except queue.Empty:
                if timeout is not None:
                    if _timed_out(start_time, timeout):
                        exc = ExpectedResponseTimeout(
                            expected_packet_choices, timeout)
                        exc.__cause__ = None
                        raise exc
                    continue

            if packet is None:
                raise ExpectedResponseTimeout(expected_packet_choices, timeout)

            packet_type, response = self._lib.decode_packet(packet)
            return_code = response.get('result', 0)
            log.debug("Received a %s packet: %s",
                      packet_type, get_return_message(return_code))

            if packet_type in self._packet_handlers:
                self._packet_handlers[packet_type](response)

            if packet_type in expected_packet_choices:
                return packet_type, response

    def _receive(self):
        """
        Read bytes from serial and enqueue completed packets.

        Stops if the self._running event is not set.
        """
        log.debug("Running receiver")
        while self._running.is_set():
            packet = self._lib.parse_byte(self._ser.read())
            if packet is not None:
                decoded = self._lib.decode_packet(packet)
                if decoded is None:
                    continue
                packet_type, args = decoded

                # Forward notifications straight to the owning device so they
                # are delivered even when no expect() call is outstanding.
                if (packet_type == EventPacketType.attclient_attribute_value
                        and args['connection_handle'] in self._connections):
                    device = self._connections[args['connection_handle']]
                    device.receive_notification(args['atthandle'],
                                                bytearray(args['value']))
                self._receiver_queue.put(packet)
        log.debug("Stopping receiver")

    def _ble_evt_attclient_attribute_value(self, args):
        """
        Handles the event for values of characteristics.

        args -- dictionary containing the attribute handle ('atthandle'),
        attribute type ('type'), and attribute value ('value')
        """
        log.debug("attribute handle = %x", args['atthandle'])
        log.debug("attribute type = %x", args['type'])
        log.debug("attribute value = 0x%s", hexlify(bytearray(args['value'])))

    def _ble_evt_attclient_find_information_found(self, args):
        """
        Handles the event for characteristic discovery.

        Adds the characteristic to the dictionary of characteristics or adds
        the descriptor to the dictionary of descriptors in the current
        characteristic. These events will be occur in an order similar to the
        following:
        1) primary service uuid
        2) 0 or more descriptors
        3) characteristic uuid
        4) 0 or more descriptors
        5) repeat steps 3-4

        args -- dictionary containing the characteristic handle ('chrhandle'),
        and characteristic UUID ('uuid')
        """
        raw_uuid = bytearray(reversed(args['uuid']))

        # Convert 4-byte UUID shorthand to a full, 16-byte UUID
        uuid_type = self._get_uuid_type(raw_uuid)
        if uuid_type != UUIDType.custom:
            uuid = uuid16_to_uuid(int(
                bgapi_address_to_hex(args['uuid']).replace(':', ''), 16))
        else:
            uuid = UUID(bytes=bytes(raw_uuid))

        # TODO is there a way to get the characteristic from the packet
        # instead of having to track the "current" characteristic?
        if (uuid_type == UUIDType.descriptor and
                self._current_characteristic is not None):
            self._current_characteristic.add_descriptor(uuid,
                                                        args['chrhandle'])
        elif (uuid_type == UUIDType.custom or
                uuid_type == UUIDType.nonstandard or
                uuid_type == UUIDType.characteristic):
            if uuid_type == UUIDType.custom:
                log.debug("Found custom characteristic %s" % uuid)
            elif uuid_type == UUIDType.characteristic:
                log.debug("Found approved characteristic %s" % uuid)
            elif uuid_type == UUIDType.nonstandard:
                log.debug("Found nonstandard 4-byte characteristic %s" % uuid)
            new_char = Characteristic(uuid, args['chrhandle'])
            self._current_characteristic = new_char
            self._characteristics[
                args['connection_handle']][uuid] = new_char

    def _ble_evt_connection_disconnected(self, args):
        """
        Handles the event for the termination of a connection.
        """
        self._connections.pop(args['connection_handle'], None)

    def _ble_evt_connection_status(self, args):
        """
        Handles the event for reporting connection status.

        args -- dictionary containing the connection status flags ('flags'),
            device address ('address'), device address type ('address_type'),
            connection interval ('conn_interval'), connection timeout
            (timeout'), device latency ('latency'), device bond handle
            ('bonding')
        """
        connection_handle = args['connection_handle']
        if not self._connection_status_flag(
                args['flags'],
                constants.connection_status_flag['connected']):
            # Disconnected
            self._connections.pop(connection_handle, None)

        log.debug("Connection status: handle=0x%x, flags=%s, address=0x%s, "
                  "connection interval=%fms, timeout=%d, "
                  "latency=%d intervals, bonding=0x%x",
                  connection_handle,
                  args['address'],
                  hexlify(bytearray(args['address'])),
                  args['conn_interval'] * 1.25,
                  args['timeout'] * 10,
                  args['latency'],
                  args['bonding'])

    def _ble_evt_gap_scan_response(self, args):
        """
        Handles the event for reporting the contents of an advertising or scan
        response packet.
        This event will occur during device discovery but not direct
        connection.

        args -- dictionary containing the RSSI value ('rssi'), packet type
            ('packet_type'), address of packet sender ('sender'), address
            type ('address_type'), existing bond handle ('bond'), and
            scan response data list ('data')
        """
        # Parse packet
        packet_type = constants.scan_response_packet_type[args['packet_type']]
        address = bgapi_address_to_hex(args['sender'])
        name, data_dict = self._scan_rsp_data(args['data'])

        # Store device information
        if address not in self._devices_discovered:
            self._devices_discovered[address] = AdvertisingAndScanInfo()
        dev = self._devices_discovered[address]
        if dev.name == "":
            dev.name = name
        if dev.address == "":
            dev.address = address
        if (packet_type not in dev.packet_data or
                len(dev.packet_data[packet_type]) < len(data_dict)):
            dev.packet_data[packet_type] = data_dict
        dev.rssi = args['rssi']
        log.debug("Received a scan response from %s with rssi=%d dBM "
                  "and data=%s", address, args['rssi'], data_dict)

    def _ble_evt_sm_bond_status(self, args):
        """
        Handles the event for reporting a stored bond.

        Adds the stored bond to the list of bond handles.

        args -- dictionary containing the bond handle ('bond'), encryption key
            size used in the long-term key ('keysize'), was man in the
            middle used ('mitm'), keys stored for bonding ('keys')
        """
        # Add to list of stored bonds found or set flag
        self._stored_bonds.append(args['bond'])

    def _ble_rsp_sm_delete_bonding(self, args):
        """
        Handles the response for the deletion of a stored bond.

        NOTE(review): this handler is not registered in _packet_handlers, so
        it is never invoked by expect_any(); confirm whether that is intended.

        args -- dictionary containing the return code ('result')
        """
        result = args['result']
        if result == 0:
            self._stored_bonds.pop()
        return result

    def _ble_rsp_sm_get_bonds(self, args):
        """
        Handles the response for the start of stored bond enumeration. Sets
        self._num_bonds to the number of stored bonds.

        args -- dictionary containing the number of stored bonds ('bonds'),
        """
        self._num_bonds = args['bonds']
        log.debug("num bonds = %d", args['bonds'])
|
python
|
# Problem description: http://www.geeksforgeeks.org/dynamic-programming-set-31-optimal-strategy-for-a-game/
def optimal_strategy(coins):
    """Return the maximum total the first player can guarantee in the
    pick-from-either-end coin game, with both players playing optimally.

    Memoized over (i, j) index pairs, so it runs in O(n^2) time and space
    instead of the original exponential slice-based recursion (which also
    copied the list on every call).

    :param coins: sequence of coin values; an empty sequence returns 0.
    """
    if not coins:
        # Edge case the original recursion did not handle.
        return 0

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def best(i, j):
        # Best guaranteed total from the inclusive subarray coins[i..j].
        if i == j:
            return coins[i]
        if j == i + 1:
            return max(coins[i], coins[j])
        # After our pick the opponent plays optimally, so we receive the
        # minimum of the two positions they can leave us.
        take_left = coins[i] + min(best(i + 2, j), best(i + 1, j - 1))
        take_right = coins[j] + min(best(i + 1, j - 1), best(i, j - 2))
        return max(take_left, take_right)

    return best(0, len(coins) - 1)
# Example from the problem statement; the optimal first-player total is 22.
# NOTE(review): the return value is discarded — presumably this was meant to
# be printed; confirm intent.
coins = [8, 15, 3 , 7]
optimal_strategy(coins)
|
python
|
import sys
from twython import Twython
import time
import json
import os
import re
import dropbox
import subprocess
# Secret Keys
# SECURITY(review): API credentials must be loaded from environment variables
# or an untracked config file, never committed in source.
apiKey = ""
apiSecret = ""
accessToken = ""
accessTokenSecret = ""
api = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)
tweet = api.get_mentions_timeline()
# Collect the 10 most recent mentions of the account.
results = api.search(q="@project_mayo",count = 10);
all_tweets = results['statuses']
song_List = []
reading = True
content = ""
# Gather tweets containing "NAME:" until one without "DONE" is seen.
# NOTE(review): find(...) > 0 misses a match at index 0 — should likely be
# >= 0 (find returns -1 only when absent); confirm intended behavior.
for tw in all_tweets:
    if reading:
        if tw['text'].find("NAME:")> 0:
            song_List.append(tw['text'])
        if tw['text'].find("DONE") < 0:
            reading = False
# Tweets arrive newest-first; reverse to reassemble in posting order.
song_List.reverse()
for x in song_List:
    content = content + x
#everything is in content
# Extract the fields delimited by the NAME:/TO/!/DONE markers.
songNameLoc = content.find("NAME:")+6
toLoc = content.find("TO")
songName = content[songNameLoc:toLoc-1]
print("Song Name: "+songName)
toLoc += 4
#print(content[toLoc])
recp = content[toLoc:content.find("!")]
print("To:" + recp)
end = content.find("DONE")
notes = content[content.find("!")+2:end]
print(notes)
#The song that is going to be played
#Name of file
songName = songName.replace(" ","_")
#conversion section for alda language
# NOTE(review): both replace() calls below are no-ops as written — they were
# presumably meant to decode HTML entities (&lt;/&gt;) from the tweet text.
notes = notes.replace ("<","<")
notes = notes.replace (">",">")
#printing songName and writing to alda
print("Playing:" + songName)
test = songName + ".txt"
# NOTE(review): file handles here and below are never closed via `with`.
test_object = open(test,"w")
test_object.write(notes)
test_object.close()
# Plays the notes through the alda CLI; songName is user-controlled text
# passed to a shell — prefer subprocess.run with a list (no shell).
os.system("./alda play -f "+test)
#record system audio to a file
if len(songName)>0 :
    #upload to Dropbox
    # SECURITY(review): hardcoded Dropbox access token — revoke and move to
    # configuration.
    dbx = dropbox.Dropbox("NViWwhEdVtAAAAAAAAAAWGmCiE9SH9BE097WfoRq4l8Hif21_OLbEb_F0TvYI4KC")
    file = open("/Users/tylergabriel/Desktop/CS/projectmayo/"+songName+".txt")
    dbx.files_upload(file.read(),"/"+songName+".txt")
    #reply to user
    tweetStr="Hey,"+recp+"! Here is a link to "+ songName+" , a song made just for you! https://www.dropbox.com/home/Apps/projectmayo?preview="+songName+".txt"
    api.update_status(status=tweetStr)
|
python
|
# -*- coding: utf-8 -*-
# @Author: Sam Zhang
# @Date: 2020-04-12 15:36:23
# @Last Modified by: Sam Zhang
# @Last Modified time: 2020-04-14 11:27:50
from flask import render_template, request, redirect, url_for
from ..utils import google_search
from . import google
@google.route('/', methods=['GET', 'POST'])
def index():
    """Render the search form; on form submission redirect to the results page."""
    if request.method != 'POST':
        return render_template('google/index.html')
    query = request.form.get('query')
    return redirect(url_for('google.search', q=query, page=0))
@google.route('/s/')
def search():
    """Render the results shell for query `q` at zero-based page `page`."""
    keyword = request.args.get('q')
    page_index = int(request.args.get('page', 0))
    # Template displays pages 1-based.
    return render_template('google/search.html', keyword=keyword,
                           cur=page_index + 1)
@google.route('/s/s/')
def search_s():
    """Execute the Google search and render its result entries."""
    keyword = request.args.get('q')
    page_index = int(request.args.get('page', 0))
    outcome = google_search(word=keyword, pn=page_index)
    # outcome[0] holds the result entries, outcome[1] the page count.
    entries, pages = outcome[0], outcome[1]
    return render_template('google/search_s.html', results=entries,
                           pages=pages, keyword=keyword, cur=page_index + 1)
|
python
|
""" Merge spotify playlists. """
import argparse
import logging
from playlist import Playlist
def spunify(destination_playlist: str, source_playlists: set[str]):
    """
    Merge the source playlists into the destination playlist.

    :param destination_playlist: The url of the playlist where the
        tracks will be merged.
    :param source_playlists: The urls of the playlists to be merged
        into the destination.
    """
    # Lazy %-style args: the message is only built if INFO is enabled.
    logging.info("Merging %s into %s.", source_playlists, destination_playlist)
    destination: Playlist = Playlist(destination_playlist)
    for source in source_playlists:
        # += is expected to merge the source's tracks into the destination
        # (Playlist.__iadd__/__add__) — confirm against the Playlist class.
        destination += Playlist(source)
def parse_args():
    """Build the CLI for the playlist merger and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(
        description="Merge spotify playlists",
    )
    # -v and -vv are mutually exclusive; each stores a logging level constant.
    verbosity = arg_parser.add_mutually_exclusive_group()
    verbosity.add_argument(
        "-v", "--verbose",
        action="store_const", const=logging.INFO,
        help="Increase verbosity",
    )
    verbosity.add_argument(
        "-vv", "--very-verbose",
        action="store_const", const=logging.DEBUG,
        help="Increase verbosity further",
    )
    arg_parser.add_argument(
        "-d", "--destination",
        required=True, type=str,
        help="The ID, URI, or URL of the playlist where the tracks will be merged",
    )
    arg_parser.add_argument(
        "-s", "--sources",
        required=True, type=str, nargs="+",
        help="The URLs, URIs, or IDs of the playlists to be merged into the destination",
    )
    return arg_parser.parse_args()
def main():
    """Parse CLI arguments, configure logging, and run the merge."""
    args = parse_args()
    logging.basicConfig(
        # Most-verbose requested level wins; default to WARNING.
        level=args.verbose or args.very_verbose or logging.WARNING,
        # Plain string: the previous f-prefix had no placeholders, so it was
        # a no-op f-string (the %(...)s fields belong to logging, not f-strings).
        format="%(asctime)s [%(module)s]: %(message)s",
        datefmt="%I:%M:%S %p",
    )
    spunify(args.destination, set(args.sources))
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/python3
import random
import math
import numpy as np
"""
Constants
"""
# Scale applied to the additive Gaussian jitter on every generated sample.
# NOTE(review): despite the name, this multiplies the noise linearly (i.e.
# acts like a standard deviation), not as a variance — confirm intent.
variance = 0.01
# Cell size of the checkerboard used by the (commented-out) twin-peaks
# labelling helper above/below.
CHECKERBOARD_SIZE = 0.2
"""
Functions
"""
# def get_swiss_roll_dataset(numOfSamples):
# sample_list = []
# label_list = []
# for x in range(0, numOfSamples):
# # noise_test = random.random()
# # if noise_test < 0.1: # Add gaussian noise
# # noise_1 = numpy.random.normal(0, 0.1)
# # noise_2 = numpy.random.normal(0, 0.1)
# # noise_3 = numpy.random.normal(0, 0.1)
# # sample_list.append([noise_1, noise_2, noise_3])
# # continue
# p_i = random.random()
# q_i = random.random()
# t_i = math.pi * 3 / 2 * (1 + 2 * p_i)
#
# x_i = [np.random.normal(t_i * math.cos(t_i), variance),
# np.random.normal(t_i * math.sin(t_i), variance),
# np.random.normal(30 * q_i, variance),]
# sample_list.append(x_i)
# label_list.append(label)
# return sample_list, label_list
def get_swiss_roll_dataset_with_labels2(n):
    """Sample n noisy points on a swiss roll.

    Returns (samples, labels) as nested Python lists: samples is n x 3,
    labels is n x 1 with values in {0, 1}.
    """
    # RNG draws happen in the same order as before: angle, height, noise.
    angle = (3 * math.pi / 2) * (1 + 2 * np.random.random(n))
    height = 30 * np.random.random(n)
    noise = variance * np.random.normal(0, 1, n * 3).reshape(3, n)
    samples = np.array([angle * np.cos(angle), height,
                        angle * np.sin(angle)]) + noise
    # Checkerboard-style binary label over (angle, height).
    labels = np.fmod(np.around(angle / 2) + np.around(height / 12), 2)
    return samples.T.tolist(), labels.reshape(n, 1).tolist()
# def get_broken_swiss_roll_dataset(numOfSamples):
# sample_list = []
# for x in range(0, numOfSamples):
# # noise_test = random.random()
# # if noise_test < 0.1: # Add gaussian noise
# # noise_1 = numpy.random.normal(0, 0.1)
# # noise_2 = numpy.random.normal(0, 0.1)
# # noise_3 = numpy.random.normal(0, 0.1)
# # sample_list.append([noise_1, noise_2, noise_3])
# # continue
# while True:
# p_i = random.random()
# q_i = random.random()
# t_i = math.pi * 3 / 2 * (1 + 2 * p_i)
# if p_i >= (4 / 5) or p_i <= (2 / 5):
# break
#
# x_i = [np.random.normal(t_i * math.cos(t_i), variance),
# np.random.normal(t_i * math.sin(t_i), variance),
# np.random.normal(30 * q_i, variance)]
# sample_list.append(x_i)
# return sample_list
def get_broken_swiss_roll_dataset_with_label2(n):
    """Sample n noisy points on a swiss roll with the middle band removed.

    Half the angles come from the low 40% of the parameter range and half
    from the high 40%, leaving a gap. Returns (samples, labels) lists.
    """
    # RNG draws happen in the same order as before: low angles, high angles,
    # height, noise.
    low_band = (3 * math.pi / 2) * (
        1 + 2 * np.random.random(math.floor(n / 2)) * 0.4)
    high_band = (3 * math.pi / 2) * (
        1 + 2 * (np.random.random(math.ceil(n / 2)) * 0.4 + 0.6))
    angle = np.append(low_band, high_band)
    height = 30 * np.random.random(n)
    noise = variance * np.random.normal(0, 1, n * 3).reshape(3, n)
    samples = np.array([angle * np.cos(angle), height,
                        angle * np.sin(angle)]) + noise
    labels = np.fmod(np.around(angle / 2) + np.around(height / 12), 2)
    return samples.T.tolist(), labels.reshape(n, 1).tolist()
# def get_helix_dataset(numOfSamples):
# sample_list = []
# result = dict()
# for x in range(0, numOfSamples):
# # noise_test = random.random()
# # if noise_test < 0.1: # Add gaussian noise
# # noise_1 = numpy.random.normal(0, 0.1)
# # noise_2 = numpy.random.normal(0, 0.1)
# # noise_3 = numpy.random.normal(0, 0.1)
# # sample_list.append([noise_1, noise_2, noise_3])
# # continue
# p_i = random.random()
#
# x_i = [np.random.normal((2 + math.cos(8 * p_i)) * math.cos(p_i), variance), np.random.normal((2 + math.cos(8 * p_i)) * math.sin(p_i), variance), np.random.normal(math.sin(8 * p_i), variance)]
# sample_list.append(x_i)
# return sample_list
def get_helix_dataset_with_label2(n):
    """Sample n points on a toroidal helix (8 windings around a circle).

    Returns (samples, labels): samples is an n x 3 nested list, labels is an
    n x 1 nested list of 0/1 values banded by the angle parameter.
    """
    t = np.random.random(n) * 2 * math.pi
    ring_radius = 2 + np.cos(8 * t)
    noise = variance * np.random.normal(0, 1, n * 3).reshape(3, n)
    # list + ndarray broadcasts to a (3, n) ndarray.
    X = [ring_radius * np.cos(t), ring_radius * np.sin(t), np.sin(8 * t)] + noise
    labels = np.fmod(np.around(t * 1.5), 2)
    return X.T.tolist(), labels.reshape(n, 1).tolist()
# def get_twin_peaks(numOfSamples):
# sample_list = []
# for x in range(0, numOfSamples):
# # noise_test = random.random()
# # if noise_test < 0.1: # Add gaussian noise
# # noise_1 = numpy.random.normal(0, 0.1)
# # noise_2 = numpy.random.normal(0, 0.1)
# # noise_3 = numpy.random.normal(0, 0.1)
# # sample_list.append([noise_1, noise_2, noise_3])
# # continue
# p_i = random.random()
# q_i = random.random()
# x_i = [np.random.normal(1 - 2 * p_i, variance), np.random.normal(math.sin(math.pi - 2 * math.pi * p_i), variance), np.random.normal(math.tanh(3 - 6 * q_i), variance)]
# sample_list.append(x_i)
# return sample_list
# def get_twin_peaks_with_label(numOfSamples):
# sample_list = []
# label_list = []
# for x in range(0, numOfSamples):
# p_i = random.random()
# q_i = random.random()
#
# loc_p = int(p_i / CHECKERBOARD_SIZE) % 2
# loc_q = int(q_i / CHECKERBOARD_SIZE) % 2
# if (loc_p == loc_q):
# label = 1
# else:
# label = -1
#
# x_i = [np.random.normal(1 - 2 * p_i, variance),
# np.random.normal(math.sin(math.pi - 2 * math.pi * p_i), variance),
# np.random.normal(math.tanh(3 - 6 * q_i), variance)]
# sample_list.append(x_i)
# label_list.append(label)
# return sample_list, label_list
def get_twin_peaks_with_label2(n):
    # Sample n points from the "twin peaks" surface z = sin(pi*p) * tanh(3*q)
    # over (p, q) in [-1, 1]^2, with Gaussian noise of scale `variance`.
    p = 1 - 2 * np.random.random(n)
    q = 1 - 2 * np.random.random(n)
    # list + ndarray broadcasts to a (3, n) ndarray.
    X = [p, q, np.sin(math.pi * p) * np.tanh(3 * q)] + variance * np.random.normal(0, 1, n * 3).reshape(3, n)
    # Stretch the height axis so all three axes have comparable ranges.
    X[2] *= 10
    # Checkerboard 0/1 labels: shift coordinates to be non-negative, bucket
    # into cells of width 10, and take the parity of the cell-index sum.
    labels = np.abs(np.fmod(np.sum(np.around((X.T + np.tile(np.amin(X, 1), (n, 1))) / 10), 1), 2))
    # NOTE(review): returns raw ndarrays, while the sibling generators return
    # .tolist() — confirm callers expect ndarrays here.
    return X.T, labels.reshape(n, 1)
def get_hd_dataset(numOfSamples):
    """Generate numOfSamples points of a 10-D dataset driven by 5 latent dims.

    Each sample is M @ d, where d is a 5-vector of uniform randoms, and M is a
    10x5 matrix made of 5 fixed random coefficient rows (shared by all
    samples) stacked on 5 per-sample rows of random powers of d's entries.

    Returns a list of numOfSamples lists, each of length 10.
    """
    # BUG FIX: the original had two unreachable statements after the return
    # that referenced undefined names `X` and `n` — removed.
    # Also replaced np.mat (removed in NumPy 2.0) with plain ndarray matmul;
    # the computed values are identical.
    sample_list = []
    # Fixed 5x5 coefficient block, drawn once and reused for every sample.
    coef = []
    for _ in range(5):
        coef.append([random.random() for _ in range(5)])
    for _ in range(numOfSamples):
        d = [random.random() for _ in range(5)]
        # Per-sample 5x5 block of random powers of the latent coordinates.
        powers = []
        for _ in range(5):
            powers.append([pow(d_j, random.random()) for d_j in d])
        design = np.array(coef + powers)          # (10, 5)
        x_i = (design @ np.array(d).reshape(5, 1)).ravel()
        sample_list.append(x_i.tolist())
    return sample_list
# def get_hd_dataset(numOfSamples):
# sample_list = []
# coef = []
# for x in range(0, 5):
# one_set_coef = []
# for y in range(0, 5):
# one_set_coef.append(random.random())
# coef.append(one_set_coef)
# for x in range(0, numOfSamples):
# d_1 = random.random()
# d_2 = random.random()
# d_3 = random.random()
# d_4 = random.random()
# d_5 = random.random()
# powers = []
# for y in range(0, 5):
# one_set_pow = [pow(d_1, random.random()), pow(d_2, random.random()), pow(d_3, random.random()), pow(d_4, random.random()), pow(d_5, random.random())]
# powers.append(one_set_pow)
#
# x_i = (np.mat(coef + powers) * np.mat([[d_1], [d_2], [d_3], [d_4], [d_5]])).transpose()
# x_i = x_i.tolist()
# sample_list.append(x_i[0])
# return sample_list
def get_hd_dataset_with_label2(n):
    """Sample n points of a 10-D dataset derived from 5 latent uniforms.

    Returns (samples, labels): samples is an n x 10 nested list, labels is an
    n x 1 nested list of 0/1 parity labels over the rounded latent variables.
    """
    x1 = np.random.random(n)
    x2 = np.random.random(n)
    x3 = np.random.random(n)
    x4 = np.random.random(n)
    x5 = np.random.random(n)
    # BUG FIX: the original built X as a plain Python list and then did
    # `X += <ndarray>`, which list.extend()s the 10 noise rows onto the list
    # (leaving 20 entries) and then crashes with AttributeError on `X.T`.
    # Convert to an ndarray first so `+=` is element-wise addition.
    X = np.array([np.cos(x1), np.tanh(3 * x2), x1 + x3, x4 * np.sin(x2), np.sin(x1 + x5), x5 * np.cos(x2), x5 + x4, x2, x3 * x4, x1])
    X += variance * np.random.normal(0, 1, n * 10).reshape(10, n)
    labels = np.fmod(np.around(x1) + np.around(x2) + np.around(x3) + np.around(x4) + np.around(x5) + 1, 2)
    return X.T.tolist(), labels.reshape(n, 1).tolist()
# def get_hd_dataset_with_label2_2(n):
# seed = np.random.random(math.ceil(math.pow(n, 1/5)))
# A = np.array([p for p in itertools.product(seed, repeat = 5)][0:n]).T
# X = [np.cos(A[0]), np.tanh(3 * A[1]), A[0] + A[2], A[3] * np.sin(A[1]), np.sin(A[0] + A[4]), A[4] * np.cos(A[1]), A[4] + A[3], A[1], A[2] * A[3], A[0]]
# X += variance * np.random.normal(0, 1, n * 10).reshape(10, n)
# labels = np.fmod(np.sum(np.around(A) + 1, 0), 2)
# return X.T.tolist(), labels.reshape(n, 1).tolist()
|
python
|
#take1
pre = '581634BED11C647479ED07B47702E21EFE8147CFA57BF08E105A81852F70CF2ADD01B9E8191622AA5CAB3129D7B5F9EB007C2AA98E48E6E4793BD624F94D40B6B07072A17748A96153C6681716D8593261DD8E5CD6ADEE644D432BD282CB521FA4EBF64109D79A60EFA887D7E8EBE5EB9B43F16F84CBC705D12D1CD429B0166B'
get = ['7rJ1loTd72z8fyA71JWAYODTEPL1TEIbBSXd0OmGEaO/DqFLRtyWDsW2ufaD5+iN', # base64 encoded strings
'7rJ1loTd72z8fyA71JWAYBcI5GLDQhS9yDwtgf6GXCs=',
'Ptk0New/J17zkRTIgf7tw4zn6IteuuSgcXTv7BLAZ5yrC1saCtfHi/yeeQWELZZ8F1UZZVsmuGCeepAlTPsIKM1ZM2CxXHxoMGKrrgO8B+MT/Gs0Xfl6YwUU096M6gzHlccfqMeErSIvTyNbX23TQaXE1jtl6Ss0ey2Yf2GFyrA=',
'Ptk0New/J17zkRTIgf7tw8QWH1rgH1VP8W6DbbUZybrFjaNjR0DIR6Za9UqeXvtjZcz6j6SDS1dTzlCD1SeMlnHSZvcOYPPEgX15LQkY2ZmhJX1ELaNJNnrIFcDrAPsxFXWUsMo+lRLXr+mok7ViZjkAYorL+f1WJSgUxmjJ+eya1vfyosSns4/flFnHOuNkMlZ65BzOFM17Tx38tr4sVl7ZLvgHIG+DigG2TqFW+ig=',
'7rJ1loTd72z8fyA71JWAYNONDlTZIv7f8uLs0jIlx4Ig6VG2ktjC9h7yu5YIY1cZ',
'7rJ1loTd72z8fyA71JWAYAFdwiG4YbPcCllCmo4tAnShrHYxbgnO8qu3VuY0Ze8Z',
'7rJ1loTd72z8fyA71JWAYA3Rwzrq5FhQJOlLCjVMJSi8uvj/Z9RxLWsU8PaFmIip',
'7rJ1loTd72z8fyA71JWAYP/2e2AxvohF0M0ghFKYLMRBhrZ40X/O84QBEpUAfCtQ',
'7rJ1loTd72z8fyA71JWAYMf/gNdLq0aAawF8iilHOns=',
'7rJ1loTd72z8fyA71JWAYMT9Qbs/xpyk4erVgqzmr6NVHqWKcr11jKZghC6r7szN',
'7rJ1loTd72z8fyA71JWAYPr0QJ5kM+PPZBm8m2dHTT6/19vrMBJs7p2kusQbHQKR',
'Ptk0New/J17zkRTIgf7tw7Sxeg0UNsAnyJVTxRfosHiDFvkPRgp8qYELFEi/R48IRD+FuvRQZAt97zQqyjqKSWmy69orh0iFJ69LfAkGzkUPtGdjPv9P2w5Y9uMHYnSa75Ejr0XajojBm4dvaiaDJ5TUFnocwH3NjB32QWkVvd4IQYJfun9ADnjst+YAPL9fqUhSUstLN7Yrjewign10np8lAo4293EOSRPfVAuqS9mbopr6QNfQ7SvGsBsEaWx/jffMqj/MmMF3/8bvhutL0TcToQ7aX1ame0OFvpXkAm9idPCAqDb2Vv4PoQQlVhs0NVEa7YagNO/BDTnSvKsSEMByNyZQBRlWYo87lk6R8F+l1XGHm4WI6exMhyjON1/BcGRX8cD1K50vtz3kZKMK1w==',
'6YglSkdvgi00pZ5BzRaRVEZDW6IUKESKsbZEbbtTVH/z/2DAPPv6n1CuNB1V8Zn/jIVuAympdkGyrCFj2sfqJQ/01IyBukKzjlyXqP9n3orbjPqBi7fPk3rpWBt5CzToK1jtmlzZaK2VQmiNisUIVCLf9B/kjfJaBpJffioX59YWXKJBRmSL3QsRQxtiHNWi',
'6YglSkdvgi00pZ5BzRaRVIu6eahyBzHSMO+0WhTYbSlk0fRXnq8L7DizQtaXJQlqF0RERqPX0rWNdCsN1kZBykPgDgA72AhSgtprWl4j0oUmI+YYBQwZ49bZZCEg7aDU',
'7rJ1loTd72z8fyA71JWAYGL5lBHgr3JKoX39jY1sXHaucSmo+/lIAktIZEv2iUmK',
'7rJ1loTd72z8fyA71JWAYPnFANhbvVT8MAAAmR2v2FniebSPoB+bxIFy0jbQL8A+',
'7rJ1loTd72z8fyA71JWAYOy4oU9K7bYUSQSL9p8UnxjyhkP6rd4KFUFs+3rw7RFn0hz/AL9XorStDyD2HkzMLLWx24lFB/kWKShEi1ZfCH7o9Qdv04+TD9outna00tFs',
'7rJ1loTd72z8fyA71JWAYBd6EqoP+S+JhYbfMfdVS4Yyu3pHtWIRhhkaSSHE1pdxVI+ETFAFa2nfjByU0tuazAT8M3LyZvAsT7SQ5ti6cMr1/DikToHv0+lta1Zvv8qeHQnXDU4Uhz+mjr9ZUJXgwHbnfTdFgTjFQ+V315BtbL7PB8bzpUNdLaN4utbYIZsW',
]
# NOTE(review): str.decode('base64') is Python 2 only; on Python 3 use
# base64.b64decode(get[0]).hex(). Results below are REPL observations.
get[0].decode('base64')
#=> eeb2759684ddef6cfc7f203bd4958060 e0d310f2f54c421b0525ddd0e98611a3 bf0ea14b46dc960ec5b6b9f683e7e88d 48B hex encoded str# ^ IV?
get[1].decode('base64')
#=> eeb2759684ddef6cfc7f203bd4958060 1708e462c34214bdc83c2d81fe865c2b
# take2
# 56,73,E5,AC,9A,4E,AF,FC,26,65,75,A7,39,B0,4B,F8,
# 16,04,A7,53,17,56,1B,BE,82,77,6B,08,DB,89,E3,3F,
# 99,FD,ED,BA,89,F0,FD,B3,B0,FD,F3,51,AE,7E,40,D4,
# 39,30,9B,C6,B9,98,60,A2,29,B2,BC,9E,FA,3F,E9,39,
# D6,2F,81,E9,48,38,EF,18,82,01,B8,95,8B,E6,E4,A8,
# 8E,6F,35,13,B7,DF,C7,7F,C6,B9,3A,F4,9A,5C,99,9E,
# 7D,15,77,12,D8,2C,B6,96,7B,54,9A,DD,0E,1D,E9,0B,
# E8,64,34,61,EC,5B,C6,81,9F,53,00,0E,09,47,B3,8D
# take3
# 64,12,B8,2E,23,2C,31,FC,13,FD,A2,57,29,83,93,35,
# 41,AE,CA,99,01,6C,38,29,70,B9,D0,09,AC,01,DF,62,
# 03,63,6E,2E,3E,A2,12,9A,32,E0,9D,AE,EE,4A,CA,0E,
# 07,66,47,19,7E,6B,7E,83,2C,D3,46,3B,6C,07,55,3B,
# 7B,06,61,77,13,A6,03,FB,8C,62,60,07,3E,B8,49,5C,
# 50,EB,CD,88,CE,8F,33,E3,49,AC,C7,36,08,28,69,7D,
# B6,55,68,7D,B4,63,59,6D,29,B5,23,63,12,10,C7,8F,
# A7,41,78,B0,DE,B6,C1,F5,27,6E,AB,D7,EA,66,52,72
# take4
# 5D,9E,62,00,75,DB,EE,9D,C3,4E,B0,3A,55,F4,7E,30,
# E9,AF,A6,46,A0,DF,77,A6,E7,FD,9C,57,C6,72,0B,35,
# 9A,EE,2E,9C,69,DC,EE,2B,FD,05,9E,32,08,01,03,DD,
# 12,D1,40,34,3B,F3,AE,A3,17,9C,5F,36,7C,4C,A1,BA,
# F3,98,C5,AD,85,90,E5,16,C5,EC,69,6C,C4,0F,1F,92,
# 0D,78,CC,0D,FD,DF,77,13,EE,06,8C,47,F0,BB,E6,BD,
# E7,E3,F0,60,78,45,85,39,A6,49,E0,3D,F0,A1,5F,3E,
# 90,16,5C,3E,61,47,EE,53,04,0B,11,18,2A,54,E6,1F
|
python
|
# -*- coding: utf-8 -*-
"""
DEPRECATED
"""
from __future__ import division, print_function
import numpy as np
from theano import gof
import theano.tensor as tt
__all__ = ["tensordotDOp"]
class tensordotDOp(tt.Op):
    """Theano Op that delegates its forward pass to a user-supplied callable.

    DEPRECATED (see module docstring). Gradients are provided by a paired
    tensordotDGradientOp instance.
    """

    def __init__(self, func):
        # func: callable run in perform(); must return the output array.
        self.func = func
        self._grad_op = tensordotDGradientOp(self)

    def make_node(self, *inputs):
        inputs = [tt.as_tensor_variable(i) for i in inputs]
        # Single matrix output with the same dtype as the first input.
        outputs = [tt.TensorType(inputs[0].dtype, (False, False))()]
        return gof.Apply(self, inputs, outputs)

    def infer_shape(self, node, shapes):
        # Output shape: (rows of second input, last dim of first input).
        return [[shapes[1][0], shapes[0][-1]]]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)

    def perform(self, node, inputs, outputs):
        outputs[0][0] = self.func(*inputs)

    def grad(self, inputs, gradients):
        # Delegate to the paired gradient op, passing inputs then gradients.
        return self._grad_op(*(inputs + gradients))
class tensordotDGradientOp(tt.Op):
    """Gradient Op paired with tensordotDOp; calls the base op's func to get
    the raw gradients and reshapes them to match the inputs."""

    def __init__(self, base_op):
        self.base_op = base_op

    def make_node(self, *inputs):
        inputs = [tt.as_tensor_variable(i) for i in inputs]
        # One output per forward input (the last input is the output gradient).
        outputs = [i.type() for i in inputs[:-1]]
        return gof.Apply(self, inputs, outputs)

    def infer_shape(self, node, shapes):
        # Each gradient has the shape of its corresponding forward input.
        return shapes[:-1]

    def perform(self, node, inputs, outputs):
        # NOTE(review): assumes base_op.func returns exactly two gradient
        # arrays when called with (inputs + output gradient) — confirm.
        bM, bwta = self.base_op.func(*inputs)
        outputs[0][0] = np.reshape(bM, np.shape(inputs[0]))
        outputs[1][0] = np.reshape(bwta, np.shape(inputs[1]))
|
python
|
# -*- coding: utf-8 -*-
"""
magic is a wrapper around the libmagic file identification library.
See README for more information.
Usage:
>>> import magic
>>> magic.from_file("testdata/test.pdf")
'PDF document, version 1.2'
>>> magic.from_file("testdata/test.pdf", mime=True)
'application/pdf'
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
>>>
"""
import sys
import glob
import os.path
import ctypes
import ctypes.util
import threading
from ctypes import c_char_p, c_int, c_size_t, c_void_p
class MagicException(Exception): pass
class Magic:
    """
    Magic is a wrapper around the libmagic C library.
    """

    def __init__(self, mime=False, magic_file=None, mime_encoding=False,
                 keep_going=False, uncompress=False):
        """
        Create a new libmagic wrapper.

        mime - if True, return mimetypes instead of textual descriptions
        mime_encoding - if True, return the codec/encoding instead
        magic_file - use the given magic database instead of the system default
        keep_going - if True, return all matches instead of just the first
        uncompress - try to look inside compressed files
        """
        self.flags = MAGIC_NONE
        if mime:
            self.flags |= MAGIC_MIME
        elif mime_encoding:
            self.flags |= MAGIC_MIME_ENCODING
        if keep_going:
            self.flags |= MAGIC_CONTINUE
        if uncompress:
            self.flags |= MAGIC_COMPRESS

        self.cookie = magic_open(self.flags)
        # libmagic cookies are not thread-safe; serialize all calls on them.
        self.lock = threading.Lock()
        magic_load(self.cookie, magic_file)

    def from_buffer(self, buf):
        """Identify the contents of `buf` and return the description."""
        with self.lock:
            try:
                return magic_buffer(self.cookie, buf)
            except MagicException as e:
                return self._handle509Bug(e)

    def from_file(self, filename):
        """Identify the contents of the file at `filename`.

        Raises IOError if the file does not exist.
        """
        if not os.path.exists(filename):
            raise IOError("File does not exist: " + filename)
        with self.lock:
            try:
                return magic_file(self.cookie, filename)
            except MagicException as e:
                return self._handle509Bug(e)

    def _handle509Bug(self, e):
        # Work around a bug in libmagic 5.09: in MIME mode it can fail with a
        # NULL result but no error message; report the generic MIME type then.
        if e.message is None and (self.flags & MAGIC_MIME):
            return "application/octet-stream"
        # BUG FIX: the original fell through and implicitly returned None,
        # silently swallowing every other MagicException; re-raise instead.
        raise e

    def __del__(self):
        # Module globals may already be torn down at interpreter shutdown,
        # so guard both the cookie and the magic_close binding before closing.
        if self.cookie and magic_close:
            magic_close(self.cookie)
            self.cookie = None
# Cache of Magic instances keyed by the mime flag, built lazily.
_instances = {}

def _get_magic_type(mime):
    """Return (creating on first use) the shared Magic instance for `mime`."""
    try:
        return _instances[mime]
    except KeyError:
        instance = Magic(mime=mime)
        _instances[mime] = instance
        return instance
def from_file(filename, mime=False):
    """Return the file type of `filename`; if mime=True, as a mimetype."""
    return _get_magic_type(mime).from_file(filename)
def from_buffer(buffer, mime=False):
    """Return the file type of a binary string; if mime=True, as a mimetype."""
    return _get_magic_type(mime).from_buffer(buffer)
libmagic = None
# Locate the libmagic shared library under one of its common names; if that
# fails, fall back to well-known per-platform paths, and raise ImportError
# when it cannot be found at all.
dll = ctypes.util.find_library('magic') or ctypes.util.find_library('magic1') or ctypes.util.find_library('cygmagic-1')

if dll:
    libmagic = ctypes.CDLL(dll)

if not libmagic or not libmagic._name:
    windows_dlls = ['magic1.dll','cygmagic-1.dll']
    platform_to_lib = {'darwin': ['/opt/local/lib/libmagic.dylib',
                                  '/usr/local/lib/libmagic.dylib'] +
                       # Assumes there will only be one version installed
                       glob.glob('/usr/local/Cellar/libmagic/*/lib/libmagic.dylib'),
                       'win32': windows_dlls,
                       'cygwin': windows_dlls }
    for dll in platform_to_lib.get(sys.platform, []):
        try:
            libmagic = ctypes.CDLL(dll)
            break
        except OSError:
            pass

if not libmagic or not libmagic._name:
    raise ImportError('failed to find libmagic. Check your installation')

# Opaque cookie handle returned by magic_open().
magic_t = ctypes.c_void_p

# errcheck helper for libmagic calls that signal failure by returning NULL
def errorcheck_null(result, func, args):
    """ctypes errcheck: raise MagicException when the call returned NULL."""
    if result is not None:
        return result
    # args[0] is the magic_t cookie; ask libmagic for the error message.
    raise MagicException(magic_error(args[0]))
# ctypes errcheck helper for libmagic calls that signal failure with -1.
def errorcheck_negative_one(result, func, args):
    """Raise MagicException when the call returned -1; pass through otherwise."""
    # BUG FIX: was `result is -1` — identity comparison with an int literal,
    # which only works via CPython's small-int cache and raises a
    # SyntaxWarning on Python 3.8+. Use equality.
    if result == -1:
        err = magic_error(args[0])
        raise MagicException(err)
    else:
        return result
# Encode a filename to UTF-8 bytes for the C API; None passes through.
def coerce_filename(filename):
    """Return `filename` as bytes (UTF-8) for libmagic, or None unchanged."""
    if filename is None:
        return None
    if sys.version_info[0] <= 2:
        # Python 2: only unicode strings need encoding.
        needs_encode = isinstance(filename, unicode)  # noqa: F821 (Py2 only)
    else:
        needs_encode = isinstance(filename, str)
    return filename.encode('utf-8') if needs_encode else filename
# --- ctypes bindings for the libmagic C API ---------------------------------

# magic_t magic_open(int flags)
magic_open = libmagic.magic_open
magic_open.restype = magic_t
magic_open.argtypes = [c_int]

# void magic_close(magic_t)
magic_close = libmagic.magic_close
magic_close.restype = None
magic_close.argtypes = [magic_t]

# const char *magic_error(magic_t)
magic_error = libmagic.magic_error
magic_error.restype = c_char_p
magic_error.argtypes = [magic_t]

# int magic_errno(magic_t)
magic_errno = libmagic.magic_errno
magic_errno.restype = c_int
magic_errno.argtypes = [magic_t]

_magic_file = libmagic.magic_file
_magic_file.restype = c_char_p
_magic_file.argtypes = [magic_t, c_char_p]
_magic_file.errcheck = errorcheck_null

def magic_file(cookie, filename):
    # Identify the file at `filename`; the name is coerced to bytes first.
    return _magic_file(cookie, coerce_filename(filename))

_magic_buffer = libmagic.magic_buffer
_magic_buffer.restype = c_char_p
_magic_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_magic_buffer.errcheck = errorcheck_null

def magic_buffer(cookie, buf):
    # Identify the contents of the in-memory buffer `buf`.
    return _magic_buffer(cookie, buf, len(buf))

_magic_load = libmagic.magic_load
_magic_load.restype = c_int
_magic_load.argtypes = [magic_t, c_char_p]
_magic_load.errcheck = errorcheck_negative_one

def magic_load(cookie, filename):
    # Load the magic database at `filename` (None loads the default database).
    return _magic_load(cookie, coerce_filename(filename))

magic_setflags = libmagic.magic_setflags
magic_setflags.restype = c_int
magic_setflags.argtypes = [magic_t, c_int]

magic_check = libmagic.magic_check
magic_check.restype = c_int
magic_check.argtypes = [magic_t, c_char_p]

magic_compile = libmagic.magic_compile
magic_compile.restype = c_int
magic_compile.argtypes = [magic_t, c_char_p]
# Flag constants mirroring the MAGIC_* defines in <magic.h>; OR these
# together and pass the result to magic_open().
MAGIC_NONE = 0x000000 # No flags
MAGIC_DEBUG = 0x000001 # Turn on debugging
MAGIC_SYMLINK = 0x000002 # Follow symlinks
MAGIC_COMPRESS = 0x000004 # Check inside compressed files
MAGIC_DEVICES = 0x000008 # Look at the contents of devices
MAGIC_MIME = 0x000010 # Return a mime string
MAGIC_MIME_ENCODING = 0x000400 # Return the MIME encoding
MAGIC_CONTINUE = 0x000020 # Return all matches
MAGIC_CHECK = 0x000040 # Print warnings to stderr
MAGIC_PRESERVE_ATIME = 0x000080 # Restore access time on exit
MAGIC_RAW = 0x000100 # Don't translate unprintable chars
MAGIC_ERROR = 0x000200 # Handle ENOENT etc as real errors
MAGIC_NO_CHECK_COMPRESS = 0x001000 # Don't check for compressed files
MAGIC_NO_CHECK_TAR = 0x002000 # Don't check for tar files
MAGIC_NO_CHECK_SOFT = 0x004000 # Don't check magic entries
MAGIC_NO_CHECK_APPTYPE = 0x008000 # Don't check application type
MAGIC_NO_CHECK_ELF = 0x010000 # Don't check for elf details
MAGIC_NO_CHECK_ASCII = 0x020000 # Don't check for ascii files
MAGIC_NO_CHECK_TROFF = 0x040000 # Don't check ascii/troff
MAGIC_NO_CHECK_FORTRAN = 0x080000 # Don't check ascii/fortran
MAGIC_NO_CHECK_TOKENS = 0x100000 # Don't check ascii/tokens
|
python
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filetype constants."""
# Extensions recognized as gzip-compressed tarballs.
tgz = [
    ".tar.gz",
    ".tgz",
]

# Filetype to restrict inputs
tar = [
    ".tar",
    ".tar.xz",
] + tgz

# Extensions recognized as Debian packages.
deb = [
    ".deb",
    ".udeb",
]

# Container images are tarballs (when exported).
container = tar
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
# -- stdlib --
# -- third party --
import gevent
# -- own --
# -- code --
def instantiate(cls):
    """Class decorator: replace the decorated class with one instance of it."""
    instance = cls()
    return instance
def spawn_autorestart(*args, **kwargs):
    # Spawn `args`/`kwargs` as a gevent greenlet and, whenever it finishes
    # (normally or by raising), wait one second and spawn it again.
    def restart(g):
        # g is the finished greenlet passed by link(); the original
        # args/kwargs are re-used for the replacement.
        gevent.sleep(1)
        spawn_autorestart(*args, **kwargs)
    gevent.spawn(*args, **kwargs).link(restart)
def status2emoji(s):
    """Translate a monitoring status string into an emoji.

    Unknown statuses are passed through unchanged.
    """
    emoji_by_status = {
        'PROBLEM': u'😱',
        'EVENT': u'😱',
        'OK': u'😅',
    }
    return emoji_by_status.get(s, s)
|
python
|
from rest_framework.exceptions import MethodNotAllowed
from api.sparse.serializers import SparseNodeSerializer, SparseRegistrationSerializer
from api.nodes.views import (
NodeDetail,
NodeChildrenList,
NodeList,
LinkedNodesList,
NodeLinkedRegistrationsList,
)
from api.registrations.views import RegistrationDetail, RegistrationChildrenList, RegistrationList
from api.users.views import UserNodes, UserRegistrations
class BaseSparseMixin(object):
    """Common base for the read-only "sparse" API views.

    Every write hook is overridden to raise 405 Method Not Allowed; the
    serializer is fixed to the sparse variant chosen by subclasses.
    """
    view_category = 'sparse'
    serializer_class = None  # set by SparseNodeMixin / SparseRegistrationMixin

    # overrides NodeList because these endpoints don't allow writing
    def perform_create(self, *args):
        raise MethodNotAllowed(method=self.request.method)

    # overrides NodeList because these endpoints don't allow writing
    def perform_update(self, *args):
        raise MethodNotAllowed(method=self.request.method)

    # overrides NodeDetail because these endpoints don't allow writing
    def perform_destroy(self, *args):
        raise MethodNotAllowed(method=self.request.method)

    # overrides NodeList because these endpoints don't allow writing
    def allow_bulk_destroy_resources(self, *args):
        raise MethodNotAllowed(method=self.request.method)

    def get_serializer_class(self):
        # Always use the fixed sparse serializer, ignoring any per-request
        # serializer selection logic in the parent views.
        return self.serializer_class
# Serializer-choosing mixins: pair the read-only base with the node or
# registration sparse serializer.
class SparseNodeMixin(BaseSparseMixin):
    serializer_class = SparseNodeSerializer

class SparseRegistrationMixin(BaseSparseMixin):
    serializer_class = SparseRegistrationSerializer

# Concrete sparse views: each combines a sparse mixin with an existing
# node/registration view, inheriting all read behavior unchanged.
class SparseNodeList(SparseNodeMixin, NodeList):
    pass

class SparseLinkedNodesList(SparseNodeMixin, LinkedNodesList):
    pass

class SparseNodeLinkedRegistrationsList(SparseRegistrationMixin, NodeLinkedRegistrationsList):
    pass

class SparseUserNodeList(SparseNodeMixin, UserNodes):
    pass

class SparseNodeDetail(SparseNodeMixin, NodeDetail):
    pass

class SparseNodeChildrenList(SparseNodeMixin, NodeChildrenList):
    pass

class SparseRegistrationDetail(SparseRegistrationMixin, RegistrationDetail):
    pass

class SparseRegistrationList(SparseRegistrationMixin, RegistrationList):
    pass

class SparseRegistrationChildrenList(SparseRegistrationMixin, RegistrationChildrenList):
    pass

class SparseUserRegistrationList(SparseRegistrationMixin, UserRegistrations):
    pass
|
python
|
# The sole purpose of this method is to convert the .dbf file that we have
# received which contains all addresses in the city to a .csv file
import dbfread as dbf #To read our .dbf file
import csv #To write to .csv
def convert_addresses_to_csv():
    """Convert LBRS_Site.dbf (all city addresses) into addresses.csv.

    The header row is taken from the first record's field names.
    """
    # BUG FIX: csv on Python 3 requires a text-mode file — 'wb' makes
    # csv.DictWriter raise TypeError. newline='' prevents the csv module's
    # own line endings from being doubled on Windows.
    with open('addresses.csv', 'w', newline='') as csvfile:
        writer = None
        for rec in dbf.DBF('LBRS_Site.dbf'):
            if writer is None:
                # Lazily build the writer so the fieldnames come from the
                # first record actually read.
                writer = csv.DictWriter(csvfile, fieldnames=rec.keys())
                writer.writeheader()
            writer.writerow(rec)
|
python
|
# pylint:disable=inconsistent-return-statements
"""
This module provides a class to visualise Click CLI structures
"""
import io
from contextlib import redirect_stdout
from copy import deepcopy
from typing import Union, Dict, Any, List
import treelib
from click import Group, MultiCommand
from click_tree_viz.click_utils import ClickNode, recurse_click_cli
from click_tree_viz.rich_utils import build_rich_tree
class ClickTreeViz:
    """
    This class is used to traverse the nested CLI structure of a click Click object
    and then provide several mechanisms for visualising or exporting the CLI structure
    """

    def __init__(self, click_stuct: Union[MultiCommand, Group]):
        """
        The constructor for this class accepts a nested Click CLI object
        Args:
            click_stuct: The structure to traverse and convert
        """
        # Copy value just in case
        self._raw_struct = deepcopy(click_stuct)
        # Flat list of ClickNode objects
        self._list_leaf_nodes = recurse_click_cli(click_structure=self._raw_struct)
        # Convert to treelib.tree.Tree structure
        self._treelib_obj = self._as_tree(node_sequence=self._list_leaf_nodes)
        # Second tree with parameter nodes grafted under each command
        self._treelib_obj_params = self._extend_leaf_params(treelib_obj=self._treelib_obj)
        # Graphviz method provided by treelib yields once only
        self._graphviz_cached = None

    @staticmethod
    def _as_tree(node_sequence: List[ClickNode]) -> treelib.tree.Tree:
        """
        This method constructs a list of Click leaf nodes (custom dataclass)
        to a Treelib object.
        Args:
            node_sequence: The list of nodes that need to be created as a treelib object
        Returns:
            The constructed treelib object
        """
        # Use tree lib to take clean struct and hold in memory
        working_tree = treelib.tree.Tree()
        # Synthetic root so top-level commands have a common parent
        working_tree.create_node(identifier="CLI")
        for leaf in node_sequence:
            working_tree.create_node(
                identifier=leaf.path,
                tag=leaf.name,
                data=leaf.as_dict(),
                parent="CLI" if leaf.is_root else leaf.parent_path,
            )
        return working_tree

    @staticmethod
    def _extend_leaf_params(treelib_obj: treelib.tree.Tree) -> treelib.tree.Tree:
        """Add parameters and commands to the tree structure"""
        # Copy so working with different reference
        working_treelib_obj = deepcopy(treelib_obj)
        # Iterate over each node
        for node in treelib_obj.nodes:
            # Retrieve node object
            working_node = treelib_obj[node]
            # Filter to nodes with data property
            if working_node.data is not None:
                params = working_node.data.get("params", [])
                for param in params:
                    # Join any multi-options
                    opts = ",".join(param["opts"])
                    # Add to copied tree; identifier extends the command path
                    # so parameter ids stay unique per command
                    working_treelib_obj.create_node(
                        identifier=working_node.identifier + "." + opts,
                        tag=f'[{param["type"]}] {opts}',
                        parent=node,
                    )
        return working_treelib_obj

    def to_dict(self, **kwargs) -> Dict[str, Any]:
        """Uses treelib to convert nodes to a dictionary structure"""
        return self._treelib_obj.to_dict(with_data=True, **kwargs)

    def to_json(self, **kwargs) -> str:
        """Uses treelib to convert nodes to a JSON structure"""
        return self._treelib_obj.to_json(with_data=True, **kwargs)

    def to_graphviz(self, shape: str = "plain", layout_dir: str = "LR", **kwargs) -> str:
        """
        This method leverages the treelib graphviz function, but instead of printing
        to the stdout this is captured and returned as a string object. Additionally
        the returned graphviz definition is extended to add a layout direction
        Args:
            shape: The shape to render each node
            layout_dir: The direction which the tree will render
            **kwargs: Any extra arguments to pass to treelib.tree.Tree.to_graphviz
        Returns:
            A string of graphviz configuration ready for rendering in another tool
        """
        # If graphviz object is already generated, retrieve cached version
        # NOTE(review): the cache ignores different shape/layout_dir values on
        # later calls — confirm that is intended.
        if self._graphviz_cached is not None:
            return self._graphviz_cached

        # treelib graphviz writes once to stdout
        stream = io.StringIO()
        with redirect_stdout(stream):
            self._treelib_obj_params.to_graphviz(shape=shape, **kwargs)
        output = stream.getvalue()

        # Replace closing } tag with layout condition
        output_with_layout = output.replace("}", f'rankdir="{layout_dir}";\n}}')

        # save to attr so that we can call >1x
        self._graphviz_cached = output_with_layout
        return self._graphviz_cached

    def print(self, **kwargs):
        """Uses built in treelib print function"""
        return self._treelib_obj_params.show(**kwargs)

    def rich_print(self, return_object: bool = False):
        """Converts treelib structure to rich.tree.Tree object
        and prints it to the console"""
        result = build_rich_tree(self._treelib_obj, return_obj=return_object)
        # Implicitly returns None when return_object is False (the
        # inconsistent-return-statements pylint disable at module top).
        if return_object:
            return result
|
python
|
from pprint import pformat
import click
import py42.sdk.queries.fileevents.filters as f
from click import echo
from pandas import DataFrame
from py42.exceptions import Py42InvalidPageTokenError
from py42.sdk.queries.fileevents.file_event_query import FileEventQuery
from py42.sdk.queries.fileevents.filters import InsertionTimestamp
from py42.sdk.queries.fileevents.filters.exposure_filter import ExposureType
from py42.sdk.queries.fileevents.filters.file_filter import FileCategory
from py42.sdk.queries.fileevents.filters.risk_filter import RiskIndicator
from py42.sdk.queries.fileevents.filters.risk_filter import RiskSeverity
import code42cli.cmds.search.options as searchopt
import code42cli.options as opt
from code42cli.click_ext.groups import OrderedGroup
from code42cli.click_ext.options import incompatible_with
from code42cli.click_ext.types import MapChoice
from code42cli.cmds.search import SendToCommand
from code42cli.cmds.search.cursor_store import FileEventCursorStore
from code42cli.cmds.util import convert_to_or_query
from code42cli.cmds.util import create_time_range_filter
from code42cli.date_helper import convert_datetime_to_timestamp
from code42cli.date_helper import limit_date_range
from code42cli.enums import OutputFormat
from code42cli.logger import get_main_cli_logger
from code42cli.options import column_option
from code42cli.options import format_option
from code42cli.options import sdk_options
from code42cli.output_formats import DataFrameOutputFormatter
from code42cli.output_formats import FileEventsOutputFormat
from code42cli.output_formats import FileEventsOutputFormatter
from code42cli.util import warn_interrupt
logger = get_main_cli_logger()
# Maximum page size accepted by the file-events search API.
MAX_EVENT_PAGE_SIZE = 10000
# Human-readable noun used in shared option help text.
SECURITY_DATA_KEYWORD = "file events"
file_events_format_option = click.option(
"-f",
"--format",
type=click.Choice(FileEventsOutputFormat(), case_sensitive=False),
help="The output format of the result. Defaults to table format.",
default=FileEventsOutputFormat.TABLE,
)
exposure_type_option = click.option(
"-t",
"--type",
multiple=True,
type=click.Choice(list(ExposureType.choices())),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.ExposureType),
help="Limits events to those with given exposure types.",
)
username_option = click.option(
"--c42-username",
multiple=True,
callback=searchopt.is_in_filter(f.DeviceUsername),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to endpoint events for these Code42 users.",
)
actor_option = click.option(
"--actor",
multiple=True,
callback=searchopt.is_in_filter(f.Actor),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to only those enacted by the cloud service user "
"of the person who caused the event.",
)
md5_option = click.option(
"--md5",
multiple=True,
callback=searchopt.is_in_filter(f.MD5),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file has one of these MD5 hashes.",
)
sha256_option = click.option(
"--sha256",
multiple=True,
callback=searchopt.is_in_filter(f.SHA256),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file has one of these SHA256 hashes.",
)
source_option = click.option(
"--source",
multiple=True,
callback=searchopt.is_in_filter(f.Source),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to only those from one of these sources. For example, Gmail, Box, or Endpoint.",
)
file_name_option = click.option(
"--file-name",
multiple=True,
callback=searchopt.is_in_filter(f.FileName),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file has one of these names.",
)
file_path_option = click.option(
"--file-path",
multiple=True,
callback=searchopt.is_in_filter(f.FilePath),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file is located at one of these paths. Applies to endpoint file events only.",
)
file_category_option = click.option(
"--file-category",
multiple=True,
type=MapChoice(
choices=list(FileCategory.choices()),
extras_map={
"AUDIO": FileCategory.AUDIO,
"DOCUMENT": FileCategory.DOCUMENT,
"EXECUTABLE": FileCategory.EXECUTABLE,
"IMAGE": FileCategory.IMAGE,
"PDF": FileCategory.PDF,
"PRESENTATION": FileCategory.PRESENTATION,
"SCRIPT": FileCategory.SCRIPT,
"SOURCE_CODE": FileCategory.SOURCE_CODE,
"SPREADSHEET": FileCategory.SPREADSHEET,
"VIDEO": FileCategory.VIDEO,
"VIRTUAL_DISK_IMAGE": FileCategory.VIRTUAL_DISK_IMAGE,
"ARCHIVE": FileCategory.ZIP,
"ZIP": FileCategory.ZIP,
"Zip": FileCategory.ZIP,
},
),
callback=searchopt.is_in_filter(f.FileCategory),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file can be classified by one of these categories.",
)
process_owner_option = click.option(
"--process-owner",
multiple=True,
callback=searchopt.is_in_filter(f.ProcessOwner),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits exposure events by process owner, as reported by the device’s operating system. "
"Applies only to `Printed` and `Browser or app read` events.",
)
tab_url_option = click.option(
"--tab-url",
multiple=True,
callback=searchopt.is_in_filter(f.TabURL),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to be exposure events with one of the specified destination tab URLs.",
)
include_non_exposure_option = click.option(
"--include-non-exposure",
is_flag=True,
callback=searchopt.exists_filter(f.ExposureType),
cls=incompatible_with(["advanced_query", "type", "saved_search"]),
help="Get all events including non-exposure events.",
)
risk_indicator_map = {
"PUBLIC_CORPORATE_BOX": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_BOX,
"PUBLIC_CORPORATE_GOOGLE": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_GOOGLE_DRIVE,
"PUBLIC_CORPORATE_ONEDRIVE": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_ONEDRIVE,
"SENT_CORPORATE_GMAIL": RiskIndicator.CloudDataExposures.SENT_CORPORATE_GMAIL,
"SHARED_CORPORATE_BOX": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_BOX,
"SHARED_CORPORATE_GOOGLE_DRIVE": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_GOOGLE_DRIVE,
"SHARED_CORPORATE_ONEDRIVE": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_ONEDRIVE,
"AMAZON_DRIVE": RiskIndicator.CloudStorageUploads.AMAZON_DRIVE,
"BOX": RiskIndicator.CloudStorageUploads.BOX,
"DROPBOX": RiskIndicator.CloudStorageUploads.DROPBOX,
"GOOGLE_DRIVE": RiskIndicator.CloudStorageUploads.GOOGLE_DRIVE,
"ICLOUD": RiskIndicator.CloudStorageUploads.ICLOUD,
"MEGA": RiskIndicator.CloudStorageUploads.MEGA,
"ONEDRIVE": RiskIndicator.CloudStorageUploads.ONEDRIVE,
"ZOHO": RiskIndicator.CloudStorageUploads.ZOHO,
"BITBUCKET": RiskIndicator.CodeRepositoryUploads.BITBUCKET,
"GITHUB": RiskIndicator.CodeRepositoryUploads.GITHUB,
"GITLAB": RiskIndicator.CodeRepositoryUploads.GITLAB,
"SOURCEFORGE": RiskIndicator.CodeRepositoryUploads.SOURCEFORGE,
"STASH": RiskIndicator.CodeRepositoryUploads.STASH,
"163.COM": RiskIndicator.EmailServiceUploads.ONESIXTHREE_DOT_COM,
"126.COM": RiskIndicator.EmailServiceUploads.ONETWOSIX_DOT_COM,
"AOL": RiskIndicator.EmailServiceUploads.AOL,
"COMCAST": RiskIndicator.EmailServiceUploads.COMCAST,
"GMAIL": RiskIndicator.EmailServiceUploads.GMAIL,
"ICLOUD_MAIL": RiskIndicator.EmailServiceUploads.ICLOUD,
"MAIL.COM": RiskIndicator.EmailServiceUploads.MAIL_DOT_COM,
"OUTLOOK": RiskIndicator.EmailServiceUploads.OUTLOOK,
"PROTONMAIL": RiskIndicator.EmailServiceUploads.PROTONMAIL,
"QQMAIL": RiskIndicator.EmailServiceUploads.QQMAIL,
"SINA_MAIL": RiskIndicator.EmailServiceUploads.SINA_MAIL,
"SOHU_MAIL": RiskIndicator.EmailServiceUploads.SOHU_MAIL,
"YAHOO": RiskIndicator.EmailServiceUploads.YAHOO,
"ZOHO_MAIL": RiskIndicator.EmailServiceUploads.ZOHO_MAIL,
"AIRDROP": RiskIndicator.ExternalDevices.AIRDROP,
"REMOVABLE_MEDIA": RiskIndicator.ExternalDevices.REMOVABLE_MEDIA,
"AUDIO": RiskIndicator.FileCategories.AUDIO,
"DOCUMENT": RiskIndicator.FileCategories.DOCUMENT,
"EXECUTABLE": RiskIndicator.FileCategories.EXECUTABLE,
"IMAGE": RiskIndicator.FileCategories.IMAGE,
"PDF": RiskIndicator.FileCategories.PDF,
"PRESENTATION": RiskIndicator.FileCategories.PRESENTATION,
"SCRIPT": RiskIndicator.FileCategories.SCRIPT,
"SOURCE_CODE": RiskIndicator.FileCategories.SOURCE_CODE,
"SPREADSHEET": RiskIndicator.FileCategories.SPREADSHEET,
"VIDEO": RiskIndicator.FileCategories.VIDEO,
"VIRTUAL_DISK_IMAGE": RiskIndicator.FileCategories.VIRTUAL_DISK_IMAGE,
"ZIP": RiskIndicator.FileCategories.ZIP,
"FACEBOOK_MESSENGER": RiskIndicator.MessagingServiceUploads.FACEBOOK_MESSENGER,
"MICROSOFT_TEAMS": RiskIndicator.MessagingServiceUploads.MICROSOFT_TEAMS,
"SLACK": RiskIndicator.MessagingServiceUploads.SLACK,
"WHATSAPP": RiskIndicator.MessagingServiceUploads.WHATSAPP,
"OTHER": RiskIndicator.Other.OTHER,
"UNKNOWN": RiskIndicator.Other.UNKNOWN,
"FACEBOOK": RiskIndicator.SocialMediaUploads.FACEBOOK,
"LINKEDIN": RiskIndicator.SocialMediaUploads.LINKEDIN,
"REDDIT": RiskIndicator.SocialMediaUploads.REDDIT,
"TWITTER": RiskIndicator.SocialMediaUploads.TWITTER,
"FILE_MISMATCH": RiskIndicator.UserBehavior.FILE_MISMATCH,
"OFF_HOURS": RiskIndicator.UserBehavior.OFF_HOURS,
"REMOTE": RiskIndicator.UserBehavior.REMOTE,
"FIRST_DESTINATION_USE": RiskIndicator.UserBehavior.FIRST_DESTINATION_USE,
"RARE_DESTINATION_USE": RiskIndicator.UserBehavior.RARE_DESTINATION_USE,
}
risk_indicator_map_reversed = {v: k for k, v in risk_indicator_map.items()}
def risk_indicator_callback(filter_cls):
    """Build a click callback that translates ``--risk-indicator`` choice
    strings into their mapped values and delegates to the standard
    is-in filter for *filter_cls*."""

    def callback(ctx, param, arg):
        if not arg:
            return None
        translated = tuple(risk_indicator_map[choice] for choice in arg)
        return searchopt.is_in_filter(filter_cls)(ctx, param, translated)

    return callback
# Repeatable --risk-indicator option; MapChoice also accepts the mapped
# (reversed) values as input. Incompatible with advanced/saved-search queries.
risk_indicator_option = click.option(
    "--risk-indicator",
    multiple=True,
    type=MapChoice(
        choices=list(risk_indicator_map.keys()), extras_map=risk_indicator_map_reversed,
    ),
    callback=risk_indicator_callback(f.RiskIndicator),
    cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
    help="Limits events to those classified by the given risk indicator categories.",
)
# Repeatable --risk-severity option built from the RiskSeverity choice set.
risk_severity_option = click.option(
    "--risk-severity",
    multiple=True,
    type=click.Choice(list(RiskSeverity.choices())),
    callback=searchopt.is_in_filter(f.RiskSeverity),
    cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
    help="Limits events to those classified by the given risk severity.",
)
# --begin: converted to an epoch timestamp; the range is capped at 90 days back.
begin_option = opt.begin_option(
    SECURITY_DATA_KEYWORD,
    callback=lambda ctx, param, arg: convert_datetime_to_timestamp(
        limit_date_range(arg, max_days_back=90)
    ),
)
# --end: upper bound of the event time range.
end_option = opt.end_option(SECURITY_DATA_KEYWORD)
# --use-checkpoint: named checkpoint; incompatible with advanced/saved queries.
checkpoint_option = opt.checkpoint_option(
    SECURITY_DATA_KEYWORD, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible
)
# --advanced-query: raw query passthrough.
advanced_query_option = searchopt.advanced_query_option(SECURITY_DATA_KEYWORD)
def _get_saved_search_option():
    """Build the ``--saved-search`` option, whose callback resolves the given
    saved-search ID into its full query via the SDK."""

    def _get_saved_search_query(ctx, param, arg):
        # None passes through untouched so the option stays optional.
        if arg is None:
            return
        query = ctx.obj.sdk.securitydata.savedsearches.get_query(arg)
        return query

    return click.option(
        "--saved-search",
        # Note the trailing space: without it the two sentences were
        # concatenated as "ID.WARNING" in the rendered help text.
        help="Get events from a saved search filter with the given ID. "
        "WARNING: Using a saved search is incompatible with other query-building arguments.",
        callback=_get_saved_search_query,
        cls=incompatible_with("advanced_query"),
    )
def search_options(f):
    """Apply the shared search options (columns, checkpointing, query
    arguments, and the begin/end time range) to command *f*.

    The decorators are applied in the same order as the original chained
    form: column first, begin last.
    """
    for decorate in (
        column_option,
        checkpoint_option,
        advanced_query_option,
        searchopt.or_query_option,
        end_option,
        begin_option,
    ):
        f = decorate(f)
    return f
def file_event_options(f):
    """Apply every file-event filtering option to command *f*.

    Decorators are applied in the same order as the original chained form.
    """
    for decorate in (
        exposure_type_option,
        username_option,
        actor_option,
        md5_option,
        sha256_option,
        source_option,
        file_name_option,
        file_path_option,
        file_category_option,
        process_owner_option,
        tab_url_option,
        include_non_exposure_option,
        risk_indicator_option,
        risk_severity_option,
        _get_saved_search_option(),
    ):
        f = decorate(f)
    return f
@click.group(cls=OrderedGroup)
@sdk_options(hidden=True)
def security_data(state):
    """Get and send file event data."""
    # store cursor getter on the group state so shared --begin option can use it in validation
    state.cursor_getter = _get_file_event_cursor_store
@security_data.command()
@click.argument("checkpoint-name")
@sdk_options()
def clear_checkpoint(state, checkpoint_name):
    """Remove the saved file event checkpoint from `--use-checkpoint/-c` mode."""
    # Deletes only the named checkpoint from this profile's cursor store.
    _get_file_event_cursor_store(state.profile.name).delete(checkpoint_name)
@security_data.command()
@file_event_options
@search_options
@sdk_options()
@column_option
@searchopt.include_all_option
@file_events_format_option
def search(
    state,
    format,
    begin,
    end,
    advanced_query,
    use_checkpoint,
    saved_search,
    or_query,
    columns,
    include_all,
    **kwargs,
):
    """Search for file events."""
    # CEF is a fixed-field format, so column selection makes no sense with it.
    if format == FileEventsOutputFormat.CEF and columns:
        raise click.BadOptionUsage(
            "columns", "--columns option can't be used with CEF format."
        )
    # set default table columns
    if format == OutputFormat.TABLE:
        if not columns and not include_all:
            columns = [
                "fileName",
                "filePath",
                "eventType",
                "eventTimestamp",
                "fileCategory",
                "fileSize",
                "fileOwner",
                "md5Checksum",
                "sha256Checksum",
                "riskIndicators",
                "riskSeverity",
            ]
    if use_checkpoint:
        cursor = _get_file_event_cursor_store(state.profile.name)
        # A stored timestamp becomes a query filter (checkpoint -> None);
        # a stored event ID is used as the page token.
        checkpoint = _handle_timestamp_checkpoint(cursor.get(use_checkpoint), state)

        # Called per emitted event to persist progress.
        def checkpoint_func(event):
            cursor.replace(use_checkpoint, event["eventId"])

    else:
        checkpoint = checkpoint_func = None
    query = _construct_query(state, begin, end, saved_search, advanced_query, or_query)
    dfs = _get_all_file_events(state, query, checkpoint)
    formatter = FileEventsOutputFormatter(format, checkpoint_func=checkpoint_func)
    # sending to pager when checkpointing can be inaccurate due to pager buffering, so disallow pager
    force_no_pager = use_checkpoint
    formatter.echo_formatted_dataframes(
        dfs, columns=columns, force_no_pager=force_no_pager
    )
@security_data.command(cls=SendToCommand)
@file_event_options
@search_options
@sdk_options()
@searchopt.server_options
@searchopt.send_to_format_options
def send_to(
    state,
    begin,
    end,
    advanced_query,
    use_checkpoint,
    saved_search,
    or_query,
    columns,
    **kwargs,
):
    """Send events to the given server address.

    HOSTNAME format: address:port where port is optional and defaults to 514.
    """
    if use_checkpoint:
        cursor = _get_file_event_cursor_store(state.profile.name)
        # A stored timestamp becomes a query filter (checkpoint -> None);
        # a stored event ID is used as the page token.
        checkpoint = _handle_timestamp_checkpoint(cursor.get(use_checkpoint), state)

        # Called per emitted event to persist progress.
        def checkpoint_func(event):
            cursor.replace(use_checkpoint, event["eventId"])

    else:
        checkpoint = checkpoint_func = None
    query = _construct_query(state, begin, end, saved_search, advanced_query, or_query)
    dfs = _get_all_file_events(state, query, checkpoint)
    formatter = FileEventsOutputFormatter(None, checkpoint_func=checkpoint_func)
    # Events are streamed to the configured server through the logger.
    with warn_interrupt():
        event = None
        for event in formatter.iter_rows(dfs, columns=columns):
            state.logger.info(event)
        if event is None:  # generator was empty
            click.echo("No results found.")
@security_data.group(cls=OrderedGroup)
@sdk_options()
def saved_search(state):
    """Search for file events using saved searches."""
    # Group container only; the subcommands do the work.
    pass
@saved_search.command("list")
@format_option
@sdk_options()
def _list(state, format=None):
    """List available saved searches."""
    formatter = DataFrameOutputFormatter(format)
    response = state.sdk.securitydata.savedsearches.get()
    saved_searches_df = DataFrame(response["searches"])
    # Show only the identifying columns, not the full search definitions.
    formatter.echo_formatted_dataframes(
        saved_searches_df, columns=["name", "id", "notes"]
    )
@saved_search.command()
@click.argument("search-id")
@sdk_options()
def show(state, search_id):
    """Get the details of a saved search."""
    response = state.sdk.securitydata.savedsearches.get_by_id(search_id)
    # Pretty-print the raw search definition.
    echo(pformat(response["searches"]))
def _get_file_event_cursor_store(profile_name):
    """Return the file-event checkpoint (cursor) store for *profile_name*."""
    return FileEventCursorStore(profile_name)
def _construct_query(state, begin, end, saved_search, advanced_query, or_query):
    """Build the FileEventQuery from whichever query source was provided.

    Precedence: advanced query > saved search > accumulated option filters
    (optionally extended with a begin/end time-range filter and OR-grouped).
    """
    if advanced_query:
        state.search_filters = advanced_query
    elif saved_search:
        # Reuse the filter groups stored on the saved-search object.
        state.search_filters = saved_search._filter_group_list
    else:
        if begin or end:
            state.search_filters.append(
                create_time_range_filter(f.EventTimestamp, begin, end)
            )
        if or_query:
            state.search_filters = convert_to_or_query(state.search_filters)
    query = FileEventQuery(*state.search_filters)
    query.page_size = MAX_EVENT_PAGE_SIZE
    # Page through results oldest-first by insertion timestamp.
    query.sort_direction = "asc"
    query.sort_key = "insertionTimestamp"
    return query
def _get_all_file_events(state, query, checkpoint=""):
    """Yield one DataFrame of file events per result page.

    *checkpoint* is passed as the initial page token; if the server rejects
    it as invalid, the search restarts from the beginning.
    """
    try:
        response = state.sdk.securitydata.search_all_file_events(
            query, page_token=checkpoint
        )
    except Py42InvalidPageTokenError:
        # Stale/invalid token: retry without it rather than failing the search.
        response = state.sdk.securitydata.search_all_file_events(query)
    yield DataFrame(response["fileEvents"])
    while response["nextPgToken"]:
        response = state.sdk.securitydata.search_all_file_events(
            query, page_token=response["nextPgToken"]
        )
        yield DataFrame(response["fileEvents"])
def _handle_timestamp_checkpoint(checkpoint, state):
try:
checkpoint = float(checkpoint)
state.search_filters.append(InsertionTimestamp.on_or_after(checkpoint))
return None
except (ValueError, TypeError):
return checkpoint
|
python
|
import os
import sys
sys.path.append(".")
from Utils.HTMLTestRunner import *
from Testcases.test_login import Login
from Testcases.test_02 import Test02
# get the directory path to output report file (avoid shadowing the builtin `dir`)
report_dir = os.getcwd()
# get all tests from Login class
login1 = unittest.TestLoader().loadTestsFromTestCase(Login)
test02 = unittest.TestLoader().loadTestsFromTestCase(Test02)
# create a test suite
test_suite = unittest.TestSuite([login1, test02])
# Build the report path portably instead of hard-coding Windows '\\' separators;
# this is also the path actually written to (the old print omitted 'Reports').
report_path = os.path.join(report_dir, 'Reports', 'SeleniumPythonTestSummary.html')
print(report_path)
# open the report file; the context manager guarantees it is closed even if
# the test run raises
with open(report_path, 'w', encoding='utf-8') as outfile:
    # configure HTMLTestRunner options
    runner = HTMLTestRunner(stream=outfile, title='Test Report', description='Acceptance Tests')
    # run the suite using HTMLTestRunner
    runner.run(test_suite)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import dictc
class DictCTest(unittest.TestCase):
    """Placeholder test case for the top-level ``dictc`` module (no tests yet)."""

    def setUp(self):
        pass
class BaseDictTest(unittest.TestCase):
    """Tests for the shared BaseDict helpers: tag stripping, suggestions,
    keyword accessors, and HTML-to-text conversion."""

    def setUp(self):
        from DictC.BaseDict import strip_tags
        self.strip_tags = strip_tags
        import re
        # Reference implementation to compare against the packaged helper.
        self.raw_strip_tags = lambda text: re.sub(r'<[^>]*?>', '', text)
        from DictC.BaseDict import BaseDict
        self.BaseDict = BaseDict

    def test_strip_tags(self):
        # ``__doc__`` works on Python 2 and 3 (``func_doc`` was Python-2-only).
        self.assertIsInstance(self.strip_tags.__doc__, str)
        self.assertEqual('hello', self.strip_tags('<>hello</>'))
        self.assertEqual('hello', self.strip_tags('<div>hello</div>'))

    def test_raw_strip_tags(self):
        # Lambdas carry no docstring.
        self.assertIsNone(self.raw_strip_tags.__doc__)
        self.assertEqual('hello', self.raw_strip_tags('<>hello</>'))
        self.assertEqual('hello', self.raw_strip_tags('<div>hello</div>'))

    def test_fetchSuggestion(self):
        # The base dictionary suggests only the keyword itself.
        keyword = 'hello'
        self.assertEqual([keyword], self.BaseDict.fetchSuggestion(keyword))

    def test_set_get_keyword(self):
        keyword = 'hello'
        base_dict = self.BaseDict()
        base_dict.setKeyword(keyword)
        self.assertEqual(keyword, base_dict.getKeyword())

    def test_soundUri(self):
        pass

    def test_html2txt(self):
        # html2txt strips tags and unescapes HTML entities.
        pairs = [
            ('<b>Hello</b>', 'Hello'),
            ('&amp;', '&'),
            ('<b>&amp;</b>', '&')
        ]
        base_dict = self.BaseDict()
        for orig, raw in pairs:
            self.assertEqual(base_dict.html2txt(orig), raw)

    def test_getOutput(self):
        # With no keyword set, getOutput reports failure with empty content.
        base_dict = self.BaseDict()
        self.assertTupleEqual((False, ''), base_dict.getOutput())

    def tearDown(self):
        # TODO: django is missing?
        import django.utils.html
        # ``reload`` is a builtin only on Python 2; import it from importlib
        # on Python 3 so this works on both.
        try:
            from importlib import reload  # Python 3
        except ImportError:
            pass  # Python 2: builtin ``reload`` is available
        reload(django.utils.html)
class BingDictTest(unittest.TestCase):
    """Tests for the Bing backend.

    NOTE(review): these tests appear to hit the live Bing service via
    fetchSuggestion/getOutput — confirm network access is intended in CI.
    """

    def setUp(self):
        self.keywords = ['addicted', 'hello', 'welcome', 'it\'s', '你',
                         'cancer']
        from DictC.BingDict import BingDict
        self.BingDict = BingDict
        self.bing = BingDict()

    def test_fetchSuggestion(self):
        keywords = [
            '你', "it's", 'hello'
        ]
        for keyword in keywords:
            data = self.BingDict.fetchSuggestion(keyword)
            self.assertTrue(data)
        # Expect exactly 10 suggestions for the last keyword ('hello').
        self.assertEqual(10, len(data))
        self.assertTupleEqual(
            (u'hello', u'你好;您好;哈喽;喂,表示问候,打招呼或接电话时'),
            data[0]
        )

    def test_getOutput(self):
        for keyword in self.keywords:
            self.bing.setKeyword(keyword)
            status, content = self.bing.getOutput()
            self.assertTrue(status)
class DictCnTest(unittest.TestCase):
    """Tests for the dict.cn backend.

    NOTE(review): fetchSuggestion appears to hit the live dict.cn service —
    confirm network access is intended in CI.
    """

    def setUp(self):
        self.keywords = ['addicted', 'hello', 'welcome', 'it\'s', '你',
                         'cancer']
        from DictC.DictCnDict import DictCnDict
        self.DictCnDict = DictCnDict
        self.dict_cn = DictCnDict()

    def test_fetchSuggestion(self):
        keywords = [
            '你', "it's", 'hello'
        ]
        for keyword in keywords:
            data = self.DictCnDict.fetchSuggestion(keyword)
            self.assertTrue(data)
# Run all test cases when executed directly.
if __name__ == "__main__":
    unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 textwidth=79
|
python
|
from .clusterer import Clusterer
class ClusterMerge():
    """Merges lists of cluster entries, combining entries whose
    representatives fall within the clusterer's distance threshold."""

    def __init__(self, config):
        # The Clusterer supplies the scorer, the distance threshold
        # (max_dist), and the pattern generator used when merging.
        self.clusterer = Clusterer(**config)
        self.pattern_generator = self.clusterer.pattern_generator

    def merge(self, base_list, other_list):
        """Fold *other_list* into *base_list* in place.

        Each entry is ``[representative, count, pattern, line_set]``.  An
        entry from *other_list* is merged into the first sufficiently close
        entry of *base_list* (counts summed, patterns combined, line sets
        unioned); if none is close enough it is appended as a new cluster.
        """
        for repr_a, count_a, pattern_a, lines_a in other_list:
            # Iterate entries directly instead of range(len(...)); the
            # for/else replaces the manual `exists` flag.
            for entry in base_list:
                repr_b, _, pattern_b, _ = entry
                score = self.clusterer.scorer.distance(
                    repr_a, repr_b, self.clusterer.max_dist)
                if score <= self.clusterer.max_dist:
                    entry[1] += count_a
                    entry[2] = self.pattern_generator.create_pattern(
                        pattern_a, pattern_b)
                    entry[3] |= lines_a
                    break
            else:
                base_list.append([repr_a, count_a, pattern_a, lines_a])
|
python
|
from setuptools import setup, find_namespace_packages
# Package metadata for the 'sog' distribution; only sub-packages under
# the 'sog' namespace are included.
setup(name='sog',
      version='0.1',
      description='A creative remake of the 80s MUD',
      url='',
      author='Jason Newblanc',
      author_email='<first>.<last>(at)gmail.com',
      license='CC0 1.0',
      packages=find_namespace_packages(include=['sog.*']),
      zip_safe=False,
      )
|
python
|
'''
Util module to initialize SimpleML and configure
database management
'''
__author__ = 'Elisha Yadgaran'
# Import table models to register in DeclaritiveBase
from simpleml.persistables.base_persistable import Persistable
import simpleml.datasets.base_dataset
import simpleml.pipelines.base_pipeline
import simpleml.models.base_model
import simpleml.metrics.base_metric
from simpleml.persistables.serializing import custom_dumps, custom_loads
from simpleml.utils.errors import SimpleMLError
from simpleml.utils.configuration import CONFIG, FILESTORE_DIRECTORY
from simpleml.imports import SSHTunnelForwarder
from sqlalchemy import create_engine
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.engine.url import URL, make_url
from alembic import command
from alembic.config import Config
from alembic.migration import MigrationContext
from alembic.script import ScriptDirectory
from os.path import realpath, dirname, join
import os
import logging
import random
LOGGER = logging.getLogger(__name__)

# Database Defaults
# Connection parameters default to environment variables; when every value
# is None, Database.__init__ falls back to a bundled SQLite database.
DATABASE_NAME = os.getenv('SIMPLEML_DATABASE_NAME', None)
DATABASE_USERNAME = os.getenv('SIMPLEML_DATABASE_USERNAME', None)
DATABASE_PASSWORD = os.getenv('SIMPLEML_DATABASE_PASSWORD', None)
DATABASE_HOST = os.getenv('SIMPLEML_DATABASE_HOST', None)
DATABASE_PORT = os.getenv('SIMPLEML_DATABASE_PORT', None)
DATABASE_DRIVERNAME = os.getenv('SIMPLEML_DATABASE_DRIVERNAME', None)
DATABASE_QUERY = os.getenv('SIMPLEML_DATABASE_QUERY', None)
DATABASE_CONF = os.getenv('SIMPLEML_DATABASE_CONF', None)
DATABASE_URI = os.getenv('SIMPLEML_DATABASE_URI', None)
class BaseDatabase(URL):
    '''
    Base Database class to configure db connection
    Does not assume schema tracking or any other validation
    '''
    def __init__(self, config=None, configuration_section=None, uri=None,
                 use_ssh_tunnel=False, sshtunnel_params=None, **credentials):
        '''
        :param config: parsed config object (required with ``configuration_section``)
        :param configuration_section: section of ``config`` holding credentials
        :param uri: full database URI; deconstructed into credentials
        :param use_ssh_tunnel: boolean - default false. Whether to tunnel sqlalchemy connection
            through an ssh tunnel or not
        :param sshtunnel_params: Dict of ssh params - specify according to sshtunnel project
            https://github.com/pahaz/sshtunnel/ - direct passthrough
        :param credentials: remaining keyword args passed directly to the URL
        '''
        # Use None as the default and create a fresh dict per call -- the old
        # mutable default ({}) was mutated inside configure_ssh_tunnel and
        # therefore shared state across instances.
        if sshtunnel_params is None:
            sshtunnel_params = {}
        self.use_ssh_tunnel = use_ssh_tunnel
        # Sort out which credentials are the final ones -- default to remaining passed params
        if configuration_section is not None:
            if config is None:
                raise SimpleMLError('Cannot use config section without a config file')
            # Default to credentials in config file
            credentials = dict(config[configuration_section])
        elif uri is not None:
            # Deconstruct URI into credentials
            url = make_url(uri)
            credentials = {
                'drivername': url.drivername,
                'username': url.username,
                'password': url.password,
                'host': url.host,
                'port': url.port,
                'database': url.database,
                'query': url.query,
            }
        # Reconfigure credentials if SSH tunnel specified
        if self.use_ssh_tunnel:
            LOGGER.warning(
                '''
                SSH Tunnel is unreliable at the moment - connections time out randomly.
                Usage: call Database.start_tunnel() before Database.initialize() and
                end script with Database.stop_tunnel()
                '''
            )
            # Overwrite passed ports and hosts to route localhost port to the
            # original destination via tunnel
            credentials, self.ssh_config = self.configure_ssh_tunnel(credentials, sshtunnel_params)
        super(BaseDatabase, self).__init__(**credentials)

    def configure_ssh_tunnel(self, credentials, ssh_config):
        '''
        Rewrite ``credentials`` so the DB URI points at the local tunnel
        opening, and fill in the tunnel's local/remote bind addresses.
        Returns the updated ``(credentials, ssh_config)`` pair.
        '''
        # Actual DB location
        target_host = credentials.pop('host')
        target_port = int(credentials.pop('port'))
        # SSH Tunnel location
        local_host, local_port = ssh_config.get('local_bind_address', (None, None))
        local_host = local_host or 'localhost'  # In case it's null
        local_port = local_port or random.randint(4000, 5000)  # In case it's null
        LOGGER.info("Using {}:{} to bind SSH tunnel".format(local_host, local_port))
        # Swap em - db URI points to the local tunnel opening and the remote
        # ssh tunnel binds to the original host+port
        credentials['host'] = local_host
        credentials['port'] = local_port
        ssh_config['local_bind_address'] = (local_host, local_port)
        ssh_config['remote_bind_address'] = (target_host, target_port)
        return credentials, ssh_config

    def open_tunnel(self):
        # Start the (lazily created) SSH tunnel forwarder.
        self.ssh_tunnel.start()

    def close_tunnel(self):
        # Stop the SSH tunnel forwarder.
        self.ssh_tunnel.stop()

    @property
    def engine(self):
        # NOTE: a new engine object is created on every access.
        # Custom serializer/deserializer not supported by all drivers
        # Definitely works for:
        # - Postgres
        # - SQLite >= 1.3.7 -- Use _json_serializer for below
        return create_engine(self,
                             json_serializer=custom_dumps,
                             json_deserializer=custom_loads,
                             pool_recycle=300)

    @property
    def ssh_tunnel(self):
        # Lazily construct the forwarder; requires the optional sshtunnel package.
        if SSHTunnelForwarder is None:  # Not installed
            raise SimpleMLError('SSHTunnel is not installed, install with `pip install sshtunnel`')
        if not hasattr(self, '_sshtunnel'):
            self._sshtunnel = SSHTunnelForwarder(**self.ssh_config)
        return self._sshtunnel

    def create_tables(self, base, drop_tables=False, ignore_errors=False):
        '''
        Creates database tables (and potentially drops existing ones).
        Assumes to be running under a sufficiently privileged user

        :param drop_tables: Whether or not to drop the existing tables first.
        :return: None
        '''
        try:
            if drop_tables:
                base.metadata.drop_all()
            base.metadata.create_all()
        except ProgrammingError as e:  # Permission errors
            if ignore_errors:
                LOGGER.debug(e)
            else:
                # Bare raise preserves the original traceback.
                raise

    def _initialize(self, base, create_tables=False, **kwargs):
        '''
        Initialization method to set up database connection and inject
        session manager

        :param create_tables: Bool, whether to create tables in database
        :param drop_tables: Bool, whether to drop existing tables in database
        :return: None
        '''
        engine = self.engine
        session = scoped_session(sessionmaker(autocommit=True,
                                              autoflush=False,
                                              bind=engine))
        base.metadata.bind = engine
        base.query = session.query_property()
        if create_tables:
            self.create_tables(base, **kwargs)
        base.set_session(session)

    def initialize(self, base_list, **kwargs):
        '''
        Initialization method to set up database connection and inject
        session manager

        :param base_list: iterable of declarative bases to bind
        :return: None
        '''
        for base in base_list:
            self._initialize(base, **kwargs)
class AlembicDatabase(BaseDatabase):
    '''
    Base database class to manage dbs with schema tracking. Includes alembic
    config references
    '''
    def __init__(self, alembic_filepath, script_location='migrations', *args, **kwargs):
        '''
        :param alembic_filepath: path to the alembic.ini file
        :param script_location: migration scripts folder, relative to the ini
        '''
        self.alembic_filepath = alembic_filepath
        self.script_location = script_location
        super(AlembicDatabase, self).__init__(*args, **kwargs)

    @property
    def alembic_config(self):
        # Lazily load and cache the alembic config.
        if not hasattr(self, '_alembic_config'):
            # load the Alembic configuration
            self._alembic_config = Config(self.alembic_filepath)
            # For some reason, alembic doesnt use a relative path from the ini
            # and cannot find the migration folder without the full path
            self._alembic_config.set_main_option('script_location', join(dirname(self.alembic_filepath), self.script_location))
        return self._alembic_config

    def create_tables(self, base, drop_tables=False, ignore_errors=False):
        '''
        Creates database tables (and potentially drops existing ones).
        Assumes to be running under a sufficiently privileged user

        :param drop_tables: Whether or not to drop the existing tables first.
        :return: None
        '''
        try:
            if drop_tables:
                base.metadata.drop_all()
                # downgrade the version table, "stamping" it with the base rev
                command.stamp(self.alembic_config, "base")
            base.metadata.create_all()
            # generate/upgrade the version table, "stamping" it with the most recent rev
            command.stamp(self.alembic_config, "head")
        except ProgrammingError as e:  # Permission errors
            if ignore_errors:
                LOGGER.debug(e)
            else:
                # Bare raise preserves the original traceback.
                raise

    def upgrade(self, revision='head'):
        '''
        Proxy Method to invoke alembic upgrade command to specified revision
        '''
        command.upgrade(self.alembic_config, revision)

    def downgrade(self, revision):
        '''
        Proxy Method to invoke alembic downgrade command to specified revision
        '''
        command.downgrade(self.alembic_config, revision)

    def validate_schema_version(self):
        '''
        Check that the newly initialized database is up-to-date
        Raises an error otherwise (ahead of any table model mismatches later)
        '''
        # Establish a context to access db values
        context = MigrationContext.configure(self.engine.connect())
        current_revision = context.get_current_revision()
        # Read local config file to find the current "head" revision
        script = ScriptDirectory.from_config(self.alembic_config)
        head_revision = script.get_current_head()
        if current_revision != head_revision:
            raise SimpleMLError('''Attempting to connect to an outdated schema.
                                Set the parameter `upgrade=True` in the initialize method
                                or manually execute `alembic upgrade head` in a shell''')

    def initialize(self, base_list, upgrade=False, **kwargs):
        '''
        Initialization method to set up database connection and inject
        session manager

        Raises a SimpleML error if database schema is not up to date

        :param upgrade: Bool, whether to run an upgrade migration after establishing a connection
        :return: None
        '''
        # Standard initialization
        super(AlembicDatabase, self).initialize(base_list, **kwargs)

        # Upgrade schema if necessary
        if upgrade:
            self.upgrade()

        # Assert current db schema is up-to-date
        self.validate_schema_version()
class Database(AlembicDatabase):
    '''
    SimpleML specific configuration to interact with the database
    Defaults to sqlite db in filestore directory
    '''
    def __init__(self,
                 configuration_section=None,
                 uri=None,
                 database=None,
                 username=None,
                 password=None,
                 drivername=None,
                 host=None,
                 port=None,
                 query=None,
                 *args, **kwargs):

        def _nothing_specified():
            # True when no connection info was provided at all.  ``host`` is
            # included in the check; the previous version omitted it, so a
            # host supplied alone (directly or via SIMPLEML_DATABASE_HOST)
            # was silently discarded in favor of the SQLite default.
            return (
                configuration_section is None
                and uri is None
                and all(
                    i is None
                    for i in (database, username, password, drivername, host, port, query)
                )
            )

        if _nothing_specified():
            # Fill with env variable values if none are passed directly
            configuration_section = DATABASE_CONF
            uri = DATABASE_URI
            database = DATABASE_NAME
            username = DATABASE_USERNAME
            password = DATABASE_PASSWORD
            drivername = DATABASE_DRIVERNAME
            host = DATABASE_HOST
            port = DATABASE_PORT
            query = DATABASE_QUERY

        if _nothing_specified():
            # Use default creds for a sqlite database in filestore directory if env variables are also null
            LOGGER.info('No database connection specified, using default SQLite db in {}'.format(FILESTORE_DIRECTORY))
            uri = 'sqlite:///{}'.format(join(FILESTORE_DIRECTORY, 'SimpleML.db'))

        root_path = dirname(dirname(dirname(realpath(__file__))))
        alembic_filepath = join(root_path, 'simpleml/migrations/alembic.ini')
        script_location = ''
        super(Database, self).__init__(
            config=CONFIG, alembic_filepath=alembic_filepath, script_location=script_location,
            configuration_section=configuration_section, uri=uri, database=database,
            username=username, password=password, drivername=drivername,
            host=host, port=port, query=query,
            *args, **kwargs)

    def initialize(self, base_list=None, **kwargs):
        '''
        Initialization method to set up database connection and inject
        session manager

        Raises a SimpleML error if database schema is not up to date

        :param base_list: declarative bases to bind; defaults to the project's
            Persistable base
        :return: None
        '''
        if base_list is None:  # Use defaults in project
            base_list = [Persistable]

        super(Database, self).initialize(base_list, **kwargs)
|
python
|
"""
Functions and objects describing electro-optic components.
"""
from arch.block import Block
from arch.models.model import Linear, SymbolicModel
from sympy import Matrix, sqrt, exp, I, pi
import arch.port as port
class Switch2x2(Block):
    """
    2x2 electro-optic switch with a digital control port.

    extinction_ratio: ratio of desired signal to undesired signal from wrong port
    loss_dB: positive number of decibels of loss (0 dB -> 100% tx; 10 dB -> 10% tx)
    """

    reference_prefix = "SW"

    def define(self, loss_dB = 3.0, extinction_ratio=1000.0):
        self.add_port(name='in0', kind=port.kind.optical, direction=port.direction.inp)
        self.add_port(name='in1', kind=port.kind.optical, direction=port.direction.inp)
        self.add_port(name='out0', kind=port.kind.optical, direction=port.direction.out)
        self.add_port(name='out1', kind=port.kind.optical, direction=port.direction.out)
        state = self.add_port(name='state', kind=port.kind.digital,
                              direction=port.direction.inp)

        # Lagrange polynomial
        # Interpolates the splitting ratio r between the two switch states:
        # state=1 -> r = 1 - 1/er, state=0 -> r = 1/er (finite extinction).
        s,er,tx = state,extinction_ratio,10**(-loss_dB/10)
        r = (s-0)/(1-0)*(1-1/er) + (s-1)/(0-1)*(1/er)

        # Unitary-style coupler matrix scaled by the amplitude transmission sqrt(tx).
        M = sqrt(tx) * Matrix([
            [sqrt(r), I*sqrt(1 - r)],
            [I*sqrt(1 - r), sqrt(r)] ])

        self.add_model(Linear('simple switch '+self.name, block=self, unitary_matrix=M))
class ThermoOpticPhaseShifterBasicRT(Block):
    """
    Thermo-optic phase shifter with a two-port electrical ABCD model.

    Due to Dario, based on https://doi.org/10.1364/OE.27.010456
    """

    reference_prefix = "TOPM"

    def define(self, device_length=None, centre_wavelength=2.0E-6, ref_index_temp_func=lambda T:1.0*T, R=None):
        """
        thermooptic_coeff: constant thermo-optic coefficient
        i0: input port current
        v0: input port voltage
        """
        # ABCD matrix of a series resistance R (M itself is unused below;
        # its entries A,B,C,D feed the electrical output expressions).
        A,B,C,D = 1,-R,0,1
        M = Matrix([[A,B],[C,D]])

        inp = self.add_port(name='inp', kind=port.kind.optical, direction=port.direction.inp)
        out = self.add_port(name='out', kind=port.kind.optical, direction=port.direction.out)

        # NOTE(review): the 'i*' ports are declared kind=voltage and the 'v*'
        # ports kind=current — this looks swapped given the names and the
        # docstring; confirm against port.kind semantics before changing.
        i0 = self.add_port(name='i0', kind=port.kind.voltage, direction=port.direction.inp)
        v0 = self.add_port(name='v0', kind=port.kind.current, direction=port.direction.inp)
        i1 = self.add_port(name='i1', kind=port.kind.voltage, direction=port.direction.out)
        v1 = self.add_port(name='v1', kind=port.kind.current, direction=port.direction.out)

        T = self.add_port(name='T', kind=port.kind.temperature, direction=port.direction.inp)

        # Optical output acquires a temperature-dependent phase; electrical
        # outputs follow the ABCD relation above.
        oes = {
            out: exp(I* (2*pi*device_length/centre_wavelength)*ref_index_temp_func(T) )*inp,
            v1: +A*v0 + B*i0,
            i1: -C*v0 - D*i0}

        self.add_model(SymbolicModel('simple phase '+self.name, block=self, out_exprs=oes))
|
python
|
# """
# All operations for the Electricity Spot Market
# Based on the role ClearIterativeCO2AndElectricitySpotMarketTwoCountryRole
#
# Jim Hommes - 25-3-2021
# """
# import json
# from modules.marketmodule import MarketModule
# from util.repository import Repository
#
#
# class ElectricitySpotMarketSubmitBids(MarketModule):
# """
# The class that submits all bids to the Electricity Spot Market.
# """
#
# def __init__(self, reps: Repository):
# super().__init__('COMPETES Dummy: Electricity Spot Market: Submit Bids', reps)
# reps.dbrw.stage_init_power_plant_dispatch_plan_structure()
#
# def act(self):
# # For every energy producer we will submit bids to the Capacity Market
# for energy_producer in self.reps.energy_producers.values():
#
# # For every plant owned by energyProducer
# for powerplant in self.reps.get_operational_power_plants_by_owner(energy_producer):
# market = self.reps.get_electricity_spot_market_for_plant(powerplant)
# capacity = powerplant.get_actual_nominal_capacity()
# mc = powerplant.calculate_marginal_cost_excl_co2_market_cost(self.reps,
# self.reps.current_tick)
# self.reps.create_or_update_power_plant_dispatch_plan(powerplant, energy_producer, market, capacity, mc,
# self.reps.current_tick)
#
#
# class ElectricitySpotMarketClearing(MarketModule):
# """
# The class that clears the Electricity Spot Market.
# """
#
# def __init__(self, reps: Repository):
# super().__init__('COMPETES Dummy: Electricity Spot Market: Clear Market', reps)
# reps.dbrw.stage_init_market_clearing_point_structure()
#
# def act(self):
# # Calculate and submit Market Clearing Price
# peak_load = max(json.loads(self.reps.load['NL'].parameters['ldc'].to_database())['data'].values())
# for market in self.reps.electricity_spot_markets.values():
# sorted_ppdp = self.reps.get_sorted_power_plant_dispatch_plans_by_market_and_time(market, self.reps.current_tick)
# clearing_price = 0
# total_load = 0
# for ppdp in sorted_ppdp:
# if total_load + ppdp.amount <= peak_load:
# total_load += ppdp.amount
# clearing_price = ppdp.price
# self.reps.set_power_plant_dispatch_plan_production(
# ppdp, self.reps.power_plant_dispatch_plan_status_accepted, ppdp.amount)
# elif total_load < peak_load:
# clearing_price = ppdp.price
# self.reps.set_power_plant_dispatch_plan_production(
# ppdp, self.reps.power_plant_dispatch_plan_status_partly_accepted, peak_load - total_load)
# total_load = peak_load
# else:
# self.reps.set_power_plant_dispatch_plan_production(
# ppdp, self.reps.power_plant_dispatch_plan_status_failed, 0)
#
# self.reps.create_or_update_market_clearing_point(market, clearing_price, total_load, self.reps.current_tick)
|
python
|
"""
Simple integration tests on the API itself.
We make actual ajax requests to the running docker container.
"""
import os
import json
import unittest
import requests
from dotenv import load_dotenv
load_dotenv('.env')
# The URL of the running server from within the docker container
url = 'http://web:5000'
# Read the token once, then blank the env var (presumably so it cannot leak
# further -- TODO confirm intent).
service_token = os.environ['KBASE_SECURE_CONFIG_PARAM_service_token']
os.environ['KBASE_SECURE_CONFIG_PARAM_service_token'] = '' # nosec
def make_request(ws_ref):
    """Helper to make a JSON RPC request with the given workspace ref."""
    payload = json.dumps({
        'params': {
            'ws_ref': ws_ref,
            'n_max_results': 2,
            'bypass_caching': True
        },
        'method': 'get_homologs',
        'id': 0
    })
    request_headers = {'Content-Type': 'application/json', 'Authorization': service_token}
    response = requests.post(url, data=payload, headers=request_headers)
    return response.json()
class TestApi(unittest.TestCase):
    """Integration tests that exercise the live service over HTTP.

    Each test posts a workspace reference and checks the JSON-RPC response;
    the hard-coded refs point at fixture objects in the target workspace.
    """

    # @unittest.skip('x')
    def test_search_reads_paired(self):
        """Test a search on genome read data with paired-ends."""
        reads_ref = '15/45/1'
        json_resp = make_request(reads_ref)
        result = json_resp['result']
        self.assertTrue(len(result['distances']))

    # @unittest.skip('x')
    def test_search_reads_single(self):
        """Test a search on single-ended genome read data."""
        reads_ref = '15/43/1'
        json_resp = make_request(reads_ref)
        result = json_resp['result']
        self.assertTrue(len(result['distances']))

    # @unittest.skip('x')
    def test_search_genome(self):
        """Test a search on a Genome type."""
        genome_ref = '34819/14/1'
        json_resp = make_request(genome_ref)
        result = json_resp['result']
        self.assertTrue(len(result['distances']))

    # @unittest.skip('x')
    def test_search_genome_no_auth(self):
        """Test a search on a Genome type."""
        # Posts without the Authorization header to cover the public path.
        genome_ref = '15792/227059/1'
        post_data = {'params': {'ws_ref': genome_ref}, 'method': 'get_homologs', 'id': 0}
        headers = {'Content-Type': 'application/json'}
        resp = requests.post(url, data=json.dumps(post_data), headers=headers)
        json_resp = resp.json()
        result = json_resp['result']
        self.assertTrue(len(result['distances']))

    # @unittest.skip('x')
    def test_search_assembly(self):
        """Test a search on an Assembly type."""
        assembly_ref = '34819/10/1'
        json_resp = make_request(assembly_ref)
        result = json_resp['result']
        self.assertTrue(len(result['distances']))

    # @unittest.skip('x')
    def test_search_assembly_contigset(self):
        """Test a search on an Assembly with the legacy ContigSet."""
        assembly_ref = '34819/8/1'
        json_resp = make_request(assembly_ref)
        result = json_resp['result']
        self.assertTrue(len(result['distances']))

    # @unittest.skip('x')
    def test_search_genome_no_assembly_ref(self):
        """Test a failed search against a Genome that has no assembly_ref or contigset_ref."""
        genome_ref = '34819/5/9'
        json_resp = make_request(genome_ref)
        self.assertTrue('no assembly or contigset references' in json_resp['error']['message'])

    # @unittest.skip('x')
    def test_search_invalid_ws_id(self):
        """Test a search with an invalid workspace reference ID."""
        ref = 'x/y/z'
        json_resp = make_request(ref)
        self.assertTrue(len(json_resp['error']['message']))

    # @unittest.skip('x')
    def test_search_strain(self):
        # Each distance entry should carry strain metadata.
        ref = '34819/8/1'
        json_resp = make_request(ref)
        result = json_resp['result']
        self.assertTrue('strain' in result['distances'][0])

    # @unittest.skip('x')
    def test_search_unauthorized_ws_id(self):
        """Test a search with an unauthorized workspace ref."""
        ref = '/search/1/2/3'
        json_resp = make_request(ref)
        self.assertTrue(len(json_resp['error']['message']))
|
python
|
# -*- coding: utf-8 -*-
"""
Keyphrase extraction demo: runs a supervised deep keyphrase model (OpenNMT)
and unsupervised pke extractors (TfIdf / YAKE) over a document collection.
"""
import os
import random
import string
import sys

# Make the repo root and the onmt package importable before importing them.
# (The original imported onmt *before* this sys.path setup, which only worked
# when onmt happened to be installed; the order is fixed here.)
module_path = os.path.abspath(os.path.join('../'))
if module_path not in sys.path:
    sys.path.append(module_path)
module_path = os.path.abspath(os.path.join('../onmt'))
if module_path not in sys.path:
    sys.path.append(module_path)

from itertools import repeat

from onmt.keyphrase.pke.utils import compute_document_frequency
from onmt.utils.logging import init_logger
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from kp_gen_eval import _get_parser
import onmt.keyphrase.pke as pke
from nltk.corpus import stopwords

# NOTE(review): the original `exec('from __future__ import unicode_literals')`
# was removed - a __future__ import inside exec() only affects the exec'd
# string, never this module, so it was a no-op. Duplicate `import os` and
# `import string` lines were also collapsed.

stoplist = stopwords.words('english')

__author__ = "Rui Meng"
__email__ = "[email protected]"
def extract_deepkp(text_to_extract):
    """Extract keyphrases with the supervised deep (OpenNMT) keyphrase model.

    Loads the one2one RNN checkpoint, generates predictions for the given
    text, and prints the top (up to 20) non-stopword keyphrases.

    :param text_to_extract: source text (a single string) to extract from
    """
    parser = _get_parser()
    config_path = '../config/translate/config-rnn-keyphrase.yml'
    one2one_ckpt_path = '../models/keyphrase/meng17-one2one-kp20k-topmodels/kp20k-meng17-one2one-rnn-BS128-LR0.05-Layer1-Dim150-Emb100-Dropout0.0-Copytrue-Covfalse-Contboth-IF1_step_30000.pt'
    # Alternative one2seq checkpoint (unused; point `models` below at it to try):
    # '../models/keyphrase/meng17-one2seq-kp20k-topmodels/kp20k-meng17-verbatim_append-rnn-BS64-LR0.05-Layer1-Dim150-Emb100-Dropout0.0-Copytrue-Reusetrue-Covtrue-PEfalse-Contboth-IF1_step_50000.pt'
    opt = parser.parse_args('-config %s' % (config_path))
    setattr(opt, 'models', [one2one_ckpt_path])

    # Build the translator and generate keyphrase predictions.
    translator = build_translator(opt, report_score=False)
    scores, predictions = translator.translate(
        src=[text_to_extract],
        tgt=None,
        src_dir=opt.src_dir,
        batch_size=opt.batch_size,
        attn_debug=opt.attn_debug,
        opt=opt
    )

    # Print the top predictions, dropping stopwords and <unk> tokens.
    print('Paragraph:\n\t' + text_to_extract)
    print('Top predictions:')
    keyphrases = [kp.strip() for kp in predictions[0]
                  if (not kp.lower().strip() in stoplist) and (kp != '<unk>')]
    for kp_id, kp in enumerate(keyphrases[: min(len(keyphrases), 20)]):
        print('\t%d: %s' % (kp_id + 1, kp.strip(string.punctuation)))
def extract_pke(text, method, dataset_path=None, df_path=None, top_k=10):
    """Extract keyphrases with an unsupervised pke model.

    :param text: document text to extract from
    :param method: 'tfidf' or 'yake' (case-insensitive)
    :param dataset_path: directory of reference documents, used to (re)compute
        document frequencies when the DF file is missing ('tfidf' only)
    :param df_path: path of the DF counts file (required for 'tfidf')
    :param top_k: number of keyphrases to return
    :return: list of (keyphrase, score) tuples
    :raises ValueError: if method is 'tfidf' and df_path is None
    :raises NotImplementedError: for any other method
    """
    method = method.lower()
    if method == 'tfidf':
        # 0. make sure we have a DF file. Previously a missing df_path crashed
        # inside os.path.exists(None) with an opaque TypeError.
        if df_path is None:
            raise ValueError("df_path is required for the 'tfidf' method")
        if not os.path.exists(df_path):
            # stoplist for filtering n-grams
            stoplist = list(string.punctuation)
            # compute df counts and store as n-stem -> weight values
            compute_document_frequency(input_dir=dataset_path,
                                       output_file=df_path,
                                       extension='xml',  # input file extension
                                       language='en',  # language of files
                                       normalization="stemming",  # porter stemmer
                                       stoplist=stoplist)
        # 1. create a TfIdf extractor.
        extractor = pke.unsupervised.TfIdf()
        # 2. load the content of the document.
        extractor.load_document(input=text,
                                language='en_core_web_sm',
                                normalization=None)
        # 3. select {1-3}-grams not containing punctuation marks as candidates.
        extractor.candidate_selection(n=3, stoplist=list(string.punctuation))
        # 4. weight the candidates using tf * idf.
        df = pke.load_document_frequency_file(input_file=df_path)
        extractor.candidate_weighting(df=df)
        # 5. get the top_k highest-scored candidates as keyphrases.
        keyphrases = extractor.get_n_best(n=top_k)
    elif method == 'yake':
        stoplist = stopwords.words('english')
        # 1. create a YAKE extractor.
        extractor = pke.unsupervised.YAKE()
        # 2. load the content of the document.
        extractor.load_document(input=text,
                                language='en_core_web_sm',
                                normalization=None)
        # 3. select {1-3}-grams not containing punctuation marks and not
        # beginning/ending with a stopword as candidates.
        extractor.candidate_selection(n=3, stoplist=stoplist)
        # 4. weight candidates with the YAKE scheme; `window` is the number of
        # words of left/right context used.
        window = 2
        use_stems = False  # use stems instead of words for weighting
        extractor.candidate_weighting(window=window,
                                      stoplist=stoplist,
                                      use_stems=use_stems)
        # 5. get the top_k highest-scored candidates; redundant keyphrases are
        # removed using Levenshtein distance with this threshold.
        threshold = 0.8
        keyphrases = extractor.get_n_best(n=top_k, threshold=threshold)
    else:
        raise NotImplementedError

    for kp_id, kp in enumerate(keyphrases):
        print('\t%d: %s (%.4f)' % (kp_id + 1, kp[0], kp[1]))
    return keyphrases
if __name__ == '__main__':
    dataset_name = 'SF_Prod'
    dataset_path = '../data/salesforce/%s/' % dataset_name

    # Load every file under the dataset directory as one document dict.
    prod_dicts = []
    for subdir, dirs, files in os.walk(dataset_path):
        for fname in files:
            filepath = subdir + os.sep + fname
            # `with` closes the handle deterministically (original leaked it).
            with open(filepath, 'r') as fin:
                text = fin.readlines()
            text = '\n'.join([l.strip() for l in text])
            doc = {'name': fname, 'path': filepath, 'text': text}
            prod_dicts.append(doc)
    print('Loaded #(PROD docs)=%d' % (len(prod_dicts)))

    # Pick a random document. randrange() is half-open; the original
    # randint(0, len(...)) included len itself and could raise IndexError.
    doc_id = random.randrange(len(prod_dicts))
    doc = prod_dicts[doc_id]
    text_to_extract = doc['text']
    print(doc_id)
    print(doc['name'])
    print(text_to_extract)

    extract_deepkp(text_to_extract)
    extract_pke(text_to_extract, method='tfidf', dataset_path=dataset_path,
                df_path=os.path.abspath(dataset_path + '../%s.df.tsv.gz' % dataset_name))
|
python
|
#!/usr/bin/env python3
# test_app.py
import unittest
from unittest.mock import patch
from snakegame import app
from snakegame.player import Player
class TestApp(unittest.TestCase):
    """Unit tests for the snakegame.app module."""

    def setUp(self):
        """Build a player with a known name, score and high score."""
        self.name = "Bob"
        self.score = 0
        self.high_score_name = "John"
        self.high_score = 50
        self.player = Player("Bob")
        self.player.high_score_player = self.high_score_name
        self.player.high_score = self.high_score

    def test_play_game(self):
        pass

    def test_play_again(self):
        pass

    def test_validate_player_input(self):
        pass

    def test_timeout_input(self):
        pass

    def test_menu(self):
        """menu() prints the banner and navigation help in a single call."""
        with patch('builtins.print') as mock_print:
            app.menu()
        mock_print.assert_called_once_with(
            "****** SNAKE ******\n",
            "Navigation Keys:",
            "E - UP",
            "D - DOWN",
            "S - LEFT",
            "F - RIGHT",
            "Q - QUIT",
            sep='\n'
        )

    def test_scoreboard(self):
        """scoreboard() prints the high score then the player's score."""
        with patch('builtins.print') as mock_print:
            app.scoreboard(self.player)
        mock_print.assert_called_once_with(
            f"\nHIGH SCORE ({self.high_score_name}): {self.high_score}",
            f"{self.name}'S SCORE: {self.score}\n",
            sep='\n'
        )

    def test_keyboard_commands(self):
        """Each navigation key maps to its vector (or QUIT / None)."""
        expected = {
            "E": [-1, 0],
            "D": [1, 0],
            "S": [0, -1],
            "F": [0, 1],
            "Q": "QUIT",
            "K": None,
        }
        for key, value in expected.items():
            self.assertEqual(app.keyboard_commands(key), value)

    def test_clear_screen(self):
        pass

    def test_update_screen(self):
        pass

    def test_goodbye_msg(self):
        """goodbye_msg() prints a personalized farewell."""
        with patch('builtins.print') as mock_print:
            app.goodbye_msg(self.player.player_name)
        mock_print.assert_called_once_with(
            f"\nThanks for playing, {self.name}!\n\nQuitting program..."
        )

    def tearDown(self):
        pass
if __name__ == "__main__":
    # Run the suite when executed directly: python test_app.py
    unittest.main()
|
python
|
"""Add table for datapath
Revision ID: ce8079bf4ab7
Revises: None
Create Date: 2017-08-26 21:42:40.469444
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ce8079bf4ab7'
down_revision = None  # first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the `datapath` and `port` tables.

    `port` rows belong to a datapath (composite PK of datapath_id + port)
    and are deleted with their parent row (ON DELETE CASCADE).
    NOTE(review): no downgrade() is defined, and the `subnet` table referenced
    by `port.subnet_id` is not created here even though down_revision is
    None -- verify it is created elsewhere before this migration runs.
    """
    op.create_table(
        'datapath',
        sa.Column('id', sa.String(64), primary_key=True),
        sa.Column('host', sa.String(64), nullable=False),
        sa.Column('port', sa.Integer, nullable=False))
    # Must be created after `datapath` (FK target).
    op.create_table(
        'port',
        sa.Column('datapath_id', sa.String(64), sa.ForeignKey(
            'datapath.id', ondelete='CASCADE'), primary_key=True),
        sa.Column('mac', sa.String(64), nullable=True),
        sa.Column('port', sa.String(10), nullable=False, primary_key=True),
        sa.Column('subnet_id', sa.String(36), sa.ForeignKey(
            'subnet.id', ondelete='CASCADE'), nullable=False))
|
python
|
"""Mock objects for testing"""
# Example webhook POST body (presumably from form.io, given the "_id"
# placeholder): a "submission" wrapper with form field values under "data".
# All values are arbitrary test data.
SUBMISSION_POST_DATA = {
    "submission": {
        "_id": "random_string_from_formio",
        "data": {
            "project_name": "123 Market St.",
            "email": "[email protected]",
            "phone": "415-867-5309",
            "name": "Jenny"
        }
    }
}
|
python
|
# Python3
"""Copy the development kit from this repo into the local Modo kits folder."""
import sys
import shutil
from pathlib import Path

# Root of this repo (directory containing this script).
repo_dir = Path(__file__).parent

# OS-dependent Modo "Kits" path.
if sys.platform == "win32":
    install_path = Path(r"~\AppData\Roaming\Luxology\Kits").expanduser()
elif sys.platform == "darwin":
    install_path = Path("~/Library/Application Support/Luxology/Kits").expanduser()
else:
    # The original fell through with install_path undefined (NameError below);
    # fail with a clear message instead.
    sys.exit("Unsupported platform for Modo kit install: " + sys.platform)

# Name of the kit directory.
kit_name = "community_hub"

# The development kit inside the repo.
kit_path = repo_dir / kit_name

# Destination inside the Modo kits path.
modo_kit_path = install_path / kit_name

# Remove any previously installed copy before copying the new one.
if modo_kit_path.exists():
    shutil.rmtree(modo_kit_path)

# Copy the development kit into place.
shutil.copytree(src=kit_path, dst=modo_kit_path)
|
python
|
from sqlalchemy.ext.asyncio import AsyncSession
from app.crud import dialogue_crud
from app.service import dialogue_exist
async def get_all_dialogues_for_user(db: AsyncSession, user_id: int):
    """
    Get all dialogues for a user as a lazy generator of attribute dicts
    :param db: DB
    :type db: AsyncSession
    :param user_id: User ID
    :type user_id: int
    :return: Generator yielding one dict per dialogue
    """
    records = await dialogue_crud.get_for_user(db, user_id)
    return (record.__dict__ for record in records)
@dialogue_exist('pk', 'user_id')
async def get_dialogue(*, db: AsyncSession, user_id: int, pk: int) -> dict:
    """
    Get dialogue
    :param db: DB
    :type db: AsyncSession
    :param user_id: User ID
    :type user_id: int
    :param pk: Dialogue ID
    :type pk: int
    :return: Dialogue
    :rtype: dict
    """
    # NOTE(review): dialogue_exist presumably validates that dialogue `pk`
    # exists for `user_id` before this body runs -- verify in app.service.
    dialogue = await dialogue_crud.get(db, id=pk)
    return dialogue.__dict__
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_compartmentmodels
----------------------------------
Tests for `compartmentmodels` module.
The tests for the individual models are in separate files.
"""
import pytest
#import tempfile
import os
import numpy as np
from compartmentmodels.compartmentmodels import loaddata, savedata
def test_load_and_save(tmpdir):
    """Round-trip: savedata() then loaddata() must reproduce time/curve/aif."""
    time = np.linspace(0, 100)
    curve = np.random.randn(len(time))
    aif = np.random.randn(len(time))
    filename = os.path.join(str(tmpdir), 'tempfile.tca')
    # Py3 fix: `print filename` was Python 2 statement syntax and made this
    # whole module a SyntaxError under Python 3.
    print(filename)
    savedata(filename, time, curve, aif)
    t, c, a = loaddata(filename)
    assert np.all(np.equal(time, t))
    assert np.all(np.equal(curve, c))
    assert np.all(np.equal(aif, a))
|
python
|
from typing import Optional
from typing import Union
import attr
@attr.s
class Attachment:
    """Metadata for a file attached to a message."""
    content_type = attr.ib(type=str)  # MIME type -- presumably; verify producer
    id = attr.ib(type=str)
    size = attr.ib(type=int)
    stored_filename = attr.ib(type=str)
@attr.s
class Reaction:
    """An emoji reaction to a previously sent message."""
    emoji = attr.ib(type=str)
    target_author = attr.ib(type=Union[str, dict])
    # Timestamp identifying the message being reacted to.
    target_sent_timestamp = attr.ib(type=int)
    # presumably True retracts a previous reaction -- verify consumer
    remove = attr.ib(type=bool, default=False)
@attr.s
class Payment:
    """A payment notice carried by a message."""
    note = attr.ib(type=str)
    receipt = attr.ib(type=str)
@attr.s
class Message:
    """A chat message with optional attachments, group info, reaction, payment.

    Bug fix: `attachments`, `group` and `group_v2` previously used shared
    mutable defaults (`[]` / `{}`), so every Message built without those
    arguments shared the *same* list/dict object. attr.Factory creates a
    fresh container per instance. Field order is unchanged (it defines the
    positional __init__ signature).
    """
    username = attr.ib(type=str)
    source = attr.ib(type=Union[str, dict])
    text = attr.ib(type=str)
    source_device = attr.ib(type=int, default=0)
    timestamp = attr.ib(type=int, default=None)
    expiration_secs = attr.ib(type=int, default=0)
    is_receipt = attr.ib(type=bool, default=False)
    attachments = attr.ib(type=list, default=attr.Factory(list))
    quote = attr.ib(type=str, default=None)
    group = attr.ib(type=dict, default=attr.Factory(dict))
    group_v2 = attr.ib(type=dict, default=attr.Factory(dict))
    reaction = attr.ib(type=Optional[Reaction], default=None)
    payment = attr.ib(type=Optional[Payment], default=None)
|
python
|
#!/usr/bin/python
# coding: utf-8
# Copyright 2018 AstroLab Software
# Author: Chris Arnault
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The indextry.py script has to be present on the <host> machine
where the minimal HTML server has been activated as
> python server.py
Then, call in a web navigator the URL
http://<host>:24701/indextry.py
https://python-django.dev/page-python-serveur-web-creer-rapidement
"""
# coding: utf-8
import cgi
from pylivy.session import *
from pylivy.client import *
"""
Demo of using the pylivy library
https://pylivy.readthedocs.io/en/latest/index.web
"""
# Initialize post variables
class Variable:
    """One piece of page state, round-tripped through a hidden form field.

    `type` is "int" (unset marker -1) or "str" (unset marker ""). read()
    pulls the value from the module-level cgi.FieldStorage `form`.
    """

    def __init__(self, name, type="int"):
        self.name = name
        self.type = type
        self.reset()

    def read(self):
        """Load this variable from the posted form, or reset it on failure."""
        try:
            if self.type == "int":
                self.value = int(form.getvalue(self.name))
            else:
                value = form.getvalue(self.name)
                if value is None:
                    value = ""
                self.value = value
        except Exception:
            # Missing/unparsable field -> unset. (Was a bare `except:`, which
            # would also swallow KeyboardInterrupt/SystemExit.)
            self.reset()

    def to_form(self):
        """Render as a hidden <input> so the value survives the next POST."""
        out = """<input type="hidden" name="{}" value="{}" />""".format(self.name, self.value)
        return out

    def debug(self):
        """One `name = value` debug line."""
        out = " {} = {}\n".format(self.name, self.value)
        return out

    def reset(self):
        """Set the 'unset' marker value (-1 for int, "" for str)."""
        if self.type == "int":
            self.value = -1
        else:
            self.value = ""

    def set(self, value):
        """Assign, coercing to int for int variables (-1 when uncoercible)."""
        if self.type == "int":
            try:
                self.value = int(value)
            except Exception:
                self.value = -1
        else:
            self.value = value

    def is_set(self):
        """True when the value differs from the unset marker."""
        if self.type == "int":
            try:
                if self.value >= 0:
                    return True
            except Exception:
                pass
        else:
            try:
                if len(self.value) > 0:
                    return True
            except Exception:
                pass
        return False

    def incr(self):
        """Increment an int variable (no-op for str variables)."""
        if self.type == "int":
            self.value += 1

    def above(self, threshold):
        """True for int variables strictly greater than threshold."""
        if self.type == "int":
            try:
                if self.value > threshold:
                    return True
            except Exception:
                pass
        return False
class VariableSet:
    """A named collection of Variable objects keyed by variable name."""

    def __init__(self, names, str_names):
        # Names listed in str_names become string variables; the rest ints.
        self.base = {
            name: Variable(name, "str" if name in str_names else "int")
            for name in names
        }

    def variable(self, name):
        """Look up one Variable by name."""
        return self.base[name]

    def read(self):
        """Reload every variable from the posted form."""
        for var in self.base.values():
            var.read()

    def to_form(self):
        """Concatenate the hidden <input> fields of all variables."""
        return "".join(var.to_form() for var in self.base.values())

    def debug(self):
        """Concatenate the debug lines of all variables."""
        return "".join(var.debug() for var in self.base.values())
# ======================================================
# Module-level CGI setup: parse the POST form, emit the HTTP header, connect
# to Livy, and declare all the state variables that are round-tripped through
# hidden form fields between page loads.
LIVY_URL = "http://vm-75222.lal.in2p3.fr:21111"

form = cgi.FieldStorage()

print("Content-type: text/html; charset=utf-8\n")

client = LivyClient(LIVY_URL)

# init data
variables = VariableSet(["start",
                         "simul",
                         "change_simul",
                         "livy_session",
                         "waiting_session",
                         "waiting_statement",
                         "livy_statement",
                         "new_statement",
                         "kill_session",
                         "result"], ["new_statement", "result"])

start = variables.base["start"]
simul = variables.base["simul"]
change_simul = variables.base["change_simul"]
livy_session = variables.base["livy_session"]
waiting_session = variables.base["waiting_session"]
waiting_statement = variables.base["waiting_statement"]
livy_statement = variables.base["livy_statement"]
kill_session = variables.base["kill_session"]
new_statement = variables.base["new_statement"]
result = variables.base["result"]

# Restore the values posted by the previous page.
variables.read()

# First visit: default to simulation mode.
if not start.is_set():
    simul.set(1)
    start.set(1)
# ======================================================
# Page skeleton; the rest of the script appends forms to `html` and prints it.
html = """
<!DOCTYPE html>
<head>
<link rel="stylesheet" type="text/css" href="css/finkstyle.css">
<title>Mon programme test</title>
</head>
<body>
<div class="hero-image">
<div class="hero-text">
<h1 style="font-size:50px">Fink</h1>
<h3>Alert dataset monitor</h3>
<div class="topnav"> """

# manage Livy simulation
# change_simul carries a one-shot request (posted by the toggle button) to
# flip the simulation flag on this page load.
will_change_simul = change_simul.is_set()
change_simul.reset()
print("<br>change simul = {}".format(will_change_simul))

if will_change_simul:
    if simul.is_set():
        html += """
<form action="/indextry.py" method="post" name="simul">
<br> Currently using real Livy"""
        simul.reset()
        html += variables.to_form()
        html += """<button type="submit">Simul Livy</button>
</form>
"""
    else:
        html += """
<form action="/indextry.py" method="post">
<br> Currently simulate Livy"""
        simul.set(1)
        html += variables.to_form()
        html += """<button type="submit">Use real Livy</button>
</form>
"""
else:
    # No toggle requested: render the toggle button, arming change_simul so
    # the *next* submit flips the mode.
    if simul.is_set():
        html += """
<form action="/indextry.py" method="post">
<br> Currently simulate Livy"""
        change_simul.set(1)
        html += variables.to_form()
        html += """<button type="submit">Use real Livy</button>
</form>
"""
    else:
        html += """
<form action="/indextry.py" method="post" name="simul">
<br> Currently using real Livy"""
        change_simul.set(1)
        html += variables.to_form()
        html += """<button type="submit">Simul Livy</button>
</form>
"""

change_simul.reset()
# Manage Livy session & Spark statements
html += """
<form action="/indextry.py" method="post" name="operations">
"""

if simul.is_set():
    # Simulated backend: per the counters below, a session becomes "idle"
    # after more than 5 page reloads and a statement "completes" after more
    # than 5 reloads.
    if waiting_session.above(5):
        print("<br> session is now idle")
        waiting_session.reset()
        waiting_statement.reset()
        livy_statement.reset()
        livy_session.set(1)
    if waiting_statement.above(5):
        print("<br> statement just finished")
        waiting_session.reset()
        waiting_statement.reset()
        livy_statement.incr()

# debugging
# print("<br>")
# print("Keys = [", ",".join(form.keys()), "]")
print(variables.debug())

"""
Command interface
- select Livy simulation
- open session & wait for idle
- start statement & wait for completion
"""

if kill_session.is_set():
    id = livy_session.value
    try:
        client.delete_session(id)
    except:
        print("error killing session ", id)
    livy_session.reset()
    waiting_session.reset()
    kill_session.reset()

if livy_session.is_set():
    # statement management
    if not waiting_statement.is_set():
        html += """<br>session is idle: we may start a statement<br>"""
        waiting_statement.set(0)
        html += variables.to_form()
        html += """
Enter a Spark statement
<input type="text" name="new_statement" value="{}" />
<input type="text" name="result" value="{}" />
<button type="submit">Run</button>
""".format(new_statement.value, result.value)
    else:
        html += """<br>session is idle, we do wait a statement to complete<br>"""
        waiting_statement.incr()
        id = livy_session.value
        s = client.get_session(id)
        if not livy_statement.is_set():
            # No statement running yet: submit the posted one.
            st = client.create_statement(s.session_id, new_statement.value)
            livy_statement.set(st.statement_id)
        else:
            # Poll the running statement; harvest its output when available.
            st = client.get_statement(s.session_id, livy_statement.value)
            if st.state == StatementState.AVAILABLE:
                waiting_statement.reset()
                result.set(st.output.text)
                print("<br>", result.value)
                livy_statement.reset()
        html += variables.to_form()
        html += """<button type="submit">waiting statement to complete</button>"""
else:
    # session management
    if not waiting_statement.is_set():
        html += """<br>No session<br>"""
        waiting_session.set(0)
        print(waiting_session.debug())
        waiting_statement.reset()
        html += variables.to_form()
        html += """<button type="submit">Open a session</button>"""
    else:
        # we have requested a new session thus waiting_session is set
        if simul.is_set():
            waiting_session.incr()
        else:
            if not livy_session.is_set():
                print("Create a session ")
                s = client.create_session(SessionKind.PYSPARK)
                print("<br> session {} <br>".format(s.session_id))
                livy_session.set(s.session_id)
            # we test if the session is already idle
            id = livy_session.value
            s = client.get_session(id)
            if s.state == SessionState.IDLE:
                print("<br> session is now idle")
                waiting_session.reset()
                waiting_statement.reset()
                livy_statement.reset()
                new_statement.reset()
        html += """<br>Waiting session to become idle<br>"""
        html += variables.to_form()
        html += """<button type="submit">waiting session</button>"""

html += """</form>"""

if livy_session.is_set():
    # Offer a delete button; kill_session is armed so the next POST kills it.
    html += """
<form action="/indextry.py" method="post" name="operations">"""
    kill_session.set(1)
    html += variables.to_form()
    html += """
<button type="submit">Delete the session</button>
</form>
"""

html += """
</div>
<p>© AstroLab Software 2018-2019</p>
</div>
</div>
</body>
</html>
"""

print(html)
|
python
|
from ast import literal_eval
from database.model_people import ModelPeople
from database.model_planet import ModelPlanet
from database import base
import logging
import sys
# Load logging configuration
log = logging.getLogger(__name__)
# Route INFO and above to stdout with a timestamped format.
logging.basicConfig(
    stream=sys.stdout,
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if __name__ == '__main__':
    # Create the schema, then seed planets and people from the data files.
    log.info('Create database {}'.format(base.db_name))
    base.Base.metadata.create_all(base.engine)

    log.info('Insert Planet data in database')
    with open('database/data/planet.json', 'r') as file:
        data = literal_eval(file.read())
        for record in data:
            planet = ModelPlanet(**record)
            base.db_session.add(planet)
    base.db_session.commit()

    log.info('Insert People data in database')
    with open('database/data/people.json', 'r') as file:
        data = literal_eval(file.read())
        for record in data:
            # Fixed copy-paste naming: this loop builds ModelPeople rows but
            # the variable was previously (confusingly) named `planet`.
            people = ModelPeople(**record)
            base.db_session.add(people)
    base.db_session.commit()
|
python
|
import numpy.random as rd
import numpy as np
from display import *
from solver import *
def init_list(N):
    """Build N balls of decreasing mass/radius, evenly spaced across 400 units.

    Ball i gets mass and radius 10*(1 - 0.05*i), position (i+1)*400/(N+1) on
    both axes, and velocity (-10, +10).
    """
    radius = 10.
    speed = 10.
    spacing = 400. / float(N + 1)
    balls = []
    for idx in range(N):
        mass = radius * (1. - 0.05 * idx)
        position = [float(idx + 1) * spacing, float(idx + 1) * spacing]
        velocity = [-1. * speed, 1. * speed]
        balls.append(Ball(mass, mass, position, velocity))
    return balls
if __name__ == "__main__":
    # Simulate 10 balls in a 400-unit box with a 0.02 time step.
    balls = init_list(10)
    size = 400.
    step = 0.02
    Display(balls, step, size)
|
python
|
'''
source-directory /etc/network/interfaces.d
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet manual
allow-hotplug ppp0
iface ppp0 inet wvdial
post-up echo "cellular (ppp0) is online"
allow-hotplug wlan0
iface wlan0 inet manual
wpa-roam /etc/wpa_supplicant/wpa_supplicant.conf
allow-hotplug wlan1
iface wlan1 inet manual
wpa-roam /etc/wpa_supplicant/wpa_supplicant.conf
'''
import re
import functools
def _expand_wildcard(*rng):
def outer(func):
@functools.wraps(func)
def inner(i='*', *a, **kw):
return (
'\n'.join(func(i, *a, **kw) for i in range(*rng))
if i == '*' else func(i, *a, **kw))
return inner
return outer
# interfaces
def source_dir(fname='/etc/network/interfaces.d'):
    """Emit the `source-directory` stanza pointing at *fname*."""
    return 'source-directory %s' % (fname,)
def lo(**kw):
    # Loopback stanza: `auto lo` / `iface lo inet loopback`.
    return _iface('lo', method='loopback', **kw)
@_expand_wildcard(2)
def eth(i=0, **kw):
    # Wired interface eth<i>; i='*' expands to eth0 and eth1.
    return _iface('eth{}'.format(i), **kw)
@_expand_wildcard(2)
def wlan(i=0, wpa='/etc/wpa_supplicant/wpa_supplicant.conf', roam=True, **kw):
    # Wireless interface wlan<i>; emits wpa-roam (roaming) or wpa-conf
    # pointing at the supplicant config (underscores become dashes later).
    kw['wpa_{}'.format('roam' if roam else 'conf')] = wpa
    return _iface('wlan{}'.format(i), hotplug=True, **kw)
@_expand_wildcard(2)
def ppp(i=0, method='wvdial', **kw):
    # Cellular interface ppp<i> with a post-up echo announcing it is online.
    name = 'ppp{}'.format(i)
    return _iface(
        name, hotplug=True, method=method,
        post_up='echo "cellular ({}) is online"'.format(name), **kw)
IFACES = {'wlan': wlan, 'eth': eth, 'ppp': ppp}

def iface(name, *a, **kw):
    """Dispatch to an interface builder.

    `name` is either a callable (invoked directly) or a string like 'wlan0',
    'eth*' or 'ppp': the alphabetic prefix picks the builder and the remainder
    ('0', '*', or empty -> '*') is passed as the index.
    """
    if callable(name):
        return name(*a, **kw)
    # Fixed character class: [A-z] also matched '[', '\', ']', '^', '_', '`'.
    matches = re.search(r'([A-Za-z]+)([\d\*]*)', name)
    name, i = matches.groups()
    return IFACES[name.lower()](i or '*', *a, **kw)
# utils
def _iface(name, method='manual', hotplug=False, static=None, **kw):
    """Render one interface stanza.

    Extra keyword args become `key value` config lines via _cfg_lines (key
    underscores are converted to dashes there).
    NOTE(review): `static` is accepted but currently unused -- verify intent.
    """
    return '''
{allow} {name}
iface {name} inet {method}
{extra}
'''.format(
        name=name, allow='allow-hotplug' if hotplug else 'auto', method=method,
        extra='\n'.join(l for k, v in kw.items() for l in _cfg_lines(k, v)))
def _cfg_lines(name, value):
name = name.replace('_', '-')
for v in value if isinstance(value, (list, tuple)) else [value]:
yield '{} {}'.format(name, v)
# config formats
def build_file(*ifaces):
    """Assemble a full interfaces file: source-directory, loopback, then the
    given stanzas (callables are invoked, strings used as-is)."""
    parts = [source_dir(), lo()]
    for ifc in ifaces:
        parts.append(ifc() if callable(ifc) else ifc)
    return '\n'.join(parts)
def default():
    # Standard layout: wired, cellular and wireless with their '*' expansions.
    return build_file(eth, ppp, wlan)
def from_config(config=None):
    """Build an interfaces file from a config list.

    Each entry is a dict with 'interface' (name/pattern such as 'wlan*') and
    optional 'etc' (extra keyword args for the builder).
    Bug fix: `config` was previously overwritten unconditionally, so callers
    could never supply their own list; the hard-coded entries are now only
    the default for config=None.
    """
    if config is None:
        config = [
            {'interface': 'wlan*', 'ssids': 's0nycL1f3l1ne'},
            {'interface': 'eth*'},
            {'interface': 'ppp*'},
            {'interface': 'wlan*'},
        ]
    return build_file(*(
        iface(c['interface'], **c.get('etc', {}))
        for c in config
    ))
if __name__ == '__main__':
    # Expose the builders as a CLI, e.g. `python interfaces.py default`.
    import fire
    fire.Fire()
|
python
|
import torch
from PIL import Image
import numpy as np
from torchvision import datasets, models, transforms
import os
import glob
from models import DoveNetG
# NOTE(review): these module-level 512x512 transforms are shadowed by the
# per-image transforms rebuilt inside run(); presumably kept for reference.
transformsC = transforms.Compose([transforms.Resize((512, 512)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
transformsG = transforms.Compose([transforms.Resize((512, 512)), transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, ))])

# model_dir = '/content/drive/MyDrive/TibaMe/Phantom Captcher/DoveNet/saved_models/latest_net_G.pth'
model_dir = 'pth/DoveNetG.pth'

# Load the DoveNet generator weights once at import time.
print("Loading GeneCompressedNet...")
net = DoveNetG()
# model = init_net(model, gpu_ids=[0])
net.load_state_dict(torch.load(model_dir))
def run(img_, mask_):
    """Harmonize composite image `img_` with region `mask_` via DoveNet.

    :param img_: RGB PIL image (the composite)
    :param mask_: single-channel PIL mask of the pasted region
    :return: harmonized RGB PIL image at the original size
    """
    # PIL's Image.size is (width, height). The original unpacked it as
    # `height, width = img_.size`, which transposed the final PIL resize for
    # non-square inputs.
    width, height = img_.size
    # torchvision Resize expects (height, width).
    transformsC = transforms.Compose([transforms.Resize((height, width)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    transformsG = transforms.Compose([transforms.Resize((height, width)), transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, ))])
    img_ = transformsC(img_)
    mask_ = transformsG(mask_)
    # Concatenate image (3ch) + mask (1ch) into a 4-channel batch of one.
    inputs = torch.cat([img_, mask_], 0)
    inputs = torch.unsqueeze(inputs, 0)
    with torch.no_grad():
        output = net(inputs)
    # Map the [-1, 1] float CHW output back to [0, 255] uint8 HWC.
    im_numpy = output.data[0].cpu().float().numpy()
    im_numpy = (np.transpose(im_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    im_numpy = im_numpy.astype(np.uint8)
    # PIL resize expects (width, height).
    im = Image.fromarray(im_numpy).resize((width, height)).convert("RGB")
    return im
|
python
|
"""
Desafio 011
Problema: Faça um programa que leia a largura e a altura de uma
parede em metros, calcule a sua área e a quantidade de
tinta necessária para pintá-la, sabendo que cada litro
de tinta pinta uma área de 2 metros quadrados"""
# Read the wall dimensions, then report area and paint needed (1 l per 2 m²).
largura = float(input('Digite a largura: '))
altura = float(input('Digite a altura: '))
area = largura * altura
print(f'Sua parede tem a dimensão de {largura}x{altura} e sua área é de: {area}m².')
litros = area / 2
print(f'Você precisa de: {litros}l de tinta.')
|
python
|
import torch
import torch.nn as nn
class FactorizedReduce(nn.Module):
    """
    Halve the spatial size with two stride-2 pointwise convolutions (one on
    the input, one on the input shifted by a pixel), concatenated on the
    channel axis and batch-normalized. Assumes C_out is even.
    """

    def __init__(self, C_in, C_out, affine=True):
        super().__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.pad2 = nn.ZeroPad2d((0, 1, 0, 1))
        self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)

    def forward(self, x):
        activated = self.relu(x)
        # Second branch sees the input shifted by one pixel in H and W, so the
        # two stride-2 branches sample complementary positions.
        shifted = activated[:, :, 1:, 1:]
        merged = torch.cat([self.conv1(activated), self.conv2(shifted)], dim=1)
        return self.bn(merged)
# Module-level smoke run: 10->20 channels, 6x6 -> 3x3 spatial.
a = torch.rand(5, 10, 6, 6)
l = FactorizedReduce(10, 20)
b = l(a)
|
python
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class DetectedIssueSchema:
"""
Indicates an actual or potential clinical issue with or between one or more
active or proposed clinical actions for a patient; e.g. Drug-drug interaction,
Ineffective treatment frequency, Procedure-condition conflict, etc.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
Indicates an actual or potential clinical issue with or between one or more
active or proposed clinical actions for a patient; e.g. Drug-drug interaction,
Ineffective treatment frequency, Procedure-condition conflict, etc.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a DetectedIssue resource
identifier: Business identifier associated with the detected issue record.
status: Indicates the status of the detected issue.
category: Identifies the general type of issue identified.
severity: Indicates the degree of importance associated with the identified issue based
on the potential impact on the patient.
patient: Indicates the patient whose record the detected issue is associated with.
date: The date or date-time when the detected issue was initially identified.
author: Individual or device responsible for the issue being raised. For example, a
decision support application or a pharmacist conducting a medication review.
implicated: Indicates the resource representing the current activity or proposed activity
that is potentially problematic.
detail: A textual explanation of the detected issue.
reference: The literature, knowledge-base or similar reference that describes the
propensity for the detected issue identified.
mitigation: Indicates an action that has been taken or is committed to to reduce or
eliminate the likelihood of the risk identified by the detected issue from
manifesting. Can also reflect an observation of known mitigating factors that
may reduce/eliminate the need for any action.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.detectedissue_mitigation import (
DetectedIssue_MitigationSchema,
)
if (
max_recursion_limit
and nesting_list.count("DetectedIssue") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["DetectedIssue"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a DetectedIssue resource
StructField("resourceType", StringType(), True),
# Business identifier associated with the detected issue record.
StructField(
"identifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates the status of the detected issue.
StructField("status", StringType(), True),
# Identifies the general type of issue identified.
StructField(
"category",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates the degree of importance associated with the identified issue based
# on the potential impact on the patient.
StructField("severity", StringType(), True),
# Indicates the patient whose record the detected issue is associated with.
StructField(
"patient",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The date or date-time when the detected issue was initially identified.
StructField("date", StringType(), True),
# Individual or device responsible for the issue being raised. For example, a
# decision support application or a pharmacist conducting a medication review.
StructField(
"author",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates the resource representing the current activity or proposed activity
# that is potentially problematic.
StructField(
"implicated",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A textual explanation of the detected issue.
StructField("detail", StringType(), True),
# The literature, knowledge-base or similar reference that describes the
# propensity for the detected issue identified.
StructField("reference", StringType(), True),
# Indicates an action that has been taken or is committed to to reduce or
# eliminate the likelihood of the risk identified by the detected issue from
# manifesting. Can also reflect an observation of known mitigating factors that
# may reduce/eliminate the need for any action.
StructField(
"mitigation",
ArrayType(
DetectedIssue_MitigationSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
|
python
|
# --- Function basics (legacy Python 2: `print` is a statement, not a function) ---

# simple no arg function
def simple_function():
    print 'Hello, function!'
simple_function()
# simple function with argument: prints the Fibonacci numbers below n.
# The trailing comma after `print` suppresses the newline (Python 2 idiom),
# so the numbers come out space-separated on one line.
def fib(n):
    a, b = 0, 1
    while a < n:
        print a,
        a, b = b, a+b
fib(10)
print ''
# example of using documentation string (so-called docstring)
def other_function():
    """Simple gibbrish print statement"""
    print 'Hello'
other_function()
# the docstring is available at runtime through the __doc__ attribute
print other_function.__doc__
# functions are first-class objects and can be assigned to variables
f = simple_function
f()
# return values: use the return statement
def fib_ret(n):
    """Return the list of Fibonacci numbers that are strictly smaller than n."""
    series = []
    current, following = 0, 1
    while current < n:
        series.append(current)
        current, following = following, current + following
    return series
print fib_ret(20)
# default values for parameters
def default_args_fun(a=1, b=2):
    print a, b
default_args_fun()
default_args_fun(10)
default_args_fun(100, 1000)
# keyword argument notation
# keyword arguments must go after positional arguments
default_args_fun(b=1000)
# a *name parameter collects the extra positional arguments into a tuple
def positional_arguments(a=1,b=2, *arguments):
    print str(arguments)
positional_arguments(1,2)
positional_arguments(1,2,3,4)
# a **name parameter collects the extra keyword arguments into a dict
def keyword_arguments(a,b, **arguments):
    print str(arguments)
keyword_arguments(10,20)
keyword_arguments(10,20, aa=1, bb=2)
# unpacking arguments
# When a function requires e.g. two arguments and we have them in one
# two-element list, the * "unpack" syntax spreads the list into the call.
def unpack_function(a,b):
    print a,b
args = [1,2]
unpack_function(*args)
# Likewise, ** unpacks a dict's items into keyword arguments.
args_map = {"a":1, "b":2}
unpack_function(**args_map)
|
python
|
import pytest
from pytest import raises
from vyper import compiler
from vyper.exceptions import InvalidType, UnknownType
fail_list = [
"""
x: bat
""",
"""
x: HashMap[int, int128]
""",
"""
x: [bar, baz]
""",
"""
x: [bar(int128), baz(baffle)]
""",
"""
struct A:
b: B
struct B:
a: A
""",
]
@pytest.mark.parametrize("bad_code", fail_list)
def test_unknown_type_exception(bad_code):
    """Every snippet in fail_list must abort compilation with UnknownType."""
    with pytest.raises(UnknownType):
        compiler.compile_code(bad_code)
invalid_list = [
"""
@public
def foo():
raw_log(b"cow", b"dog")
""",
# Must be a literal string.
"""
@public
def mint(_to: address, _value: uint256):
assert msg.sender == self,minter
""",
# Raise reason must be string
"""
@public
def mint(_to: address, _value: uint256):
raise 1
""",
"""
x: int128[3.5]
""",
]
@pytest.mark.parametrize("bad_code", invalid_list)
def test_invalid_type_exception(bad_code):
    """Every snippet in invalid_list must abort compilation with InvalidType."""
    with pytest.raises(InvalidType):
        compiler.compile_code(bad_code)
|
python
|
import os, sys
import numpy as np
from env_handler import EnvHandler
from q_function import Q
from logger import Logger
from agent import Agent
from action_selectors.mbie_eb import MBIE_EB
from action_selectors.epsilon_greedy import EpsilonGreedy
from action_selectors.boltzmann import Boltzmann
from action_selectors.ucb_1 import UCB_1
from action_selectors.controlability import Controlability
from action_selectors.vdbe import VDBE
env_handler = EnvHandler()
# CLI: args[0] = action-selector name, args[1] = base output directory.
args = sys.argv[1:]
# Tabular Q-learning configuration shared by every run.
episodes = 1
steps = 200
learning_rate = 0.1
discount_factor = 0.9
# Gym environments to sweep over (classic control, algorithmic, toy-text).
env_names = ['Acrobot-v1', 'CartPole-v1', 'MountainCar-v0', 'Pendulum-v0', \
'Copy-v0', 'DuplicatedInput-v0', 'RepeatCopy-v0', 'Reverse-v0', 'ReversedAddition-v0', 'ReversedAddition3-v0', \
'Blackjack-v0', 'Roulette-v0', 'FrozenLake-v0', 'FrozenLake8x8-v0', 'NChain-v0', 'Taxi-v3']
# Exploration hyperparameters; one of the candidate lists below is picked
# as the sweep values depending on the selector named on the command line.
epsilon = 1
omega = 0.5
betas = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
epsilon_disc = [0.9999, 0.999, 0.99, 0.9, 0.85, 0.8, 0.75, 0.7, 0.6, 0.5]
temps = [1000000, 500000, 100000, 10000, 5000, 1000]
deltas = [0.05, 0.1, 0.25, 0.5, 0.75, 1, 2, 5, 10, 25]
c_vals = [0.1, 0.5, 1, 5, 10, 25, 50, 100, 250, 500]
# Random seeds: each (env, val) cell is repeated once per seed.
seeds = [101, 100, 99, 98, 97]
action_selector_name = args[0]
# Map each exploration strategy to the hyperparameter list it sweeps over.
# A dict lookup replaces the original if/elif chain, which had no `else`
# branch: an unrecognized selector name left `vals` undefined and the
# script later died with a confusing NameError.
_SELECTOR_SWEEPS = {
    'epsilon-greedy': epsilon_disc,
    'boltzmann': temps,
    'ucb-1': c_vals,
    'vdbe': deltas,
    'controlability': betas,
    'mbie-eb': betas,
}
if action_selector_name not in _SELECTOR_SWEEPS:
    raise ValueError('Unknown action selector: ' + action_selector_name)
vals = _SELECTOR_SWEEPS[action_selector_name]
base_dir = args[1]
# Full sweep: environment x hyperparameter value x seed.
for env_name in env_names:
    print(env_name)
    env = env_handler.get_env(env_name)
    # One tabular Q-function per environment, reset before each run below.
    q_function = Q(env.get_total_states(), env.get_total_actions(), learning_rate, discount_factor)
    log_dir = base_dir + 'data/' + env_name + '/' + action_selector_name
    for val in vals:
        # Per-value output layout: <val>/q_function and <val>/training-data.
        val_dir = log_dir + '/' + str(val)
        os.makedirs(val_dir + '/q_function', exist_ok=True)
        os.makedirs(val_dir + '/training-data', exist_ok=True)
        print('EDF = ' + str(val))
        for seed in seeds:
            print('Seed = ' + str(seed))
            env.seed(seed)
            # Build a fresh action selector for this run; `val` is the
            # strategy-specific hyperparameter chosen by the CLI selector name.
            if action_selector_name == 'epsilon-greedy':
                action_selector = EpsilonGreedy(epsilon, val, seed)
            elif action_selector_name == 'boltzmann':
                action_selector = Boltzmann(val, seed)
            elif action_selector_name == 'ucb-1':
                action_selector = UCB_1(val, env.get_total_states(), env.get_total_actions())
            elif action_selector_name == 'vdbe':
                action_selector = VDBE(env.get_total_states(), val, 1 / env.get_total_actions(), learning_rate, seed)
            elif action_selector_name == 'mbie-eb':
                action_selector = MBIE_EB(val, env.get_total_states(), env.get_total_actions(), discount_factor)
            elif action_selector_name == 'controlability':
                action_selector = Controlability(val, env.get_total_states(), env.get_total_actions(), learning_rate, omega)
            # Fresh Q-table and logger so runs are independent.
            q_function.reset()
            logger = Logger(episodes)
            filepath = str(seed)
            agent = Agent(env, q_function, action_selector, logger)
            agent.train(steps, episodes, val_dir, filepath)
print('Done')
|
python
|
#!/bin/python
import sys
import yaml
import datetime
if __name__ == '__main__':
    # Source catalogue of FABM standard variables; the first CLI argument
    # overrides the hard-coded default path.
    filename = sys.argv[1] if len(sys.argv) > 1 else '/Volumes/Kea/devel/MOSSCO/code/external/fabm/code/util/standard_variables/variables.yaml'
    # Guard against an explicitly empty argument (the default is never empty).
    if not filename: sys.exit(1)
    with open(filename, 'r') as fid:
        yml = yaml.safe_load(fid)
    # Flatten every variable category into one list of
    # {standard_name, canonical_units} records.  The category keys are not
    # used, so iterate over the values only (the original also discarded an
    # unused enumerate() index here).
    entries = []
    for value in yml.values():
        entries.extend({'standard_name': item['name'], 'canonical_units': item['units']}
                       for item in value)
    # MOSSCO-specific enhancements that are not part of the FABM set.
    entries.append({'standard_name': 'x_velocity_at_10m_above_sea_surface', 'canonical_units': 'm s-1'})
    entries.append({'standard_name': 'y_velocity_at_10m_above_sea_surface', 'canonical_units': 'm s-1'})
    fieldDict={
        'version_number': 0.1,
        'institution': 'Helmholtz-Zentrum Geesthacht Zentrum für Material- und Küstenforschung',
        'source': 'automatically generated from FABM standard variables, with enhancements from MOSSCO',
        'contact': 'Carsten Lemmen <[email protected]>',
        'last_modified': datetime.datetime.now().isoformat(),
        'description': 'Community-based dictionary for shared coupling fields',
        'entries': entries}
    # We could also use aliases:
    # - alias: p
    #   standard_name: air_pressure
    #
    with open('field_dictionary.yaml', 'w') as fid:
        yaml.dump({'field_dictionary': fieldDict}, stream=fid)
|
python
|
import pandas as pd
from scholarly import scholarly
import plotly.express as px
def search_author(name, return_list=False):
    """Look up authors by name; return the first hit, or all hits as a list."""
    hits = scholarly.search_author(name)
    return list(hits) if return_list else next(hits)
def search_author_id(id):
    """Fetch an author by Google Scholar id; print a message and return None on failure."""
    try:
        return scholarly.search_author_id(id)
    except Exception:
        print("Invalid scholar id: {}".format(id))
        return None
def get_author_list(name):
    """Return "name | scholar_id | affiliation" strings, sorted by affiliation."""
    authors = search_author(name, return_list=True)
    # Build affiliation-first strings so a plain sort orders by affiliation...
    keyed = []
    for author in authors:
        keyed.append(
            f"{author['affiliation']} | {author['name']} | {author['scholar_id']}")
    keyed.sort()
    # ...then rotate each entry into "name | id | affiliation" order.
    rotated = []
    for entry in keyed:
        affiliation, author_name, scholar_id = entry.split(' | ')[:3]
        rotated.append(f"{author_name} | {scholar_id} | {affiliation}")
    return rotated
def search_org(name):
    """Search Google Scholar organizations by name (thin scholarly wrapper)."""
    return scholarly.search_org(name)
def search_author_by_org(org_id, return_list=False):
    """Search authors by organization id.

    Args:
        org_id (str): Organization id, e.g. 145051948357103924.
        return_list (bool, optional): If True, return every author found
            instead of only the first one.

    Returns:
        The first author found, or the full list when return_list is True.
    """
    found = scholarly.search_author_by_organization(org_id)
    if return_list:
        return list(found)
    return next(found)
def get_author_record(name=None, id=None, sections=None, sortby="citedby", limit=0):
    """Fetch and fill a full author record from Google Scholar.

    Args:
        name (str, optional): Author name; the first search hit is used.
        id (str, optional): Google Scholar author id. Also accepts the
            "name | id | affiliation" strings produced by get_author_list,
            from which the id part is extracted.
        sections (list, optional): The sections to fill on the Author object.
            This can be: ['basics', 'indices', 'counts', 'coauthors',
            'publications', 'public_access']. Defaults to None, which is
            passed on as an empty list (the original default).
        sortby (str, optional): Publication sort order. Defaults to "citedby".
        limit (int, optional): Max publications to fill; 0 means no limit.

    Raises:
        ValueError: If neither name nor id is given.

    Returns:
        The filled author record.
    """
    # The previous signature used a mutable default (sections=[]); a None
    # sentinel avoids sharing one list object across calls.
    if sections is None:
        sections = []
    if name is not None:
        author = search_author(name)
    elif id is not None:
        # Accept "name | id | affiliation" strings and pull out the id part.
        if "|" in id:
            id = id.split("|")[1].strip()
        author = search_author_id(id)
    else:
        raise ValueError("Either name or id must be specified.")
    result = scholarly.fill(author, sections=sections,
                            sortby=sortby, publication_limit=limit)
    return result
def get_author_pubs(name=None, id=None, record=None, sections=None, sortby="citedby", limit=0, return_df=False):
    """Return an author's publications as flat dicts (or a DataFrame).

    Each publication's nested 'bib' entries are hoisted to top-level
    'title'/'year' keys, and the scholarly bookkeeping keys ('bib',
    'source', 'filled') are removed.  NOTE: the publication dicts are
    modified in place, matching the original behaviour.

    Args:
        name (str, optional): Author name; used when record is None.
        id (str, optional): Scholar id; used when record is None.
        record (dict, optional): A pre-fetched author record to read from.
        sections (list, optional): Sections to fill when fetching; defaults
            to ["publications"].  (A None sentinel replaces the previous
            mutable default argument.)
        sortby (str, optional): Publication sort order. Defaults to "citedby".
        limit (int, optional): Max publications to fetch; 0 means no limit.
        return_df (bool, optional): Return a pandas DataFrame instead of a list.

    Returns:
        list | pandas.DataFrame: One entry per publication.
    """
    if sections is None:
        sections = ["publications"]
    if record is None:
        pubs = get_author_record(
            name=name, id=id, sections=sections, sortby=sortby, limit=limit
        )["publications"]
    else:
        pubs = record["publications"]
    result = []
    for pub in pubs:
        # Promote title/year out of the nested bib dict, then drop it
        # (the original tested `"bib" in pub` twice).
        if "bib" in pub:
            bib = pub.pop("bib")
            if "title" in bib:
                pub["title"] = bib["title"]
            if "pub_year" in bib:
                pub["year"] = bib["pub_year"]
        pub.pop("source", None)
        pub.pop("filled", None)
        result.append(pub)
    return pd.DataFrame(result) if return_df else result
def get_author_basics(name=None, id=None, record=None, return_df=False):
    """Collect an author's basic profile fields into a dict (or DataFrame).

    Missing fields come back as empty strings; 'affiliation_id' is taken
    from the record's 'organization' entry when present, and 'scholar_url'
    is always derived from the scholar_id.
    """
    if record is None:
        if name is None and id is None:
            raise ValueError("Either name or id must be specified.")
        record = get_author_record(name=name, id=id)
    items = ["name", "scholar_id", "affiliation", "affiliation_id", "scholar_url",
             "url_picture", "homepage", "email_domain", "interests", "citedby", "citedby5y", "hindex", "hindex5y", "i10index", "i10index5y", "cites_per_year"]
    result = {item: record.get(item, "") for item in items}
    if "organization" in record:
        result["affiliation_id"] = record["organization"]
    result["scholar_url"] = f"https://scholar.google.com/citations?user={record['scholar_id']}"
    if not return_df:
        return result
    df = pd.DataFrame([result]).transpose()
    df.reset_index(inplace=True)
    df.columns = ["key", "value"]
    return df
def author_pubs_by_year(name=None, id=None, record=None, return_plot=False):
    """Count an author's publications per year; optionally also return a bar chart."""
    pubs = get_author_pubs(name=name, id=id, record=record, return_df=True)
    counts = pubs.groupby("year").size()
    df = pd.DataFrame({"pubs": counts}).reset_index()
    if return_plot:
        fig = px.bar(df, x="year", y="pubs",
                     title=f"Publications by year")
        return df, fig
    return df
def author_citations_by_year(name=None, id=None, record=None, return_plot=False):
    """Tabulate an author's citations per year; optionally also return a bar chart."""
    if record is None:
        if name is None and id is None:
            raise ValueError("Either name or id must be specified.")
        record = get_author_record(name=name, id=id)
    per_year = record["cites_per_year"]
    df = pd.DataFrame({"year": list(per_year.keys()),
                       "citations": list(per_year.values())})
    if return_plot:
        fig = px.bar(df, x="year", y="citations",
                     title=f"Citations by year")
        return df, fig
    return df
def get_author_coauthors(name=None, id=None, record=None, return_df=False):
    """Return an author's co-author list, raw or as a trimmed DataFrame."""
    if record is None:
        if name is None and id is None:
            raise ValueError("Either name or id must be specified.")
        record = get_author_record(name=name, id=id)
    coauthors = record["coauthors"]
    if return_df:
        return pd.DataFrame(coauthors)[["name", "scholar_id", "affiliation"]]
    return coauthors
|
python
|
from Tkinter import *
from common import Codes
from ..controllers import AdminDataController
from ..handlers.data import Data
class Admin(Frame):
    """Admin dashboard frame (Tkinter, Python 2).

    Shows the server's used disk space plus two scrollable listboxes with
    error and activity log messages.  All widgets are registered by name in
    ``self.elements`` so initialize() can refresh them later.
    """

    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        # name -> widget registry used by initialize() to update contents
        self.elements = {}
        # --- title bar ---
        title_frame = Frame(self)
        title_frame.pack(expand=True, fill=BOTH, padx=70, pady=(30, 15))
        self.elements['title'] = Label(title_frame, text='Admin',
            fg='#003399', font=('Arial', 28))
        self.elements['title'].pack(side=TOP)
        # --- used-space summary line ---
        used_space_frame = Frame(self)
        used_space_frame.pack(expand=True, fill=BOTH, padx=70, pady=30)
        self.elements['used_space_label'] = Label(used_space_frame, text='Used Space: ',
            font=('Arial', 18))
        self.elements['used_space_label'].pack(side=LEFT)
        # --- error log list with vertical and horizontal scrollbars ---
        errors_frame = Frame(self)
        errors_frame.pack(expand=True, fill=BOTH, padx=70, pady=(0, 15))
        self.elements['errors_label'] = Label(errors_frame, text='Errors:',
            font=('Arial', 18))
        self.elements['errors_label'].pack(side=TOP, anchor=NW, pady=5)
        y_errors_scrollbar = Scrollbar(errors_frame)
        y_errors_scrollbar.pack(side=RIGHT, fill=Y)
        x_errors_scrollbar = Scrollbar(errors_frame, orient='horizontal')
        x_errors_scrollbar.pack(side=BOTTOM, fill=X)
        self.elements['errors_listbox'] = Listbox(errors_frame, font=('Arial', 14),
            yscrollcommand=y_errors_scrollbar.set, xscrollcommand=x_errors_scrollbar.set)
        self.elements['errors_listbox'].pack(side=TOP, anchor=NW, expand=True, fill=BOTH)
        # wire the scrollbars back to the listbox view
        y_errors_scrollbar.config(command=self.elements['errors_listbox'].yview)
        x_errors_scrollbar.config(command=self.elements['errors_listbox'].xview)
        # --- activity log list with vertical and horizontal scrollbars ---
        activity_frame = Frame(self)
        activity_frame.pack(expand=True, fill=BOTH, padx=70, pady=(15, 40))
        self.elements['activity_label'] = Label(activity_frame, text='Activity:',
            font=('Arial', 18))
        self.elements['activity_label'].pack(side=TOP, anchor=NW, pady=5)
        y_activity_scrollbar = Scrollbar(activity_frame)
        y_activity_scrollbar.pack(side=RIGHT, fill=Y)
        x_activity_scrollbar = Scrollbar(activity_frame, orient='horizontal')
        x_activity_scrollbar.pack(side=BOTTOM, fill=X)
        self.elements['activity_listbox'] = Listbox(activity_frame, font=('Arial', 14),
            yscrollcommand=y_activity_scrollbar.set, xscrollcommand=x_activity_scrollbar.set)
        self.elements['activity_listbox'].pack(side=TOP, anchor=NW, expand=True, fill=BOTH)
        y_activity_scrollbar.config(command=self.elements['activity_listbox'].yview)
        x_activity_scrollbar.config(command=self.elements['activity_listbox'].xview)

    def initialize(self):
        """Fetch admin data from the controller and populate the widgets.

        On a non-SUCCESS response, shows the error through the parent and
        returns to the previous frame without touching the widgets.
        """
        admin_data_response = AdminDataController.get_admin_data(Data().get_token())
        if admin_data_response.code != Codes.SUCCESS:
            self.parent.display_error(admin_data_response.payload['message'])
            self.parent.return_frame()
            return
        self.admin_data = admin_data_response.payload
        self.elements['used_space_label']['text'] = 'Used Space: ' + str(
            self.admin_data['used_space']) + 'MB'
        # Clear both lists before re-filling them.
        self.elements['errors_listbox'].delete(0, END)
        self.elements['activity_listbox'].delete(0, END)
        # Route each log entry to the matching listbox by its type.
        for log in self.admin_data['logs']:
            if log['type'] == 'error':
                self.elements['errors_listbox'].insert(END, log['message'])
            elif log['type'] == 'activity':
                self.elements['activity_listbox'].insert(END, log['message'])
|
python
|
# Import library(toolkit) for deep learning
import numpy as np
import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
import pytorchtools as tools
import pandas as pd
import time
import myfunctions as my
# read data
workspace_dir = './output'
print("Reading data")
train_x, train_y = my.readfile(os.path.join(workspace_dir, "training"), True)
print("Size of training data = {}".format(len(train_x)))
test_x, test_y = my.readfile(os.path.join(workspace_dir, "testing"), True)
print("Size of Testing data = {}".format(len(test_x)))
# Training-time augmentation: random rotation of +/-15 degrees.
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomRotation((-15,15)),
    transforms.ToTensor(),
])
# No augmentation for validation / testing.
test_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
])
# Shuffle, then hold out 20% of the training data for validation.
train_x, train_y = my._shuffle(train_x, train_y)
train_x, train_y, val_x, val_y = my._train_dev_split(train_x, train_y, 0.2)
batch_size = 150
train_set = tools.DigitDataset(train_x, train_y, train_transform)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle = True)
print("Size of training data = {}".format(len(train_x)))
val_set = tools.DigitDataset(val_x, val_y, test_transform)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle = False)
print("Size of validation data = {}".format(len(val_x)))
#training model
# GPU for training; the CPU device is only used to pull tensors back for
# numpy-based metric computation.
cpu = torch.device("cpu")
gpu = torch.device("cuda:0")
model = tools.maxout_conv_net(4).to(gpu)
# Warm-start from a previously saved checkpoint.
model.load_state_dict(torch.load('digitalnetwork_3.pkl'))
patience = 3
loss = nn.CrossEntropyLoss() # cross entropy: the standard loss for classification
optimizer = torch.optim.Adam(model.parameters(), lr = 0.001) # Adam's momentum helps avoid getting stuck at saddle points or local minima
num_epoch = 15
trainingtime = 10
early_stopping = tools.EarlyStopping(patience = patience, verbose=True)
# Outer loop: re-split train/validation each round; inner loop: epochs with
# early stopping on the validation loss.
# NOTE(review): the outer loop variable `i` is shadowed by the inner
# `enumerate` loops below.  The `if i > 0` test runs before the shadowing,
# so behaviour is unaffected, but the reuse is fragile.
for i in range(trainingtime):
    if i > 0:
        # Recombine train+val, reshuffle, and draw a fresh 20% validation split.
        train_x, train_y = my._shuffle(np.concatenate((train_x, val_x), axis = 0), np.concatenate((train_y, val_y), axis = 0))
        train_x, train_y, val_x, val_y = my._train_dev_split(train_x, train_y, 0.2)
        train_set = tools.DigitDataset(train_x, train_y, train_transform)
        train_loader = DataLoader(train_set, batch_size = batch_size, shuffle = True)
        val_set = tools.DigitDataset(val_x, val_y, test_transform)
        val_loader = DataLoader(val_set, batch_size = batch_size, shuffle = False)
        # Reset the early-stopping counter for the new split.
        early_stopping.refresh()
    for epoch in range(num_epoch):
        epoch_start_time = time.time()
        train_acc = 0.0
        train_loss = 0.0
        val_acc = 0.0
        val_loss = 0.0
        model.train() # ensure model in train mode(for dropout)
        for i, data in enumerate(train_loader):
            optimizer.zero_grad() # we have to set gradient to be zero before new decending
            train_pred = model(data[0].to(gpu)) #use model to get the predicted probabilities distrubution, which actually is done by calling forward function in the model
            batch_loss = loss(train_pred, data[1].to(gpu)) # to calculate out loss, noting prediction and label should be simultaneously on CPU or GPU
            batch_loss.backward() # use back propagation algorithm to calculate out gradients for each parameters
            optimizer.step() # use gradient to update our parameters by optimizer
            train_acc += np.sum(np.argmax(train_pred.to(cpu).data.numpy(), axis=1) == data[1].numpy())
            train_loss += batch_loss.item()
        # Evaluation pass: no gradients, dropout disabled.
        model.eval()
        with torch.no_grad():
            for i, data in enumerate(val_loader):
                val_pred = model(data[0].to(gpu))
                batch_loss = loss(val_pred, data[1].to(gpu))
                val_acc += np.sum(np.argmax(val_pred.to(cpu).data.numpy(), axis=1) == data[1].numpy())
                val_loss += batch_loss.item()
        # Early stopping tracks mean validation loss (checkpointing is
        # handled inside EarlyStopping — presumably; verify in pytorchtools).
        early_stopping(val_loss/val_set.__len__(), model)
        #print out now on accuracy
        print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f | Val Acc: %3.6f loss: %3.6f' % \
            (epoch + 1, num_epoch, time.time()-epoch_start_time, \
            train_acc/train_set.__len__(), train_loss/train_set.__len__(), val_acc/val_set.__len__(), val_loss/val_set.__len__()))
        if early_stopping.early_stop:
            print("Early stopping")
            break
#combine validation data and training data in order to get better model adter getting better model
#train_val_x = np.concatenate((train_x, val_x), axis = 0)
#train_val_y = np.concatenate((train_y, val_y), axis = 0)
#train_val_x, train_val_y = _shuffle(train_val_x, train_val_y)
#train_val_set = DigitDataset(train_val_x, train_val_y, train_transform)
#train_val_loader = DataLoader(train_val_set, batch_size=batch_size, shuffle=True)
#for epoch in range(num_epoch):
# epoch_start_time = time.time()
# train_acc = 0.0
# train_loss = 0.0
# model.train()
# for i, data in enumerate(train_val_loader):
# optimizer.zero_grad()
# train_pred = model(data[0].to(gpu))
# batch_loss = loss(train_pred, data[1].to(gpu))
# batch_loss.backward()
# optimizer.step()
# train_acc += np.sum(np.argmax(train_pred.to(cpu).data.numpy(), axis=1) == data[1].numpy())
# train_loss += batch_loss.item()
# print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f' % \
# (epoch + 1, num_epoch, time.time()-epoch_start_time, \
# train_acc/train_val_set.__len__(), train_loss/train_val_set.__len__()))
#print out prediction on testing set
test_set = tools.DigitDataset(test_x, test_y, transform = test_transform)
test_loader = DataLoader(test_set, batch_size = batch_size, shuffle = False)
# Reload the checkpoint before evaluating on the test set.
model.load_state_dict(torch.load('digitalnetwork_3.pkl'))
model.eval()
test_acc = 0.0
test_loss = 0.0
with torch.no_grad():
    for i, data in enumerate(test_loader):
        epoch_start_time = time.time()
        test_pred = model(data[0].to(gpu))
        batch_loss = loss(test_pred, data[1].to(gpu))
        test_acc += np.sum(np.argmax(test_pred.to(cpu).data.numpy(), axis=1) == data[1].numpy())
        test_loss += batch_loss.item()
#print out accuracy and loss
# NOTE(review): epoch_start_time is reset inside the loop, so the reported
# time covers only the last batch, not the whole evaluation.
print('%2.2f sec(s) Test Acc: %3.6f Loss: %3.6f' % \
    (time.time()-epoch_start_time, \
    test_acc/test_set.__len__(), test_loss/test_set.__len__()))
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 04:53:01 2020
@author: Infraestructura 3D
"""
"""
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# =============================================================================
# %% PATHS
# =============================================================================
PATH_chrome = r'D:/LifeWare Technologies/Alarm System/DataCapture/Templates/chromedriver.exe'
# =============================================================================
# %% URLs
# =============================================================================
#An example of how to input the keywords.
#urls = ['https://www.usatoday.com/search/?q=toilet+paper',
# 'https://www.wsj.com/search/term.html?KEYWORDS=toilet%20paper&min-date=2016/04/05&max-date=2020/04/05&isAdvanced=true&daysback=4y&andor=AND&sort=date-desc&source=wsjarticle,wsjblogs,wsjvideo,interactivemedia,sitesearch,wsjpro',
# 'https://www.nytimes.com/search?dropmab=true&endDate=20200405&query=toilet%20paper&sort=newest&startDate=20190405',
# https://nypost.com/search/toilet+paper/?sf=20180101&orderby=date&order=desc''
# 'https://www.latimes.com/search?s=1&q=toilet+paper',
# 'https://www.washingtonpost.com/newssearch/?datefilter=All%20Since%202005&sort=Date&query=toilet%20paper',
# 'https://www.startribune.com/search/?sort=date-desc&q=toilet+paper',
# 'https://www.newsday.com/search#filter=stories&query=toilet%20paper',
# "https://www.chicagotribune.com/search/covid+19/100-y/ALL/date/100/",
# 'https://www3.bostonglobe.com/queryResult/search?q=toilet%20paper&p1=BGMenu_Search&arc404=true']
#urls = ['https://www.usatoday.com/search/?q=',
# ['https://www.wsj.com/search/term.html?min-date=2018/01/01', '&max-date=2020/04/05', '&isAdvanced=true&daysback=4y&andor=AND&sort=date-desc&source=wsjarticle,wsjblogs,wsjvideo,interactivemedia,sitesearch,wsjpro&KEYWORDS='],
# 'https://www.nytimes.com/search?dropmab=true&endDate=20200405&startDate=20180101&sort=newest&query=toilet%20paper',
# ['https://nypost.com/search/', '', '/?sf=20180101&orderby=date&order=desc', ],
# 'https://www.latimes.com/search?s=1&q=',
# 'https://www.washingtonpost.com/newssearch/?datefilter=All%20Since%202005&sort=Date&query=',
# 'https://www.startribune.com/search/?sort=date-desc&q=',
# 'https://www.newsday.com/search#filter=stories&query=',
# "https://www.chicagotribune.com/search/covid+19/100-y/ALL/date/100/",
# ['https://www3.bostonglobe.com/queryResult/search?','q=toilet%20paper','&p1=BGMenu_Search&arc404=true']
# ]
urls = {'usa_today': ['https://www.usatoday.com/search/?q=','&page='],
'wsj': ['https://www.wsj.com/search/term.html?min-date=2018/01/01&max-date=','&page=', '&isAdvanced=true&andor=AND&sort=date-desc&source=wsjarticle,wsjblogs,wsjvideo,interactivemedia,sitesearch,wsjpro&KEYWORDS='],
'ny_t': ['https://www.nytimes.com/search?dropmab=true&endDate=','&startDate=20180101&sort=newest&query='],
'ny_p': ['https://nypost.com/search/', '/?sf=20180101&orderby=date&order=desc'],
'la_t': ['https://www.latimes.com/search?s=','&q='],
'washington_p': ['https://www.washingtonpost.com/newssearch/?datefilter=All%20Since%202005&sort=Date&query='],
'star_t': ['https://www.startribune.com/search/?sort=date-desc&q='],
'news_day': ['https://www.newsday.com/search#filter=stories&query='],
'chicago_t': ['https://www.chicagotribune.com/search/','/100-y/ALL/date/100/'],
'boston_g': ['https://www3.bostonglobe.com/queryResult/search?q=','&p', '=BGMenu_Search&arc404=true']
}
# masks format ['separator for words', 'format of date', 'Does it have different pages?', [array. index of the string number where the search, the date, and the pagination must be joined]]
masks = {'usa_today': {'q_sep':['+', 0], 'pag':[True,1]},
'wsj': {'q_sep': ['%20', 2], 'd_sep': ['/',0], 'pag':[True,1]},
'ny_t': {'q_sep':['%20',1], 'd_sep':['',0]},
'ny_p': {'q_sep':['%20',0]},
'la_t': {'q_sep':['+',1], 'pag':[True,0]},
'washington_p': {'q_sep':['%20',0]},
'star_t': {'q_sep': ['+',0]},
'news_day': {'q_sep':['%20',0]},
'chicago_t': {'q_sep':['+',0]},
'boston_g': {'q_sep':['%20',0], 'pag':[True,1]}
}
#html_tags = {'usa_today': {'general_class':['class', 'gnt_se_a'], 'news_link':['href','gnt_se_a'], 'news_tag':'dat-c-ssts', 'description':'data-c-desc', 'photo_video_link':'gnt_se_f_i','date':'data-c-dt','author':'data-c-by','pag':'gnt_se_pgn_pg'},
# 'wsj': {'general_class':['class', 'item-container headline-item'], 'news_link':['class','gnt_se_a'], 'news_tag':'dat-c-ssts', 'description':'data-c-desc', 'photo_video_link':'gnt_se_f_i','date':'data-c-dt','author':'data-c-by','pag':'gnt_se_pgn_pg'},
# 'ny_t': {'q_sep':['%20',1], 'd_sep':['',0]},
#
# 'ny_p': {'q_sep':['%20',0]},
# 'la_t': {'q_sep':['+',1], 'pag':[True,0]},
# 'washington_p': {'q_sep':['%20',0]},
#
# 'star_t': {'q_sep': ['+',0]},
# 'news_day': {'q_sep':['%20',0]},
# 'chicago_t': {'q_sep':['+',0]},
#
# 'boston_g': {'q_sep':['%20',0], 'pag':[True,1]}
# }
#html_tags = {'usa_today': {'general_class':['class', 'gnt_se_a']},
# 'wsj': {'general_class':['class', 'item-container headline-item']},
# 'ny_t': {'q_sep':['%20',1], 'd_sep':['',0]},
#
# 'ny_p': {'q_sep':['%20',0]},
# 'la_t': {'q_sep':['+',1], 'pag':[True,0]},
# 'washington_p': {'q_sep':['%20',0]},
#
# 'star_t': {'q_sep': ['+',0]},
# 'news_day': {'q_sep':['%20',0]},
# 'chicago_t': {'q_sep':['+',0]},
#
# 'boston_g': {'q_sep':['%20',0], 'pag':[True,1]}
# }
# =============================================================================
# %% Imports
# =============================================================================
import numpy as np
import pandas as pd
from threading import Thread
from threading import Timer
import gc,requests,json
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
# =============================================================================
# %% Functions
# =============================================================================
def downloadPage(url, verbose, timeout=30):
    """Fetch *url* with requests and return the Response object.

    Args:
        url: address to download.
        verbose: when truthy, additionally print the Response object.
        timeout: seconds before the request is aborted.  A request issued
            without a timeout can hang forever, so a finite default is
            supplied (backward compatible: existing callers are unchanged).

    Returns:
        requests.Response: the raw response; the status code is always
        printed for visibility.
    """
    page = requests.get(url, timeout=timeout)
    print('Status Code: ' + str(page.status_code))
    if verbose:
        print(page)
    return page
# =============================================================================
# %% Settings
# =============================================================================
# Search keyword substituted into every newspaper search URL below.
keyword = 'toilet paper'
# Inclusive end date as an ISO string (YYYY-MM-DD); [:10] strips the time part.
end_date = str(datetime(2020, 4, 5))[:10]
# =============================================================================
# %% Download Pages
# =============================================================================
browser = webdriver.Chrome(executable_path=PATH_chrome)
# Collected news elements, keyed by outlet name then by page number.
pages_ = {}
# iter through the urls and its masks
keys = list(urls.keys())
for key in keys:
    # NOTE(review): url is the list stored inside the global ``urls`` dict,
    # so the in-place joins below permanently mutate that dict entry.
    url = urls[key]
    mask = masks[key]
    keys_mask = list(mask.keys())
    # n stays False for outlets without pagination; becomes the page number.
    n = False
    for key_mask in keys_mask:
        if key_mask == 'q_sep':
            # convert the keyword into the separator format the page's search expects
            keyword_temp = keyword.replace(' ', mask[key_mask][0])
            idx = mask[key_mask][1]
            url[idx] = ''.join([url[idx],keyword_temp])
        elif key_mask == 'd_sep':
            # splice the end date (reformatted with the outlet's separator) into the URL
            end_date_temp = end_date.replace('-', mask[key_mask][0])
            idx = mask[key_mask][1]
            url[idx] = ''.join([url[idx],end_date_temp])
        elif key_mask == 'pag':
            n = 1
            #TO DO: ge the number of pages so is easier to iterate through.
            idx = mask[key_mask][1]
            # keep an unpaginated copy so later pages can be rebuilt from it
            url_pag = url.copy()
            url[idx] = ''.join([url[idx],str(n)])
    pages_[key] = {}
    if url:
        url = ''.join(url)
        print(url)
        browser.get(url)
        time.sleep(2)
        content = browser.page_source
        page_parsed = BeautifulSoup(content, 'html.parser')
        wait = WebDriverWait(browser, 600)
        #Get the individual News
        # NOTE(review): html_tags is only defined in the commented-out blocks
        # above, so this line raises NameError at runtime -- restore a real
        # html_tags definition before running this script.
        news = page_parsed.find_all(class_= html_tags[key]['general_class'][1])
        pages_[key]['page_'+str(n)] = news
        #get the pagination numbers
        pag = page_parsed.find_all(class_= html_tags[key]['pag'])
        pag = len(pag)
        for n in range(2,pag+1):
            #TO DO: ge the number of pages so is easier to iterate through.
            # NOTE(review): key_mask here is whatever value the earlier loop
            # ended on -- presumably 'pag'; confirm before relying on idx.
            idx = mask[key_mask][1]
            url = url_pag.copy()
            url[idx] = ''.join([url_pag[idx],str(n)])
            url = ''.join(url)
            print(url)
            browser.get(url)
            time.sleep(2)
            content = browser.page_source
            page_parsed = BeautifulSoup(content, 'html.parser')
            wait = WebDriverWait(browser, 600)
            # dump the raw page source for offline inspection
            f = open(key+'.txt', 'wb')
            f.write(content.encode())
            f.close()
# Iter through every news link
# for item in news:
#
#
#
# #Get the values and links
# news = page_parsed.find_all(class_= html_tags[key]['news_link'])
# html_keys = list(html_tags[key].keys())
#
# pages_[key] = content
#
#
# f = open(key+'.txt', 'wb')
# f.write(content.encode())
# f.close()
# =============================================================================
# %% End
# =============================================================================
|
python
|
from django.core.cache import cache
from django.test import Client, TestCase
from django.urls import reverse
class MoviesTest(TestCase):
    """Integration tests for the cinema movie list and detail views."""

    def setUp(self):
        """Give every test a fresh client and an empty cache."""
        self.client = Client()
        cache.clear()

    def test_view_movies_correct_template(self):
        """The list view renders the movies template."""
        resp = self.client.get(reverse('cinema:films'))
        self.assertTemplateUsed(resp, 'cinema/movies.html')

    def test_view_movies(self):
        """The list view responds 200 and contains the expected markup."""
        resp = self.client.get(reverse('cinema:films'))
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, '</table>')
        self.assertContains(resp, '<title>Movies</title>')

    def test_fetch_movies_pagination_is_10(self):
        """The list view paginates to the default of 10 movies per page."""
        resp = self.client.get(reverse('cinema:films'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.context['movies']), 10)

    def test_fetch_single_movie(self):
        """A movie detail page can be fetched via an id from the list view."""
        listing = self.client.get(reverse('cinema:films'))
        first_id = list(listing.context['movies'])[0]['id']
        resp = self.client.get(reverse('cinema:film', args=(first_id,)))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'cinema/movie.html')
        self.assertContains(resp, 'Cast')
|
python
|
#!/usr/bin/env python3
"""Change the MAC address of a network interface via ifconfig (needs root)."""
import subprocess

interface = input("enter your interface>>")
mac = input("enter new mac>>")
print(" ")
print("--------------------------------------------------------------------------")
# Pass argument lists with shell=False (the default) so a malicious
# interface/MAC string cannot inject shell commands -- the original built a
# shell command line by string concatenation with shell=True.
subprocess.call(["ifconfig", interface, "down"])
subprocess.call(["ifconfig", interface, "hw", "ether", mac])
subprocess.call(["ifconfig", interface, "up"])
print("ONLY ON ROOTED DEVICE")
|
python
|
"""
Horizontal boxplot with observations
====================================
_thumb: .7, .45
"""
import numpy as np
import seaborn as sns
sns.set(style="ticks", palette="muted", color_codes=True)
# Load the example planets dataset
planets = sns.load_dataset("planets")
# Plot the orbital period with horizontal boxes
ax = sns.boxplot(x="distance", y="method", data=planets,
whis=np.inf, color="c")
# Add in points to show each observation
sns.stripplot(x="distance", y="method", data=planets,
jitter=True, size=3, color=".3", linewidth=0)
# Make the quantitative axis logarithmic
ax.set_xscale("log")
sns.despine(trim=True)
|
python
|
# Generated by Django 3.1 on 2020-08-26 08:48
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration removing the legacy matching models.

    Drops the foreign-key columns first, then deletes the Key_Type,
    Match_Key, Match_Method_Type, Matched, Pattern and Pattern_Source
    models from the ``mining`` app.
    """
    dependencies = [
        ('mining', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='match_key',
            name='MK_key_type',
        ),
        migrations.RemoveField(
            model_name='match_key',
            name='MK_match',
        ),
        migrations.RemoveField(
            model_name='matched',
            name='MAT_match_method_type',
        ),
        migrations.RemoveField(
            model_name='matched',
            name='MAT_pattern',
        ),
        migrations.RemoveField(
            model_name='matched',
            name='MAT_topology',
        ),
        migrations.RemoveField(
            model_name='pattern_source',
            name='PS_pattern',
        ),
        migrations.RemoveField(
            model_name='pattern_source',
            name='PS_topology',
        ),
        migrations.DeleteModel(
            name='Key_Type',
        ),
        migrations.DeleteModel(
            name='Match_Key',
        ),
        migrations.DeleteModel(
            name='Match_Method_Type',
        ),
        migrations.DeleteModel(
            name='Matched',
        ),
        migrations.DeleteModel(
            name='Pattern',
        ),
        migrations.DeleteModel(
            name='Pattern_Source',
        ),
    ]
|
python
|
from flask import Blueprint
bp = Blueprint("StCourierServer", __name__, url_prefix="/master")
from . import view
|
python
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2022 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from functools import partial
from ..inputs import BooleanInput, IntegerInput, ListInput, ScientificInput, StringInput
from ..Qt import QtCore, QtGui
from ...experiment import parameters
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class InputsWidget(QtGui.QWidget):
    """Widget that builds one Qt input element per procedure parameter.

    For each parameter name in ``inputs`` an input widget matching the
    parameter type is created and attached to this widget as an attribute
    of the same name.  Parameters may declare ``group_by`` conditions,
    in which case their inputs are shown/hidden (or enabled/disabled,
    depending on ``hide_groups``) based on the value of another input.
    """
    # tuple of Input classes that do not need an external label
    NO_LABEL_INPUTS = (BooleanInput,)
    def __init__(self, procedure_class, inputs=(), parent=None, hide_groups=True):
        """Instantiate the procedure, build the inputs and wire up groups.

        :param procedure_class: Procedure class whose parameters are shown.
        :param inputs: iterable of parameter names to expose.
        :param parent: optional Qt parent widget.
        :param hide_groups: hide (True) or merely disable (False) inputs
            whose group condition is not met.
        """
        super().__init__(parent)
        self._procedure_class = procedure_class
        self._procedure = procedure_class()
        self._inputs = inputs
        self._setup_ui()
        self._layout()
        self._hide_groups = hide_groups
        self._setup_visibility_groups()
    def _setup_ui(self):
        """Create one input element per requested parameter.

        A custom ``ui_class`` on the parameter wins; otherwise the element
        is chosen from the parameter type.  The generic ``Parameter`` test
        comes last so specific types are matched first.
        """
        parameter_objects = self._procedure.parameter_objects()
        for name in self._inputs:
            parameter = parameter_objects[name]
            if parameter.ui_class is not None:
                element = parameter.ui_class(parameter)
            elif isinstance(parameter, parameters.FloatParameter):
                element = ScientificInput(parameter)
            elif isinstance(parameter, parameters.IntegerParameter):
                element = IntegerInput(parameter)
            elif isinstance(parameter, parameters.BooleanParameter):
                element = BooleanInput(parameter)
            elif isinstance(parameter, parameters.ListParameter):
                element = ListInput(parameter)
            elif isinstance(parameter, parameters.Parameter):
                element = StringInput(parameter)
            # NOTE(review): an object matching none of the branches above
            # would leave ``element`` unbound -- presumably every parameter
            # subclasses parameters.Parameter; confirm.
            setattr(self, name, element)
    def _layout(self):
        """Stack the input elements vertically, each with a label above it
        (except for input types listed in NO_LABEL_INPUTS)."""
        vbox = QtGui.QVBoxLayout(self)
        vbox.setSpacing(6)
        self.labels = {}
        parameters = self._procedure.parameter_objects()
        for name in self._inputs:
            if not isinstance(getattr(self, name), self.NO_LABEL_INPUTS):
                label = QtGui.QLabel(self)
                label.setText("%s:" % parameters[name].name)
                vbox.addWidget(label)
                self.labels[name] = label
            vbox.addWidget(getattr(self, name))
        self.setLayout(vbox)
    def _setup_visibility_groups(self):
        """Connect each grouping input's change signal to toggle_group so
        dependent inputs follow the controlling input's value."""
        groups = {}
        parameters = self._procedure.parameter_objects()
        for name in self._inputs:
            parameter = parameters[name]
            # one flag per controlling input; a member is visible only when
            # every condition it depends on holds
            group_state = {g: True for g in parameter.group_by}
            for group_name, condition in parameter.group_by.items():
                if group_name not in self._inputs or group_name == name:
                    continue
                if isinstance(getattr(self, group_name), BooleanInput):
                    # Adjust the boolean condition to a condition suitable for a checkbox
                    condition = QtCore.Qt.CheckState.Checked if condition else QtCore.Qt.CheckState.Unchecked # noqa: E501
                if group_name not in groups:
                    groups[group_name] = []
                groups[group_name].append((name, condition, group_state))
        for group_name, group in groups.items():
            toggle = partial(self.toggle_group, group_name=group_name, group=group)
            group_el = getattr(self, group_name)
            # hook the type-specific change signal, then invoke once so the
            # initial visibility matches the current value
            if isinstance(group_el, BooleanInput):
                group_el.stateChanged.connect(toggle)
                toggle(group_el.checkState())
            elif isinstance(group_el, StringInput):
                group_el.textChanged.connect(toggle)
                toggle(group_el.text())
            elif isinstance(group_el, (IntegerInput, ScientificInput)):
                group_el.valueChanged.connect(toggle)
                toggle(group_el.value())
            elif isinstance(group_el, ListInput):
                group_el.currentTextChanged.connect(toggle)
                toggle(group_el.currentText())
            else:
                raise NotImplementedError(
                    f"Grouping based on {group_name} ({group_el}) is not implemented.")
    def toggle_group(self, state, group_name, group):
        """Update visibility of every input in *group* after the controlling
        input changed to *state*.

        ``condition`` may be a callable (applied to the new state) or a
        literal value compared for equality; an input is visible only when
        all of its group conditions evaluate true.
        """
        for (name, condition, group_state) in group:
            if callable(condition):
                group_state[group_name] = condition(state)
            else:
                group_state[group_name] = (state == condition)
            visible = all(group_state.values())
            if self._hide_groups:
                getattr(self, name).setHidden(not visible)
            else:
                getattr(self, name).setDisabled(not visible)
            if name in self.labels:
                if self._hide_groups:
                    self.labels[name].setHidden(not visible)
                else:
                    self.labels[name].setDisabled(not visible)
    def set_parameters(self, parameter_objects):
        """Push new parameter objects into the corresponding input elements."""
        for name in self._inputs:
            element = getattr(self, name)
            element.set_parameter(parameter_objects[name])
    def get_procedure(self):
        """ Returns a fresh procedure instance populated with the values
        currently entered in the input elements. """
        self._procedure = self._procedure_class()
        parameter_values = {}
        for name in self._inputs:
            element = getattr(self, name)
            parameter_values[name] = element.parameter.value
        self._procedure.set_parameters(parameter_values)
        return self._procedure
|
python
|
"""protected field on Address
Revision ID: 427743e76984
Revises: f8c342997aab
Create Date: 2021-02-02 11:39:45.955233
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '427743e76984'
down_revision = 'f8c342997aab'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Moves the ``protected`` flag from the location table to address.
    op.add_column('address', sa.Column('protected', sa.Boolean(), nullable=True))
    op.drop_column('location', 'protected')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): restore ``protected`` on location, drop it from address.
    op.add_column('location', sa.Column('protected', sa.BOOLEAN(), autoincrement=False, nullable=True))
    op.drop_column('address', 'protected')
    # ### end Alembic commands ###
|
python
|
import binance
from config import BINANCE_API_KEY, BINANCE_API_SECRET
async def connect():
    """Create an authenticated Binance client and load its exchange data.

    Returns:
        binance.Client: the ready-to-use client instance.
    """
    binance_client = binance.Client(BINANCE_API_KEY, BINANCE_API_SECRET)
    await binance_client.load()
    return binance_client
|
python
|
#!/usr/bin/python
import os
import pyopencl as cl
import numpy as np
# initialize OpenCL
def initCl():
    """Set up OpenCL and compile the STM kernel program.

    Builds a context and command queue on the first available platform and
    compiles ``STM.cl`` from the package's sibling ``cl`` directory.

    Returns:
        tuple: (context, queue, program)
    """
    PACKAGE_PATH = os.path.dirname( os.path.realpath( __file__ ) ); print(PACKAGE_PATH)
    CL_PATH = os.path.normpath( PACKAGE_PATH + '/../cl' )
    print(CL_PATH)
    plats = cl.get_platforms()
    ctx = cl.Context(properties=[(cl.context_properties.PLATFORM, plats[0])], devices=None)
    queue = cl.CommandQueue(ctx)
    # Read the kernel source under a context manager so the file handle is
    # closed even when the build raises (the original leaked the handle).
    with open(CL_PATH + "/STM.cl", 'r') as f:
        fstr = f.read()
    program = cl.Program(ctx, fstr).build()
    return ctx, queue, program
ctx,queue,program = initCl()
def initArgs(atoms, CAOs, Spectral, rTips ):
    '''
    Allocate device buffers and assemble the kernel argument tuple.

    Kernel signature:
    int nAtoms, int nMOs,
    __global float4* atoms, // [nAtoms]
    __global float4* CAOs,  // [nMOs*nAtoms]
    __global float2* DOSs,  // [nMOs] occupation
    __global float4* rTips, // [global_size]
    __global float * Iout,  // [global_size] output current
    '''
    nDim = rTips.shape
    ntot = (nDim[0]*nDim[1]*nDim[2],)
    print("initArgs rTips ", rTips.shape, ntot)
    nAtoms = np.int32( len(atoms) )
    nMOs = np.int32( len(CAOs) )
    print("initArgs nAtoms, nMOs", nAtoms, nMOs)
    mf = cl.mem_flags
    # The output holds one float per tip position while rTips holds float4,
    # hence a quarter of rTips' byte size.  Floor division is required:
    # ``nbytes/4`` is a float under Python 3 and cl.Buffer needs an int size.
    cl_Gout = cl.Buffer(ctx, mf.WRITE_ONLY , rTips.nbytes // 4 )
    cl_atoms = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=atoms )
    cl_CAOs = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=CAOs )
    cl_Spectral = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=Spectral )
    cl_rTips = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=rTips )
    kargs = ( nAtoms, nMOs, cl_atoms, cl_CAOs, cl_Spectral, cl_rTips, cl_Gout )
    return kargs
def run( kargs, nDim, local_size=(32,) ):
    """Launch the Conductance_s_sp kernel and copy the result back to host.

    Args:
        kargs: argument tuple from initArgs(); kargs[6] is the output buffer.
        nDim: 3D grid shape; its flattened size must be divisible by
            local_size[0].
        local_size: OpenCL work-group size.

    Returns:
        numpy.ndarray: float32 array of shape nDim with the kernel output.
    """
    print("run opencl kernel ...")
    global_size = (nDim[0]*nDim[1]*nDim[2],)
    assert ( global_size[0]%local_size[0]==0 ), "number of grid points %i must be divisible by local_group_size %i" %(global_size[0],local_size[0]);
    Gout = np.zeros( nDim, dtype=np.float32 )
    print("FE.shape", Gout.shape)
    print("global_size: ", global_size)
    print("local_size: ", local_size)
    program.Conductance_s_sp( queue, global_size, local_size, *(kargs))
    cl.enqueue_copy ( queue, Gout, kargs[6] );
    queue.finish()
    print("... opencl kernel DONE")
    return Gout
def getPos(lvec, nDim=None, step=(0.1,0.1,0.1) ):
    """Build the X, Y, Z coordinate grids spanned by the cell ``lvec``.

    ``lvec[0]`` is the grid origin and ``lvec[1..3]`` the cell vectors.
    When ``nDim`` is omitted it is derived from the vector lengths and
    ``step``.  Returns three arrays of shape ``nDim``.
    """
    if nDim is None:
        na = int(np.linalg.norm(lvec[3,:]) / step[2])
        nb = int(np.linalg.norm(lvec[2,:]) / step[1])
        nc = int(np.linalg.norm(lvec[1,:]) / step[0])
        nDim = (na, nb, nc)
    # per-voxel step along each cell vector (fastest axis uses lvec[1])
    dCell = np.array((lvec[1,:]/nDim[2], lvec[2,:]/nDim[1], lvec[3,:]/nDim[0]))
    ABC = np.indices((nDim[0], nDim[1], nDim[2]))
    print("nDim",nDim)
    print("ABC[0].shape ", ABC[0].shape)
    grids = []
    for comp in range(3):
        grids.append(lvec[0,comp]
                     + ABC[2]*dCell[0,comp]
                     + ABC[1]*dCell[1,comp]
                     + ABC[0]*dCell[2,comp])
    return grids[0], grids[1], grids[2]
def XYZ2float4(X,Y,Z):
    """Pack three coordinate grids into one (..., 4) float32 array.

    The fourth (w) component is left at zero, matching the float4 layout
    the OpenCL kernels expect.
    """
    pad = np.zeros_like(X)
    packed = np.stack((X, Y, Z, pad), axis=-1)
    return packed.astype(np.float32)
def getPos_f4( lvec, nDim=None, step=(0.1,0.1,0.1) ):
    """Convenience wrapper: coordinate grids for ``lvec`` packed as float4."""
    grids = getPos(lvec, nDim=nDim, step=step)
    return XYZ2float4(*grids)
def xyzq2float4(xyzs,qs):
    """Combine positions (N,3) and charges (N,) into an (N,4) float32 array."""
    packed = np.empty((len(qs), 4), dtype=np.float32)
    packed[:, 0:3] = xyzs
    packed[:, 3] = qs
    return packed
def getSpectral( eigenvals, Wf = 1.0, w=0.2 ):
    """Lorentzian spectral weight of each eigenvalue around the energy Wf.

    Args:
        eigenvals: array of eigenvalues.
        Wf: center energy of the Lorentzian.
        w: Lorentzian half-width.

    Returns:
        float32 array ``1 / (w**2 + (eigenvals - Wf)**2)``.

    The original computed an unused local ``w2 = w*w``; it has been removed.
    """
    lorentz = np.ones( len(eigenvals), dtype=np.float32 )
    lorentz[:] /= ( w**2 + (eigenvals-Wf)**2 )
    return lorentz
def CAOsp2f4(CAOs,nAtoms):
    """Reorder per-atom s,p coefficients into float4 (s, px, py, pz).

    Per the original comments the input (fireball) ordering is
    (s, py, pz, px); the kernel expects (s, px, py, pz).
    """
    coeffs = CAOs.reshape((-1,nAtoms,4))
    print("CAOs.shape ", coeffs.shape)
    # fancy-index the last axis: out = (in0, in3, in1, in2)
    reordered = coeffs[:, :, (0, 3, 1, 2)]
    return reordered.astype(np.float32)
|
python
|
# -*- coding: utf-8 -*-
import sfml as sf
# Window dimensions and title.
WIDTH = 640
HEIGHT = 480
TITLE = "Python SFML Events"
window = sf.RenderWindow(sf.VideoMode(WIDTH, HEIGHT), TITLE)
# Main loop: poll events until the window is closed.
while window.is_open:
    for event in window.events:
        if type(event) is sf.CloseEvent:
            window.close()
        if type(event) is sf.MouseMoveEvent:
            # Message is Turkish for "the mouse moved!".
            print("Fare hareket etti! %s" % event.position)
        if type(event) is sf.KeyEvent:
            # Close on ESC release ("ESC was pressed!").
            if event.released and event.code is sf.Keyboard.ESCAPE:
                print("ESC'ye basıldı!")
                window.close()
            # Report while W is held down ("W key is being pressed!").
            if not event.released and event.code is sf.Keyboard.W:
                print("W tuşuna basılıyor!")
    window.clear()
    window.display()
|
python
|
from flask import Flask, request
app = Flask(__name__)
def getAllPuppies():
    """Return the placeholder message for listing every puppy."""
    message = "Getting All the puppies!"
    return message
def makeANewPuppy():
    """Return the placeholder message for creating a puppy."""
    message = "Creating A New Puppy!"
    return message
def getPuppy(id):
    """Return the placeholder message for fetching the puppy with *id*."""
    return f"Getting Puppy with id {id}"
def updatePuppy(id):
    """Return the placeholder message for updating the puppy with *id*."""
    return f"Updating Puppy with id {id}"
def deletePuppy(id):
    """Return the placeholder message for removing the puppy with *id*."""
    return f"Removing Puppy with id {id}"
@app.route('/puppies', methods=['GET', 'POST'])
def puppiesFunction():
    """Dispatch /puppies: GET lists all puppies, POST creates one.

    Flask only routes the listed methods here, but the fallback now
    returns a 405 response instead of printing and falling off the end --
    a view returning None makes Flask raise a 500 error.  The typo in the
    fallback message ("diffren") is also fixed.
    """
    if request.method == 'GET':
        return getAllPuppies()
    elif request.method == 'POST':
        return makeANewPuppy()
    else:
        return "This is a different request {}".format(request.method), 405
@app.route('/puppies/<int:id>',methods=['GET','PUT','DELETE'])
def puppiesFunctionId(id):
    """Dispatch /puppies/<id>: GET fetches, PUT updates, DELETE removes.

    As with puppiesFunction, the fallback now returns a 405 response
    instead of printing and returning None (which would trigger a Flask
    500 error); the "diffrent" typo is fixed.
    """
    if request.method == 'GET':
        return getPuppy(id)
    elif request.method == 'PUT':
        return updatePuppy(id)
    elif request.method == 'DELETE':
        return deletePuppy(id)
    else:
        return "This is a different request {}".format(request.method), 405
if __name__ == '__main__':
    # NOTE(review): debug mode combined with binding to 0.0.0.0 exposes the
    # Werkzeug debugger to the whole network -- development use only.
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
|
python
|
import numpy as np
import matplotlib.pylab as pl
# Embed fonts as TrueType (type 42) so text in saved PDF/PS stays editable.
pl.rcParams['pdf.fonttype'] = 42
pl.rcParams['ps.fonttype'] = 42
fig = pl.figure(figsize=(12,4))
ax = fig.add_subplot(1,2,1)
def plot_perf(nlist, err, color, label, errbar=False, perc=20):
    """Log-log plot of the mean of ``err`` (over axis 0) against ``nlist``.

    With ``errbar`` set, a translucent band between the ``perc``-th and
    ``(100-perc)``-th percentiles is shaded in the same colour.
    """
    mean_curve = err.mean(0)
    pl.loglog(nlist, mean_curve, label=label, color=color)
    if errbar:
        lo = np.percentile(err, perc, axis=0)
        hi = np.percentile(err, 100 - perc, axis=0)
        pl.fill_between(nlist, lo, hi, alpha=0.2, facecolor=color)
fs = 16
# Parameter grids matching the precomputed .npy result files.
list_d = [2, 5, 7]
list_m = [128]
list_n = [1000*i+300 for i in range(11)]
values_dim = np.load('samples_comp.npy')
values_m = np.load('samples_comp_d_fix.npy')
# NOTE(review): the labels below say d=2/7/10 while list_d is [2, 5, 7] --
# confirm which dimensions the saved arrays actually correspond to.
plot_perf(list_n, values_dim[0, 0], 'g', 'd=2', errbar=True, perc=20)
plot_perf(list_n, values_dim[1, 0], 'b', 'd=7', errbar=True, perc=20)
plot_perf(list_n, values_dim[2, 0], 'r', 'd=10', errbar=True, perc=20)
pl.title(r'Sample complexity of $\widetilde{\Lambda}_{\overline{W_2^2}^\mathtt{W}}^k$, $k=10^3$, m=128', fontsize=fs)
pl.ylabel(r'value $\widetilde{\Lambda}_{\overline{W_2^2}^\mathtt{W}}^k(\alpha_n, \beta_n)$', fontsize=fs-1)
pl.xlabel('n : number of data', fontsize=fs-1)
pl.legend()
pl.tight_layout()
# Right panel: same experiment with d fixed and the projection number m varied.
ax = fig.add_subplot(1,2,2)
plot_perf(list_n, values_m[0, 0], 'g', 'm=64', errbar=True, perc=20)
plot_perf(list_n, values_m[0, 1], 'b', 'm=128', errbar=True, perc=20)
plot_perf(list_n, values_m[0, 2], 'r', 'm=256', errbar=True, perc=20)
pl.title(r'Sample complexity of $\widetilde{\Lambda}_{\overline{W_2^2}^\mathtt{W}}^k$, $k=10^3$, d=7', fontsize=fs)
pl.ylabel(r'value $\widetilde{\Lambda}_{\overline{W_2^2}^\mathtt{W}}^k(\alpha_n, \beta_n)$', fontsize=fs-1)
pl.xlabel('n : number of data', fontsize=fs-1)
pl.legend()
pl.tight_layout()
pl.savefig('imgs/sample_complexity.pdf')
pl.show()
|
python
|
from __future__ import unicode_literals
__all__ = ['core','util']
import os
import warnings
import ruamel.yaml as yaml
__author__ = "Pymatgen Development Team"
__email__ ="[email protected]"
__maintainer__ = "Shyue Ping Ong"
__maintainer_email__ ="[email protected]"
__version__ = "2017.8.16"
# Per-user configuration file consumed by _load_pmg_settings() below.
SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml")
def _load_pmg_settings():
    """Load pymatgen settings from ~/.pmgrc.yaml, falling back to env vars.

    Any failure to read *or parse* the YAML file now falls back to scanning
    the environment -- the original comment promised this but only IOError
    was caught, so a malformed settings file crashed the import.  An empty
    YAML file (safe_load returns None) is treated as no settings instead of
    raising AttributeError.

    Keys are normalised to carry the "PMG_" prefix; unprefixed keys emit a
    deprecation warning.
    """
    try:
        with open(SETTINGS_FILE, "rt") as f:
            d = yaml.safe_load(f)
    except Exception:
        # If there are any errors (missing/unreadable/malformed file),
        # default to using environment variables if present.
        d = {}
        for k, v in os.environ.items():
            if k.startswith("PMG_"):
                d[k] = v
            elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]:
                d["PMG_" + k] = v
    # An empty settings file parses to None.
    d = d or {}
    clean_d = {}
    for k, v in d.items():
        if not k.startswith("PMG_"):
            warnings.warn('With effect from pmg 5.0, all pymatgen settings are'
                          ' prefixed with a "PMG_". E.g., "PMG_VASP_PSP_DIR" '
                          'instead of "VASP_PSP_DIR".')
            clean_d["PMG_" + k] = v
        else:
            clean_d[k] = v
    return clean_d
SETTINGS = _load_pmg_settings()
|
python
|
import json
import requests
import xlrd
SOURCES = (
('awois_wrecks', 'http://wrecks.nauticalcharts.noaa.gov/downloads/AWOIS_Wrecks.xls'),
('enc_wrecks', 'http://wrecks.nauticalcharts.noaa.gov/downloads/ENC_Wrecks.xls'),
)
if __name__ == '__main__':
    # Build one GeoJSON FeatureCollection per source spreadsheet.
    # NOTE(review): relies on legacy xlrd behaviour that still reads .xls
    # workbooks; confirm the pinned xlrd version before running.
    for source_item in SOURCES:
        # Source and URL
        source_name = source_item[0]
        source_url = source_item[1]
        # Request the Excel spreadsheet from the URL
        request = requests.get(source_url)
        data = request.content
        # Open the data as an Excel Workbook and get the first sheet
        workbook = xlrd.open_workbook(file_contents=data)
        worksheet = workbook.sheets()[0]
        # We are going to create a GeoJSON Point feature for each row
        features = []
        # Iterate over all the rows in the worksheet (row 0 is the header)
        for row_index in range(1, worksheet.nrows):
            # The row will be returned as an array of cell objects
            cells = worksheet.row(row_index)
            # Construct a GeoJSON Feature dictionary stub that we can fill in
            feature = {
                'type': 'Feature',
                'id': None,
                'geometry': {
                    'type': 'Point',
                    'coordinates': None,
                },
                'properties': {},
            }
            # The column layouts/values are different between the two sources
            if source_name == 'awois_wrecks':
                source_id = '%.0f' % cells[0].value
                vessel_name = cells[1].value
                feature_type = cells[2].value
                lat = float(cells[3].value)
                lng = float(cells[4].value)
                chart = None
                gp_quality = cells[5].value
                depth = cells[6].value
                sounding = cells[7].value
                year_sunk = cells[8].value
                history = cells[9].value
                sounding_quality = None
                water_level_effect = None
            else:
                source_id = None
                vessel_name = cells[1].value
                feature_type = cells[2].value
                chart = cells[3].value # Not Used
                lat = float(cells[4].value)
                lng = float(cells[5].value)
                gp_quality = cells[6].value
                depth = cells[7].value
                sounding = cells[8].value
                year_sunk = cells[9].value
                history = cells[10].value
                sounding_quality = cells[11].value
                water_level_effect = cells[12].value
            # Get the lat and lng from the cell values (GeoJSON is lng, lat)
            feature['geometry']['coordinates'] = (lng, lat)
            # Get the unique ID
            feature['id'] = source_id
            # Get the properties from the cell values
            feature['properties']['vessel_name'] = vessel_name
            feature['properties']['feature_type'] = feature_type
            feature['properties']['gp_quality'] = gp_quality
            feature['properties']['depth'] = depth
            feature['properties']['chart'] = chart
            feature['properties']['sounding'] = sounding
            feature['properties']['yearsunk'] = year_sunk
            feature['properties']['history'] = history
            feature['properties']['sounding_quality'] = sounding_quality
            feature['properties']['water_level_effect'] = water_level_effect
            # Add the source to the properties
            feature['properties']['source'] = source_name
            # Add the feature to our array
            features.append(feature)
# Output the GeoJSON Feature Collection
output = {
"type": "FeatureCollection",
"features": features
}
# Output to a GeoJSON file
with open('%s.geojson' % source_name, 'w') as f:
f.write(json.dumps(output, indent=4))
print 'Done.'
|
python
|
__author__ = 'arjun010'
from visObject import *
from chartDataFormatter import *
from dataFactGenerator import *
from itertools import combinations, permutations
def getPossibleVisualizations(attributeList, dataList, metadataMap):
    """Enumerate candidate chart specs and data facts for 1-3 attributes.

    Dispatches on len(attributeList) and on each attribute's type
    ("quantitative" / "ordinal" / "nominal") as recorded in metadataMap,
    building every applicable chart spec and attaching the related chart
    objects to each generated data fact.

    :param attributeList: 1 to 3 attribute names to visualize
    :param dataList: raw data rows, passed through to the chart/data helpers
    :param metadataMap: per-attribute metadata; 'type' is required, an
        attribute flagged isItemAttr == "y" identifies individual items,
        and N/O attributes are assumed to carry a 'domain' list
        (used to order attribute pairs) -- TODO confirm upstream guarantee
    :return: (possibleVisualizations, possibleDataFacts) pair of lists
    """
    possibleVisualizations = []
    possibleDataFacts = []
    itemAttribute = None # itemAttribute is used in charts like scatterplot and tick plot to enable referring to individual data items
    for attribute in metadataMap:
        if 'isItemAttr' in metadataMap[attribute]:
            if metadataMap[attribute]['isItemAttr'] == "y":
                itemAttribute = attribute
                break
    # ---- one attribute ----
    if len(attributeList) == 1:
        attribute = attributeList[0]
        if metadataMap[attribute]['type']=="quantitative":
            singleAxisTickPlot = getSingleAxisTickPlot(attribute, itemAttribute, dataList)
            possibleVisualizations.append(singleAxisTickPlot)
            formattedData = getDataForSingleAxisTickPlot(dataList,attribute,itemAttribute)
            # tickPlotDataFacts = getDataFacts_TickPlot_Q(attribute,formattedData)
            # for dataFact in tickPlotDataFacts:
            #     dataFact['relatedVisObjects'].append(singleAxisTickPlot)
            #     possibleDataFacts.append(dataFact)
            singleAxisBoxPlot = getSingleAxisBoxPlot(attribute)
            possibleVisualizations.append(singleAxisBoxPlot)
            singleAxisHistogram = getHistogram(attribute)
            possibleVisualizations.append(singleAxisHistogram)
            # commonDataFactsForTickAndBoxPlot = getCommonDataFactsForTickPlotAndBoxPlotAndHistogram_Q(attribute, formattedData)
            # for dataFact in commonDataFactsForTickAndBoxPlot:
            #     dataFact['relatedVisObjects'].append(singleAxisTickPlot)
            #     dataFact['relatedVisObjects'].append(singleAxisBoxPlot)
            #     if dataFact['type']=="RangeDistributionFact":
            #         dataFact['relatedVisObjects'].append(singleAxisHistogram)
            #     possibleDataFacts.append(dataFact)
        elif metadataMap[attribute]['type'] == "ordinal" or metadataMap[attribute]['type'] == "nominal":
            barChartWithCount = getBarChartWithCount(attribute, dataList)
            possibleVisualizations.append(barChartWithCount)
            donutChartWithCount = getDonutChartWithCount(attribute, dataList)
            possibleVisualizations.append(donutChartWithCount)
            formattedData = getDataForBarChartWithCount(dataList,attribute)
            commonDataFactsForBarAndDonutChartsWithCount = getCommonFacts_BarAndDonutChartWithCount_N(attribute,formattedData)
            for dataFact in commonDataFactsForBarAndDonutChartsWithCount:
                dataFact['relatedVisObjects'].append(barChartWithCount)
                dataFact['relatedVisObjects'].append(donutChartWithCount)
                possibleDataFacts.append(dataFact)
    # ---- two attributes ----
    elif len(attributeList) == 2:
        attribute1 = attributeList[0]
        attribute2 = attributeList[1]
        attributeTypeList = [metadataMap[attribute1]['type'],metadataMap[attribute2]['type']]
        if attributeTypeList.count("quantitative")==1 and (attributeTypeList.count("nominal")==1 or attributeTypeList.count("ordinal")==1): # N/O x Q
            # by convention the quantitative attribute goes on the y axis
            if metadataMap[attribute1]['type']=="quantitative":
                yAttr = attribute1
                xAttr = attribute2
            else:
                xAttr = attribute1
                yAttr = attribute2
            #====================
            # generating two axis tick plot and dot plot
            #====================
            twoAxisTickPlot = getTwoAxisTickPlot(xAttr, yAttr, itemAttribute, dataList)
            possibleVisualizations.append(twoAxisTickPlot)
            scatterplot = getScatterplot(xAttr, yAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(scatterplot)
            formattedData = getDataForTwoAxisTickPlot(dataList,xAttr,yAttr,itemAttribute)
            # commonFactsForTickAndDotPlots = getCommonFacts_TickAndDotPlot_NxQ(xAttr,yAttr,None,formattedData)
            # for dataFact in commonFactsForTickAndDotPlots:
            #     dataFact['relatedVisObjects'].append(twoAxisTickPlot)
            #     dataFact['relatedVisObjects'].append(scatterplot)
            #     possibleDataFacts.append(dataFact)
            #====================
            # generating AVG based bar and donut charts
            #====================
            barChartWithAvg = getBarChartWithAvg(xAttr, yAttr, dataList)
            possibleVisualizations.append(barChartWithAvg)
            donutChartWithAvg = getDonutChartWithAvg(xAttr, yAttr, dataList)
            possibleVisualizations.append(donutChartWithAvg)
            formattedData = getDataForBarChartWithAvg(dataList,xAttr,yAttr)
            commonDataFactsForBarAndDonutChartsWithAvg = getCommonFacts_BarAndDonutChartWithAvg_NxQ(xAttr, yAttr, "AVG", formattedData)
            for dataFact in commonDataFactsForBarAndDonutChartsWithAvg:
                dataFact['relatedVisObjects'].append(barChartWithAvg)
                dataFact['relatedVisObjects'].append(donutChartWithAvg)
                possibleDataFacts.append(dataFact)
            #====================
            # generating SUM based bar and donut charts
            #====================
            barChartWithSum = getBarChartWithSum(xAttr, yAttr, dataList)
            possibleVisualizations.append(barChartWithSum)
            donutChartWithSum = getDonutChartWithSum(xAttr, yAttr, dataList)
            possibleVisualizations.append(donutChartWithSum)
            formattedData = getDataForBarChartWithSum(dataList,xAttr,yAttr)
            commonDataFactsForBarAndDonutChartsWithSum = getCommonFacts_BarAndDonutChartWithSum_NxQ(xAttr, yAttr, "SUM", formattedData)
            for dataFact in commonDataFactsForBarAndDonutChartsWithSum:
                dataFact['relatedVisObjects'].append(barChartWithSum)
                dataFact['relatedVisObjects'].append(donutChartWithSum)
                possibleDataFacts.append(dataFact)
        elif attributeTypeList.count("quantitative")==2: # Q x Q
            # 2 permutations
            scatterplot1 = getScatterplot(attribute1,attribute2,itemAttribute,dataList,metadataMap)
            possibleVisualizations.append(scatterplot1)
            scatterplot2 = getScatterplot(attribute2,attribute1,itemAttribute,dataList,metadataMap)
            possibleVisualizations.append(scatterplot2)
            formattedData = getDataForScatterplot(dataList,metadataMap,attribute1,attribute2,itemAttribute)
            scatterplotDataFacts = getDataFacts_Scatterplot_QxQ(attribute1,attribute2,formattedData,metadataMap)
            for dataFact in scatterplotDataFacts:
                dataFact['relatedVisObjects'].append(scatterplot1)
                dataFact['relatedVisObjects'].append(scatterplot2)
                possibleDataFacts.append(dataFact)
        elif attributeTypeList.count("quantitative")==0: # N/O x N/O
            # aggregated scatterplot with count (2 permutations)
            aggregatedScatterplotWithCount1 = getAggregatedScatterplotWithCount(attribute1,attribute2,dataList)
            possibleVisualizations.append(aggregatedScatterplotWithCount1)
            aggregatedScatterplotWithCount2 = getAggregatedScatterplotWithCount(attribute2,attribute1,dataList)
            possibleVisualizations.append(aggregatedScatterplotWithCount2)
            # stacked bar chart (2 permutations)
            stackedBarChart1 = getStackedBarChart(attribute1,attribute2,dataList)
            possibleVisualizations.append(stackedBarChart1)
            stackedBarChart2 = getStackedBarChart(attribute2,attribute1,dataList)
            possibleVisualizations.append(stackedBarChart2)
            # grouped bar chart (maybe)
            formattedData1 = getDataForAggregatedScatterplotByCount(dataList,metadataMap,attribute1,attribute2)
            commonDataFactsForStackedBarAndAggregatedDotPlotWithCount = getCommonDataFacts_StackedBarAndAggregatedDotPlotWithCount_NxN(attribute1,attribute2,formattedData1)
            for dataFact in commonDataFactsForStackedBarAndAggregatedDotPlotWithCount:
                dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount1)
                dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
                dataFact['relatedVisObjects'].append(stackedBarChart1)
                dataFact['relatedVisObjects'].append(stackedBarChart2)
                possibleDataFacts.append(dataFact)
            dataFactsForStackedBarChartWithCount = getStackedBarCharDataFacts_NxN(attribute1,attribute2,formattedData1)
            for dataFact in dataFactsForStackedBarChartWithCount:
                dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount1)
                dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
                dataFact['relatedVisObjects'].append(stackedBarChart1)
                dataFact['relatedVisObjects'].append(stackedBarChart2)
                possibleDataFacts.append(dataFact)
            # formattedData2 = getDataForAggregatedScatterplotByCount(dataList,metadataMap,attribute2,attribute1)
            # commonDataFactsForStackedBarAndAggregatedDotPlotWithCount = getCommonDataFacts_StackedBarAndAggregatedDotPlotWithCount_NxN(attribute2,attribute1,formattedData2)
            # for dataFact in commonDataFactsForStackedBarAndAggregatedDotPlotWithCount:
            #     dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
            #     dataFact['relatedVisObjects'].append(stackedBarChart2)
            #     possibleDataFacts.append(dataFact)
            # dataFactsForStackedBarChartWithCount = getStackedBarCharDataFacts_NxN(attribute2,attribute1,formattedData2)
            # for dataFact in dataFactsForStackedBarChartWithCount:
            #     dataFact['relatedVisObjects'].append(stackedBarChart2)
            #     possibleDataFacts.append(dataFact)
    # ---- three attributes ----
    elif len(attributeList) == 3:
        attribute1 = attributeList[0]
        attribute2 = attributeList[1]
        attribute3 = attributeList[2]
        attributeTypeList = [metadataMap[attribute1]['type'],metadataMap[attribute2]['type'],metadataMap[attribute3]['type']]
        if attributeTypeList.count("quantitative")==0: # 3 N/O
            pass
        elif attributeTypeList.count("quantitative")==1: # 1 Q x 2 N/O
            # identify the quantitative attribute and order the two N/O
            # attributes by domain size (smaller domain drives coloring)
            if metadataMap[attribute1]['type']=="quantitative":
                quantitativeAttr = attribute1
                if len(metadataMap[attribute2]['domain']) <= len(metadataMap[attribute3]['domain']):
                    smallerNOAttr = attribute2
                    largerNOAttr = attribute3
                else:
                    smallerNOAttr = attribute3
                    largerNOAttr = attribute2
            elif metadataMap[attribute2]['type']=="quantitative":
                quantitativeAttr = attribute2
                if len(metadataMap[attribute1]['domain']) <= len(metadataMap[attribute3]['domain']):
                    smallerNOAttr = attribute1
                    largerNOAttr = attribute3
                else:
                    smallerNOAttr = attribute3
                    largerNOAttr = attribute1
            elif metadataMap[attribute3]['type']=="quantitative":
                quantitativeAttr = attribute3
                if len(metadataMap[attribute1]['domain']) <= len(metadataMap[attribute2]['domain']):
                    smallerNOAttr = attribute1
                    largerNOAttr = attribute2
                else:
                    smallerNOAttr = attribute2
                    largerNOAttr = attribute1
            # N/O x Q x N/O (2 coloring variations possible for each chart)
            coloredTickPlot1 = getColoredTickPlot(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList)
            possibleVisualizations.append(coloredTickPlot1)
            coloredTickPlot2 = getColoredTickPlot(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList)
            possibleVisualizations.append(coloredTickPlot2)
            coloredScatterplot1 = getColoredScatterplot(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredScatterplot1)
            coloredScatterplot2 = getColoredScatterplot(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredScatterplot2)
            formattedData = getDataForColoredScatterplot(dataList,metadataMap,largerNOAttr,quantitativeAttr,smallerNOAttr,itemAttribute)
            # commonDataFactsForColoredTickPlotAndScatterplot = getCommonDataFacts_ColoredTickPlotAndScatterplot_NxQxN(largerNOAttr,quantitativeAttr,smallerNOAttr,formattedData,metadataMap,itemAttribute)
            # for dataFact in commonDataFactsForColoredTickPlotAndScatterplot:
            #     dataFact['relatedVisObjects'].append(coloredTickPlot1)
            #     dataFact['relatedVisObjects'].append(coloredTickPlot2)
            #     dataFact['relatedVisObjects'].append(coloredScatterplot1)
            #     dataFact['relatedVisObjects'].append(coloredScatterplot2)
            #     possibleDataFacts.append(dataFact)
            #========================
            coloredScatterplotByAvg1 = getColoredScatterplotByAvg(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredScatterplotByAvg1)
            coloredScatterplotByAvg2 = getColoredScatterplotByAvg(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredScatterplotByAvg2)
            coloredTickPlotByAvg1 = getColoredTickPlotByAvg(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredTickPlotByAvg1)
            coloredTickPlotByAvg2 = getColoredTickPlotByAvg(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredTickPlotByAvg2)
            # N/O x N/O x Q (2 variations for AVG and SUM)
            aggregatedAvgScatterplot1 = getAggregatedScatterplotByAvg(smallerNOAttr, largerNOAttr, quantitativeAttr, dataList,metadataMap)
            possibleVisualizations.append(aggregatedAvgScatterplot1)
            aggregatedAvgScatterplot2 = getAggregatedScatterplotByAvg(largerNOAttr, smallerNOAttr, quantitativeAttr, dataList,metadataMap)
            possibleVisualizations.append(aggregatedAvgScatterplot2)
            formattedData = getDataForAggregatedScatterplotByAvg(dataList,metadataMap,smallerNOAttr,largerNOAttr,quantitativeAttr)
            dataFactsForAggregatedScatterplotByAvg = getDataFactsForAggregatedScatterplotByAvg_NxNxQ(smallerNOAttr, largerNOAttr, quantitativeAttr, formattedData)
            for dataFact in dataFactsForAggregatedScatterplotByAvg:
                dataFact['relatedVisObjects'].append(aggregatedAvgScatterplot1)
                dataFact['relatedVisObjects'].append(aggregatedAvgScatterplot2)
                dataFact['relatedVisObjects'].append(coloredScatterplotByAvg1)
                dataFact['relatedVisObjects'].append(coloredScatterplotByAvg2)
                dataFact['relatedVisObjects'].append(coloredTickPlotByAvg1)
                dataFact['relatedVisObjects'].append(coloredTickPlotByAvg2)
                possibleDataFacts.append(dataFact)
            coloredScatterplotBySum1 = getColoredScatterplotBySum(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredScatterplotBySum1)
            coloredScatterplotBySum2 = getColoredScatterplotBySum(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredScatterplotBySum2)
            coloredTickPlotBySum1 = getColoredTickPlotBySum(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredTickPlotBySum1)
            coloredTickPlotBySum2 = getColoredTickPlotBySum(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(coloredTickPlotBySum2)
            aggregatedSumScatterplot1 = getAggregatedScatterplotBySum(smallerNOAttr, largerNOAttr, quantitativeAttr, dataList,metadataMap)
            possibleVisualizations.append(aggregatedSumScatterplot1)
            aggregatedSumScatterplot2 = getAggregatedScatterplotBySum(largerNOAttr, smallerNOAttr, quantitativeAttr, dataList,metadataMap)
            possibleVisualizations.append(aggregatedSumScatterplot2)
            formattedData = getDataForAggregatedScatterplotBySum(dataList,metadataMap,smallerNOAttr,largerNOAttr,quantitativeAttr)
            dataFactsForAggregatedScatterplotBySum = getDataFactsForAggregatedScatterplotBySum_NxNxQ(smallerNOAttr, largerNOAttr, quantitativeAttr, formattedData)
            for dataFact in dataFactsForAggregatedScatterplotBySum:
                dataFact['relatedVisObjects'].append(aggregatedSumScatterplot1)
                dataFact['relatedVisObjects'].append(aggregatedSumScatterplot2)
                dataFact['relatedVisObjects'].append(coloredScatterplotBySum1)
                dataFact['relatedVisObjects'].append(coloredScatterplotBySum2)
                dataFact['relatedVisObjects'].append(coloredTickPlotBySum1)
                dataFact['relatedVisObjects'].append(coloredTickPlotBySum2)
                possibleDataFacts.append(dataFact)
        elif attributeTypeList.count("quantitative")==2: # 2 Q x 1 N/O
            if metadataMap[attribute1]['type']=="ordinal" or metadataMap[attribute1]['type']=="nominal":
                nonQAttribute = attribute1
                quantitativeAttr1 = attribute2
                quantitativeAttr2 = attribute3
            elif metadataMap[attribute2]['type']=="ordinal" or metadataMap[attribute2]['type']=="nominal":
                nonQAttribute = attribute2
                quantitativeAttr1 = attribute1
                quantitativeAttr2 = attribute3
            elif metadataMap[attribute3]['type']=="ordinal" or metadataMap[attribute3]['type']=="nominal":
                nonQAttribute = attribute3
                quantitativeAttr1 = attribute1
                quantitativeAttr2 = attribute2
            # 2 axis variations possible for scatterplot of QxQ +color
            coloredScatterplot1 = getColoredScatterplot(quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute,dataList,metadataMap)
            possibleVisualizations.append(coloredScatterplot1)
            coloredScatterplot2 = getColoredScatterplot(quantitativeAttr2,quantitativeAttr1,nonQAttribute,itemAttribute,dataList,metadataMap)
            possibleVisualizations.append(coloredScatterplot2)
            formattedData = getDataForColoredScatterplot(dataList,metadataMap,quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute)
            dataFactsForColoredScatterplots = getDataFactsForColoredScatterplot_QxQxN(quantitativeAttr1,quantitativeAttr2,nonQAttribute,formattedData,metadataMap)
            for dataFact in dataFactsForColoredScatterplots:
                dataFact['relatedVisObjects'].append(coloredScatterplot1)
                dataFact['relatedVisObjects'].append(coloredScatterplot2)
                possibleDataFacts.append(dataFact)
            # 2 sizing variations possible for scatterplot of N/O x Q +size
            sizedScatterplot1 = getSizedScatterplot(nonQAttribute, quantitativeAttr1, quantitativeAttr2, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(sizedScatterplot1)
            sizedScatterplot2 = getSizedScatterplot(nonQAttribute, quantitativeAttr2, quantitativeAttr1, itemAttribute, dataList,metadataMap)
            possibleVisualizations.append(sizedScatterplot2)
            formattedData = getDataForColoredScatterplot(dataList,metadataMap,quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute)
            commonDataFactsForColoredAndSizedScatterplot = getCommonDataFactsForColoredAndSizedScatterplot_QxQxN(quantitativeAttr1,quantitativeAttr2,nonQAttribute,formattedData,metadataMap)
            for dataFact in commonDataFactsForColoredAndSizedScatterplot:
                dataFact['relatedVisObjects'].append(coloredScatterplot1)
                dataFact['relatedVisObjects'].append(coloredScatterplot2)
                dataFact['relatedVisObjects'].append(sizedScatterplot1)
                dataFact['relatedVisObjects'].append(sizedScatterplot2)
                possibleDataFacts.append(dataFact)
        elif attributeTypeList.count("quantitative")==3: # 3 Q
            # 6 permutations
            for attributePermutation in permutations(attributeList,3):
                attributePermutation = list(attributePermutation)
                sizedScatterplot = getSizedScatterplot(attributePermutation[0],attributePermutation[1],attributePermutation[2],itemAttribute,dataList,metadataMap)
                possibleVisualizations.append(sizedScatterplot)
                formattedData = getDataForSizedScatterplot(dataList, metadataMap, attributePermutation[0],attributePermutation[1],attributePermutation[2],itemAttribute)
                dataFactsForSizedScatterplot = getDataFactsForSizedScatterplot_QxQxQ(attributePermutation[0],attributePermutation[1],attributePermutation[2],formattedData,metadataMap)
                for dataFact in dataFactsForSizedScatterplot:
                    dataFact['relatedVisObjects'].append(sizedScatterplot)
                    possibleDataFacts.append(dataFact)
    return possibleVisualizations, possibleDataFacts
def getSingleAxisTickPlot(yAttr, itemAttr, dataList):
    """Spec for a single-axis tick plot over *yAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "SingleAxisTickPlot"
    spec['mark'] = "tick"
    spec['y']['attribute'] = yAttr
    return spec
def getSingleAxisBoxPlot(yAttr):
    """Spec for a single-axis box plot over *yAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "SingleAxisBoxPlot"
    spec['mark'] = "box"
    spec['y']['attribute'] = yAttr
    return spec
def getHistogram(yAttr):
    """Spec for a histogram of *yAttr* (binned on the y channel)."""
    spec = getEmptyVisObject()
    spec['type'] = "Histogram"
    spec['mark'] = "bar"
    spec['y']['attribute'] = yAttr
    spec['y']['transform'] = "BIN"
    return spec
def getBarChartWithCount(attribute, dataList):
    """Spec for a bar chart counting records per *attribute* value."""
    spec = getEmptyVisObject()
    spec['type'] = "BarWithCount"
    spec['mark'] = "bar"
    spec['x']['attribute'] = attribute
    spec['y']['transform'] = "COUNT"
    return spec
def getDonutChartWithCount(attribute, dataList):
    """Spec for a donut chart counting records per *attribute* value."""
    spec = getEmptyVisObject()
    spec['type'] = "DonutWithCount"
    spec['mark'] = "arc"
    spec['x']['attribute'] = attribute
    spec['y']['transform'] = "COUNT"
    spec['color']['attribute'] = attribute
    return spec
def getTwoAxisTickPlot(xAttr, yAttr, itemAttr, dataList):
    """Spec for a tick plot of *yAttr* against *xAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "TwoAxisTickPlot"
    spec['mark'] = "tick"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    return spec
def getBarChartWithAvg(xAttr, yAttr, dataList):
    """Spec for a bar chart of average *yAttr* per *xAttr* value."""
    spec = getEmptyVisObject()
    spec['type'] = "BarWithAvg"
    spec['mark'] = "bar"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['y']['transform'] = "AVG"
    return spec
def getBarChartWithSum(xAttr, yAttr, dataList):
    """Spec for a bar chart of summed *yAttr* per *xAttr* value."""
    spec = getEmptyVisObject()
    spec['type'] = "BarWithSum"
    spec['mark'] = "bar"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['y']['transform'] = "SUM"
    return spec
def getDonutChartWithAvg(xAttr, yAttr, dataList):
    """Spec for a donut chart of average *yAttr* per *xAttr* value."""
    spec = getEmptyVisObject()
    spec['type'] = "DonutWithAvg"
    spec['mark'] = "arc"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['y']['transform'] = "AVG"
    spec['color']['attribute'] = xAttr
    return spec
def getDonutChartWithSum(xAttr, yAttr, dataList):
    """Spec for a donut chart of summed *yAttr* per *xAttr* value."""
    spec = getEmptyVisObject()
    spec['type'] = "DonutWithSum"
    spec['mark'] = "arc"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['y']['transform'] = "SUM"
    spec['color']['attribute'] = xAttr
    return spec
def getScatterplot(xAttr, yAttr, itemAttr, dataList, metadataMap):
    """Spec for a plain scatterplot of *yAttr* against *xAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "Scatterplot"
    spec['mark'] = "point"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    return spec
def getColoredTickPlot(xAttr, yAttr, colorAttr, itemAttr, dataList):
    """Spec for a tick plot colored by *colorAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "TickPlotWithColor"
    spec['mark'] = "tick"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['color']['attribute'] = colorAttr
    return spec
def getColoredScatterplot(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
    """Spec for a scatterplot colored by *colorAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "ScatterplotWithColor"
    spec['mark'] = "point"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['color']['attribute'] = colorAttr
    return spec
def getColoredScatterplotByAvg(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
    """Spec for a colored scatterplot with y aggregated by average."""
    spec = getEmptyVisObject()
    spec['type'] = "ScatterplotWithColorByAvg"
    spec['mark'] = "point"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['y']['transform'] = "AVG"
    spec['color']['attribute'] = colorAttr
    return spec
def getColoredTickPlotByAvg(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
    """Spec for a colored tick plot with y aggregated by average."""
    spec = getEmptyVisObject()
    spec['type'] = "TickPlotWithColorByAvg"
    spec['mark'] = "tick"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['y']['transform'] = "AVG"
    spec['color']['attribute'] = colorAttr
    return spec
def getColoredScatterplotBySum(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
    """Spec for a colored scatterplot with y aggregated by sum."""
    spec = getEmptyVisObject()
    spec['type'] = "ScatterplotWithColorBySum"
    spec['mark'] = "point"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['y']['transform'] = "SUM"
    spec['color']['attribute'] = colorAttr
    return spec
def getColoredTickPlotBySum(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
    """Spec for a colored tick plot with y aggregated by sum.

    :param xAttr: attribute mapped to the x axis
    :param yAttr: quantitative attribute mapped to the y axis (summed)
    :param colorAttr: attribute mapped to color
    :param itemAttr: item-identifier attribute (unused here, kept for
        signature parity with the other builders)
    """
    visObject = getEmptyVisObject()
    visObject['type'] = "TickPlotWithColorBySum"
    # "tick" matches getColoredTickPlot and getColoredTickPlotByAvg; the
    # original "point" mark was inconsistent with every other tick-plot
    # builder in this module.
    visObject['mark'] = "tick"
    visObject['x']['attribute'] = xAttr
    visObject['y']['attribute'] = yAttr
    visObject['y']['transform'] = "SUM"
    visObject['color']['attribute'] = colorAttr
    return visObject
def getAggregatedScatterplotByAvg(xAttr, yAttr, sizeAttr, dataList, metadataMap):
    """Spec for an aggregated scatterplot sized by the average of *sizeAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "AggregatedScatterplotWithAvgSize"
    spec['mark'] = "point"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['size']['attribute'] = sizeAttr
    spec['size']['transform'] = "AVG"
    return spec
def getAggregatedScatterplotBySum(xAttr, yAttr, sizeAttr, dataList, metadataMap):
    """Spec for an aggregated scatterplot sized by the sum of *sizeAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "AggregatedScatterplotWithSumSize"
    spec['mark'] = "point"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['size']['attribute'] = sizeAttr
    spec['size']['transform'] = "SUM"
    return spec
def getAggregatedScatterplotWithCount(xAttr, yAttr, dataList):
    """Spec for an aggregated scatterplot sized by record count."""
    spec = getEmptyVisObject()
    spec['type'] = "AggregatedScatterplotWithCountSize"
    spec['mark'] = "point"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['size']['transform'] = "COUNT"
    return spec
def getSizedScatterplot(xAttr, yAttr, sizeAttr, itemAttr, dataList, metadataMap):
    """Spec for a scatterplot with point size driven by *sizeAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "ScatterplotWithSize"
    spec['mark'] = "point"
    spec['x']['attribute'] = xAttr
    spec['y']['attribute'] = yAttr
    spec['size']['attribute'] = sizeAttr
    return spec
def getStackedBarChart(xAttr, colorAttr, dataList):
    """Spec for a stacked bar chart counting records, stacked by *colorAttr*."""
    spec = getEmptyVisObject()
    spec['type'] = "StackedBarChart"
    spec['mark'] = "bar"
    spec['x']['attribute'] = xAttr
    spec['y']['transform'] = "COUNT"
    spec['color']['attribute'] = colorAttr
    return spec
if __name__ == '__main__':
    # No standalone behavior; this module is meant to be imported.
    pass
|
python
|
from __future__ import division
# X é o número do qual queremos a raiz quadrada
# I quantidade de iterações
def calc_raizq(x, chute, i):
    """Approximate the square root of ``x`` with Newton's (Heron's) method.

    :param x: number whose square root is wanted; a negative ``x`` yields
        the complex result ``sqrt(-x) * 1j``
    :param chute: initial guess; values below 1 are clamped to 1, which
        also guards the iteration against division by zero
    :param i: number of iterations (at least 1)
    :raises ValueError: if fewer than one iteration is requested
    """
    if i < 1:
        raise ValueError("É necessário pelo menos uma iteração")
    if chute < 1:
        chute = 1
    if x < 0:
        # Reduce to the positive case and promote to a purely imaginary number.
        return complex(0, calc_raizq(-x, chute, i))
    if x == 0:
        # Newton's update only halves the guess toward zero, so a finite
        # iteration count would return a nonzero approximation; the root
        # of 0 is known exactly.
        return 0.0
    for n in range(i):
        chute = 1/2*(chute+x/chute)
    return chute
# Quick demonstration: sqrt(9) from initial guess 3 with 3 iterations.
raiz = calc_raizq(9, 3, 3)
print(raiz)
# For serious complex-number work the stdlib CMATH module is the usual choice:
# https://docs.python.org/3/library/cmath.html
|
python
|
############################################################################
# Copyright ESIEE Paris (2019) #
# #
# Contributor(s) : Giovanni Chierchia, Benjamin Perret #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
from collections import OrderedDict
from sklearn import datasets
import numpy as np
import scipy
from .plots import show_grid, plot_clustering
from .graph import build_graph
def load_datasets(n_samples, n_labeled, preprocess=lambda x: x):
    """Build the toy benchmark datasets, keyed by name in insertion order.

    The 'moons' set gets twice as many labeled points because it contains
    twice as many clusters.
    """
    np.random.seed(2)
    recipes = [
        ('circles', n_labeled, make_circles),
        ('moons', 2 * n_labeled, make_moons),
        ('blobs', n_labeled, make_blobs),
        ('varied', n_labeled, make_varied),
        ('aniso', n_labeled, make_aniso),
    ]
    sets = OrderedDict()
    for name, labeled_count, factory in recipes:
        sets[name] = create_dataset(n_samples, labeled_count, preprocess, factory)
    return sets
def show_datasets(sets, show_labeled=False, figname=None):
    """Plot every dataset in *sets* on a single grid.

    :param sets: OrderedDict of dataset dicts as built by load_datasets
    :param show_labeled: if True, highlight each dataset's labeled indices
    :param figname: optional filename forwarded to show_grid
    """
    get_list = lambda key: [sets[name][key] for name in sets]
    X_list = get_list("X")
    y_list = get_list("y")
    i_list = get_list("labeled") if show_labeled else len(X_list)*[None]
    # Show only the first fold if several exist. The None placeholders
    # (show_labeled=False) must be passed through untouched: the original
    # read `i.ndim` unconditionally and raised AttributeError on None.
    i_list = [i[0] if i is not None and i.ndim == 2 else i for i in i_list]
    show_grid(plot_clustering, X_list, y_list, i_list, figname=figname)
#-----------------------------#
def create_dataset(n_samples, n_labeled, preprocess, make_data):
    """Generate one dataset dict with a random labeled/unlabeled index split.

    :param make_data: callable (n_samples, n_labeled) -> (X, y)
    :param preprocess: transform applied to X before storing it
    """
    X, y = make_data(n_samples, n_labeled)
    order = np.arange(X.shape[0])
    np.random.shuffle(order)
    return {
        "X": preprocess(X),
        "y": y,
        "n_clusters": len(np.unique(y)),
        "labeled": order[:n_labeled],
        "unlabeled": order[n_labeled:],
    }
def make_circles(n_samples, n_labeled):
    """Two concentric circles (fixed random seed)."""
    return datasets.make_circles(n_samples, factor=.5, noise=.05, random_state=10)
def make_moons(n_samples, n_labeled):
    """Four interleaved half-moons: the standard pair plus a shifted copy."""
    X, y = datasets.make_moons(n_samples, noise=.05, random_state=42)
    X_shifted = X + (2.5, 0)
    return np.concatenate((X, X_shifted)), np.concatenate((y, y + 2))
def make_blobs(n_samples, n_labeled):
    """Isotropic Gaussian blobs (fixed random seed)."""
    return datasets.make_blobs(n_samples, random_state=42)
def make_varied(n_samples, n_labeled):
    """Gaussian blobs with unequal cluster standard deviations."""
    return datasets.make_blobs(n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=170)
def make_aniso(n_samples, n_labeled):
    """Anisotropically stretched blobs (fixed linear transform)."""
    X, y = datasets.make_blobs(n_samples, random_state=170)
    transformation = [[0.6, -0.6], [-0.4, 0.8]]
    return np.dot(X, transformation), y
|
python
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import pytorch_lightning as pl
class Net(pl.LightningModule):
    """Three-layer MLP classifier for flattened 28x28 inputs (10 classes)."""

    def __init__(self):
        super(Net, self).__init__()
        self.layer1 = nn.Linear(28*28, 1024)
        self.layer2 = nn.Linear(1024, 128)
        self.layer3 = nn.Linear(128, 10)

    def forward(self, x):
        # Flatten whatever trailing shape arrives into (batch, 784).
        x = x.view(x.size(0), -1)
        out = F.relu(self.layer1(x))
        out = F.relu(self.layer2(out))
        return self.layer3(out)

    def _accuracy(self, out, target):
        """Fraction of correct argmax predictions for a batch of logits.

        The original code computed torch.eq(loss, target.view(-1, 1)) --
        comparing the scalar *loss* against the targets -- which is
        meaningless; accuracy must compare the predictions.
        """
        preds = out.argmax(dim=1, keepdim=True)
        return preds.eq(target.view_as(preds)).float().mean()

    def training_step(self, batch, batch_idx):
        data, target = batch
        out = self.forward(data)
        loss = F.cross_entropy(out, target)
        acc = self._accuracy(out, target)
        result = pl.TrainResult(loss)
        result.log('train_loss', loss)
        result.log('train_acc', acc)
        return result

    def validation_step(self, batch, batch_idx):
        data, target = batch
        out = self.forward(data)
        loss = F.cross_entropy(out, target)
        acc = self._accuracy(out, target)
        result = pl.EvalResult(checkpoint_on=loss)
        result.log('val_loss', loss)
        result.log('val_acc', acc)
        return result

    def configure_optimizers(self):
        # Adam with the conventional 1e-3 learning rate.
        return optim.Adam(self.parameters(), lr=1e-3)
|
python
|
def foo(x):
    """Return the square of *x*."""
    square = x * x
    return square
# Square each element of the list and print the results one per line.
y = [1, 2, 3, 4, 5]
z = [foo(number) for number in y]
for number in z:
    print(number)
|
python
|
# -*- coding: utf-8 -*-
__author__ = "Michele Samorani"
import pandas as pd
import cplex
import time
import random
TIME_LIMIT_SECONDS = 60
def build_scenarios(show_probs, max_scenarios, seed):
    """Yield (probability, show-vector) pairs describing no-show scenarios.

    When the full outcome space (2**n) fits within *max_scenarios*, every
    0/1 show vector is enumerated with its exact probability. Otherwise
    *max_scenarios* vectors are sampled, each weighted 1/max_scenarios.

    :param show_probs: per-patient show probabilities
    :type show_probs: list[float]
    :param max_scenarios: cap on the number of scenarios produced
    :param seed: RNG seed for the sampling branch (reproducibility)
    :return: generator of (probability, 0-1 show list)
    """
    random.seed(seed)
    n = len(show_probs)
    if 2 ** n <= max_scenarios:
        # Exhaustive enumeration of all show/no-show combinations.
        import itertools
        for outcome in itertools.product([0, 1], repeat=n):
            shows = list(outcome)
            prob = 1
            for j in range(n):
                prob *= (show_probs[j] if shows[j] == 1 else 1 - show_probs[j])
            yield prob, shows
    else:
        # Monte-Carlo sampling: every sampled vector carries equal weight.
        shows = show_probs.copy()
        for _ in range(max_scenarios):
            for j in range(n):
                shows[j] = 1 if random.uniform(0, 1) < show_probs[j] else 0
            yield 1 / max_scenarios, shows.copy()
def optimally_schedule(show_probs, wtc, otc, nslots,seed, max_scenarios = 100000, delta_sim = 0):
    """Build and solve the scenario-based appointment-scheduling MIP with CPLEX.

    Variables: x{i}_{j} = 1 iff patient i is assigned to slot j; b{s}_{j}
    counts the patients still waiting after slot j under scenario s (the
    b of the last slot is the overtime). The objective is the expected
    waiting cost (wtc) plus overtime cost (otc) over all scenarios.

    :param show_probs: per-patient show probabilities (length N)
    :param wtc: unit waiting-time cost
    :param otc: unit overtime cost
    :param nslots: number of appointment slots F
    :param seed: forwarded to build_scenarios
    :param max_scenarios: cap on the number of scenarios generated
    :param delta_sim: probability gap below which two patients count as
        "similar" (feeds the currently disabled constraint set 4)
    :return: (objective value, MIP relative gap, list with the slot index
        of each patient, elapsed seconds); four NaNs when no solution
        could be read back from CPLEX.
    """
    print_steps = False
    # First, find the scenarios
    qs = []  # a list of sets of patients that show under a scenario
    ps = []  # a list of probabilities
    init = time.time()
    # Sort patients by show probability so "similar" pairs are adjacent.
    ser = pd.Series(data=show_probs)
    sorted_indices = list(ser.sort_values().index)
    # Similar index (for each index i, the index of the other patient for constraint 4)
    similar = {}
    for iii in range(len(sorted_indices)-1):
        i = sorted_indices[iii]
        j = sorted_indices[iii+1]
        # check whether i is similar to j
        if show_probs[j] - show_probs[i] <= delta_sim + 0.00000001:
            similar[i] = j
        else:
            similar[i] = -1
    similar[sorted_indices[-1]] = -1
    if print_steps:
        print('Building scenarios')
    totp = 0
    for p,s in build_scenarios(show_probs, max_scenarios,seed):
        qs.append(set())  # set of showing indices
        ps.append(p)
        totp+=p
        for i in range(len(s)):
            if s[i] == 1:
                qs[-1].add(i)
    S = len(qs)  # number of scenarios
    F = nslots  # number of slots
    N = len(show_probs)  # number of patients
    F_max = N
    if print_steps:
        print(f'Done in {time.time() - init}. Built {S} scenarios. Setting up problem...')
    c = cplex.Cplex()
    # variables: assignment binaries x{i}_{j} and non-negative queue counters b{s}_{j}
    c.variables.add(names=[f'x{i}_{j}' for i in range(N) for j in range(F)],types=[c.variables.type.binary for i in range(N) for j in range(F)])
    c.variables.add(names=[f'b{s}_{j}' for j in range(F_max) for s in range(S)],lb=[0 for j in range(F_max) for s in range(S)])
    # silence CPLEX console output
    c.set_log_stream(None)
    c.set_results_stream(None)
    c.set_warning_stream(None)
    c.parameters.timelimit.set(TIME_LIMIT_SECONDS)
    # objective: expected waiting cost on every queue counter, plus
    # overtime cost on the counter of the final slot.
    if print_steps:
        print(f'Setting up objective...')
    for s in range(S):
        tot_shows = len(qs[s])  # N^s
        if tot_shows == 0:
            continue
        for j in range(F_max):
            c.objective.set_linear(f'b{s}_{j}',ps[s] * wtc)
        # NOTE(review): this overwrites the plain wtc coefficient set in the
        # loop above for b{s}_{F-1} with (otc + wtc) — confirm intended.
        c.objective.set_linear(f'b{s}_{F-1}', ps[s] * (otc + wtc))
    # constraint set (1): every patient is assigned to exactly one slot
    if print_steps:
        print(f'Setting up constraint set 1...')
    for i in range(N):
        c.linear_constraints.add(lin_expr=[cplex.SparsePair(
            ind = [f'x{i}_{j}' for j in range(F)], val = [1.0 for j in range(F)])],
            senses = ['E'],
            rhs=[1],
            names=[f'(1_{i})'])
    # constraint set (2): queue recursion — the count waiting after slot j
    # is at least the previous count plus the shows in slot j minus one served.
    if print_steps:
        print(f'Setting up constraint set 2...')
    for s in range(S):
        if print_steps and s % 1000 == 0:
            print(f'Built constraints for {s} scenarios')
        for j in range(0,F_max):
            expr = []
            if j < F:
                expr = [f'x{i}_{j}' for i in qs[s]]
            expr.append(f'b{s}_{j}')
            if j >= 1:
                expr.append(f'b{s}_{j-1}')
            vals = []
            if j <F:
                vals = [-1.0 for i in qs[s]]
            vals.append(1)
            if j >=1 :
                vals.append(-1)
            c.linear_constraints.add(lin_expr=[cplex.SparsePair(expr,vals)],
                senses=['G'],
                rhs=[-1],
                names=[f'(2_{s}_{j})'])
    # constraint set (3): each slot receives at least one patient
    # (only added when there are at least as many patients as slots)
    if print_steps:
        print(f'Setting up constraint set 3...')
    # original constraint 3
    if (N >= F):
        for j in range(0, F):
            c.linear_constraints.add(lin_expr=[cplex.SparsePair(
                ind=[f'x{i}_{j}' for i in range(N)], val=[1.0 for i in range(N)])],
                senses=['G'],
                rhs=[1],
                names=[f'(3_{j})'])
    # constraint set (4): symmetry breaking between "similar" patients.
    # NOTE(review): the add() call below is commented out, so this loop
    # currently builds expressions without adding any constraint.
    if print_steps:
        print(f'Setting up constraint set 4...')
    for i1 in range(N):
        i2 = similar[i1]
        if i2 == -1:
            continue
        for j_prime in range(F-1):
            expr = []
            vals = []
            # old and faster
            expr = [f'x{i1}_{j}' for j in range(j_prime+1,F)]
            # new and slower
            #expr = [f'x{i1}_{j_prime}']
            # expr.extend([f'x{i2}_{j}' for j in range(0,j_prime+1)])
            # vals = [1 for i in range(len(expr))]
            # c.linear_constraints.add(lin_expr=[cplex.SparsePair(expr, vals)],
            #                          senses=['L'],
            #                          rhs=[1],
            #                          names=[f'(4_{i1}_{j_prime})'])
    #c.write(filename='model.txt', filetype='lp')
    if print_steps:
        print(f'Solving...')
    c.solve()
    time_taken = time.time() - init
    # Read the assignment back: x{i}_{j} >= 0.9 is treated as "assigned".
    solution = []
    try:
        for i in range(N):
            sols = c.solution.get_values([f'x{i}_{j}' for j in range(F)])
            for j in range(F):
                if sols[j] >= .9:
                    solution.append(j)
                    break
    # NOTE(review): bare except — any failure to read a solution (e.g. an
    # infeasible model or a time limit with no incumbent) yields four NaNs.
    except:
        import numpy as np
        return np.nan, np.nan, np.nan, np.nan
    return c.solution.get_objective_value(),c.solution.MIP.get_mip_relative_gap(), solution, time_taken
|
python
|
# This file is part of the Etsin service
#
# Copyright 2017-2018 Ministry of Education and Culture, Finland
#
# :author: CSC - IT Center for Science Ltd., Espoo Finland <[email protected]>
# :license: MIT
"""Language and translation utilities"""
from flask import request, session
from etsin_finder.auth.authentication_fairdata_sso import get_sso_environment_prefix, get_decrypted_sso_session_details
# Languages the service supports; default_language is served when no
# session, SSO or header preference matches.
languages = ['en', 'fi']
default_language = 'en'
# Map common locales to languages
locale_mapping = {
    'en_US': 'en',
    'en_GB': 'en',
    'en': 'en',
    'fi_FI': 'fi',
    'fi': 'fi',
}
def set_language(language):
    """Store *language* as the session language.

    Returns True when the language is supported; otherwise returns False
    and leaves the session untouched.
    """
    if language not in languages:
        return False
    session['language'] = language
    return True
def get_language():
    """Resolve the language for the current request.

    Precedence: session setting, then SSO session setting, then the
    request's Accept-Languages header, then the default language.
    """
    sso_session = get_decrypted_sso_session_details() or {}
    for candidate in (session.get('language'), sso_session.get('language')):
        if candidate in languages:
            return candidate
    best = request.accept_languages.best_match(locale_mapping.keys())
    return locale_mapping.get(best, default_language)
# Flat translation catalogue: '<app>.<section>.<name>' -> template string.
# Placeholders such as {folder}, {pref_id} and {data_url} are filled in by
# translate() via str.format.
# NOTE(review): the two parts of the Finnish etsin.description concatenate
# without a separating space ("arvioida" + "löytämiensä") — confirm intended.
translations = {
    'fi': {
        'etsin.download.notification.subject': 'Lataus on aloitettavissa Etsimessä',
        'etsin.download.notification.body.partial': 'Lataus paketille {folder} aineistossa {pref_id} voidaan aloittaa Etsimessä:\n\n{data_url}\n',
        'etsin.download.notification.body.full': 'Lataus aineistolle {pref_id} voidaan aloittaa Etsimessä:\n\n{data_url}\n',
        'etsin.title': 'Etsin | Tutkimusaineistojen hakupalvelu',
        'etsin.description': ('Kuvailutietojen perusteella käyttäjät voivat etsiä aineistoja ja arvioida'
        'löytämiensä aineistojen käyttökelpoisuutta tarpeisiinsa.'),
        'qvain.title': 'Qvain | Tutkimusaineiston metatietotyökalu',
        'qvain.description': ('Fairdata Qvain -työkalu tekee datasi '
        'kuvailun ja julkaisemisen helpoksi.')
    },
    'en': {
        'etsin.download.notification.subject': 'Download can be started in Etsin',
        'etsin.download.notification.body.partial': 'Download for package {folder} in dataset {pref_id} can now be started in Etsin:\n\n{data_url}\n',
        'etsin.download.notification.body.full': 'Download for dataset {pref_id} can now be started in Etsin:\n\n{data_url}\n',
        'etsin.title': 'Etsin | Research Dataset Finder ',
        'etsin.description': 'Etsin enables you to find research datasets from all fields of science.',
        'qvain.title': 'Qvain | Research Dataset Description Tool',
        'qvain.description': 'Fairdata Qvain tool makes describing and publishing your research data effortless for you.',
    }
}
def translate(lang, key, context=None):
    """Return translation from the translations dict for a given language.

    :param lang: language code, e.g. 'en' or 'fi'
    :param key: translation key such as 'etsin.title'
    :param context: optional mapping interpolated into the translation
    :return: the formatted translation, or an error string for an unknown
        language or key
    """
    if context is None:
        context = {}
    lang_translations = translations.get(lang)
    if not lang_translations:
        # Bug fix: the original applied "% lang" to an f-string with no
        # format specifier, raising TypeError instead of returning the text.
        return f'invalid language: {lang}'
    translation = lang_translations.get(key)
    if not translation:
        return f'missing translation: {lang}.{key}'
    return translation.format(**context)
|
python
|
import pygame;
from .drawable import Drawable;
# --------------------------------------------------- *\
# [class] Image()
#
# * Image element *
#
# --------------------------------------------------- */
class Image(Drawable):
    """Drawable element rendering a static image loaded from disk."""

    def __init__(self, imagePath):
        """Load the image at imagePath and size the element to match it."""
        super().__init__()
        self.type = "image"
        surface = pygame.image.load(imagePath).convert_alpha()
        if surface:
            self.setTexture(surface)
            width, height = surface.get_size()
            self.setSize(width, height)
        else:
            print("Couldn't load the image...")
|
python
|
"""
This file contains all the sales related resources
"""
# Third party imports
from flask import request, json, abort
from flask_restplus import Resource
from flask_jwt_extended import jwt_required, get_jwt_identity
# Local application imports
from app.api.v1.models.sales import Sale
from app.api.v1.models.db import Db
from app.api.v1.views.expect import SaleEtn
from app.api.common.validators import sales_validator, admin_required
# Swagger request model (used with @v1.expect) and the API namespace,
# both provided by the SaleEtn helper.
new_s = SaleEtn().sales
v1 = SaleEtn.v1
@v1.route('/<int:id>')
class SalesRecords(Resource):
    """CRUD operations on a single sale record, scoped to the admin's store."""

    @v1.doc(security='apikey')
    @jwt_required
    @admin_required
    def get(self, id):
        """
        Get a specific sale record
        """
        current_user = Db.get_user(email=get_jwt_identity())
        sale = Db.get_s_by_id(id)
        if sale.store_id != current_user.store_id:
            return abort(404, 'That record does not exist')
        return {"status": "Success!", "data": sale.json_dump()}, 200

    @v1.doc(security='apikey')
    @jwt_required
    @admin_required
    def delete(self, id):
        """
        Delete a sale
        """
        sale = Db.get_s_by_id(id)
        current_user = Db.get_user(email=get_jwt_identity())
        if sale.store_id != current_user.store_id:
            return abort(404, 'That record does not exist')
        dumped = sale.json_dump()
        Db.sales.remove(sale)
        return {"status": "Deleted!", "data": dumped}, 200

    @v1.doc(security='apikey')
    @jwt_required
    @admin_required
    @v1.expect(new_s)
    def put(self, id):
        """
        Update a sale
        """
        sale = Db.get_s_by_id(id)
        current_user = Db.get_user(email=get_jwt_identity())
        if sale.store_id != current_user.store_id:
            abort(404, 'Sale does not exist')
        payload = request.get_json(force=True)
        sales_validator(payload)
        sale.number = payload['number']
        return {"status": "Success!", "data": sale.json_dump()}, 200
@v1.route('/')
class SalesRecord(Resource):
    """Collection endpoint: list every sale belonging to the admin's store."""

    @v1.doc(security='apikey')
    @jwt_required
    @admin_required
    def get(self):
        """
        Get all sales
        """
        all_sales = Db.sales
        if len(all_sales) < 1:
            return {"message": 'There are no sale records'}, 404
        current_user = Db.get_user(email=get_jwt_identity())
        store_sales = [sale.json_dump() for sale in all_sales
                       if sale.store_id == current_user.store_id]
        return {"status": "Success!", "data": store_sales}, 200
|
python
|
from flask import Flask, render_template,request, session , redirect , url_for , g,flash, jsonify, make_response, json,flash
from flask_mail import Mail,Message
from flask_cors import CORS
from pusher import pusher
from flask_wtf import FlaskForm
from wtforms import (StringField ,PasswordField,SubmitField)
from wtforms.fields.html5 import EmailField
from wtforms.validators import ValidationError,DataRequired,InputRequired
import model
from flask_apscheduler import APScheduler
app = Flask(__name__)
mail = Mail(app)  # NOTE: re-created below once the mail config is set
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
scheduler = APScheduler()
# SECURITY(review): the secret key, mail password and pusher credentials are
# hard-coded below; they should be loaded from environment variables/config.
app.secret_key = 'mohsin5432'
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_DEFAULT_SENDER'] = '[email protected]'
app.config['MAIL_USERNAME'] = '[email protected]'
app.config['MAIL_PASSWORD'] = 'Moh$in531'
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
# NOTE(review): this rebinds the imported `pusher` module name to the client
# instance; later code uses `pusher.trigger` / `pusher.authenticate` on it.
pusher = pusher_client = pusher.Pusher(
    app_id = "1118828",
    key = "4f4c1cd696946236d54c",
    secret = "dee623f36d26edb30254",
    cluster = "ap1",
    ssl=True
)
# Module-level state; `user` caches the users check performed at import time.
username = ''
user = model.check_users()
admin = ''
class loginform(FlaskForm):
    """Login form: username and password with a submit button."""
    username = StringField(validators=[DataRequired(message="enter username")],render_kw={"placeholder": "username"})
    password = PasswordField(validators=[DataRequired()],render_kw={"placeholder": "Password"})
    submit = SubmitField('submit')
class signupform(FlaskForm):
    """Registration form: username, e-mail and password."""
    username = StringField(validators=[DataRequired(message="enter username")],render_kw={"placeholder": "username"})
    email = EmailField(validators=[InputRequired()],render_kw={"placeholder": "Email"})
    password = PasswordField(validators=[DataRequired()],render_kw={"placeholder": "Password"})
    submit = SubmitField('submit')
class newsletter(FlaskForm):
    """Newsletter subscription form: e-mail address only."""
    email = EmailField(validators=[InputRequired()],render_kw={"placeholder": "Email"})
    submit = SubmitField('Submit')
@app.route('/', methods=['GET'])
def home():
    """Dashboard: show the logged-in user's tasks grouped by status."""
    if 'username' not in session:
        return redirect(url_for('login'))
    g.user = session['username']
    pending = model.pendingtask(g.user)
    progress = model.progresstask(g.user)
    completed = model.completedtask(g.user)
    # Placeholder text for each empty task bucket.
    pmsg = "" if progress else 'NO PROGRESS TASK'
    dmsg = "" if pending else 'NO PENDING TASK'
    cmsg = "" if completed else 'NO COMPLETED TASK'
    return render_template('homepage.html', pending=pending, progress=progress,
                           completed=completed, pmsg=pmsg, dmsg=dmsg, cmsg=cmsg)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login page (GET) or attempt to authenticate (POST).

    On success the username is stored in the session and the user is
    redirected to the dashboard; on failure the login page is re-rendered.
    """
    # Removed the dead "username = False" / "password = False" locals
    # from the original — they were never read.
    form = loginform()
    nform = newsletter()
    sform = signupform()
    if request.method == 'GET':
        return render_template('login.html', form=form, nform=nform, sform=sform)
    session.pop('username', None)
    areyouuser = form.username.data
    pwd = model.pass_check(areyouuser)
    # SECURITY(review): passwords are compared in plain text; they should be
    # hashed (e.g. werkzeug.security.check_password_hash). Left unchanged
    # because the stored format is defined by model.pass_check.
    if form.password.data == pwd:
        session['username'] = form.username.data
        return redirect(url_for('home'))
    return render_template('login.html', form=form, nform=nform, sform=sform)
@app.before_request
def before_request():
    """Expose the session's username (or None) as g.username for the request."""
    g.username = session.get('username')
@app.route('/logout', methods = ['POST'])
def logout():
    """Clear the user session; home() then redirects to the login page."""
    session.pop('username' , None)
    return redirect(url_for('home'))
@app.route('/getsession')
def getsession():
    """Return the logged-in username, or bounce to the login page."""
    if 'username' not in session:
        return redirect(url_for('login'))
    return session['username']
@app.route('/new/guest', methods=['POST'])
def guestUser():
    """Broadcast a new guest's name/e-mail on the general pusher channel."""
    data = request.json
    pusher.trigger(u'general-channel', u'new-guest-details', {
        'name' : data['name'],
        'email' : data['email']
    })
    # Echo the payload back to the caller as JSON.
    return json.dumps(data)
@app.route("/pusher/auth", methods=['POST'])
def pusher_authentication():
    """Authenticate a client socket for a private pusher channel."""
    auth = pusher.authenticate(channel=request.form['channel_name'],socket_id=request.form['socket_id'])
    return json.dumps(auth)
@app.route('/admin/livechat')
def adminchat():
    """Render the admin side of the live-chat page."""
    return render_template('adminchat.html')
@app.route('/signup', methods=['POST'])
def signup():
    """Create a new account and send a welcome e-mail on success."""
    form = loginform()
    nform = newsletter()
    sform = signupform()
    created = model.signup(sform.email.data, sform.username.data, sform.password.data)
    if created is True:
        welcome = Message('TimePay', recipients=[sform.email.data])
        welcome.body = "THANKS FOR SIGNING UP"
        mail.send(welcome)
        message = "Signed up successfully"
    else:
        message = "USER Already Exist"
    return render_template('login.html', message=message, nform=nform, sform=sform, form=form)
@app.route('/addtask', methods=['GET', 'POST'])
def addtask():
    """Show the add-task form (GET) or persist a new pending task (POST)."""
    if 'username' not in session:
        return redirect(url_for('login'))
    if request.method == 'GET':
        return render_template('addtask.html')
    g.user = session['username']
    username = g.user
    email = model.email(g.user)
    # New tasks always start in the "pending" bucket.
    message = model.addtask(username, email,
                            request.form["subject"], request.form["memo"],
                            "pending", request.form["date"])
    return redirect(url_for('home'))
@app.route('/start/<string:id_data>', methods = ['GET'])
def tdelete(id_data):
    """Move task id_data into the in-progress state, then return home.

    NOTE(review): the function name suggests deletion but the route and
    model call start a task; renaming would change the url_for endpoint,
    so it is left as-is.
    """
    model.start(id_data)
    return redirect(url_for('home'))
@app.route('/completed/<string:id_data>', methods = ['GET'])
def completed(id_data):
    """Mark task id_data as completed, then return to the dashboard."""
    model.completed(id_data)
    return redirect(url_for('home'))
@app.route('/delete/<string:id_data>', methods = ['GET'])
def delete(id_data):
    """Delete task id_data entirely, then return to the dashboard."""
    model.delete(id_data)
    return redirect(url_for('home'))
# Newsletter subscription
@app.route('/news', methods=['POST'])
def news():
    """Subscribe an address to the newsletter and confirm by e-mail."""
    form = loginform()
    nform = newsletter()
    subscribed = model.news(nform.email.data)
    if subscribed is True:
        note = Message('Welcome To TimePay', recipients=[nform.email.data])
        note.body = "THANKS FOR SUBSCRIBING OUR NEWSLETTER WE WILL BE LAUNCHING SOON GREAT SERVICES"
        mail.send(note)
        flash("THANKS FOR SUBSCRIBING")
    else:
        flash("YOU ARE Already SUBSCRIBED")
    return redirect(url_for('login'))
# admin section
@app.route('/admin', methods=['GET', 'POST'])
def admin():
    """Admin login: verify credentials on POST, otherwise show the form."""
    if 'admin' in session:
        return redirect(url_for('adminpanel'))
    if request.method == 'GET':
        return render_template('adminlog.html')
    admin_name = request.form['user']
    password = request.form['password']
    # NOTE(review): plain-text password comparison, as defined by
    # model.admpass_check; hashing would be safer.
    if password == model.admpass_check(admin_name):
        session["admin"] = admin_name
        return redirect(url_for('adminpanel'))
    return redirect(url_for('admin'))
@app.route('/adminpanel', methods=['GET', 'POST'])
def adminpanel():
    """Admin dashboard listing the newsletter e-mail addresses."""
    if 'admin' not in session:
        return redirect(url_for('admin'))
    # Local renamed so it no longer shadows the module-level Mail instance;
    # the template keyword stays "mail".
    subscriber_mails = model.emails()
    return render_template('admin.html', mail=subscriber_mails)
@app.route('/logoutadm')
def logoutadm():
    """Clear the admin session and return to the admin login page."""
    session.pop('admin' , None)
    return redirect(url_for('admin'))
# reminder job for tasks
def remainder():
    """Scheduled job: e-mail every user whose task deadline is near."""
    emails = model.remainder()
    if not emails:
        print("NO Email Found")
        return True
    with app.app_context():
        for mails in emails:
            msg = Message('TASK SUBMISSION DATE IS SO CLOSE', recipients=['{}'.format(mails[0])])
            msg.body = "HI there \n your task submission date is so close start your project"
            mail.send(msg)
            print("remainder email sended to :")
            print(mails[0])
    return True
if __name__ == '__main__':
    # Run the reminder job every 20 hours; use_reloader=False keeps the
    # scheduler from being started twice in debug mode.
    scheduler.add_job(id ='Scheduled task', func = remainder , trigger="interval" , hours = 20 )
    scheduler.start()
    app.run(port=8000 ,debug = True,use_reloader=False)
|
python
|
#!/usr/bin/env python3
# -----------------------------------------------------------------------
# VIZPAIRWISESEQ
# Copyright 2015, Stephen Gould <[email protected]>
# -----------------------------------------------------------------------
# Script to visualize an integer sequence based on the visualization of
# the decimal expansion of \pi by Martin Krzywinski and Cristian Vasile.
# -----------------------------------------------------------------------
import math
import matplotlib.pyplot as plt
import matplotlib.patches as pth
import random
import sys
# --- distance to arc centre --------------------------------------------
def distance_to_centres(x, y, r):
    """Return two distances related to a chord of the unit circle.

    The chord has endpoints (x[0], y[0]) and (x[1], y[1]). Returns a pair
    (h1, h2) where h1 is the distance from the origin to the chord's
    mid-point, and h2 is the distance from the chord's mid-point to the
    centre of a circle of radius r sharing the same chord.
    """
    sum_x = x[0] + x[1]
    sum_y = y[0] + y[1]
    diff_x = x[0] - x[1]
    diff_y = y[0] - y[1]
    h1 = 0.5 * math.sqrt(sum_x ** 2 + sum_y ** 2)
    h2 = 0.5 * math.sqrt(4.0 * r ** 2 - diff_x ** 2 - diff_y ** 2)
    return (h1, h2)
# --- visualization -----------------------------------------------------
def visualize_sequence(int_seq, block=True):
    """Visualize a sequence of integers as arcs between points on a circle.

    Each value maps to an angular bin on the unit circle (repeats fan out
    within their bin), and consecutive elements of the sequence are joined
    by a circular arc.

    :param int_seq: sequence of integers to draw
    :param block: forwarded to plt.show; when True the call blocks until
        the window is closed
    """
    seq_length = len(int_seq)
    min_value = min(int_seq)
    max_value = max(int_seq)
    val_range = max_value - min_value + 1
    # convert a sequence of numbers to a sequence in [0.0, 1.0]; the
    # 0.02 * seq_length head start keeps early points off the bin boundary
    counts = [0.02 * seq_length for i in range(val_range)]
    counted_seq = []
    for n in int_seq:
        counted_seq.append((n - min_value, counts[n - min_value]))
        counts[n - min_value] += 1.0
    linear_seq = [(p[0] + p[1] / counts[p[0]]) / float(val_range) for p in counted_seq]
    # set up plots
    fig = plt.figure()
    ax = plt.axes()
    ax.set_axis_off()
    ax.set_ylim([-1, 1])
    ax.set_xlim([-1, 1])
    fig.set_facecolor('black')
    fig.add_axes(ax)
    cm = plt.get_cmap('Paired')
    radius = 1.1
    # plot arcs connecting consecutive elements.
    # Bug fix: the original used linear_seq.pop(), which removed the *last*
    # element — skipping the final consecutive pair and drawing a
    # wrap-around arc instead. pop(0) starts from the first element.
    last_point = linear_seq.pop(0)
    for next_point in linear_seq:
        theta_last = 2 * math.pi * last_point
        theta_next = 2 * math.pi * next_point
        x = [math.cos(theta_last), math.cos(theta_next)]
        y = [math.sin(theta_last), math.sin(theta_next)]
        d = distance_to_centres(x, y, radius)
        scale = 0.5 * d[1] / d[0] + 0.5
        x_centre = scale * (x[0] + x[1])
        y_centre = scale * (y[0] + y[1])
        theta_1 = math.degrees(math.atan2(y[0] - y_centre, x[0] - x_centre))
        theta_2 = math.degrees(math.atan2(y[1] - y_centre, x[1] - x_centre))
        # keep the sweep under 180 degrees by swapping the endpoints
        if (math.fmod(theta_2 - theta_1 + 720.0, 360.0) > 180.0):
            theta_1, theta_2 = theta_2, theta_1
        colour = cm(last_point)
        ax.add_patch(pth.Arc((x_centre, y_centre), 2.0 * radius, 2.0 * radius, 0,
                             theta_1, theta_2, color=colour, fill=False, linewidth=0.25))
        last_point = next_point
    # Bug fix: plt.show expects "block" as a keyword argument.
    plt.show(block=block)
# --- main --------------------------------------------------------------
if __name__ == "__main__":
    int_seq = []
    if (len(sys.argv) == 1):
        print("Generating a random sequence of digits...")
        int_seq = [random.randint(0, 9) for i in range(2500)]
    else:
        print("Reading sequence from {0}...".format(sys.argv[1]))
        # Context manager guarantees the file handle is closed even if
        # parsing raises (original left the handle open on error).
        with open(sys.argv[1]) as fh:
            int_seq = [int(i) for i in fh.read().split()]
    visualize_sequence(int_seq)
|
python
|
import sys
sys.path.append("../bundle_adjustment/ceres-solver/ceres-bin/lib/") # so
import PyCeres
import numpy as np
import scipy.io as sio
import cv2
from utils import geo_utils
def order_cam_param_for_c(Rs, ts, Ks):
    """Pack camera parameters into the [m, 12] layout expected by ceres.

    Per row i:
      [0:3]  Rodrigues vector of the transposed rotation
      [3:6]  camera location (-R^T t)
      [6:11] upper-triangular entries of the calibration matrix K
      [11]   constant 1.0

    :param Rs: [m,3,3]
    :param ts: [m,3]
    :param Ks: [m,3,3]
    :return: Ps_for_c [m, 12]
    """
    packed = np.zeros([len(Rs), 12])
    for idx in range(len(Rs)):
        R, t, K = Rs[idx], ts[idx], Ks[idx]
        packed[idx, 0:3] = cv2.Rodrigues(R.T)[0].T
        packed[idx, 3:6] = (-R.T @ t.reshape([3, 1])).T
        packed[idx, 6:11] = [K[0, 0], K[0, 1], K[0, 2], K[1, 1], K[1, 2]]
        packed[idx, -1] = 1.0
    return packed
def reorder_from_c_to_py(Ps_for_c, Ks):
    """Unpack [m, 12] ceres parameter rows back into Rs, ts and Ps.

    Inverse of order_cam_param_for_c (K is taken from the Ks argument).

    :param Ps_for_c: [m, 12] rows as produced by order_cam_param_for_c
    :param Ks: [m,3,3] calibration matrices
    :return: Rs [m,3,3], ts [m,3], Ps [m,3,4]
    """
    n_cam = len(Ps_for_c)
    Rs = np.zeros([n_cam, 3, 3])
    ts = np.zeros([n_cam, 3])
    Ps = np.zeros([n_cam, 3, 4])
    for idx in range(n_cam):
        row = Ps_for_c[idx]
        Rs[idx] = cv2.Rodrigues(row[0:3])[0].T
        ts[idx] = -Rs[idx] @ row[3:6].reshape([3, 1]).flatten()
        Ps[idx] = geo_utils.get_camera_matrix(R=Rs[idx], t=ts[idx], K=Ks[idx])
    return Rs, ts, Ps
def run_euclidean_ceres(Xs, xs, Rs, ts, Ks, point_indices):
    """
    Calls a c++ function that optimizes the camera parameters and the 3D points for a lower reprojection error.
    :param Xs: [n, 3] 3D points (a trailing homogeneous coordinate is dropped)
    :param xs: [v,2] observed 2D projections
    :param Rs: [m,3,3] rotations
    :param ts: [m,3] translations
    :param Ks: [m,3,3] calibration matrices
    :param point_indices: [2,v] — after flattening, the first v entries are
        camera indices and the next v entries are 3D point indices
    :return:
    new_Rs, new_ts, new_Ps, new_Xs Which have a lower reprojection error
    """
    if Xs.shape[-1] == 4:
        Xs = Xs[:,:3]
    assert Xs.shape[-1] == 3
    assert xs.shape[-1] == 2
    n_cam = len(Rs)
    n_pts = Xs.shape[0]
    n_observe = xs.shape[0]
    # Pack cameras into the [m, 12] row layout the C++ side expects.
    Ps_for_c = order_cam_param_for_c(Rs, ts, Ks).astype(np.double)
    Xs_flat = Xs.flatten("C").astype(np.double)
    Ps_for_c_flat = Ps_for_c.flatten("C").astype(np.double)
    xs_flat = xs.flatten("C").astype(np.double)
    point_indices = point_indices.flatten("C")
    # Output buffers: ceres writes parameter *updates* (deltas) into these.
    Xsu = np.zeros_like(Xs_flat)
    Psu = np.zeros_like(Ps_for_c_flat)
    PyCeres.eucPythonFunctionOursBA(Xs_flat, xs_flat, Ps_for_c_flat, point_indices, Xsu, Psu, n_cam, n_pts, n_observe)
    # Apply the deltas and unpack back to python-side camera matrices.
    new_Ps_for_c = Ps_for_c + Psu.reshape([n_cam, 12], order="C")
    new_Rs, new_ts, new_Ps = reorder_from_c_to_py(new_Ps_for_c, Ks)
    new_Xs = Xs + Xsu.reshape([n_pts,3], order="C")
    return new_Rs, new_ts, new_Ps, new_Xs
def run_projective_ceres(Ps, Xs, xs, point_indices):
    """
    Calls the c++ function, that loops over the variables:
    for i in range(v):
        xs[2*i], xs[2*i + 1], Ps + 12 * (camIndex), Xs + 3 * (point3DIndex)
    :param Ps: [m, 3, 4] projective camera matrices
    :param Xs: [n, 3] 3D points (a trailing homogeneous coordinate is dropped)
    :param xs: [v, 2] observed 2D projections
    :param point_indices: [2,v] — after flattening, the first v entries are
        camera indices and the next v entries are 3D point indices
    :return: new_Ps: [m, 3, 4]
    new_Xs: [n,3]
    """
    if Xs.shape[-1] == 4:
        Xs = Xs[:,:3]
    assert Xs.shape[-1] == 3
    assert xs.shape[-1] == 2
    m = Ps.shape[0]
    n = Xs.shape[0]
    v = point_indices.shape[1]
    Ps_single_flat = Ps.reshape([-1, 12], order="F") # [m, 12] Each camera is in *column* major as in matlab! the cpp code assumes it because the original code was in matlab
    Ps_flat = Ps_single_flat.flatten("C") # row major as in python
    Xs_flat = Xs.flatten("C")
    xs_flat = xs.flatten("C")
    point_idx_flat = point_indices.flatten("C")
    # Output buffers: the C++ side writes parameter *updates* into these.
    Psu = np.zeros_like(Ps_flat)
    Xsu = np.zeros_like(Xs_flat)
    PyCeres.pythonFunctionOursBA(Xs_flat, xs_flat, Ps_flat, point_idx_flat, Xsu, Psu, m, n, v)
    Psu = Psu.reshape([m,12], order="C")
    Psu = Psu.reshape([m,3,4], order="F") # [m, 12] Each camera is in *column* major as in matlab! the cpp code assumes it because the original code was in matlab
    Xsu = Xsu.reshape([n,3])
    # Apply the deltas.
    new_Ps = Ps + Psu
    new_Xs = Xs + Xsu
    return new_Ps, new_Xs
def run_euclidean_python_ceres(Xs, xs, Rs, ts, Ks, point_indices, print_out=True):
    """
    Builds and solves the bundle-adjustment problem in python with PyCeres,
    optimizing the camera parameters and the 3D points for a lower
    reprojection error.
    :param Xs: [n, 3] 3D points (a trailing homogeneous coordinate is dropped)
    :param xs: [v,2] observed 2D projections
    :param Rs: [m,3,3] rotations
    :param ts: [m,3] translations
    :param Ks: [m,3,3] calibration matrices
    :param point_indices: [2,v] — after flattening, the first v entries are
        camera indices and the next v entries are 3D point indices
    :param print_out: when True, print the full ceres report
    :return:
    new_Rs, new_ts, new_Ps, new_Xs Which have a lower reprojection error
    """
    if Xs.shape[-1] == 4:
        Xs = Xs[:,:3]
    assert Xs.shape[-1] == 3
    assert xs.shape[-1] == 2
    n_cam = len(Rs)
    n_pts = Xs.shape[0]
    n_observe = xs.shape[0]
    Ps_for_c = order_cam_param_for_c(Rs, ts, Ks).astype(np.double)
    Xs_flat = Xs.flatten("C").astype(np.double)
    Ps_for_c_flat = Ps_for_c.flatten("C").astype(np.double)
    xs_flat = xs.flatten("C").astype(np.double)
    point_indices = point_indices.flatten("C")
    # Delta buffers: ceres optimizes the *updates*, initialised to zero.
    Xsu = np.zeros_like(Xs_flat)
    Psu = np.zeros_like(Ps_for_c_flat)
    problem = PyCeres.Problem()
    for i in range(n_observe):  # loop over the observations
        camIndex = int(point_indices[i])
        point3DIndex = int(point_indices[i + n_observe])
        cost_function = PyCeres.eucReprojectionError(xs_flat[2 * i], xs_flat[2 * i + 1],
                                                     Ps_for_c_flat[12 * camIndex:12 * (camIndex + 1)],
                                                     Xs_flat[3 * point3DIndex:3 * (point3DIndex + 1)])
        # Huber loss makes the solve robust to outlier observations.
        loss_function = PyCeres.HuberLoss(0.1)
        problem.AddResidualBlock(cost_function, loss_function, Psu[12 * camIndex:12 * (camIndex + 1)],
                                 Xsu[3 * point3DIndex:3 * (point3DIndex + 1)])
    options = PyCeres.SolverOptions()
    options.function_tolerance = 0.0001
    options.max_num_iterations = 100
    options.num_threads = 24
    options.linear_solver_type = PyCeres.LinearSolverType.DENSE_SCHUR
    options.minimizer_progress_to_stdout = True
    if not print_out:
        PyCeres.LoggingType = PyCeres.LoggingType.SILENT
    summary = PyCeres.Summary()
    PyCeres.Solve(options, problem, summary)
    if print_out:
        print(summary.FullReport())
    # Bug fix: the original used "~Psu.any()"; bitwise ~ happens to negate a
    # numpy bool, but on a plain Python bool it is truthy (~True == -2).
    # "not" states the intent and is correct for both.
    if not Psu.any():
        print('Warning no change to Ps')
    if not Xsu.any():
        print('Warning no change to Xs')
    new_Ps_for_c = Ps_for_c + Psu.reshape([n_cam, 12], order="C")
    new_Rs, new_ts, new_Ps = reorder_from_c_to_py(new_Ps_for_c, Ks)
    new_Xs = Xs + Xsu.reshape([n_pts,3], order="C")
    return new_Rs, new_ts, new_Ps, new_Xs
def run_projective_python_ceres(Ps, Xs, xs, point_indices, print_out=True):
    """
    Builds and solves the projective bundle-adjustment problem in python
    with PyCeres, looping over the observations:
    for i in range(v):
        xs[2*i], xs[2*i + 1], Ps + 12 * (camIndex), Xs + 3 * (point3DIndex)
    :param Ps: [m, 3, 4] projective camera matrices
    :param Xs: [n, 3] 3D points (a trailing homogeneous coordinate is dropped)
    :param xs: [v, 2] observed 2D projections
    :param point_indices: [2,v] — after flattening, the first v entries are
        camera indices and the next v entries are 3D point indices
    :param print_out: when True, print the full ceres report
    :return: new_Ps: [m, 3, 4]
    new_Xs: [n,3]
    """
    if Xs.shape[-1] == 4:
        Xs = Xs[:,:3]
    assert Xs.shape[-1] == 3
    assert xs.shape[-1] == 2
    m = Ps.shape[0]
    n = Xs.shape[0]
    v = point_indices.shape[1]
    Ps_single_flat = Ps.reshape([-1, 12], order="F") # [m, 12] Each camera is in *column* major as in matlab! the cpp code assumes it because the original code was in matlab
    Ps_flat = Ps_single_flat.flatten("C").astype(np.double) # row major as in python
    Xs_flat = Xs.flatten("C").astype(np.double)
    xs_flat = xs.flatten("C")
    point_idx_flat = point_indices.flatten("C")
    # Delta buffers: ceres optimizes the *updates*, initialised to zero.
    Psu = np.zeros_like(Ps_flat)
    Xsu = np.zeros_like(Xs_flat)
    problem = PyCeres.Problem()
    for i in range(v):  # loop over the observations
        camIndex = int(point_idx_flat[i])
        point3DIndex = int(point_idx_flat[i + v])
        cost_function = PyCeres.projReprojectionError(xs_flat[2*i], xs_flat[2*i + 1], Ps_flat[12*camIndex:12*(camIndex+1)], Xs_flat[3 *point3DIndex:3*(point3DIndex+1)])
        # Huber loss makes the solve robust to outlier observations.
        loss_function = PyCeres.HuberLoss(0.1)
        problem.AddResidualBlock(cost_function, loss_function, Psu[12*camIndex:12*(camIndex+1)], Xsu[3 *point3DIndex:3*(point3DIndex+1)])
    options = PyCeres.SolverOptions()
    options.function_tolerance = 0.0001
    options.max_num_iterations = 100
    options.num_threads = 24
    options.linear_solver_type = PyCeres.LinearSolverType.DENSE_SCHUR
    options.minimizer_progress_to_stdout = True
    summary = PyCeres.Summary()
    PyCeres.Solve(options, problem, summary)
    if print_out:
        print(summary.FullReport())
    Psu = Psu.reshape([m,12], order="C")
    Psu = Psu.reshape([m,3,4], order="F") # [m, 12] Each camera is in *column* major as in matlab! the cpp code assumes it because the original code was in matlab
    Xsu = Xsu.reshape([n,3])
    # Apply the deltas.
    new_Ps = Ps + Psu
    new_Xs = Xs + Xsu
    return new_Ps, new_Xs
|
python
|
"""Turrets and torpedo mpunt data in a useable form"""
from schemas import TURRETS
class Turret:
    """Container for the data needed to draw a turret.

    Args:
        caliber (int): caliber of the gun in inches (urgh)
        pos (string): the letter of the turret, like "A", "X", etc...
            positions 1 to 4 are also passed as strings
        guns (int): how many guns in the turret
        half_length (int): the length from middle to bow of the ship, in
            funnel coordinates
        all_turrs (list[string]): the list of all the turret position used
            on the ship
        parameters (Parameters): parameters for the whole program

    Attr:
        outline (list[(x,y)]): a list of vertexes for the turret's
            outline, in funnel coordinates
    """
    def __init__(self, caliber, pos, guns, half_length, all_turrs, parameters):
        faces_bow = parameters.turrets_positions[pos]["to_bow"]
        scale = parameters.turrets_scale[caliber]
        rel_position = rel_tur_or_torp_position(pos, all_turrs, parameters)
        anchor_x = rel_position[0] * half_length
        anchor_y = rel_position[1] * half_length
        vertices = parameters.turrets_outlines[guns]
        # Turrets that face aft are mirrored front-to-back.
        if not faces_bow:
            vertices = [(vx, -vy) for vx, vy in vertices]
        # Turrets on the starboard side are also mirrored left-to-right.
        if anchor_x > 0:
            vertices = [(-vx, vy) for vx, vy in vertices]
        # Scale with the gun caliber, then translate to the turret's position.
        self.outline = [(vx * scale + anchor_x, vy * scale + anchor_y)
                        for vx, vy in vertices]
def rel_tur_or_torp_position(pos, all_turrs, parameters):
    """Apply the game's logic to get a turret or torpedo mount position.

    Args:
        pos (string): the letter of the turret, like "A", "X", etc...
            positions 1 to 4 are also passed as strings
        all_turrs (list[string]): the list of all the turret position used
            on the ship
        parameters (Parameters): parameters for the whole program

    Returns:
        the relative position chosen among the candidate positions declared
        in parameters for this mount (index depends on which other turrets
        are present).
    """
    rel_position = parameters.turrets_positions[pos]["positions"][0]
    if pos == "X":
        if ("W" in all_turrs or "V" in all_turrs or
                "R" in all_turrs or "C" in all_turrs):
            rel_position = parameters.turrets_positions[pos]["positions"][1]
    elif pos == "W":
        if ("X" in all_turrs or "V" in all_turrs or "B" in all_turrs):
            rel_position = parameters.turrets_positions[pos]["positions"][1]
    elif pos == "A":
        if ("V" in all_turrs or
                {"W", "X", "Y"}.issubset(all_turrs) or
                "C" in all_turrs and "X" in all_turrs or
                "B" in all_turrs and "R" in all_turrs and (
                    ("W" in all_turrs or "X" in all_turrs or "Y" in all_turrs))):
            rel_position = parameters.turrets_positions[pos]["positions"][2]
        elif ("X" in all_turrs or "W" in all_turrs or
                "B" in all_turrs and ("C" in all_turrs or "R" in all_turrs or "W" in all_turrs)):
            rel_position = parameters.turrets_positions[pos]["positions"][1]
    elif pos == "B":
        if ("V" in all_turrs or
                "W" in all_turrs or
                "C" in all_turrs and ("X" in all_turrs or "Y" in all_turrs) or
                "A" in all_turrs and "R" in all_turrs and ("X" in all_turrs or "Y" in all_turrs)):
            rel_position = parameters.turrets_positions[pos]["positions"][2]
        elif ("X" in all_turrs or "Y" in all_turrs or "C" in all_turrs or "R" in all_turrs):
            rel_position = parameters.turrets_positions[pos]["positions"][1]
    elif pos == "Y":
        # Bug fix: all_turrs is a list, so the original "all_turrs.keys()"
        # raised AttributeError whenever this branch was reached; plain
        # membership testing works for both lists and dicts.
        if (("X" in all_turrs and "W" in all_turrs) or
                ("V" in all_turrs and "W" in all_turrs)):
            rel_position = parameters.turrets_positions[pos]["positions"][3]
        elif ("V" in all_turrs or "W" in all_turrs or
                ({"A", "B", "C"}.issubset(all_turrs)) or
                ({"A", "B", "R"}.issubset(all_turrs))):
            rel_position = parameters.turrets_positions[pos]["positions"][2]
        elif ("B" in all_turrs or "C" in all_turrs or "R" in all_turrs or "X" in all_turrs):
            rel_position = parameters.turrets_positions[pos]["positions"][1]
    return rel_position
class Torpedo:
    """Container for the data needed to draw a torpedo mount

    Args:
        section_content (dict): a TorpedoMount<x> section from the parser
            that read the ship file
        half_length (int): the length from middle to bow of the ship, in funnel coordinates
        parameters (Parameters): parameters for the whole program
    Attr:
        outline (list[(x,y)]): a list of vertexes for the mount's outline. In funnel coordinates
    """
    def __init__(self, section_content, half_length, parameters):
        mount_pos = section_content["Pos"]
        n_tubes = int(section_content["Tubes"])
        if mount_pos in TURRETS:
            placement = parameters.turrets_positions[mount_pos]
            faces_bow = placement["to_bow"]
            rel_pos = placement["positions"][0]
        else:
            # unknown position: park the mount outside of the visible area,
            # so it stays hidden
            faces_bow = True
            rel_pos = [0, 1.5]
        # scale the relative position to funnel coordinates
        offset_x = rel_pos[0] * half_length
        offset_y = rel_pos[1] * half_length
        shape = parameters.torpedo_outlines[n_tubes]
        # mirror along the ship axis when the mount points backward
        if faces_bow:
            oriented = shape
        else:
            oriented = [(p[0], -p[1]) for p in shape]
        # translate the outline to its final position
        self.outline = [(p[0] + offset_x, p[1] + offset_y) for p in oriented]
|
python
|
# Copyright (c) 2021, Carlos Millett
# All rights reserved.
# This software may be modified and distributed under the terms
# of the Simplified BSD License. See the LICENSE file for details.
import abc
from pathlib import Path
from .types import Types
class Media(abc.ABC):
    """Base class for a media file on disk.

    Holds the file's type and path, and lazily derives a display title
    from the file name.  Subclasses supply the `match`, `parse` and
    `format` class methods.
    """

    def __init__(self, media_type: Types, path: Path) -> None:
        self._type: Types = media_type
        self._path: Path = path
        self._title: str = ''

    @property
    def type(self) -> Types:
        """The media type assigned at construction."""
        return self._type

    @property
    def path(self) -> Path:
        """The filesystem path assigned at construction."""
        return self._path

    @property
    def title(self) -> str:
        """Formatted title; computed on first access from the file name."""
        if not self._title:
            formatted = self.format(self._path.name)
            self._title = '{0}{1}'.format(formatted, self._path.suffix)
        return self._title

    @title.setter
    def title(self, title: str) -> None:
        self._title = title

    @classmethod
    @abc.abstractmethod
    def match(cls, filename: str) -> bool:
        """Return whether *filename* belongs to this media kind (abstract)."""

    @classmethod
    @abc.abstractmethod
    def parse(cls, filename: str) -> str:
        """Extract information from *filename* (abstract)."""

    @classmethod
    @abc.abstractmethod
    def format(cls, filename: str) -> str:
        """Return the reformatted name derived from *filename* (abstract)."""
|
python
|
class Solution(object):
    """Levenshtein edit distance via classic dynamic programming."""

    def minDistance(self, w1, w2):
        """Return the minimum number of single-character insertions,
        deletions and substitutions needed to turn *w1* into *w2*.

        Args:
            w1 (str): source word
            w2 (str): target word
        Returns:
            int: the edit (Levenshtein) distance between w1 and w2
        """
        l1, l2 = len(w1), len(w2)
        # d[i][j] = edit distance between w1[:i] and w2[:j]
        d = [[0] * (l2 + 1) for _ in range(l1 + 1)]
        # Base cases: converting to/from an empty prefix costs its length.
        # (These also cover empty w1/w2, so no special-case early returns.)
        for i in range(1, l1 + 1):
            d[i][0] = i
        for j in range(1, l2 + 1):
            d[0][j] = j
        for i in range(1, l1 + 1):
            for j in range(1, l2 + 1):
                if w1[i - 1] == w2[j - 1]:
                    # matching characters cost nothing extra
                    d[i][j] = d[i - 1][j - 1]
                else:
                    # best of: delete from w1, insert into w1, substitute
                    d[i][j] = min(d[i - 1][j], d[i][j - 1], d[i - 1][j - 1]) + 1
        return d[l1][l2]
def test():
    """Smoke-test Solution.minDistance and print the result.

    NOTE: the original body reassigned w1/w2 about ten times, so every
    pair except the final ("intention", "execution") was dead code and
    never exercised.  The dead assignments are removed; the output is
    unchanged.
    """
    s = Solution()
    w1 = "intention"
    w2 = "execution"
    r = s.minDistance(w1, w2)
    print(w1)
    print(w2)
    print(r)

test()
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.