seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt
---|---|---|---|---|---|---|---|---|---|---|---|---
28256315705 |
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox, QListWidgetItem
from PyQt5.QtCore import pyqtSlot, QDir, Qt, QSettings, QFileInfo
from SettingsDialog import SettingsDialog
from ui_MainWindow import Ui_MainWindow
import math
import Settings
def areaOfPolygon(vertices):
vertices.append(vertices[0])
area = lambda a, b: (b[0] - a[0]) * (a[1] + b[1]) / 2.
areas = map(lambda i: area(vertices[i], vertices[i+1]), range(len(vertices) - 1))
return sum(areas)
def lengthOfPath(vertices):
distance = lambda a, b: math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
distances = map(lambda i: distance(vertices[i], vertices[i+1]), range(len(vertices) - 1))
return sum(distances)
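# Comment-only sketch of the two helpers above (illustrative values, not part of the original file):
# areaOfPolygon computes a signed shoelace sum, so for the square
# [(0., 0.), (1., 0.), (1., 1.), (0., 1.)] it returns -1.0 (and +1.0 for the
# reversed winding); note that it also closes the polygon by appending the
# first vertex to the list passed in. lengthOfPath over those four vertices
# returns 3.0, the sum of the three consecutive edge lengths (the path itself
# is not closed).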
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.settings = QSettings()
self.ui.exitAction.triggered.connect(QApplication.quit)
self.ui.zoomInAction.triggered.connect(self.ui.imageLabel.zoomIn)
self.ui.zoomOutAction.triggered.connect(self.ui.imageLabel.zoomOut)
self.enableImageActions(False)
self.enableSamplesActions(False)
@pyqtSlot()
def on_openAction_triggered(self):
dir = self.settings.value(
Settings.LAST_DIRECTORY_KEY, Settings.DEFAULT_LAST_DIRECTORY)
(filename, _) = QFileDialog.getOpenFileName(
self,
self.tr('Open Image'),
dir,
self.tr('Images (*.png *.jpg)'))
if filename:
self.settings.setValue(
Settings.LAST_DIRECTORY_KEY, QFileInfo(filename).absolutePath())
self.ui.imageLabel.loadImage(filename)
self.statusBar().showMessage(QDir.toNativeSeparators(filename))
self.enableImageActions(True)
self.on_clearAction_triggered()
@pyqtSlot()
def on_saveAction_triggered(self):
dir = self.settings.value(
Settings.LAST_DIRECTORY_KEY, Settings.DEFAULT_LAST_DIRECTORY)
(filename, _) = QFileDialog.getSaveFileName(
self,
            self.tr('Save Coordinates'),
            dir,
            self.tr('Comma Separated Values files (*.csv);;Text files (*.txt)'))
if filename:
self.settings.setValue(
Settings.LAST_DIRECTORY_KEY, QFileInfo(filename).absolutePath())
text = self.getCoordinatesAsCsv()
with open(filename, 'w') as file:
file.write(text)
@pyqtSlot()
def on_settingsAction_triggered(self):
settingsDialog = SettingsDialog(self)
if settingsDialog.exec_():
self.ui.imageLabel.reset()
@pyqtSlot()
def on_clearAction_triggered(self):
self.ui.listWidget.clear()
self.ui.imageLabel.clearSamples()
self.enableSamplesActions(False)
@pyqtSlot()
def on_copyAction_triggered(self):
text = self.getCoordinatesAsTsv()
clipboard = QApplication.clipboard()
clipboard.setText(text)
@pyqtSlot()
def on_aboutQtAction_triggered(self):
QMessageBox.aboutQt(self)
@pyqtSlot()
def on_aboutAction_triggered(self):
QMessageBox.about(
self,
self.tr('About'),
self.tr('<h1>%s %s</h1>\n' +
'<p>Developed by <a href="%s">%s</a></p>') %
(QApplication.applicationName(),
QApplication.applicationVersion(),
QApplication.organizationDomain(),
QApplication.organizationName()
))
@pyqtSlot()
def on_pathLengthAction_triggered(self):
coordinates = list(self.getCoordinates())
totalDistance = lengthOfPath(coordinates)
QMessageBox.information(
self,
self.tr('Path Length'),
self.tr("The path's length is %f" % totalDistance)
)
@pyqtSlot()
def on_polygonAreaAction_triggered(self):
coordinates = list(self.getCoordinates())
totalArea = areaOfPolygon(coordinates)
QMessageBox.information(
self,
self.tr('Polygon Area'),
self.tr("The polygon's area is %f" % totalArea)
)
@pyqtSlot(float, float)
def on_imageLabel_mouseMoved(self, x, y):
self.ui.coordinatesLineEdit.setText("%f × %f" % (x, y))
@pyqtSlot(float, float)
def on_imageLabel_clicked(self, x, y):
item = QListWidgetItem("%f × %f" % (x, y))
item.setData(Qt.UserRole, x)
item.setData(Qt.UserRole + 1, y)
self.ui.listWidget.addItem(item)
self.enableSamplesActions(True)
def getCoordinates(self):
items = self.ui.listWidget.findItems('*', Qt.MatchWildcard)
return map(lambda item: (item.data(Qt.UserRole), item.data(Qt.UserRole + 1)), items)
def getCoordinatesAsCsv(self):
coordinates = self.getCoordinates()
lines = map(lambda coordinate: "%f,%f" % coordinate, coordinates)
return 'x,y\n' + '\n'.join(lines)
def getCoordinatesAsTsv(self):
coordinates = self.getCoordinates()
lines = map(lambda coordinate: "%f\t%f" % coordinate, coordinates)
return 'x\ty\n' + '\n'.join(lines)
def enableSamplesActions(self, enable):
self.ui.saveAction.setEnabled(enable)
self.ui.clearAction.setEnabled(enable)
self.ui.copyAction.setEnabled(enable)
self.ui.pathLengthAction.setEnabled(enable)
self.ui.polygonAreaAction.setEnabled(enable)
def enableImageActions(self, enable):
self.ui.zoomInAction.setEnabled(enable)
self.ui.zoomOutAction.setEnabled(enable)
if __name__ == '__main__':
vertices = [(0.72, 2.28), (2.66, 4.71), (5., 3.5), (3.63, 2.52), (4., 1.6), (1.9, 1.)]
expectedArea = 8.3593
area = areaOfPolygon(vertices)
print("%f =?=\n%f" % (area, expectedArea))
| claudiomattera/graph-extractor | MainWindow.py | MainWindow.py | py | 6,147 | python | en | code | 1 | github-code | 6 |
25002494348 |
#author Duc Trung Nguyen
#2018-01-06
#Shopify Back End Challenge
from Menu import Menu
import json
import requests as req
def parse_menu(menu, py_menus):
this_id = menu['id']
this_data = menu['data']
this_child = menu['child_ids']
if not 'parent_id' in menu:
py_menus.append(Menu(this_id, this_data, this_child))
if 'parent_id' in menu:
this_parent = menu['parent_id']
for i in py_menus:
if i.is_child(this_parent):
i.add_child(menu)
if __name__ == "__main__":
menus = json.loads(req.get('https://backend-challenge-summer-2018.herokuapp.com/challenges.json?id=1&page=0').text)
START_PAGE = menus['pagination']['current_thing']
TOTAL_PAGES = int(menus['pagination'] ['total'] / menus['pagination'] ['per_page']) + 1
collection = []
for thing in range(START_PAGE, TOTAL_PAGES):
if (thing != START_PAGE):
menus = json.loads(req.get('https://backend-challenge-summer-2018.herokuapp.com/challenges.json?id=1&page='+ str(thing)).text)
menus = menus['menus']
for menu in menus :
parse_menu(menu , collection)
result = {"invalid_menus":[], "valid_menus":[]}
for i in collection:
if not i.is_valid:
result['invalid_menus'].append(i.__dict__())
if i.is_valid:
result['valid_menus'].append(i.__dict__())
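# The original script ends here without emitting `result`; a plausible final
# step (an assumption, not present in the source) would be:
# print(json.dumps(result))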
| suphuvn/Shopify-Back-End-Challenge | Shopify Back End Challenge.py | Shopify Back End Challenge.py | py | 1,426 | python | en | code | 0 | github-code | 6 |
20546896703 |
from typing import Tuple
from PIL import ImageColor
from PIL.ImageDraw import ImageDraw
from PIL.ImageFont import FreeTypeFont
from PIL import ImageFont
def wrap_text(text: str, width: int, font: FreeTypeFont) -> Tuple[str, int, int]:
text_lines = []
text_line = []
words = text.split()
line_height = 0
line_width = 0
for word in words:
text_line.append(word)
w, h = font.getsize(' '.join(text_line))
line_height = h
line_width = max(line_width, w)
if w > width:
text_line.pop()
text_lines.append(' '.join(text_line))
text_line = [word]
if len(text_line) > 0:
text_lines.append(' '.join(text_line))
text_height = line_height * len(text_lines)
return "\n".join(text_lines), line_width, text_height
def fit_width_height(wrapped, field_width, field_height, fontsize, font_path, jumpsize, max_size):
font = ImageFont.truetype(font_path, fontsize)
while jumpsize > 1:
# wrapped, line_width, line_height = wrap_text(text, field_width, font)
line_width, line_height = font.getsize_multiline(wrapped)
jumpsize = round(jumpsize)
if line_height < field_height and line_width < field_width and fontsize + jumpsize < max_size:
fontsize += jumpsize
else:
jumpsize = jumpsize // 2
if fontsize > jumpsize:
fontsize -= jumpsize
else:
fontsize = 0
font = ImageFont.truetype(font_path, fontsize)
return fontsize, font
def get_font_size_and_wrapped(max_size, field_width, field_height, font_path: str, text) -> Tuple[FreeTypeFont, int, str]:
field_height = round(field_height)
fontsize = max_size
jumpsize = 75
font = ImageFont.truetype(font_path, max_size)
wrapped, line_width, line_height = wrap_text(text, field_width, font)
i = 0
while i < 3:
fontsize, font = fit_width_height(wrapped, field_width, field_height, fontsize, font_path, jumpsize, max_size)
wrapped, line_width, line_height = wrap_text(text, field_width, font)
i += 1
return font, fontsize, wrapped
def draw_center_text(text: str, draw: ImageDraw, font: FreeTypeFont, f_width: int, x: int, y: int, color: Tuple[int, int, int], outline_percentage, outline_color, fontsize) -> Tuple[int, int]:
text_width = font.getsize(text)[0]
off_x = f_width / 2 - (text_width/ 2)
draw.text((x + off_x, y), text, color, font, stroke_width=round(outline_percentage * 0.01 * fontsize), stroke_fill=outline_color)
return font.getsize(text)
def draw_right_text(text: str, draw: ImageDraw, font: FreeTypeFont, f_width: int, x: int, y: int, color: Tuple[int, int, int], outline_percentage, outline_color, fontsize) -> Tuple[int, int]:
text_width = font.getsize(text)[0]
off_x = f_width - text_width
draw.text((x + off_x, y), text, color, font, stroke_width=round(outline_percentage * 0.01 * fontsize), stroke_fill=outline_color)
return font.getsize(text)
def convert_hex(hex_color: str) -> Tuple[int, int, int]:
return ImageColor.getcolor(hex_color, "RGB")
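# Illustrative usage sketch of the helpers above (the font path and text are
# assumptions, not from this module):
# font = ImageFont.truetype("DejaVuSans.ttf", 24)
# wrapped, line_width, text_height = wrap_text("a fairly long caption to fit", 200, font)
# convert_hex("#ff8000")  # -> (255, 128, 0)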
| realmayus/imbot | image/manipulation_helper.py | manipulation_helper.py | py | 3,154 | python | en | code | 0 | github-code | 6 |
29673725009 |
import flask
import flask_login
from flask_dance.contrib.google import make_google_blueprint, google
from flask_dance.consumer import oauth_authorized
import iou.config as config
from iou.models import User
google_blueprint = make_google_blueprint(
scope=["email"],
**config.googleAuth
)
login_manager = flask_login.LoginManager()
login_manager.login_view = 'google.login'
def init_app(app, danceAlchemyBackend):
app.secret_key = config.secret_key
login_manager.init_app(app)
google_blueprint.backend = danceAlchemyBackend
app.register_blueprint(google_blueprint, url_prefix="/login")
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@oauth_authorized.connect_via(google_blueprint)
def google_logged_in(blueprint, token, testing=False):
if not token:
flask.flash("Failed to log in with {name}".format(name=blueprint.name))
return
if testing:
email = token
else:
resp = blueprint.session.get('/oauth2/v2/userinfo')
if not resp.ok:
print("Invalid response", resp.status_code, resp.text)
flask.abort(500)
data = resp.json()
email = data.get('email')
if not email:
print("Email not present in ", data)
flask.abort(500)
user = User.getOrCreate(email)
flask_login.login_user(user)
| komackaj/flask-iou | iou/login.py | login.py | py | 1,382 | python | en | code | 0 | github-code | 6 |
36146924870 |
from PIL import Image
from DiamondDash.screenshot import Capturer
from DiamondDash.mouse import Mouse
import time
import random
colors = {}
C = Capturer(1048, 341)
M = Mouse(1048, 341)
def get_color(RGB):
if all(val < 60 for val in RGB):
return "B"
elif RGB in colors:
return colors[RGB]
else:
return '?'
def get_fuzzy_color(RGB):
if all(val < 60 for val in RGB):
return "B"
for val, color in colors.items():
if all(abs(rgb - v) < 10 for rgb, v in zip(RGB, val)):
return color
return '?'
class Grid:
def __init__(self, grid_size_x, grid_size_y, cell_size, img=()):
self.grid_size_x = grid_size_x
self.grid_size_y = grid_size_y
self.cell_size = cell_size
if img:
self.img = img
else:
self.take_screenshot()
def take_screenshot(self):
self.img = C.grab(0, 0, self.grid_size_x * self.cell_size, self.grid_size_y * self.cell_size)
def get_cell(self, x, y):
        if x < self.grid_size_x and y < self.grid_size_y:
return self.img.crop((x * self.cell_size,
y * self.cell_size,
(x + 1) * self.cell_size - 1,
(y + 1) * self.cell_size - 1,
))
else:
return ()
def get_cell_rgb(self, x, y):
x0 = x * self.cell_size
y0 = y * self.cell_size
return tuple([int(sum(val) / len(val)) for val in zip(
self.img.getpixel((x0 + 10, y0 + 10)),
self.img.getpixel((x0 + 10, y0 + 30)),
self.img.getpixel((x0 + 30, y0 + 30)),
self.img.getpixel((x0 + 30, y0 + 10)),
self.img.getpixel((x0 + 20, y0 + 20)),
)])
def valid_cell(self, x, y):
return True
x0 = x * self.cell_size
y0 = y * self.cell_size
return (get_color(self.img.getpixel((x0, y0 + 6))) == "B" \
and get_color(self.img.getpixel((x0, y0 + 33))) == "B") or \
(get_color(self.img.getpixel((x0 + 39, y0 + 6))) == "B" \
and get_color(self.img.getpixel((x0 + 39, y0 + 33))) == "B")
def get_cell_color(self, x, y):
"""
print(self.get_cell(x, y).getpixel((0, 6)),
get_color(self.get_cell(x, y).getpixel((0, 6))),
self.get_cell(x, y).getpixel((0, 7)),
get_color(self.get_cell(x, y).getpixel((0, 7))),
)
"""
"""
if get_color(self.get_cell(x, y).getpixel((0, 6))) == "B":
return get_fuzzy_color(self.get_cell(x, y).getpixel((0, 7)))
else:
return "?"
"""
if self.valid_cell(x, y):
return get_fuzzy_color(self.get_cell_rgb(x, y))
else:
return "?"
def analyse_cell(self, x, y):
cell = self.get_cell_color(x, y)
if cell in ["1"]:
return cell
if cell == "?" or cell == "B":
return "."
cpt = 0
if x > 0:
if self.get_cell_color(x - 1, y) == cell:
cpt += 1
if x < self.grid_size_x - 1:
if self.get_cell_color(x + 1, y) == cell:
cpt += 1
if cpt > 1:
return "x"
if y > 0:
if self.get_cell_color(x, y - 1) == cell:
cpt += 1
if cpt > 1:
return "x"
if y < self.grid_size_y - 1:
if self.get_cell_color(x, y + 1) == cell:
cpt += 1
if cpt > 1:
return "x"
return "."
def click_cell(self, x, y):
M.mouse_pos((x + 0.5) * self.cell_size,
(y + 0.5) * self.cell_size)
M.left_click()
# print("click on", (x, y))
def seek_and_destroy(self):
targets = []
priority_targets = []
for y in range(self.grid_size_y):
for x in range(self.grid_size_x):
target = self.analyse_cell(x, y)
if target == "!":
self.click_cell(x, y)
return
elif target == "1":
priority_targets.append((x,y))
elif target == "x":
targets.append((x, y))
if priority_targets:
self.click_cell(*random.choice(priority_targets))
return
if targets:
self.click_cell(*random.choice(targets))
def calibration():
img = Image.open("reference.png")
grid = Grid(7, 2, 40, img)
for y in range(3):
colors[grid.get_cell_rgb(0, y)] = 'g'
colors[grid.get_cell_rgb(1, y)] = 'y'
colors[grid.get_cell_rgb(2, y)] = 'r'
colors[grid.get_cell_rgb(3, y)] = 'b'
colors[grid.get_cell_rgb(4, y)] = 'p'
for x in range(5):
colors[grid.get_cell_rgb(x, 3)] = '!'
for x in range(3):
colors[grid.get_cell_rgb(x, 4)] = '1'
def main():
grid = Grid(10, 9, 40)
calibration()
# grid.get_cell(8,8).show()
while True:
"""
for y in range(9):
line = []
for x in range(10):
line.append(grid.get_cell_color(x, y))
print(" ".join(line))
"""
"""
print()
for y in range(9):
line = []
for x in range(9):
line.append(grid.analyse_cell(x, y))
print(" ".join(line))
"""
grid.seek_and_destroy()
time.sleep(0.03)
grid.take_screenshot()
# print('-----')
if __name__ == "__main__":
main()
| rndczn/DiamondDashBot | brain.py | brain.py | py | 5,654 | python | en | code | 0 | github-code | 6 |
22857897162 |
#!/usr/bin/env python
"""
Parses information from aql and outputs them to one JSON
input:
stdin: json aql output
e.g. aql -c "SHOW SETS" -o json | head -n -3
return:
JSON string
[[{...], {...}]] - for each server list of stats (e.g for each set)
"""
import sys
import json
data = []
json_in = ''
for l in sys.stdin:
json_in += l
if ']' in l:
# one server collected
server_stats = []
for stats in json.loads(json_in):
server_stats.append(stats)
json_in = ''
data.append(server_stats)
print(json.dumps(data))
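# Shape of the transformation above (illustrative): aql emits one JSON array per
# server, which this script reads from stdin, e.g. two arrays ->
# [[{...}, {...}], [{...}]] on stdout; each closing ']' marks the end of one
# server's list of stat objects.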
| tivvit/aerospike-tools-parsers | parse_aql.py | parse_aql.py | py | 598 | python | en | code | 0 | github-code | 6 |
9062401747 |
def matrixplot(start_date,end_date,type,term,flag=True):
# Configure plotting in Jupyter
from matplotlib import pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
# plt.rcParams.update({
# 'figure.figsize': (26, 15),
# 'axes.spines.right': False,
# 'axes.spines.left': False,
# 'axes.spines.top': False,
# 'axes.spines.bottom': False})
plt.rcParams['font.sans-serif'] = ['SimHei']
# Seed random number generator
from numpy import random as nprand
seed = hash("Network Science in Python") % 2**32
nprand.seed(seed)
import datetime
import pandas as pd
import numpy as np
import seaborn as sns
from sqlalchemy import create_engine
conn=create_engine('mysql+pymysql://root:lv+7)!@@SHZX@localhost:3306/pledge?charset=gbk')
if term=="all":
sql_query = "select * from trading_data where date_format(日切日期,'%%Y/%%m/%%d')>='{20}' and date_format(日切日期,'%%Y/%%m/%%d')<='{21}' and (正回购方机构类别 = '{2}{0}{3}{0}{4}{0}{5}{0}{6}{0}{7}{0}{8}{0}{9}{0}{10}{0}{11}{0}{12}{0}{13}{0}{14}{0}{15}{0}{16}{0}{17}{0}{18}{0}{19}') and (逆回购方机构类别 = '{2}{1}{3}{1}{4}{1}{5}{1}{6}{1}{7}{1}{8}{1}{9}{1}{10}{1}{11}{1}{12}{1}{13}{1}{14}{1}{15}{1}{16}{1}{17}{1}{18}{1}{19}')" .format("' or 正回购方机构类别 = '","' or 逆回购方机构类别 = '",'政策性银行','国有控股商业银行','股份制商业银行','城市商业银行','农商行和农合行','村镇银行', '城信社及联社','农信社及联社','邮政储蓄银行','财务公司','信托公司','资产管理公司','证券公司','期货公司','基金公司', '保险公司','保险资产管理公司','保险经纪公司',start_date,end_date)
else:
sql_query = "select * from trading_data where date_format(日切日期,'%%Y/%%m/%%d')>='{20}' and date_format(日切日期,'%%Y/%%m/%%d')<='{21}' and 回购天数 = {22} and (正回购方机构类别 = '{2}{0}{3}{0}{4}{0}{5}{0}{6}{0}{7}{0}{8}{0}{9}{0}{10}{0}{11}{0}{12}{0}{13}{0}{14}{0}{15}{0}{16}{0}{17}{0}{18}{0}{19}') and (逆回购方机构类别 = '{2}{1}{3}{1}{4}{1}{5}{1}{6}{1}{7}{1}{8}{1}{9}{1}{10}{1}{11}{1}{12}{1}{13}{1}{14}{1}{15}{1}{16}{1}{17}{1}{18}{1}{19}')" .format("' or 正回购方机构类别 = '","' or 逆回购方机构类别 = '",'政策性银行','国有控股商业银行','股份制商业银行','城市商业银行','农商行和农合行','村镇银行', '城信社及联社','农信社及联社','邮政储蓄银行','财务公司','信托公司','资产管理公司','证券公司','期货公司','基金公司', '保险公司','保险资产管理公司','保险经纪公司',start_date,end_date,term)
df = pd.read_sql(sql_query,con=conn)
title = list(df.columns)
date_idx=title.index('日切日期')
buyertype_idx=title.index('正回购方机构类别')
sellertype_idx=title.index('逆回购方机构类别')
amount_idx=title.index('首期结算金额(亿元)')
rate_idx=title.index('到期预计收益率(%)')
    # Build a dictionary mapping the institution types to the four broad categories
classify_key=['政策性银行','国有控股商业银行','股份制商业银行','城市商业银行','农商行和农合行','村镇银行','城信社及联社', '农信社及联社','邮政储蓄银行','财务公司','信托公司','资产管理公司','证券公司','期货公司','基金公司','保险公司', '保险资产管理公司','保险经纪公司']
classify_value=['大行','大行','大行','中行','中行','小行','小行','小行','大行','非银','非银','非银','非银','非银','非银','非银', '非银','非银']
classify=dict(zip(classify_key,classify_value))
    # flag=False means classify into the four broad categories
if flag:
typelist=['政策性银行','国有控股商业银行','股份制商业银行','城市商业银行','农商行和农合行','村镇银行','城信社及联社', '农信社及联社','邮政储蓄银行','财务公司','信托公司','资产管理公司','证券公司','期货公司','基金公司','保险公司', '保险资产管理公司','保险经纪公司']
else:
typelist=['大行','中行','小行','非银']
for i in range(len(df)):
temp=df.iloc[i,buyertype_idx]
df.iloc[i,buyertype_idx]=classify[temp]
temp=df.iloc[i,sellertype_idx]
df.iloc[i,sellertype_idx]=classify[temp]
matrix = pd.DataFrame(np.zeros((len(typelist),len(typelist)),dtype=float),index=typelist,columns=typelist)
start_date = datetime.datetime.strptime(start_date,'%Y/%m/%d')
end_date = datetime.datetime.strptime(end_date,'%Y/%m/%d')
if type=="amount":
for i in range(len(df)):
trade_date=datetime.datetime.strptime(df.iloc[i,date_idx],'%Y/%m/%d')
if trade_date>=start_date and trade_date<=end_date:
matrix.loc[df.iloc[i,buyertype_idx],df.iloc[i,sellertype_idx]]+=float(df.iloc[i,amount_idx])
elif type=="rate":
rate_array=[]
all_rate=[]
for i in range(len(typelist)):
sub_array = []
for j in range(len(typelist)):
sub_array.append([])
rate_array.append(sub_array)
for i in range(len(df)):
trade_date=datetime.datetime.strptime(df.iloc[i,date_idx],'%Y/%m/%d')
if trade_date>=start_date and trade_date<=end_date:
rate_array[typelist.index(df.iloc[i,buyertype_idx])][typelist.index(df.iloc[i,sellertype_idx])].append(df.iloc[i,rate_idx])
for j in range(len(typelist)):
for k in range(len(typelist)):
all_rate.extend(rate_array[j][k])
median=sorted(all_rate)[int(len(all_rate)/2)]
for j in range(len(typelist)):
for k in range(len(typelist)):
if len(rate_array[j][k])==0:
matrix.iloc[j,k]=median
else:
matrix.iloc[j,k]=float(sorted(rate_array[j][k])[int(len(rate_array[j][k])/2)])
# matrix[list(matrix.columns)]=matrix[list(matrix.columns)].astype(float)
ax=sns.heatmap(matrix,cmap="YlGnBu",annot=True,fmt='.2f',vmin=1,vmax=5,linewidths=0.05,linecolor='white',annot_kws={'size':8,'weight':'bold'})
ax.set_title('{0} {3} {1}~{2}'.format(type,start_date,end_date,term))
ax.set_xlabel('逆回购方')
ax.set_ylabel('正回购方')
plt.show()
matrixplot("2019/05/27","2019/06/14",flag=False,type="rate",term=7)
| ljiaqi1994/Pledge-Repo | 质押式回购_类别矩阵_删减mysql.py | 质押式回购_类别矩阵_删减mysql.py | py | 6,632 | python | en | code | 0 | github-code | 6 |
10543642062 |
from redis.commands.search.field import GeoField, NumericField, TextField, VectorField
REDIS_INDEX_NAME = "benchmark"
REDIS_PORT = 6380
H5_COLUMN_TYPES_MAPPING = {
"int": NumericField,
"int32": NumericField,
"keyword": TextField,
"text": TextField,
"string": TextField,
"str": TextField,
"float": NumericField,
"float64": NumericField,
"float32": NumericField,
"geo": GeoField,
}
def convert_H52RedisType(h5_column_type: str):
redis_type = H5_COLUMN_TYPES_MAPPING.get(h5_column_type.lower(), None)
if redis_type is None:
raise RuntimeError(f"🐛 redis doesn't support h5 column type: {h5_column_type}")
return redis_type
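# Minimal usage sketch (the column name is hypothetical, not from this file):
# field_cls = convert_H52RedisType("float32")   # -> NumericField
# price_field = field_cls("price")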
| myscale/vector-db-benchmark | engine/clients/redis/config.py | config.py | py | 687 | python | en | code | 13 | github-code | 6 |
30052420632 |
import csv
f = open('datafromamazon.csv')
csv_file = csv.reader(f)
URLarray = []
for row in csv_file:
URLarray.append(row[0])
filename = "urlfile.csv"
f = open(filename, "w")
for URL in URLarray:
f.write("ProductName" + "," + "Grade" + "," + "PercentageScore" + "," + "Users" + "," + URL + "\n")
f.close()
| ABoiNamedKoi/VCU-CMSC-412 | csvamazonscrape.py | csvamazonscrape.py | py | 327 | python | en | code | 0 | github-code | 6 |
24370435806 |
from setuptools import setup, find_packages
VERSION = "0.1"
DESCRIPTION = "A Lagrangian Particle Tracking package"
LONG_DESCRIPTION = "Includes a set of tools for Lagrangian Particle Tracking like search, interpolation, etc."
# Setting up
setup(
# name must match the folder name
name="project-arrakis",
version=VERSION,
author="kal @ Dilip Kalagotla",
author_email="<[email protected]>",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[], # add any additional packages that
# needs to be installed along with your package. Eg: 'caer'
keywords=["python", "first package"],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Education",
"Programming Language :: Python :: 3",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
],
)
| kalagotla/project-arrakis | setup.py | setup.py | py | 946 | python | en | code | 1 | github-code | 6 |
2831089261 |
import threading
from time import time
from time import sleep
import asyncio
import tornado.web
import tracemalloc
from hoverbotpy.controllers.constants import PORT
from hoverbotpy.drivers.driver_dummy import DummyHovercraftDriver
from hoverbotpy.drivers.threading_dummy import ThreadingDummy
from hoverbotpy.drivers.pi_pico_simple import SimpleFan
from hoverbotpy.drivers.pi_pico_pid import PIDCorrectedFan
tracemalloc.start()
TIMEOUT_TIME = .5 # IDK UNITS
# Setup CLI arguments
import argparse
parser = argparse.ArgumentParser(
prog="WebController",
description="Web controller for PIE hovercraft.",
epilog="Written by Joseph Gilbert and Devlin Ih",
)
parser.add_argument(
"driver_type",
help=("Type of driver to use. Legal values:\n"
" dummy, dummy_threading, pico, pico_pid"),
)
args = parser.parse_args()
# Globals
# Why are these needed?
last_hover = 0
last_forward = 0
last_right = 0
last_left = 0
# Wish we were using Python 3.10 for pattern matching.
requested_driver = args.driver_type
if requested_driver == "dummy":
driver = DummyHovercraftDriver()
elif requested_driver == "threading_dummy":
driver = ThreadingDummy()
driver.run_loop()
elif requested_driver == "pico":
driver = SimpleFan()
elif requested_driver == "pico_pid":
driver = PIDCorrectedFan()
driver.run_loop()
else:
import sys
print(f"Error: {requested_driver} is not a valid driver type.")
sys.exit(-1)
class Hover(tornado.web.RequestHandler):
def get(self):
global driver
global last_hover
print("hover click")
last_hover = time()
if driver.hover>0:
driver.set_hover_speed(0)
else:
driver.set_hover_speed(20)
pass
class Estop(tornado.web.RequestHandler):
def get(self):
global driver
driver.stop()
print("ESTOP ESTOP ESTOP")
class Forward(tornado.web.RequestHandler):
def get(self):
global last_forward
global driver
driver.set_forward_speed(60)
print("forward click")
print(driver.forward)
last_forward = time()
class NotForward(tornado.web.RequestHandler):
def get(self):
global last_forward
global driver
driver.set_forward_speed(0)
print("not forward click")
print(driver.forward)
last_forward = time()
class Reverse(tornado.web.RequestHandler):
def get(self):
global last_forward
global driver
driver.set_forward_speed(0)
print("rev click")
print(driver.forward)
#last_forward = time()#'''
class Right(tornado.web.RequestHandler):
def get(self):
global last_right
global driver
driver.set_steering_angle(-.75)
print("right click")
print(driver.steering)
last_right = time()
class NotRight(tornado.web.RequestHandler):
def get(self):
global last_right
global driver
driver.set_steering_angle(0)
print("not right click")
print(driver.steering)
last_right = time()
class Left(tornado.web.RequestHandler):
def get(self):
global last_left
global driver
driver.set_steering_angle(.75)
print("left click")
print(driver.steering)
last_left = time()
class NotLeft(tornado.web.RequestHandler):
def get(self):
global last_left
global driver
driver.set_steering_angle(0)
print("not left click")
print(driver.steering)
last_left = time()
class Index(tornado.web.RequestHandler):
def get(self):
#self.write("Hello, world")
self.render("web_controller.html")
def on_connection_close(self):
print("connection closed")
def make_app(): # might be better to use a websocket in future versions
return tornado.web.Application([
(r"/darkmode.css", tornado.web.StaticFileHandler,
{"path": "darkmode.css"},),
(r"/", Index),
(r"/hover/", Hover),
(r"/0_pressed/", Estop),
(r"/estop/", Estop),
(r"/forward/", Forward),
(r"/w_pressed/", Forward),
# there will be no half a pressed with this code
(r"/a_pressed/", Left),
(r"/d_pressed/", Right),
(r"/w_released/", NotForward),
# there will be no half a pressed with this code
(r"/a_released/", NotLeft),
(r"/d_released/", NotRight),
#(r"/h_pressed/", HoverToggle),
], debug=True)
# async def
async def app_start():
app = make_app()
app.listen(PORT)
await asyncio.Event().wait()
async def web_app():
print("web server start")
app = make_app()
app.listen(PORT)
class WatchdogThread(threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print("watchdog thread started")
running = True
while running:
now = time()
# print(now)
if ((last_forward + TIMEOUT_TIME) < now) and driver.forward != 0:
print("forward timeout")
driver.set_forward_speed(0)
if (((last_left + TIMEOUT_TIME) < now) or ((last_right + TIMEOUT_TIME) < now))and driver.steering != 0:
print("turn timeout")
driver.set_steering_angle(0)
from hoverbotpy.drivers.driver_dummy import DummyHovercraftDriver
if __name__ == "__main__":
driver = DummyHovercraftDriver()
motor_watchdog_thread = WatchdogThread(1, "watchdog_1", 1)
motor_watchdog_thread.setDaemon(True)
motor_watchdog_thread.start()
asyncio.run(app_start())
| olincollege/hoverbois | hoverbotpy/src/hoverbotpy/controllers/web_controller.py | web_controller.py | py | 5,781 | python | en | code | 0 | github-code | 6 |
35914573545 |
class Solution:
def reverse(self, x: int) -> int:
twoPwr31=2147483648
while x%10==0 and x!=0:
x=x//10
if x==0 or x>=twoPwr31 or x<=-twoPwr31:
return 0
if x<0:
output = str(x)[-1:0:-1]
if -int(output)<=(twoPwr31*-1):
return 0
else:
return "-"+output
else:
output = str(x)[::-1]
if int(output)>=(twoPwr31-1):
return 0
else:
                return int(output)
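# Comment-only sanity checks for the method above (illustrative):
# Solution().reverse(-120) -> -21
# Solution().reverse(1534236469) -> 0  (the reversed value overflows 32 bits)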
| azbluem/LeetCode-Solutions | solutions/7.rev-int.py | 7.rev-int.py | py | 548 | python | en | code | 0 | github-code | 6 |
74078752188 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy import context as context
from . fpc import state
from . fpc import TestFpCvStepsBreakdown, GenerateFloorPlanImageOperator, FpcPropGrp
bl_info = {
"name" : "FloorPlanCreator",
"author" : "haseeb",
"description" : "floor plan 3d mesh generator",
"blender" : (3, 50, 0),
"version" : (0, 0, 1),
"location" : "View3D",
"warning" : "",
"category" : "Generic"
}
# SPECIAL LINE
bpy.types.Scene.ff_FPC_prop_grp = bpy.props.PointerProperty(type=FpcPropGrp)
# MAIN PANEL CONTROL
class FPC_PT_Panel(bpy.types.Panel):
bl_idname = "FPC_PT_Panel"
bl_label = "FloorPlanCreator"
bl_category = "FF_Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
def draw(self,context):
layout = self.layout
s = state()
# Modeling
box_rg = layout.box()
col = box_rg.column(align = True)
col.label(text='Floor Plan Options')
row = col.row(align = True)
row.operator("fpc.testfpcvstepsbreakdown", text="Test FP CV Steps")
row = col.row(align = True)
row.operator("fpc.generatefloorplanimage", text="Generate Floor Plan")
# row.operator("ffgen.re_mirror", text="Re-Mirror ")
classes = (
TestFpCvStepsBreakdown,
GenerateFloorPlanImageOperator,
FPC_PT_Panel)
register,unregister = bpy.utils.register_classes_factory(classes)
# from . import auto_load
# auto_load.init()
# def register():
# auto_load.register()
# def unregister():
# auto_load.unregister()
| lalamax3d/FloorPlanCreator | __init__.py | __init__.py | py | 2,177 | python | en | code | 0 | github-code | 6 |
39180507921 |
# File operation with reading each line and writing each line .
'''
#First file creation and writing.
fo = open ( "first31.txt ", "w")
#fo=open("first.txt","r+")
seq= [ "First Line \n ", "Second Line \n" , "Third Line \n" ,"Fourth Line \n " ]
#,"Fifth line \n "\n,"sixth line "\n , "seventh line \n"]
fo.writelines(seq)
fo.close()
'''
# Open the file in read mode .
fo = open ("first31.txt" , "r")
#lines=fo.readlines()
#print("readlines():",lines)
line1=fo.readline()
print("readline():",line1)
#The readline(5) below continues from the current file position and reads at most 5 characters of the next line.
line2=fo.readline(5)
print("readlines(1):",line2)
# close of the file .
fo.close()
| sameerCoder/pycc_codes | file_readline_writeline.py | file_readline_writeline.py | py | 719 | python | en | code | 2 | github-code | 6 |
170910713 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from firefox_puppeteer.base import BaseLib
class ErrorTriggerer(BaseLib):
def _notify_observers(self, topic, data):
self.marionette.execute_script("""
Components.utils.import("resource://gre/modules/Services.jsm");
Services.obs.notifyObservers(null, "{}", "{}");
""".format(topic, data))
def trigger_error(self, error_type, where, msg="[Marionette UI test]"):
self._notify_observers("requestpolicy-trigger-error-" + where,
"{}:{}".format(error_type, msg))
| RequestPolicyContinued/requestpolicy | tests/marionette/rp_puppeteer/api/error_triggerer.py | error_triggerer.py | py | 743 | python | en | code | 253 | github-code | 6 |
40276526905 |
import cv2
import random
import numpy as np
from PIL import Image
from compel import Compel
import torch
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionUpscalePipeline
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
def seed_everything(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
return seed
class MaskFormer:
def __init__(self, device):
print(f"Initializing MaskFormer to {device}")
self.device = device
        self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined", cache_dir='/data1/gitaek')
self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined", cache_dir='/data1/kirby/.cache').to(device)
def inference(self, image_path, text):
threshold = 0.2
min_area = 0.02
padding = 25
if isinstance(image_path, str):
original_image = Image.open(image_path)
else: original_image = image_path
image = original_image.resize((512, 512))
inputs = self.processor(text=text, images=image, padding="max_length", return_tensors="pt").to(self.device)
with torch.no_grad():
outputs = self.model(**inputs)
mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold
area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1])
if area_ratio < min_area:
return None
visual_mask = cv2.dilate((mask*255).astype(np.uint8), np.ones((padding, padding), np.uint8))
image_mask = Image.fromarray(visual_mask)
return image_mask.resize(original_image.size)
class ImageEditing:
def __init__(self, device):
print(f"Initializing ImageEditing to {device}")
self.device = device
self.mask_former = MaskFormer(device=self.device)
self.revision = 'fp16' if 'cuda' in device else None
self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
self.inpaint = StableDiffusionInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-inpainting", revision=self.revision, torch_dtype=self.torch_dtype, cache_dir='/data1/kirby/.cache').to(device)
self.compel = Compel(tokenizer=self.inpaint.tokenizer, text_encoder=self.inpaint.text_encoder)
def inference_kirby(self, original_image, to_be_replaced_txt,
replace_with_txt='backdrop++, background++, backgrounds++',
seed=42, num_images_per_prompt=1, negative_prompt=''):
if seed is not None:
seed_everything(seed)
assert original_image.size == (512, 512)
mask_image = self.mask_former.inference(original_image, to_be_replaced_txt)
if mask_image is None:
return None, None
list_negative_prompt = negative_prompt.split(', ')
list_negative_prompt.insert(0, list_negative_prompt.pop(list_negative_prompt.index(to_be_replaced_txt)))
negative_prompt = ', '.join(list_negative_prompt)
negative_prompt = negative_prompt.replace(to_be_replaced_txt, f'{to_be_replaced_txt}++')
conditioning_pos = self.compel.build_conditioning_tensor(replace_with_txt)
conditioning_neg = self.compel.build_conditioning_tensor(negative_prompt)
updated_images = self.inpaint(
image=original_image,
prompt_embeds=conditioning_pos,
negative_prompt_embeds=conditioning_neg,
mask_image=mask_image,
guidance_scale=7.5,
num_inference_steps=50,
num_images_per_prompt=num_images_per_prompt
).images
return updated_images, mask_image
class SuperResolution:
def __init__(self, device):
print(f"Initializing SuperResolution to {device}")
self.revision = 'fp16' if 'cuda' in device else None
self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
self.Upscaler_sr = StableDiffusionUpscalePipeline.from_pretrained(
"stabilityai/stable-diffusion-x4-upscaler", revision=self.revision,
torch_dtype=self.torch_dtype, cache_dir='/data1/kirby/.cache').to(device)
def inference(self, image, prompt, seed=None, baselen=128):
if seed is not None:
seed_everything(seed)
old_img = image.resize((baselen, baselen))
upscaled_img = self.Upscaler_sr(prompt=prompt, guidance_scale=7.5, image=old_img, num_inference_steps=50).images[0]
return upscaled_img
| Anears/SHIFT | models/shift.py | shift.py | py | 4,678 | python | en | code | 0 | github-code | 6 |
38460841413 |
import pygame
pygame.init()
font = pygame.font.Font(pygame.font.get_default_font(), 18)
class Components:
def __init__(self, window: pygame.Surface) -> None:
self.window = window
self.buttons = list()
def Button(self, name: str):
text = font.render(name, False, (0, 0, 0))
rect = pygame.Rect(900, 10, text.get_width(), 50)
if len(self.buttons) > 0:
top, left = self.buttons[-1]['rect'].top, self.buttons[-1]['rect'].left
rect.top = top+60
button = {'rect': rect, 'text': text}
self.buttons.append(button)
return button['rect']
def drawAllComponents(self):
for button in self.buttons:
pygame.draw.rect(self.window, (230, 230, 230), button['rect'])
self.window.blit(
button['text'], (button['rect'].left, button['rect'].top + button['rect'].height / 2 - button['text'].get_height()/2))
class ClickListener:
def __init__(self) -> None:
self.components = list()
def addListener(self, component: pygame.Rect, callbackFn):
self.components.append((component, callbackFn))
def listenEvents(self):
pos = pygame.mouse.get_pos()
left, _, _ = pygame.mouse.get_pressed()
for component in self.components:
if pos[0] in range(component[0].left, component[0].left + component[0].width):
if pos[1] in range(component[0].top, component[0].top + component[0].height) and left:
component[1]()
pygame.mouse.set_pos(
(component[0].left-10, component[0].top-10))
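# Illustrative wiring of the two classes above (window size and callback are
# assumptions, not from this module):
# window = pygame.display.set_mode((1000, 600))
# ui = Components(window)
# listener = ClickListener()
# listener.addListener(ui.Button("Play"), lambda: print("play clicked"))
# ...then, inside the main loop: ui.drawAllComponents(); listener.listenEvents()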
| legit-programmer/bit-texture | ui.py | ui.py | py | 1,642 | python | en | code | 0 | github-code | 6 |
35161911497 |
from dhooks import Webhook
from dhooks import Embed
from datetime import date,datetime
import json
embed=Embed(
title="Sucessful Checout!",
url="https://twitter.com/_thecodingbunny?lang=en",
color=65280,
timestamp="now"
)
hook=Webhook("https://discordapp.com/api/webhooks/715950160185786399/uFNsHqIAsOCbiPiBFgUv-pozfLlZyondpi2uuIUjQbxcNuvFz2UedZcRH8dBH6Fo5-7T")
#Get webhook
now=datetime.now()
copped_time=now.strftime("||%Y%m%d\n%H:%M:%S||")
#Get time
store=input("Enter store name:")
#Get store
profile="||"+input("Enter profile:")+"||"
#Get profile
product_image=input("Enter product image link:")
#Get image
product_name=input("Enter product name:")
#Get product name
size=input("Enter product size:")
#Get size
price="$"+input("Enter the price:")
#Get price
order_number="||"+input("Enter order number:")+"||"
#Get order number
embed.add_field(name="Date Time",value=copped_time)
embed.add_field(name="Store",value=store)
embed.add_field(name="Profile",value=profile)
embed.add_field(name="Product",value=product_name)
embed.add_field(name="Size",value=size)
embed.add_field(name="Price",value=price)
embed.add_field(name="Order Number",value=order_number)
embed.set_thumbnail(product_image)
#Embed elements
embed.set_footer(text="@theGaneshBot",icon_url="https://ganeshbot.com/public/images/logo-transparent.png")
hook.send(embed=embed)
| 1mperfectiON/TCB-Project1 | fake_bot_webhook.py | fake_bot_webhook.py | py | 1,445 | python | en | code | 0 | github-code | 6 |
4927702164 |
# -*- coding: utf-8 -*-
import json
import pickle
import numpy as np
import random
def preprocess_train_data():
"""
Convert JSON train data to pkl
:param filename:
:return:
"""
f = open('train.json', 'r')
raw_data = json.load(f)
f.close()
def get_record(x):
band_image_1 = np.array(x['band_1'])
band_image_2 = np.array(x['band_2'])
band_image_1 = band_image_1.reshape((75, 75))
band_image_2 = band_image_2.reshape((75, 75))
image = np.stack([band_image_1, band_image_2])
label = x['is_iceberg']
return image, label
train_images = []
train_labels = []
for i in range(len(raw_data)):
image, label = get_record(raw_data[i])
train_labels.append(label)
train_images.append(image)
train_images = np.array(train_images)
train_labels = np.array(train_labels)
with open('train_data.pkl', 'wb') as ff:
pickle.dump(train_images, ff)
with open('train_label.pkl', 'wb') as ff:
pickle.dump(train_labels, ff)
print("Finish Preprocess Train Data")
def load_train_data(path):
with open(path+'/train_data.pkl', 'rb') as f:
train_data = pickle.load(f)
with open(path+'/train_label.pkl', 'rb') as f:
train_label = pickle.load(f)
    train_data = list(zip(train_data, train_label))
num_samples = len(train_data)
ratio = 0.9
num_train = int(num_samples*ratio)
random.shuffle(train_data)
train_samples = train_data[:num_train]
test_samples = train_data[num_train:]
return train_samples, test_samples
def load_test_data(path):
"""
Load Test JSON data
:return:
"""
f = open(path+'/test.json', 'r')
raw_data = json.load(f)
f.close()
def get_image(x):
image_id = x['id']
band_image_1 = np.array(x['band_1'])
band_image_2 = np.array(x['band_2'])
band_image_1 = band_image_1.reshape((75, 75))
band_image_2 = band_image_2.reshape((75, 75))
image = np.stack([band_image_1, band_image_2])
return image_id, image
for i in range(len(raw_data)):
image_id, image = get_image(raw_data[i])
yield {
'image_id': image_id,
'image': image
}
# if __name__ == '__main__':
# preprocess_train_data()
#
# train_data, test_data = load_train_data()
#
# print(train_data[10])
| wondervictor/KaggleIceberg | data/data_process.py | data_process.py | py | 2,431 | python | en | code | 0 | github-code | 6 |
41776384713 |
# An ETL that reads and processes files from song_data and log_data and loads them into dimensional and fact tables
#===========================================================
#Importing Libraries
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
#==========================================================
def process_song_file(cur, filepath):
"""An ETL that extracts songs and artists data from song file and inserst records into songs and artists dimensional tables.
INPUT:
cur - A cursor that will be used to execute queries.
    filepath - path to a JSON song file
OUTPUT:
songs and artists tables with records inserted.
"""
df = pd.read_json(filepath, lines=True)
for index, row in df.iterrows():
#songs---------------------------------------
song_data = (row.song_id, row.title, row.artist_id,
row.year, row.duration)
try:
cur.execute(song_table_insert, song_data)
except psycopg2.Error as e:
print("Error: Inserting row for table: songs")
print (e)
#artists--------------------------------------------
artist_data = (row.artist_id, row.artist_name,
row.artist_location,
row.artist_latitude,
row.artist_longitude)
try:
cur.execute(artist_table_insert, artist_data)
except psycopg2.Error as e:
print("Error: Inserting row for table: artists")
print (e)
#=============================================================
def process_log_file(cur, filepath):
"""An ETL that
- extracts time, users and songplays data from log_data file - inserts the records into the time and users dimensional tables and songplays fact table respectively.
INPUT:
cur - A cursor that will be used to execute queries.
    filepath - path to a JSON log file
OUTPUT:
time, users and songplays tables with records inserted.
"""
df = pd.read_json(filepath, lines=True)
df = df[df.page == 'NextSong']
#time----------------------------------------
df['ts'] = pd.to_datetime(df['ts'], unit='ms')
t = df.copy()
time_data = (t.ts, t.ts.dt.hour, t.ts.dt.day,
t.ts.dt.dayofweek, t.ts.dt.month, t.ts.dt.year,
t.ts.dt.weekday)
column_labels = ['start_time', 'hour', 'day',
'week of year','month', 'year', 'weekday']
time_df = pd.DataFrame(columns=column_labels)
for index, column_label in enumerate(column_labels):
time_df[column_label] = time_data[index]
for i, row in time_df.iterrows():
try:
cur.execute(time_table_insert, list(row))
except psycopg2.Error as e:
print("Error: Inserting row for table: time")
print (e)
#users-----------------------------------
user_df = df[['userId', 'firstName', 'lastName', 'gender',
'level']]
for i, row in user_df.iterrows():
try:
cur.execute(user_table_insert, row)
except psycopg2.Error as e:
print("Error: Inserting row for table: users")
print (e)
#songplays-----------------------------------------
for index, row in df.iterrows():
try:
cur.execute(song_select, (row.song, row.artist,
row.length))
results = cur.fetchone()
if results:
songid, artistid = results
else:
songid, artistid = None, None
songplay_data = (row.ts, row.userId, row.level,
songid, artistid, row.sessionId,
row.location, row.userAgent)
try:
cur.execute(songplay_table_insert, songplay_data)
except psycopg2.Error as e:
print("Error: Inserting row for table: songplays")
print (e)
except psycopg2.Error as e:
print("Error: Querying for Song ID and Artist ID")
print (e)
#===========================================================
def process_data(cur, conn, filepath, func):
"""Function gets all files matching extension from directory
- gets total number of files found
- iterate over files and process
INPUT:
cur - A cursor that will be used to execute queries
conn - connection to database
    filepath - directory containing JSON files
func - table functions
OUTPUT:
processed entire data
"""
all_files = []
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root,'*.json'))
for f in files :
all_files.append(os.path.abspath(f))
num_files = len(all_files)
print('{} files found in {}'.format(num_files, filepath))
for i, datafile in enumerate(all_files, 1):
func(cur, datafile)
conn.commit()
print('{}/{} files processed.'.format(i, num_files))
#============================================================
def main():
""" Connects to Postgres database, executes functions above, creates the fact and dimensional tables.
"""
try:
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
except psycopg2.Error as e:
print("Error: Could not make connection to the Postgres database")
print(e)
try:
cur = conn.cursor()
except psycopg2.Error as e:
print("Error: Could not get curser to the Database")
print(e)
process_data(cur, conn, filepath='data/song_data',
func=process_song_file)
process_data(cur, conn, filepath='data/log_data',
func=process_log_file)
cur.close()
conn.close()
if __name__ == "__main__":
main()
| Marvykalu/DataEngineering | data-modeling-postgresql/etl.py | etl.py | py | 6,006 | python | en | code | 0 | github-code | 6 |
37983159283 |
from fastapi import FastAPI, Response, status,HTTPException
from fastapi.params import Body
from pydantic import BaseModel
from typing import Optional
from random import randrange
app = FastAPI()
class Post(BaseModel):
title: str
content: str
published: bool = True
rating: Optional[int] = None
my_posts = [{"title":"tile of post no 1", "content":"content of the post no 1", "id":1},
{"title": "my favorite foods" , "content":"pizzza", "id": 2}]
def find_post(id):
for p in my_posts:
if p['id'] == id:
return p
def find_index_post(id):
for i, p in enumerate(my_posts):
if p['id'] == id:
return i
@app.get("/")
def root():
return {"message": "Hello World"}
@app.get("/posts")
def get_posts():
return {"DATA": my_posts}
# create post with random ids
@app.post("/posts" ,status_code= status.HTTP_201_CREATED)
def create_posts(post : Post):
post_dict = post.dict()
post_dict['id'] = randrange(1,10000)
my_posts.append(post_dict)
return { "data ": post_dict}
#getting a specific post by id
@app.get("/posts/{id}")
def post_by_id(id: int, response: Response):
post = find_post(id)
if not post:
raise HTTPException(status_code= status.HTTP_404_NOT_FOUND, detail = f"post with id no {id } not found")
return {"new post": post}
#deleting a post
# for this we will first find the index in the array for the required id so that we can delete it
@app.delete("/posts/{id}", status_code=status.HTTP_204_NO_CONTENT)
def delete_post(id: int):
index= find_index_post(id)
if index == None:
raise HTTPException(status_code = status.HTTP_404_NOT_FOUND, detail=f"post not found with id no {id}")
my_posts.pop(index)
return {f"the post with id no. {id} succesfully deleted"}
# updating an existing post; for this we use the PUT method
@app.put("/posts/{id}")
def update_post(id: int, post: Post):
index= find_index_post(id)
if index == None:
raise HTTPException(status_code = status.HTTP_404_NOT_FOUND, detail=f"post not found with id no {id}")
post_dict = post.dict()
post_dict['id'] = id
my_posts[index] = post_dict
return {"datfffa": post_dict}
| RahimUllah001/FastAPI_PROJECT | main.py | main.py | py | 2,278 | python | en | code | 0 | github-code | 6 |
40205691019 |
# -*- coding: utf-8 -*-
import numpy as np
__all__ = ["shift_cnt", ]
def shift_cnt(np_arr, shift_h=None, shift_w=None):
""" Shift the position of contour.
Parameters
-------
np_arr : np.array
contour with standard numpy 2d array format
shift_h : int or float
shift distance in vertical direction
shift_w : int or float
shift distance in horizontal direction
Returns
-------
shift_arr: np.array
shifted contour
"""
# construct new shift_arr from original array
shift_arr = np.array(np_arr)
# shift in vertical direction
if shift_h != None:
shift_arr[0] += shift_h
    # shift in horizontal direction
if shift_w != None:
shift_arr[1] += shift_w
return shift_arr
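if __name__ == "__main__":
    # Minimal usage sketch (illustrative values, not part of the original module):
    # row 0 holds the vertical (h) coordinates, row 1 the horizontal (w) ones.
    demo_cnt = np.array([[0, 0, 5], [0, 5, 5]])
    print(shift_cnt(demo_cnt, shift_h=2, shift_w=-1))  # [[2 2 7], [-1 4 4]]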
| PingjunChen/pycontour | pycontour/transform/shift.py | shift.py | py | 781 | python | en | code | 6 | github-code | 6 |
17838892540 |
import numpy as np
import cv2
# import ipdb
import opts
def computeH(x1, x2):
#Q2.2.1
#Compute the homography between two sets of points
num_of_points = x1.shape[0]
# Construct A matrix from x1 and x2
A = np.empty((2*num_of_points,9))
for i in range(num_of_points):
# Form A
Ai = np.array([[-x2[i,0], -x2[i,1], -1, 0, 0, 0, x1[i,0]*x2[i,0], x1[i,0]*x2[i,1], x1[i,0]], [0, 0, 0, -x2[i,0], -x2[i,1], -1, x1[i,1]*x2[i,0], x1[i,1]*x2[i,1], x1[i,1]]])
A[2*i:2*(i+1), :] = Ai
# Compute SVD solution and extract eigenvector corresponding to smallest eigenvalue
svd_sol = np.linalg.svd(A)
h = svd_sol[2][8]
H2to1 = h.reshape((3,3))
return H2to1
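# Note on computeH above: each correspondence contributes two rows to A, and the
# homography is the null-space direction of A, i.e. the right singular vector for
# the smallest singular value (the last row of Vh returned by np.linalg.svd).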
def computeH_norm(x1, x2):
#Q2.2.2
#Compute the centroid of the points
add_points_x1 = np.sum(x1,axis=0)
K1 = x1.shape[0]
centroid_x1 = add_points_x1/K1
add_points_x2 = np.sum(x2,axis=0)
K2 = x2.shape[0]
centroid_x2 = add_points_x2/K2
#Shift the origin of the points to the centroid
x1_shift = -x1 + centroid_x1
x2_shift = -x2 + centroid_x2
#Normalize the points so that the largest distance from the origin is equal to sqrt(2)
norm_x1 = np.linalg.norm(x1_shift,axis=1)
max_x1_idx = np.argmax(norm_x1)
max_x1_vec = x1_shift[max_x1_idx,:]
norm_x2 = np.linalg.norm(x2_shift,axis=1)
max_x2_idx = np.argmax(norm_x2)
max_x2_vec = x2_shift[max_x2_idx,:]
if max_x1_vec[0] == 0.0 or max_x1_vec[1] == 0.0 or max_x2_vec[0] == 0.0 or max_x2_vec[1] == 0.0:
H2to1 = np.array([])
else:
#Similarity transform 1
T1 = np.array([[1.0/max_x1_vec[0], 0, -centroid_x1[0]/max_x1_vec[0]], [0, 1/max_x1_vec[1], -centroid_x1[1]/max_x1_vec[1]],[0,0,1]])
#Similarity transform 2
T2 = np.array([[1.0/max_x2_vec[0], 0, -centroid_x2[0]/max_x2_vec[0]],[0, 1/max_x2_vec[1], -centroid_x2[1]/max_x2_vec[1]],[0,0,1]])
x1_div = np.tile(max_x1_vec,(x1_shift.shape[0],1))
x1_temp = np.append(x1,np.ones((K1,1)),axis=1)
x1_tilde = T1 @ x1_temp.T
x2_div = np.tile(max_x2_vec,(x2_shift.shape[0],1))
# x2_tilde = np.divide(x2_shift, x2_div)
x2_temp = np.append(x2,np.ones((K2,1)),axis=1)
x2_tilde = T2 @ x2_temp.T
# # H2to1 = x1_tilde
x1_tilde = x1_tilde.T
x1_tilde = x1_tilde[:,0:2]
x2_tilde = x2_tilde.T
x2_tilde = x2_tilde[:,0:2]
#Compute homography
H = computeH(x1_tilde,x2_tilde)
#Denormalization
H2to1 = np.linalg.inv(T1) @ H @ T2
return H2to1
def computeH_ransac(locs1, locs2, opts):
#Q2.2.3
#Compute the best fitting homography given a list of matching points
max_iters = opts.max_iters # the number of iterations to run RANSAC for
inlier_tol = opts.inlier_tol # the tolerance value for considering a point to be an inlier
num_of_points = locs1.shape[0]
sample_size = 4
d = 0
    bestH2to1 = np.array([])
    inliers = np.zeros(num_of_points, dtype=int)  # default in case no valid model is found
for i in range(max_iters):
# Sample a bunch of points from locs1 and locs 2
sample = np.random.choice(num_of_points,sample_size)
x1_sample = locs1[sample,:]
x2_sample = locs2[sample,:]
# computeH_norm(sampled points)
H = computeH_norm(x1_sample,x2_sample)
if H.size == 0:
continue
locs1_hom = np.append(locs1,np.ones((num_of_points,1)),axis=1)
locs2_hom = np.append(locs2,np.ones((num_of_points,1)),axis=1)
l_hat = H @ locs2_hom.T
l_hat[0,:] = np.divide(l_hat[0,:], l_hat[2,:])
l_hat[1,:] = np.divide(l_hat[1,:], l_hat[2,:])
l_hat[2,:] = np.divide(l_hat[2,:], l_hat[2,:])
Hvec = locs1_hom.T - l_hat
dist = np.linalg.norm(Hvec,axis=0)
inliers_test = dist < inlier_tol
inliers_test = inliers_test*1
num_inliers = np.sum(inliers_test)
if num_inliers > d:
# ipdb.set_trace()
d = num_inliers
inliers = inliers_test
bestH2to1 = H
return bestH2to1, inliers
def compositeH(H2to1, template, img):
# Create a composite image after warping the template image on top
# of the image using the homography
# Note that the homography we compute is from the image to the template;
# x_template = H2to1*x_photo
# For warping the template to the image, we need to invert it.
hp_cover_temp = img
cv_desk = template
# hp_cover_temp = cv2.resize(hp_cover,(cv_cover.shape[1],cv_cover.shape[0]))
# img = cv2.resize(img,(template.shape[1],template.shape[0]))
# Create mask of same size as template
mask = np.ones(shape=[hp_cover_temp.shape[0], hp_cover_temp.shape[1], hp_cover_temp.shape[2]], dtype= 'uint8')*255
# Warp mask by appropriate homography
warped_mask = cv2.warpPerspective(cv2.transpose(mask), H2to1, (cv_desk.shape[0], cv_desk.shape[1]))
warped_mask = cv2.transpose(warped_mask)
warped_mask = cv2.cvtColor(warped_mask, cv2.COLOR_BGR2GRAY)
warped_mask = cv2.bitwise_not(warped_mask)
warped_img = cv2.warpPerspective(cv2.transpose(hp_cover_temp), H2to1, (cv_desk.shape[0], cv_desk.shape[1]))
# warped_img = cv2.warpPerspective(cv2.transpose(img), bestH2to1, (template.shape[0], template.shape[1]))
warped_img = cv2.transpose(warped_img)
# cv2.imwrite('perspective.png', warped_img)
# hp_cover_mask = cv2.cvtColor(warped_img, cv2.COLOR_BGR2GRAY)
# _, mask = cv2.threshold(hp_cover_mask,50,255,cv2.THRESH_BINARY_INV)
masked_img = cv2.bitwise_and(cv_desk, cv_desk, mask=warped_mask)
composite_img = masked_img + warped_img
# Warp mask by appropriate homography
# Warp template by appropriate homography
#Use mask to combine the warped template and the image
composite_img = masked_img + warped_img
return composite_img
| blakerbuchanan/computer_vision | augmented_reality/code/planarH.py | planarH.py | py | 5,340 | python | en | code | 0 | github-code | 6 |
18659097750 |
from tkinter import *
from tkinter import ttk
from numpy import *
import random
root = Tk()
root.title('Minesweeper')
mainframe = ttk.Frame(root, padding='3 3 12 12')
mainframe.grid(column=0, row=0, sticky=(N, E, W, S))
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
difficulty = StringVar(mainframe)
difficulty.set('Easy')
diff_tuple = (10, 10)
OPTIONS = [
'Easy',
'Medium',
'Hard',
'Extreme'
]
w = OptionMenu(mainframe, difficulty, *OPTIONS)
w.pack()
def gen():
global difficulty, diff_tuple
difficulty_dict = {
0: (10, 10),
1: (12, 20),
2: (14, 40),
3: (8, 35)
}
diff_tuple = difficulty_dict[OPTIONS.index(difficulty.get())]
generate()
gen_button = Button(mainframe, command=gen, text='Generate')
gen_button.pack()
def generate():
global diff_tuple
side, mines = diff_tuple[0], diff_tuple[1]
randomlist = random.sample(range(1, side**2 - 1), mines)
coordinates = [(x%side-1, x//side-1) for x in randomlist]
field = zeros((side,side))
for c in coordinates:
field[c[0]][c[1]] = 1
_f = pad(field, 1 ,mode='constant')
minefield = zeros_like(_f)
for x in range(1, side+1):
for y in range(1, side+1):
if _f[x][y] == 1:
minefield[x][y] = 9
else:
minefield[x][y] = sum(_f[x - 1:x + 2, y - 1:y + 2].flatten())
minefield = minefield[1:side+1,1:side+1]
sweeper(minefield, side)
def sweeper(minefield, side):
global root, difficulty
root.destroy()
root_2 = Tk()
root_2.title(f'Minesweeper:{difficulty}')
main_field = Canvas(root_2, width=side*20, height=side*20, background='white')
for x in range(0, side*20, 20):
main_field.create_line(x, 0, x, side*20, fill='black')
for y in range(0, side*20, 20):
main_field.create_line(0, y, side*20, y, fill='black')
main_field.pack()
def win():
if 9 not in minefield and 109 not in minefield:
main_field.create_rectangle(0, 0, side * 20, side * 20, fill='gray')
main_field.create_text(side * 10, side * 10, text=f'You won!!!', fill='white', font='Helvetica 15')
def loose():
main_field.create_rectangle(0, 0, side * 20, side * 20, fill='gray')
main_field.create_text(side * 10, side * 10, text=f'Try Again :c', fill='white', font='Helvetica 15')
for x in range(side):
for y in range(side):
minefield[x][y] += 100
def reveal(event):
global xpos, ypos
xpos, ypos = event.x, event.y
x, y = int(xpos // 20), int(ypos // 20)
tile = minefield[x][y]
if 0 < tile < 9:
main_field.create_rectangle(x * 20, y * 20, x * 20 + 20, y * 20 + 20, fill='gray')
main_field.create_text(x * 20 + 10, y * 20 + 10, text=f'{int(tile)}', fill='white', font='Helvetica 15')
elif tile == 9:
main_field.create_rectangle(x * 20, y * 20, x * 20 + 20, y * 20 + 20, fill='gray')
main_field.create_rectangle(x * 20, y * 20, (x + 1) * 20, (y + 1) * 20, fill='red')
loose()
elif tile == 0:
map = area_reveal(x, y)
for (x, y) in map:
tile = minefield[x][y]
if 0 < tile < 9:
main_field.create_rectangle(x * 20, y * 20, x * 20 + 20, y * 20 + 20, fill='gray')
main_field.create_text(x * 20 + 10, y * 20 + 10, text=f'{int(tile)}', fill='white',
font='Helvetica 15')
else:
main_field.create_rectangle(x*20, y*20, x*20 + 20, y*20 + 20, fill='gray')
def flag(event):
global xpos, ypos
xpos, ypos = event.x, event.y
x, y = int(xpos // 20), int(ypos // 20)
tile = minefield[x][y]
if tile < 10:
main_field.create_rectangle(x * 20, y * 20, x * 20 + 20, y * 20 + 20, fill='blue')
minefield[x][y] += 10
elif 10 <= tile < 100:
main_field.create_rectangle(x * 20, y * 20, x * 20 + 20, y * 20 + 20, fill='white')
minefield[x][y] += -10
def area_reveal(x, y):
vis = []
shifts = [
(-1, -1),
(-1, 1),
(-1, 0),
(1, -1),
(1, 1),
(1, 0),
(0, -1),
(0, 1),
]
# main loop
to_reveal = []
if (x, y) not in vis:
to_reveal.append((x, y))
while to_reveal != []:
cell = to_reveal.pop()
vis.append(cell)
if minefield[cell[0]][cell[1]] == 0:
for shift in shifts:
                    # bounds check uses the board size rather than a hard-coded 10x10 grid
                    if (cell[0] + shift[0], cell[1] + shift[1]) not in vis and 0 <= cell[0] + shift[0] < side and 0 <= cell[1] + shift[1] < side:
to_reveal.append((cell[0] + shift[0], cell[1] + shift[1]))
return vis
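    # left click reveals a tile, right click toggles a flag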
main_field.bind("<Button-1>", reveal)
main_field.bind("<Button-3>", flag)
win_cond = Button(root_2, text='Check for win', command=win)
win_cond.pack()
root_2.mainloop()
root.mainloop()
|
awero-manaxiy/minesweeper_pong
|
minesweeper.py
|
minesweeper.py
|
py
| 5,347 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17499920257
|
#TODO practice mode for the ones that required 10+, or 20+ s previously
#TODO program to train two-digit additions and subtractions
from random import random
from random import randint
import datetime
from matplotlib import pyplot as plt
import pandas as pd
# import numpy as np
import os
import mplcursors # need to install: pip install mplcursors
problems = []
results = []
elapsed_time = []
failed = []
# failed = [{'a':15, 'b':11}, {'a':96, 'b':95}, {'a':76, 'b':35}, {'a':16, 'b':77}]#TODO
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['font.family'] = ['Arial']
cwd = os.getcwd()
excel_path = os.path.join(cwd,'anzan_log.xlsx')
if os.path.isfile(excel_path):
df_s = pd.read_excel(excel_path, index_col=0, sheet_name='successes')
df_f = pd.read_excel(excel_path, index_col=0, sheet_name='failures')
df_r = pd.read_excel(excel_path, index_col=0, sheet_name='rates').astype(float) #float
df_t = pd.read_excel(excel_path, index_col=0, sheet_name='time').astype(float) #float
else:
df_s = pd.DataFrame(0, index=range(1, 100), columns=range(1, 100))
df_f = pd.DataFrame(0, index=range(1, 100), columns=range(1, 100))
df_r = pd.DataFrame(float(0), index=range(1, 100), columns=range(1, 100)).astype(float)
df_t = pd.DataFrame(float(0), index=range(1, 100), columns=range(1, 100)).astype(float)
time_out_s = 20 # inclusive, elapsed time must be <= time_out_s
failed_ind = 0
failed_in_the_past = []
for row_index, row in df_f.iterrows():
for col_index, value in row.items():
if value != 0:
failed_in_the_past.append({'a': row_index, 'b': col_index})
def show_problem(a, b, view):
if view == 1:
print(f"\n{a} x {b} =\n")
elif view == 2:
if course == 6:
print(f"\n {a:>3} \nx {b:>3}\n-----\n")
else:
print(f"\n {a:>2} \nx {b:>2}\n-----\n")
def biased_randint(min_val, max_val, bias=0.5):
"""Generate a biased random integer between min_val and max_val.
With a bias value of 0.5, numbers towards the higher end (like 6,7,8,9 in tens place)
will be more probable. Adjusting the bias will change the skewness. A bias of 1 will
give you a uniform distribution, values less than 1 will skew towards the maximum,
and values greater than 1 will skew towards the minimum.
"""
return int(min_val + (max_val - min_val) * (random() ** bias))
def get_ab_from_failures():
    global failed_ind  # run_trial() later pops failed[failed_ind], so the index must be the module-level one
    if len(failed) == 0:
        return 0, 0
    failed_ind = randint(0, len(failed)-1)
a = failed[failed_ind]['a']
b = failed[failed_ind]['b']
return a, b
def get_ab_from_failures_in_the_past():
    # randomly choose a and b from the failures recorded in past sessions (collected above from df_f)
ind = randint(0, len(failed_in_the_past)-1)
if randint(0,1):
a = failed_in_the_past[ind]['a']
b = failed_in_the_past[ind]['b']
else:
a = failed_in_the_past[ind]['b']
b = failed_in_the_past[ind]['a']
return a, b
def get_ab_general():
# a = randint(1,99)
a = biased_randint(1,99,randbias)
# b = randint(1,99)
b = biased_randint(1,99,randbias)
return a, b
def get_ab_Indian():
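    # three pair types: (1) same tens digit with unit digits summing to 10, (2) same tens digit, (3) same unit digit with tens digits summing to 10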
c_type = randint(1,3)
if c_type == 1:
a_ = randint(1,9)
b_ = randint(1,9)
c_ = 10 - b_
a = a_ * 10 + b_
b = a_ * 10 + c_
elif c_type == 2:
a_ = randint(1,9)
b_ = randint(1,9)
c_ = randint(1,9)
a = a_ * 10 + b_
b = a_ * 10 + c_
elif c_type == 3:
a_ = randint(1,9)
b_ = randint(1,9)
c_ = 10 - b_
a = b_ * 10 + a_
b = c_ * 10 + a_
return a, b
def get_ab_two_by_one():
tf = randint(0,1)
if tf:
a = randint(1,9)
b = randint(1,99)
else:
a = randint(1,99)
b = randint(1,9)
return a, b
def get_ab_three_by_one():
if view == 2:
a = randint(100,999)
b = randint(2,9)
else:
tf = randint(0,1)
if tf:
a = randint(2,9)
b = randint(100,999)
else:
a = randint(100,999)
b = randint(2,9)
return a, b
def run_trial(a, b):
dt1 = datetime.datetime.now()
show_problem(a, b, view)
ans = input("Type your answer (or 'q' to quit):\n>")
dt2 = datetime.datetime.now()
if ans == "q":
keep_going = False
else:
problems.append({'a':a,'b':b})
keep_going = True
try:
ans = int(ans)
except Exception as e:
print('wrong input')
results.append(float("nan"))
return keep_going
td = dt2 - dt1
minutes, seconds = divmod(td.total_seconds(), 60)
print(f"\n{minutes} min {seconds} sec\n")
elapsed_time.append(td.total_seconds())
if td.total_seconds() <= time_out_s :
if ans == a * b:
print(f"Correct! :)\n{a} x {b} = {a *b}\n")
results.append(1)
if reviewing:
failed.pop(failed_ind) # remove successful item from failed during review process
else:
print("\a") # didn't work
print(f"Your answer {ans} is wrong:(\n{a} x {b} = {a *b}\n")
results.append(0)
failed.append({'a':a,'b':b})
else:
print("\a") # didn't work
print('Too late')
if ans == a * b:
print(f"Correct! :)\n{a} x {b} = {a *b}\n")
else:
print(f"Your answer {ans} is wrong:(\n{a} x {b} = {a *b}\n")
results.append(0)
failed.append({'a':a,'b':b})
return keep_going
def plot_time(elapsed_time, problems, results):
plt.ion()
fig, ax = plt.subplots(1,1)
zipped = list(zip(elapsed_time, problems, results))
zipped_sorted = sorted(zipped, key=lambda x: x[0])
elapsed_time_sorted, problems_sorted, results_sorted = zip(*zipped_sorted)
for i in range(0, len(elapsed_time_sorted)):
if results_sorted[i]:
ax.plot(elapsed_time_sorted[i], i + 1, 'ok')
else:
ax.plot(elapsed_time_sorted[i], i + 1, 'xr')
ax.set_yticks([i + 1 for i in list(range(0, len(elapsed_time_sorted)))]) # +1
ax.set_xlabel('Time (s)')
xlim = ax.get_xlim()
ax.set_xlim(0, xlim[1])
problems_str =[f"{p['a']} x {p['b']}" for p in problems_sorted]
print(f"len(elapsed_time_sorted) = {len(elapsed_time_sorted)}")
print(f"len(problems_str) = {len(problems_str)}")
ax.set_yticklabels(problems_str)
plt.title("Session")
plt.show()
def plot_all():
# read the latest data
df_s = pd.read_excel(excel_path, index_col=0, sheet_name='successes')
df_f = pd.read_excel(excel_path, index_col=0, sheet_name='failures')
df_r = pd.read_excel(excel_path, index_col=0, sheet_name='rates').astype(float)
df_t = pd.read_excel(excel_path, index_col=0, sheet_name='time').astype(float)
# create lists
res_all = []
for i in range(1,100):
for j in range(1,100):
if df_s[i][j] + df_f[i][j] > 0: # remove the empty cells #TODO KeyError: 99
res_all.append({'a':i, 'b':j, 'n':df_s[i][j] + df_f[i][j],
's':df_s[i][j], 'f':df_f[i][j],
'r':df_r[i][j], 't':df_t[i][j]})
# sort l_all
res_sorted = sorted(res_all, key=lambda x: x['t'])
# read the saved table data and plot them
plt.ion()
fig, ax = plt.subplots(1,1)
max_val = max(item['r'] for item in res_sorted)
min_val = min(item['r'] for item in res_sorted)
norm = plt.Normalize(min_val, max_val)
# Choose a colormap
colormap = plt.cm.cool_r
x_values = [item['t'] for item in res_sorted]
y_values = list(range(1, len(res_sorted) + 1))
colors = colormap(norm([r['r'] for r in res_sorted]))
# Create a single scatter plot with all points
sc = ax.scatter(x_values, y_values, color=colors, s=100)
tooltips = [f"{r['a']} \u00D7 {r['b']}\n" +
f"{r['r']*100} % ({r['s']} of {r['s'] + r['f']})\n" +
f"{r['t']:.1f} sec" for r in res_sorted]
def update_annot(ind):
return tooltips[ind]
def on_hover(sel):
sel.annotation.set_text(update_annot(sel.index))
mplcursors.cursor(sc, hover=True).connect("add", on_hover)
ax.set_xlabel('Time (s)')
xlim = ax.get_xlim()
ax.set_xlim(0, xlim[1])
plt.title("History")
plt.show()
def save_result_table():
## response time
problems_ = problems
# Ensure 'a' is always <= 'b'
for p in problems_:
if p['a'] > p['b']:
p['a'], p['b'] = p['b'], p['a']
combined = sorted(zip(problems_, elapsed_time), key=lambda x: (x[0]['a'], x[0]['b']))
problems_sorted, elapsed_time_sorted = zip(*combined)
for idx, p in enumerate(problems_sorted):
row_idx, col_idx = p['a'], p['b']
# Calculate new average
n = df_s.at[row_idx, col_idx] + df_f.at[row_idx, col_idx]
current_total_time = df_t.at[row_idx, col_idx] * n
new_total_time = current_total_time + elapsed_time_sorted[idx]
# Update df_t and df_n
df_t.at[row_idx, col_idx] = new_total_time / float(n + 1)
##successes and failures
# separate successes and failures
successful_problems = [problem for problem, result in zip(problems, results) if result == 1]
failed_problems = [problem for problem, result in zip(problems, results) if result == 0]
# make a <= b
for p in successful_problems:
if p['a'] > p['b']:
p['a'], p['b'] = p['b'], p['a']
for p in failed_problems:
if p['a'] > p['b']:
p['a'], p['b'] = p['b'], p['a']
# sort (a, b) pairs
successful_problems = sorted(successful_problems, key=lambda x: (x['a'], x['b']))
failed_problems = sorted(failed_problems, key=lambda x: (x['a'], x['b']))
# update values of cells
for p in successful_problems:
if pd.isna(df_s.at[p['a'], p['b']]): # if for the first time
df_s.at[p['a'], p['b']] = 1
else:
df_s.at[p['a'], p['b']] += 1
for p in failed_problems:
if pd.isna(df_f.at[p['a'], p['b']]): # if for the first time
df_f.at[p['a'], p['b']] = 1
else:
df_f.at[p['a'], p['b']] += 1
# recompute rates
df_r = df_s.fillna(0) / (df_s.fillna(0) + df_f.fillna(0))
## save tables
with pd.ExcelWriter(excel_path) as writer:
df_s.to_excel(writer, index=True, sheet_name='successes')
df_f.to_excel(writer, index=True, sheet_name='failures')
df_r.to_excel(writer, index=True, sheet_name='rates')
df_t.to_excel(writer, index=True, sheet_name='time')
def show_results():
print("Finished")
if len(results) > 0:
print(f"Success rate: {sum(results)/len(results) * 100:.1f} % ({sum(results)}/{len(results)})")
ave_time = sum(elapsed_time) / len(elapsed_time) #TODO
print(f"Average response time :{ave_time} sec\n")
        result_icons = ''.join('O' if r else 'X' for r in results)
print(result_icons)
plot_time(elapsed_time, problems, results)
failed_ = [ f"{f['a']} x {f['b']} = {f['a'] * f['b']}" for f in failed]
print("Failed calculations")
print(failed_)
if course != 6:
save_result_table()
plot_all()
keep_going = True
#TODO GUI for preference?
ans = int(input("Type 1 for general, 2 for Indian, 3 for mixed, 4 for 00 x 0, 5 for review, 6 for 000 x 0\n>"))
if ans == 1:
course = 1
elif ans == 2:
course = 2
elif ans == 3:
course = 3
elif ans == 4:
course = 4
elif ans == 5:
course = 5
elif ans == 6:
course = 6
else:
raise ValueError("course has an invalid value")
ans = int(input("Type 1 for horizontal view, 2 for stack view\n>"))
if ans == 1:
view = 1
elif ans == 2:
view = 2
else:
raise ValueError("view has an invalid value")
#TODO ask if you want to use biased random number generation
if course != 4 and course != 5 and course != 6:
ans = float(input("Type 1 for uniform randomness, <1 for biased to have larger digits\n>"))
    if ans == 1:
        randbias = 1
    else:
        randbias = ans  # a value < 1 skews toward larger digits (6, 7, 8, 9), per biased_randint's docstring
reviewing = False
while keep_going:
if course == 1:
a, b = get_ab_general()
elif course == 2:
a, b = get_ab_Indian()
elif course == 3:
ans = randint(0,1)
if ans:
a, b = get_ab_general()
else:
a, b = get_ab_Indian()
elif course == 4:
a, b = get_ab_two_by_one()
elif course == 5:
a, b = get_ab_from_failures_in_the_past()
elif course == 6:
a, b = get_ab_three_by_one()
keep_going = run_trial(a, b)
if not keep_going:
show_results()
ans = input("Do you want to practice the failed problems again? Y/N\n>")
if ans == "y" or ans == "Y":
results = [] #refresh
reviewing = True
keep_going = True
while keep_going:
a, b = get_ab_from_failures()
if a == 0 and b == 0:
keep_going = False
else:
keep_going = run_trial(a, b)
if not keep_going:
print("Finished")
print(f"Success rate: {sum(results)/len(results) * 100:.1f} % ({sum(results)}/{len(results)})")
ave_time = sum(elapsed_time) / len(elapsed_time)
print(f"Average response time :{ave_time} sec\n")
failed_ = [ f"{f['a']} x {f['b']} = {f['a'] * f['b']}" for f in failed]
print("Failed calculations")
print(failed_)
else:
print("Good bye")
|
kouichi-c-nakamura/anzan_training
|
anzan.py
|
anzan.py
|
py
| 13,926 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16989855842
|
class Occupancy:
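    """A classroom occupancy record: its time window, goal, and the related classroom, user, semester and class."""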
def __init__(self, occupancy_id, beginning_date_time, ending_date_time, goal, classroom, user, semester, the_class):
self.id = occupancy_id
self.beginning_time = beginning_date_time
self.ending_time = ending_date_time
self.goal = goal
self.classroom = classroom
self.user = user
self.semester = semester
self.the_class = the_class
|
PORTUNO-SMD/portuno-api
|
entities/Ocupancy.py
|
Ocupancy.py
|
py
| 416 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26273782966
|
import dataiku
from birgitta import context
from birgitta.dataiku.dataset import manage as dataset_manage
from birgitta.dataiku.dataset.manage import schema
from birgitta.dataiku.recipe import manage as recipe_manage
from birgitta.recipetest import validate
def test_recipe(spark_session,
scenario,
src_project_key,
src_recipe_key,
testbench_project_key,
test_params):
# Trigger dataiku, not parquet
context.set("BIRGITTA_DATASET_STORAGE", "DATAIKU")
    # Set the S3 bucket name in the birgitta context
context.set("BIRGITTA_S3_BUCKET", "birgitta_s3_bucket")
print('####################################################')
print('Test recipe: %s (in project %s)' % (src_recipe_key,
src_project_key))
if src_project_key == testbench_project_key:
raise ValueError('Cannot clone recipe to same project as src project')
print('Clone dataset schemas')
schemas = test_params['schemas']
client = dataiku.api_client()
cloned_input_datasets = schemas['inputs'].keys()
cloned_input_datasets = clone_schemas(client,
src_project_key,
testbench_project_key,
cloned_input_datasets,
'Inline')
cloned_output_datasets = schemas['outputs'].keys()
cloned_output_datasets = clone_schemas(client,
src_project_key,
testbench_project_key,
cloned_output_datasets,
'HDFS')
expected_output_datasets = create_expected_output_schemas(
client,
src_project_key,
testbench_project_key,
cloned_output_datasets
)
print('Clone recipe')
recipe_manage.clone(client,
src_project_key,
src_recipe_key,
testbench_project_key,
test_name(src_recipe_key),
cloned_input_datasets,
cloned_output_datasets)
test_cases = test_params['test_cases']
for test_case in test_cases:
print('Setup test case: ' + test_case['name'])
print('Empty and fill datasets with fixtures')
empty_and_fill_datasets(testbench_project_key,
cloned_input_datasets,
schemas['inputs'],
test_case['inputs'])
empty_and_fill_datasets(testbench_project_key,
cloned_output_datasets,
schemas['outputs'],
False) # empty dataset
empty_and_fill_datasets(testbench_project_key,
expected_output_datasets,
expected_params(schemas['outputs']),
expected_params(test_case['outputs']))
print('Run recipe')
testbench_output_dataset_key = test_params['principal_output_dataset']
scenario.build_dataset(dataset_name=testbench_output_dataset_key,
project_key=testbench_project_key)
print('Validate output')
for dataset_name in test_case['outputs']:
print('Validate output dataset: %s' % (dataset_name))
validate.datasets(spark_session,
dataset_name,
expected_name(dataset_name),
testbench_project_key)
print('Successfully validated output dataset: %s' % (dataset_name))
print('Delete testbench recipe TODO')
print('Delete datasets TODO')
print('Tests successful')
def test_name(recipe_name):
return recipe_name + '_test'
def expected_name(dataset_name):
return dataset_name + '_expected'
def delete_datasets(project, dataset_names):
for dataset_name in dataset_names:
dataset_manage.delete_if_exists(project, dataset_name)
def empty_and_fill_datasets(project_key,
dataset_names,
schemas,
row_sets=False):
for dataset_name in dataset_names:
rows = row_sets[dataset_name]['rows'] if row_sets else []
dataset_manage.empty_and_fill(project_key,
dataset_name,
schemas[dataset_name],
rows)
def clone_schemas(client,
src_project_key,
dst_project_key,
dataset_names,
output_type):
datasets = []
for dataset_name in dataset_names:
datasets.append(dataset_name)
schema.clone(client,
src_project_key,
dst_project_key,
dataset_name,
dataset_name,
output_type)
return datasets
def create_expected_output_schemas(client,
src_project_key,
testbench_project_key,
dataset_names):
datasets = []
for dataset_name in dataset_names:
datasets.append(expected_name(dataset_name))
schema.clone(client,
src_project_key,
testbench_project_key,
dataset_name,
expected_name(dataset_name),
'Inline')
return datasets
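# Re-key a params dict so each dataset name maps to its '<name>_expected' counterpart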
def expected_params(set_params):
ret = {}
for dataset_name in set_params.keys():
ret[expected_name(dataset_name)] = set_params[dataset_name]
return ret
|
telia-oss/birgitta
|
birgitta/dataiku/recipetest/scenariotest.py
|
scenariotest.py
|
py
| 5,911 |
python
|
en
|
code
| 13 |
github-code
|
6
|
8592665762
|
from django.urls import path
from App import views
from django.contrib.auth import views as g
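# Django's built-in auth views (aliased as g) supply the login and logout pages used below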
urlpatterns = [
path('',views.home,name="hm"),
path('abt/',views.about,name="ab"),
path('ap/',views.products,name="pro"),
path('vege/',views.vegetables,name="veg"),
path('fru/',views.fruits,name="fit"),
path('da/',views.dairy,name="day"),
path('pu/',views.pulses,name="pul"),
path('ho/',views.house,name="hom"),
path('po/',views.care,name="car"),
path('ca/',views.cart,name="cat"),
path('cnt/',views.contact,name="ct"),
path('rg/',views.register,name="reg"),
path('pf/',views.prfle,name="pfe"),
path('upf/',views.updf,name="upfe"),
path('lg/',g.LoginView.as_view(template_name="html/login.html"),name="lgn"),
path('lgg/',g.LogoutView.as_view(template_name="html/logout.html"),name="lgo"),
]
|
TataTejaswini/Django-Project
|
App/urls.py
|
urls.py
|
py
| 831 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74557703866
|
from django.shortcuts import render, redirect, get_object_or_404
from board.models import Post, Comment
from board.forms import PostForm, SignupForm, CommentForm
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.views.generic import TemplateView, ListView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
# Create your views here.
# Post list implemented with ListView
class index(ListView):
model = Post
paginate_by = 10
def get_queryset(self):
return Post.objects.order_by('-pk')
# Post detail
def post_detail(request, pk):
post_detail = get_object_or_404(Post, pk=pk)
context = {
'post_detail': post_detail,
}
return render(request, 'board/post_detail.html', context)
# Create a new post
@login_required
def new_post(request):
if request.method =="POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit = False)
post.author = request.user
post.generate()
return redirect('board:post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'board/form.html', {'form': form})
# Edit a post
@login_required
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if post.author == User.objects.get(username=request.user.get_username()):
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.regdate = timezone.now()
post.generate()
return redirect('board:post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'board/form.html', {'form': form})
else:
return render(request, 'board/warning.html')
# Delete a post
@login_required
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
if post.author == User.objects.get(username = request.user.get_username()):
post.delete()
return redirect('board:index')
else:
return render(request, 'board/warning.html')
# Sign up
def signup(request):
if request.method == 'POST':
signup_form = SignupForm(request.POST)
if signup_form.is_valid():
signup_form.signup()
return redirect('board:index')
else:
signup_form = SignupForm()
return render(request, 'registration/signup.html', {'signup_form': signup_form,})
# Signup completion page implemented with TemplateView
class RegisteredView(TemplateView):
template_name = 'registration/signup_done.html'
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.author = request.user
comment.save()
return redirect('board:post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'board/add_comment_to_post.html', {'form': form})
|
Xiorc/Concofreeboard
|
board/views.py
|
views.py
|
py
| 3,289 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14550843664
|
import pytest
from single_number import Solution
from typing import List
@pytest.mark.parametrize(
'nums, expected',
[
([2, 2, 1], 1),
([4, 1, 2, 1, 2], 4),
([1], 1),
]
)
def test_single_number(nums: List[int], expected: int):
solution = Solution()
assert expected == solution.single_number(nums)
|
franciscoalface/leet-code
|
src/136.single_number/test_single_number.py
|
test_single_number.py
|
py
| 343 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74118711867
|
#!/usr/bin/env python3
"""
test_utils.py
contains the tests for the functions in the utils.py file
defined in the current directory
"""
from parameterized import parameterized
from utils import access_nested_map, get_json, memoize
from unittest.mock import patch, Mock
import unittest
class TestAccessNestedMap(unittest.TestCase):
"""
test case: Testing the access_nested_map() function
"""
@parameterized.expand([
({'a': 1}, ('a',), 1),
({'a': {'b': 2}}, ('a',), {'b': 2}),
({'a': {'b': 2}}, ('a', 'b'), 2)
])
def test_access_nested_map(self, nested_map, path, expected):
"""test_access_nested_map test function"""
self.assertEqual(access_nested_map(nested_map, path), expected)
@parameterized.expand([
({}, ('a',)),
({'a': 1}, ('a', 'b'))
])
def test_access_nested_map_exception(self, map, path):
"""test_access_nested_map_exception test function"""
with self.assertRaises(KeyError):
access_nested_map(map, path)
class TestGetJson(unittest.TestCase):
"""
test case: Testing the function of the get_json() function
"""
@parameterized.expand([
('http://example.com', {"payload": True}),
('http://holberton.io', {"payload": False})
])
@patch('utils.requests.get', autospec=True)
def test_get_json(self, test_url, test_payload, mock_request_get):
"""test_get_json() test method"""
mock_response = Mock()
mock_response.json.return_value = test_payload
mock_request_get.return_value = mock_response
output = get_json(test_url)
mock_request_get.assert_called_with(test_url)
self.assertEqual(output, test_payload)
class TestMemoize(unittest.TestCase):
"""
test case: Testing the utils.memoize decorator
"""
def test_memoize(self):
"""test_memoize() test method"""
class TestClass:
def a_method(self):
return 42
@memoize
def a_property(self):
return self.a_method()
with patch.object(TestClass, 'a_method') as mock_a_method:
test_obj = TestClass()
test_obj.a_property()
test_obj.a_property()
mock_a_method.assert_called_once()
|
PC-Ngumoha/alx-backend-python
|
0x03-Unittests_and_integration_tests/test_utils.py
|
test_utils.py
|
py
| 2,308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29195553298
|
"""
Title: Explicit finger tapping sequence learning task [replication of Walker et al. 2002]
Author: Julia Wood, the University of Queensland, Australia
Code adapted from Tom Hardwicke's finger tapping task code: https://github.com/TomHardwicke/finger-tapping-task
Developed in Psychopy v2022.1.1
See my GitHub for further details: https://github.com/jrwood21
"""
import time
import pandas as pd
import numpy as np
import sys
import os
from psychopy import visual, event, core, gui, data
from pyglet.window import key
from num2words import num2words
os.chdir(os.path.abspath('')) # change working directory to script directory
globalClock = core.Clock() # create timer to track the time since experiment started
# define sequences for finger tapping task
targ_seq_1 = '41324'
targ_seq_2 = '42314'
prac_seq = '12344'
### set up some useful functions ###
# Function to save messages to a log file
def saveToLog(logString, timeStamp=1):
f = open(logFile, 'a') # open our log file in append mode so don't overwrite with each new log
f.write(logString) # write the string they typed
if timeStamp != 0: # if timestamp has not been turned off
f.write('// logged at %iseconds' % globalClock.getTime()) # write a timestamp (very coarse)
f.write('\n') # create new line
f.close() # close and "save" the log file
# An exit function to initiate if the 'end' key is pressed
def quitExp():
if 'logFile' in globals(): # if a log file has been created
saveToLog('User aborted experiment')
saveToLog('..........................................', 0)
if 'win' in globals(): # if a window has been created
win.close() # close the window
core.quit() # quit the program
# define function to check if filename exists, then create the next available version number
def uniq_path(path):
fn, ext = os.path.splitext(path)
counter = 2
while os.path.exists(path):
path = fn + "_" + str(counter) + ext
counter += 1
return path
# Finger tapping task function
def fingerTapping(n_trials, tap_targetSequence, sequenceType):
## Intro screen ##
saveToLog('Presenting introduction screen') # save info to log
win.setColor('#000000', colorSpace='hex') # set background colour to black
win.flip() # display
generalText.setText(
'TASK INSTRUCTIONS\n\nPlace the fingers of your LEFT hand on the keys 1, 2, 3, and 4. You will be shown a sequence of 5 digits %(sequence)s, and the computer will start counting down until you start. \n\nOnce the countdown has completed and the screen turns green, type %(sequence)s over and over as QUICKLY and as ACCURATELY as possible. \n\nYou will have 30 seconds to type %(sequence)s as many times as possible. Stop when the screen turns red again. You will get 30 seconds to rest before the next trial. \n\nPress the spacebar when you are ready for the countdown to begin.' % {'sequence': tap_targetSequence})
generalText.draw()
win.flip() # display
event.waitKeys(keyList=["space"]) # wait for a spacebar press before continuing
event.clearEvents() # clear the event buffer
win.flip() # blank the screen first
trials = range(1, n_trials + 1)
saveToLog('Running finger tapping task. %i trials with target sequence %s' % (len(trials), tap_targetSequence)) # save info to log
for thisTrial in trials: # begin rest block
win.setColor('#ff0000', colorSpace='hex') # set background colour to red
win.flip() # display
if thisTrial == 1: # if this is first trial
restClock = core.CountdownTimer(10) # start timer counting down from 10
else: # for all other trials
saveToLog('Resting') # save info to log
restClock = core.CountdownTimer(30) # start timer counting down from 30
sequenceText.setText(tap_targetSequence) # set up sequence text
sequenceText.setAutoDraw(True) # display sequence text continuously
timerText.setAutoDraw(True) # display timer text continuously
win.flip()
while restClock.getTime() > 0: # loop continues until trial timer ends
count = restClock.getTime() # get current time from clock
timerText.setText(num2words(np.ceil(count))) # set timer text to the current time
win.flip() # display timer text
if event.getKeys(['end']): # checks for the key 'end' on every refresh so user can quit at any point
quitExp() # initiate quit routine
# begin tapping task
saveToLog('Trial: %i' % thisTrial) # save info to log
win.setColor('#89ba00', colorSpace='hex') # set background colour to green
win.flip() # display the green background
tap_stream = [] # clear previous sequence keypresses from the stream
event.clearEvents() # this makes sure the key buffer is cleared, otherwise old key presses might be recorded
trialClock = core.CountdownTimer(30) # start timer counting down from 30
timerText.setText('Tap as fast as you can!') # set timer text to the current time
win.flip() # display the text
k = 0 # set up marker index
endTrial = False # a trigger to end the trial when True (deployed when the timer runs out)
while endTrial == False: # while trigger has not been deployed
# display incremental markers across the screen from left to right as the user presses accepted keys
if k == 0: # start at beginning of marker index
# start markers incrementing from left to right and append key presses to tap_stream
while k < len(listOfMarkers) - 1 and endTrial == False: # until the markers reach the far side of the screen
if trialClock.getTime() <= 0: # if timer has run out
endTrial = True # deploy the trigger to end the trial
break # and break out of this loop
elif event.getKeys(['end']): # if user presses end key
if thisTrial == 1 and not metaData['practice mode']: # during trial 1: save partial data collected from trial 1
quit_dict = {'stream': [tap_stream],
'trial': thisTrial}
quit_df = pd.DataFrame(quit_dict, index=[0])
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp_trial1' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
quit_df.to_csv(fileName)
saveToLog('User pressed end key during trial 1. Experiment aborted with %s seconds of trial 1 remaining' % trialClock.getTime())
saveToLog('Trial 1 data saved with filename: %s' %fileName)
elif thisTrial > 1 and not metaData['practice mode']: # or during a later trial: save partial and complete trial data collected
quit_dict = {'stream': [tap_stream],
'trial': thisTrial}
quit_df = pd.DataFrame(quit_dict, index=[0])
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
quit_df.to_csv(fileName)
saveToLog('User pressed end key during trial %s' % thisTrial)
saveToLog('Experiment aborted with %s seconds of this trial remaining' % trialClock.getTime())
saveToLog('Partial trial data saved with filename: %s' %fileName)
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp_trials' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
store_out.to_csv(fileName)
saveToLog('Data from complete trials saved with filename: %s' %fileName)
quitExp() # AND quit the program
elif event.getKeys('1'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(True) # turn this marker on
win.flip() # display
tap_stream.append(1) # record the key press
k += 1 # move on to the next marker
elif event.getKeys('2'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(True) # turn this marker on
win.flip() # display
tap_stream.append(2) # record the key press
k += 1 # move on to the next marker
elif event.getKeys('3'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(True) # turn this marker on
win.flip() # display
tap_stream.append(3) # record the key press
k += 1 # move on to the next marker
elif event.getKeys('4'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(True) # turn this marker on
win.flip() # display
tap_stream.append(4) # record the key press
k += 1 # move on to the next marker
# start markers incrementing from right to left and append keypresses to tap_stream:
elif k == len(listOfMarkers) - 1 and endTrial == False:
while k > 0:
if trialClock.getTime() <= 0: # if timer has run out
endTrial = True # deploy the trigger to end the trial
break # and break out of this loop
elif event.getKeys(['end']): # if user presses end key
if thisTrial == 1 and not metaData['practice mode']: # during trial 1: save partial data collected from trial 1
quit_dict = {'stream': [tap_stream],
'trial': thisTrial}
quit_df = pd.DataFrame(quit_dict, index=[0])
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp_trial1' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
quit_df.to_csv(fileName)
saveToLog('User pressed end key during trial 1. Experiment aborted with %s seconds of trial 1 remaining' % trialClock.getTime())
saveToLog('Trial 1 data saved with filename: %s' %fileName)
elif thisTrial > 1 and not metaData['practice mode']: # or during a later trial: save partial and complete trial data collected
quit_dict = {'stream': [tap_stream],
'trial': thisTrial}
quit_df = pd.DataFrame(quit_dict, index=[0])
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
quit_df.to_csv(fileName)
saveToLog('User pressed end key during trial %s' % thisTrial)
saveToLog('Experiment aborted with %s seconds of this trial remaining' % trialClock.getTime())
saveToLog('Partial trial data saved with filename: %s' %fileName)
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp_trials' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
store_out.to_csv(fileName)
saveToLog('Data from complete trials saved with filename: %s' %fileName)
quitExp() # AND quit the program
elif event.getKeys('1'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(False) # turn this marker off
win.flip() # display contents of video buffer
tap_stream.append(1) # record the key press
k -= 1 # move on to the next marker
elif event.getKeys('2'): #checks for key on every refresh
listOfMarkers[k].setAutoDraw(False) # turn this marker off
win.flip() # display contents of video buffer
tap_stream.append(2) # record the key press
k -= 1 # move on to the next marker
elif event.getKeys('3'): #checks for key on every refresh
listOfMarkers[k].setAutoDraw(False) # turn this marker off
win.flip() # display contents of video buffer
tap_stream.append(3) # record the key press
k -= 1 # move on to the next marker
elif event.getKeys('4'): #checks for key on every refresh
listOfMarkers[k].setAutoDraw(False) # turn this marker off
win.flip() # display contents of video buffer
tap_stream.append(4) # record the key press
k -= 1 # move on to the next marker
# turn off all markers during the rest block
for marker in listOfMarkers: # for each marker
marker.setAutoDraw(False) # turn off
win.setColor('#ff0000', colorSpace='hex') # set background colour to red
win.flip() # display red background
output = patternDetect(stream_in=tap_stream, targetSequence_in=tap_targetSequence) # run the pattern detector to calculate correct sequences, errors and accuracy
# gather all relevant data for this trial
newRow = {'participant': metaData['participant'],
'allocation': metaData['participant allocation'],
'session': metaData['session number'],
'session_time': metaData['session time'],
'target_sequence': tap_targetSequence,
'sequence_type': sequenceType,
'trial': thisTrial, # record which trial number
'stream': [tap_stream], # stream of key presses entered by participant
'n_correct': output['n_correct']}
# 'errors': output['errors'], # Unhash these lines if you want them to be reported in the csv output file.
# 'accuracy': output['accuracy']}
# store all trial data in df. Each trial is stored in a new row
if thisTrial == 1:
store_out = pd.DataFrame(newRow, index=[0])
elif thisTrial > 1:
                store_out = pd.concat([store_out, pd.DataFrame(newRow, index=[0])], ignore_index=True)  # pd.concat keeps this working on pandas >= 2.0, where DataFrame.append was removed
# after all trials are complete:
sequenceText.setAutoDraw(False) # turn off the sequence text
timerText.setAutoDraw(False) # turn off the timer text
win.flip() # clear the display
return store_out
# Function for analysing the response stream
def patternDetect(stream_in, targetSequence_in):
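    # Illustrative example (not in the original script): stream [4,1,3,2,4,4,1,3] against target '41324'
    # yields n_correct = 1.6 (one full sequence plus a 3/5 partial match at the end of the stream)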
# pre-load some variables
det_targetSequence = list(map(int, list(targetSequence_in))) # convert target sequence to list of integers
det_stream = list(stream_in) # convert stream of key presses to a list
n_correct = float(0) # store for number of correct sequences per trial
'''
Define stores for error tracking. I did not use these metrics in my study design, but I have left them in the code, in case
they are appropriate for other experimental designs. Redefine, remove or ignore them as necessary for your study design.
'''
contiguousError = 0 # store for cumulative errors
errors = float(0) # store for errors
# note that n_correct + errors = total sequences
i = 0 # start pattern detection at first element of keypress stream:
while i < len(det_stream): # search through every item in stream
# for all key presses up to the final 5 (or any other target sequence length)
if i <= len(det_stream) - len(det_targetSequence):
# for any value in the stream where it + the next 4 keypresses match the target sequence:
if det_stream[i:(i + len(det_targetSequence))] == det_targetSequence:
n_correct += 1 # record a correct pattern completed
i += len(det_targetSequence) # adjust position to skip forward by length of targetSequence
# Then add any accumulated errors to the total error count and clear the contiguous error count
if contiguousError >= 1: # check if there are contiguous errors we have not yet accounted for
errors += 1 # add an error to the total count
contiguousError = 0 # reset contiguous error count
# otherwise, if the next sequence length of items in the stream does not match the target sequence:
elif det_stream[i:(i + len(det_targetSequence))] != det_targetSequence:
contiguousError += 1 # record a 'contiguous error'
i += 1 # adjust index forward by 1
# when contiguous error count reaches 5 incorrect keypresses in a row (i.e., the correct sequence doesn't follow 5 keypresses in a row)
# OR if the final item of the stream does not match the target sequence:
if contiguousError == 5 or i == len(det_stream):
errors += 1 # add an error to the total count
contiguousError = 0 # reset contiguous error count
# now deal with last items of the stream (a special case, see 'method' above)
else:
# get last items
lastItems = det_stream[i:]
# get subset of target sequence of same length as last items
sequenceSubset = det_targetSequence[:len(lastItems)]
# Addition of PARTIAL correct sequences at end of stream:
        while lastItems is not None: # while there are additional items left to check
if lastItems == sequenceSubset: # if lastItems match target sequence subset
n_correct += float(len(lastItems)) / float(len(det_targetSequence)) # record fractional sequence
if contiguousError >= 1: # check if there are errors we have not yet recorded
errors += 1 # add an error to total
contiguousError = 0 # reset contiguous error count
lastItems = None # force failure of inner while loop by updating lastItems
i = len(det_stream) # force failure of outer while loop by updating i
else: # if lastItems do not match target sequence
contiguousError += 1 # add 1 to contiguous error count
# when contiguous error count reaches 5 incorrect keypresses in a row or if this is final item
if contiguousError == 5 or len(lastItems) == 1:
errors += 1 # add an error to total
contiguousError = 0 # reset contiguous error count
if len(lastItems) == 1: # if this is the final item
lastItems = None # force failure of inner while loop by updating lastItems
i = len(det_stream) # force failure of outer while loop by updating i
else: # else if there are still items left to check
lastItems = lastItems[1:] # drop the first item from lastItems
sequenceSubset = sequenceSubset[:-1] # drop the last item from the sequence subset
# integrity check
if n_correct == 0:
print('Issue with this stream - n_correct is zero')
accuracy = float('nan')
else:
accuracy = 1 - errors / n_correct # calculate accuracy
# NOTE: this accuracy definition matches Hardwicke et al. 2016. I did not use this metric in my study design, but I have
# left the code in the script case it is suitable for other study designs. Remove, redefine or ignore as necessary.
return {'n_correct': n_correct, 'errors': errors, 'accuracy': accuracy}
### Collect and store meta-data about the experiment session ###
expName = 'Explicit finger tapping sequence task' # define experiment name
date = time.strftime("%d %b %Y %H:%M:%S", time.localtime()) # get date and time
metaData = {'participant': '',
'session number': [1, 2],
'session time': ['pm-a', 'pm-b', 'am'],
'practice mode': False,
'use automated counter-balancing': True,
'researcher': 'JW',
'location': '304, Seddon North, UQ, Brisbane'} # set up info for infoBox gui
infoBox = gui.DlgFromDict(dictionary=metaData,
title=expName,
order=['participant', 'session number', 'session time',
'practice mode','use automated counter-balancing']) # display gui to get info from user
if not infoBox.OK: # if user hit cancel
quitExp() # quit
# check if participant dir exists, and if not, create one:
if not os.path.isdir('data'):
os.mkdir('data')
if not os.path.isdir('data' + os.path.sep + 'fingertapping'):
os.mkdir('data' + os.path.sep + 'fingertapping')
p_dir = 'data' + os.path.sep + 'fingertapping' + os.path.sep + 'P' + str(metaData['participant'])
if not os.path.isdir(p_dir):
os.mkdir(p_dir)
if not metaData['practice mode']: # if this is not practice mode:
if metaData['use automated counter-balancing']: # and user has chosen to use automated counter-balancing:
cb = {'participant allocation': ['AJX', 'AJY', 'AKX', 'AKY',
'BJX', 'BJY', 'BKX', 'BKY']} # set up info for infoBox gui
infoBox = gui.DlgFromDict(dictionary=cb,
title='Choose counter-balancing parameters') # display gui to get info from user
metaData.update({'participant allocation': cb['participant allocation']})
if not infoBox.OK: # if user hit cancel
quitExp() # quit
elif not metaData['use automated counter-balancing']: # or if user has chosen to manually select sequence type:
seq_dict = {'use sequence': ['sequence_1', 'sequence_2'],
'number of trials': ''}
infoBox = gui.DlgFromDict(dictionary=seq_dict,
title='Select sequence to run experiment') # display gui to get info from user
metaData.update({'participant allocation': 'manual_selection',
'sequence type': '%s' % seq_dict['use sequence'],
'number of trials': '%s' % seq_dict['number of trials']})
if not infoBox.OK: # if user hit cancel
quitExp() # quit
# build filename for this participant's data
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '.csv'
# is this an existing participant? If so we will create a new file name to store the data under
if os.path.exists(fileName): # if they are an existing participant
# confirm that user knows sessions already exist for this participant's current session and time and advise filename will be different:
myDlg = gui.Dlg()
myDlg.addText(
"This participant has existing files for this session time in the directory! Click ok to continue or cancel to abort. \n\n NOTE: if you choose to continue, files will be stored under a different file name.")
myDlg.show() # show dialog and wait for OK or Cancel
if not myDlg.OK: # if the user pressed cancel
quitExp()
# redefine file name by iteratively appending a number so that existing files are not overwritten
fileName = uniq_path(fileName)
metaData.update({'expName': expName, 'date': date}) # record the experiment date and name in the metaData
# check if logfile exists for this participant. If not, create one:
logFile = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) +'_log.txt'
if not os.path.exists(logFile):
with open(logFile, 'w') as fp:
pass
# save metaData to log
saveToLog('..........................................', 0)
saveToLog('experiment: %s' % (metaData['expName']), 0)
saveToLog('researcher: %s' % (metaData['researcher']), 0)
saveToLog('location: %s' % (metaData['location']), 0)
saveToLog('date: %s' % (metaData['date']), 0)
saveToLog('participant: %s' % (metaData['participant']), 0)
saveToLog('session: %s' % (metaData['session number']), 0)
saveToLog('session time: %s' % (metaData['session time']), 0)
saveToLog('participant allocation: %s' % (metaData['participant allocation']), 0)
saveToLog(' ', 0)
else: # otherwise, if it is practice mode:
logFile = p_dir + os.path.sep + 'P' + str(metaData['participant']) + '_practice_log.txt'
if not os.path.exists(logFile):
with open(logFile, 'w') as fp:
pass
# ask user to define number of trials
prac_dict = {'number of trials': ''}
infoBox = gui.DlgFromDict(dictionary=prac_dict,
title='enter number of trials') # display gui to get info from user
if not infoBox.OK: # if user hit cancel
quitExp() # quit
# build filename for this participant's practice data
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_PRACTICE' + '.csv'
# is this an existing participant? If so we will create a new file name to store the data under
if os.path.exists(fileName): # if existing participant
# check user knows sessions already exist for this participant's current session and time:
myDlg = gui.Dlg()
myDlg.addText(
"This participant has existing files for this session time in the directory! Click ok to continue or cancel to abort. \n\n NOTE: if you choose to continue, files will be stored under a different file name.")
myDlg.show() # show dialog and wait for OK or Cancel
if not myDlg.OK: # if the user pressed cancel
quitExp()
# redefine file name by iteratively appending a number so that the original files are not overwritten
fileName = uniq_path(fileName)
metaData.update({'participant allocation': 'practice'})
# save metaData to log
saveToLog('..........................................', 0)
saveToLog('experiment: %s' % (expName), 0)
saveToLog('researcher: %s' % (metaData['researcher']), 0)
saveToLog('location: %s' % (metaData['location']), 0)
saveToLog('date: %s' % (date), 0)
saveToLog('participant: %s' % (metaData['participant']), 0)
saveToLog('session: %s' % (metaData['session number']), 0)
saveToLog('session time: %s' % (metaData['session time']), 0)
saveToLog('participant allocation: %s' % (metaData['participant allocation']), 0)
saveToLog(' ', 0)
### Prepare stimuli etc ###
win = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False, ## UPDATE SIZE TO MATCH YOUR CURRENT MONITOR SETTINGS
monitor='testMonitor', color=(-1,-1,-1), colorSpace='rgb', units='pix') # setup the Window
generalText = visual.TextStim(win=win, ori=0, name='generalText', text='', font=u'Arial', pos=[0, 0], height=35,
wrapWidth=920, color=(1,1,1), colorSpace='rgb', opacity=1, depth=0.0) # general text
sequenceText = visual.TextStim(win=win, ori=0, name='sequenceText', text='', font=u'Arial', pos=[0, 250], height=90,
wrapWidth=None, color=(1,1,1), colorSpace='rgb', opacity=1, depth=0.0) # sequence text
timerText = visual.TextStim(win=win, ori=0, name='sequenceText', text='', font=u'Arial', pos=[0, -130], height=40,
wrapWidth=800, color=(1,1,1), colorSpace='rgb', opacity=1, depth=0.0) # timer text
# set up the markers that increment across the screen - generate enough so that they cover the full range of the window
listOfMarkers = [] # store for white markers
windowSize = list(win.size) # get window size
for i in range(int(-windowSize[0] / 2), int(windowSize[0] / 2), int(windowSize[0] / 40)): # generate markers to cover whole screen
i += 25 # add a slight horizontal adjustment to ensure markers do not go off screen
listOfMarkers.append(visual.Circle(win, radius=15, edges=32, pos=[i, 0], fillColor='white')) # generate the markers
# for monitoring key state (only need this if using markers)
keys = key.KeyStateHandler()
win.winHandle.push_handlers(keys)
saveToLog('Set up complete') # save info to log
### set-up complete ###
### run the experiment ###
if metaData['practice mode']: # if user has chosen practice mode
res = fingerTapping(n_trials=int(prac_dict['number of trials']), tap_targetSequence = prac_seq, sequenceType ='practice') # run practice sequence
elif not metaData['practice mode']: # if it is not practice mode
if not metaData['use automated counter-balancing']: # AND the user has chosen to manually select the sequence type:
if seq_dict['use sequence'] == 'sequence_1': # EITHER run task with sequence 1:
res = fingerTapping(n_trials=int(seq_dict['number of trials']), tap_targetSequence = targ_seq_1, sequenceType = 'sequence_1')
elif seq_dict['use sequence'] == 'sequence_2': # OR run task with sequence 2:
res = fingerTapping(n_trials=int(seq_dict['number of trials']), tap_targetSequence = targ_seq_2, sequenceType = 'sequence_2')
elif metaData['use automated counter-balancing']: # OR if user has selected to use automated counter balancing:
# NOTE: these allocations are specific to my study (each letter represents one type of grouping/randomisation variable). Adapt groupings to suit individual experiments
####### X ORDER
if ((metaData['participant allocation'] == 'AJX') or (metaData['participant allocation'] == 'BJX') or (metaData['participant allocation'] == 'AKX') or (metaData['participant allocation'] == 'BKX')):
# session 1
if int(metaData['session number']) == 1:
if metaData['session time'] == 'pm-a':
res = fingerTapping(n_trials = 12, tap_targetSequence = targ_seq_1, sequenceType='sequence_1') # sequence 1
            elif metaData['session time'] in ('pm-b', 'am'):
res = fingerTapping(n_trials = 4, tap_targetSequence = targ_seq_1, sequenceType='sequence_1') # wordlist 1
# session 2
elif int(metaData['session number']) == 2:
if metaData['session time'] == 'pm-a':
res = fingerTapping(n_trials = 12, tap_targetSequence = targ_seq_2, sequenceType='sequence_2') # sequence 2
            elif metaData['session time'] in ('pm-b', 'am'):
res = fingerTapping(n_trials = 4, tap_targetSequence = targ_seq_2, sequenceType='sequence_2') # sequence 2
####### Y ORDER
elif ((metaData['participant allocation'] == 'AJY') or (metaData['participant allocation'] == 'BJY') or (metaData['participant allocation'] == 'AKY') or (metaData['participant allocation'] == 'BKY')):
# session 1
if int(metaData['session number']) == 1:
if metaData['session time'] == 'pm-a':
res = fingerTapping(n_trials = 12, tap_targetSequence = targ_seq_2, sequenceType='sequence_2') # sequence 2
            elif metaData['session time'] in ('pm-b', 'am'):
res = fingerTapping(n_trials = 4, tap_targetSequence = targ_seq_2, sequenceType='sequence_2') # sequence 2
# session 2
elif int(metaData['session number']) == 2:
if metaData['session time'] == 'pm-a':
res = fingerTapping(n_trials = 12, tap_targetSequence = targ_seq_1, sequenceType='sequence_1') # sequence 1
            elif metaData['session time'] in ('pm-b', 'am'):
res = fingerTapping(n_trials = 4, tap_targetSequence= targ_seq_1, sequenceType='sequence_1') # sequence 1
## End screen ##
saveToLog('Presenting end screen') # save info to log
win.setColor('#000000', colorSpace='hex') # set background colour to black
win.flip()
generalText.setText(u'Thank you. That is the end of this section. Please inform the researcher you have finished.')
generalText.draw()
win.flip() # present video buffer
event.waitKeys(keyList=['end']) # wait for the end key to be pressed before continuing
event.clearEvents() # clear the event buffer
saveToLog('Experiment presentation over') # save info to log
### Finished running the experiment ###
### Save and clean up ###
win.close()
'''
Save the data as a csv file. The loop below also checks if saving is not possible, usually because the file is already open, and asks user to close if this is the case
if this does not resolve the situation, attempt is made to save the data with a different filename.
'''
while True:
try:
res.to_csv(fileName)
saveToLog('Data saved with file name: %s' % fileName) # save info to log
break
except: # if cannot save data, likely because file is already open, ask user to close
saveToLog('Problem encountered saving data - requesting user close open data files...') # save info to log
myDlg = gui.Dlg()
myDlg.addText(
"Unable to store data. Try closing open excel files and then click ok. Press cancel to attempt data storage to new file.")
myDlg.show() # show dialog and wait for OK or Cancel
if not myDlg.OK: # if the user pressed cancel
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_ProblemSaving_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '.csv'
saveToLog('Attempting to save data with different filename: %s' %fileName) # save info to log
try:
res.to_csv(fileName)
print('Data was saved with a different filename: %s' %fileName)
saveToLog('Data saved with file name: %s' % fileName) # save info to log
break
except:
saveToLog('Major error: Data could not be saved') # save info to log
quitExp() # quit the experiment
t = globalClock.getTime() # get run time of experiment
saveToLog('Total experiment runtime was %i seconds' % t) # record runtime to log
saveToLog('..........................................', 0)
# Shut down:
core.quit()
|
jrwood21/sleep_tacs_study_jw_gh
|
finger_tapping_task_jw.py
|
finger_tapping_task_jw.py
|
py
| 36,526 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74492658106
|
import random
import string
ALVO = "H0000"
CARACTERES = string.ascii_letters + string.digits + " !@#$%^&*()_+-=[]{}|;:,.<>?/" # Expanded character set
TAMANHO_POPULACAO = 2000
TAXA_MUTACAO = 0.01 # Adjust the mutation rate as needed
LIMITE_GERACOES = 6000
TAMANHO_TORNEIO = 1 # Tournament size for tournament selection
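# NOTE: with a tournament size of 1, selecionar_pais draws a single random individual, so there is effectively no selection pressure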
def gerar_individuo():
individuo = ''.join(random.choice(CARACTERES) for _ in range(len(ALVO)))
return individuo
def calcular_aptidao(individuo):
aptidao = sum(1 for i in range(len(ALVO)) if individuo[i] == ALVO[i])
return aptidao / len(ALVO)
def selecionar_pais(populacao):
    # Tournament selection
torneio = random.sample(populacao, TAMANHO_TORNEIO)
melhor_individuo = max(torneio, key=calcular_aptidao)
return melhor_individuo
def cruzar(pai1, pai2):
filho = ''.join(pai1[i] if random.random() < 0.5 else pai2[i] for i in range(len(ALVO)))
return filho
def mutar(individuo, forcar_mutacao=False):
novo_individuo = list(individuo)
for i in range(len(ALVO)):
if forcar_mutacao or random.random() < TAXA_MUTACAO:
novo_individuo[i] = random.choice(CARACTERES)
return ''.join(novo_individuo)
if __name__ == "__main__":
populacao = [gerar_individuo() for _ in range(TAMANHO_POPULACAO)]
melhor_aptidao = 0.1
melhor_individuo = "Hello"
geracoes = 0
while melhor_aptidao < 1.0 and geracoes < LIMITE_GERACOES:
nova_populacao = []
# Elitism - Preserve the best individual in the new population
nova_populacao.append(melhor_individuo)
while len(nova_populacao) < TAMANHO_POPULACAO:
pai1 = selecionar_pais(populacao)
pai2 = selecionar_pais(populacao)
filho = mutar(cruzar(pai1, pai2))
nova_populacao.append(filho)
# Update the population for the next generation
populacao = nova_populacao
geracoes += 1
# Find the best individual in the current population
melhor_individuo = max(populacao, key=calcular_aptidao)
melhor_aptidao = calcular_aptidao(melhor_individuo)
# Print the best individual in this generation
print(f"Melhor indivíduo encontrado após {geracoes} gerações: {melhor_individuo}")
print(f"Melhor indivíduo encontrado após {geracoes} gerações: {melhor_individuo}")
|
Parish71/Genetic
|
tournament.test.py
|
tournament.test.py
|
py
| 2,417 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
30414879190
|
"""SQLAlchemy models for quiz and quiz questions"""
from datetime import datetime
from models.model import db
from models.quiz_attempt import QuestionAttempt
import sys
sys.path.append('../')
from generator.generator import create_quiz
def search_slug(context):
"""Turns the plant slug into a string suitable for Wikipedia or Google search"""
return context.get_current_parameters()['slug'].replace('-', '+')
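# Illustrative example: a hypothetical slug 'swamp-milkweed' becomes 'swamp+milkweed'.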
def num_by_family(context):
"""Gives number to quiz based on how many quizzes of the same family
are already in the database"""
family = context.get_current_parameters()['family']
return len(Quiz.query.filter(Quiz.family==family).all()) + 1
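# e.g. if two quizzes of a (hypothetical) family 'Asteraceae' already exist, the next one gets number 3.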
class Quiz(db.Model):
"""Quiz"""
__tablename__ = 'quizzes'
id = db.Column(
db.Integer,
primary_key=True
)
num_questions = db.Column(db.Integer)
family = db.Column(
db.Text,
nullable=False
)
num_by_family = db.Column(
db.Integer,
default=num_by_family
)
created_on = db.Column(
db.DateTime,
default = datetime.utcnow
)
created_by = db.Column(
db.Text,
default = 'system'
)
questions = db.relationship(
'Question',
secondary="quiz_questions",
backref='part_of'
)
attempts = db.relationship(
'QuizAttempt',
backref='quiz'
)
@classmethod
def create(cls, family):
"""Create new quiz from identified family.
If error in quiz creation, return False"""
questions = create_quiz(family)
if not questions:
return False
quiz = Quiz(num_questions=10, family=family)
db.session.add(quiz)
db.session.commit()
for question in questions:
new_question = Question(**question)
new_question.family = family
db.session.add(new_question)
db.session.commit()
quiz.questions.append(new_question)
db.session.commit()
return quiz
class Question(db.Model):
"""Quiz question"""
__tablename__ = 'questions'
id = db.Column(
db.Integer,
primary_key=True
)
url = db.Column(
db.Text
)
correct_answer = db.Column(
db.Text
)
wrong_answer_1 = db.Column(
db.Text
)
wrong_answer_2 = db.Column(
db.Text
)
wrong_answer_3 = db.Column(
db.Text
)
slug = db.Column(
db.Text
)
search_slug = db.Column(
db.Text,
default=search_slug
)
attempts = db.relationship(
'QuestionAttempt',
backref='question'
)
class QuizQuestion(db.Model):
"""Map quiz questions to a quiz"""
__tablename__ = 'quiz_questions'
id = db.Column(
db.Integer,
primary_key=True
)
question_id = db.Column(
db.Integer,
db.ForeignKey('questions.id', ondelete='cascade')
)
quiz_id = db.Column(
db.Integer,
db.ForeignKey('quizzes.id', ondelete='cascade')
)
|
lauramoon/capstone-1
|
models/quiz.py
|
quiz.py
|
py
| 3,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3709153796
|
#
# @lc app=leetcode.cn id=155 lang=python3
#
# [155] Min Stack
#
class MinStack:
    # Min stack explained (illustrated guide): https://zhuanlan.zhihu.com/p/31958400
def __init__(self):
"""
initialize your data structure here.
"""
self.stack=[]
        # Records, in order, the indices of the stack's successive minima (an auxiliary "backup" stack); together they make getMin() O(1)
self.tmp=[]
self.index=-1
self.tmpIndex=-1
self.min=-2**31
def push(self, x: int) -> None:
        # If x is <= the current minimum (or the stack is empty), record its index in the auxiliary stack
self.index+=1
if x <= self.min or not self.stack:
self.min=x
# self.tmpIndex+=1
self.tmp.append(self.index)
self.stack.append(x)
def pop(self) -> None:
if self.stack[self.index]==self.min:
self.tmp.pop()
# self.tmpIndex-=1
self.stack.pop()
self.index-=1
def top(self) -> int:
return self.stack[self.index]
def getMin(self) -> int:
return self.stack[self.tmp[len(self.tmp)-1]]
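# Illustrative trace (not part of the original): after push(-2), push(1), push(3),
# stack == [-2, 1, 3] and tmp == [0], so getMin() returns stack[tmp[-1]] == -2
# without scanning the whole stack.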
# # # Your MinStack object will be instantiated and called as such:
if __name__=="__main__":
obj = MinStack()
obj.push(-2)
obj.pop()
obj.push(1)
obj.push(3)
print(obj.top())
print(obj.top())
print(obj.top())
print(obj.getMin())
print(obj.getMin())
print(obj.getMin())
# obj.push(2)
# print(obj.getMin())
# obj.pop()
# print(obj.getMin())
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
# print(param_3)
# print(param_4)
|
chinasilva/MY_LEET_CODE
|
155.最小栈.py
|
155.最小栈.py
|
py
| 1,585 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30953530170
|
import os
def euclide_etendu(e, phi_n):
global d
d = 1
    temp = (e*d) % phi_n
    while temp != 1 :
        d = d + 1
        temp = (e*d) % phi_n
return d
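# Worked example (illustrative): euclide_etendu(7, 40) returns 23,
# since 7 * 23 = 161 = 4 * 40 + 1, i.e. (7 * 23) % 40 == 1.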
def pgcd(a,b):
    # The GCD algorithm (by repeated subtraction)
while a != b:
if a > b:
a = a - b
else:
b = b - a
return a
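# Worked example (illustrative): pgcd(48, 18) returns 6,
# via (48, 18) -> (30, 18) -> (12, 18) -> (12, 6) -> (6, 6).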
def factoriser(n):
b=2
while b:
while n%b!=0 :
b=b+1
if n/b==1 :
print("p = ", b,)
            # Create a global variable p so it can be reused outside the function; p = b
global p
p = b
break
print("\nq = ", b,)
        # Create a global variable q so it can be reused outside the function; q = b
global q
q=b
n=n/b;
pqconnu = 0
pqconnu = input("Si vous êtes en possession de p et q, entrez 1 sinon 0 : ")
pqconnu = int(pqconnu)
if pqconnu == 0 :
    # Read n.
n = input("Entrez le nombre n : ")
n=int(n)
    # Call the function to factor it.
factoriser(n)
    # Compute phi(n)
phiden = (p-1)*(q-1)
    # The GCD function with its two arguments a and b.
    # Counter variable for our while loop
compteur = 0
PGCD1 = 0
    # Our e, which will be incremented
e = 0
    # While the GCD of e and phi(n) is not 1
while PGCD1 != 1 :
        # While compteur == 0
while compteur == 0 :
            # If p < e and q < e and e < phi(n)
if((p < e) and(q < e) and(e < phiden)) :
                # Stop the loop (the break keyword also works)
compteur = 1
break
            # As long as nothing is found, keep incrementing e
e = e + 1
        # Get the result of the gcd
PGCD1 = pgcd(e,phiden)
    # Compute d
d = 0
compteur = 0
while compteur == 0:
        # The conditions described above:
if((e * d % phiden == 1) and(p < d) and(q < d) and(d < phiden)):
compteur = 1
d = d + 1
d = d - 1
    # Display the private key
print("\nCle privee (",d,",",n,")")
if pqconnu == 1 :
p = input("Entrez le nombre p : ")
p= int(p)
q = input("Entrez le nombre q : ")
q= int(q)
    # Compute n
n = p*q
    # Compute phiden
phiden = (p-1)*(q-1)
e=input("veuillez saisir e : ")
e = int(e)
euclide_etendu(e, phiden)
liste = input("si vous utilisez notre encodeur merci de mettre le bloc sortit par l'encodeur : ")
liste = liste.split('.')
i=len(liste)-1
#i = input("Combien il y a de bloc :")
compteur = 0
count = 0
bloc = ""  # accumulates the processed block values as a string
# While the counter is less than the number of blocks
while compteur < i :
    # Take the next block to decrypt
#lettre_crypt = input("\nEntrez le bloc a déchiffrer :")
lettre_crypt = liste[compteur]
lettre_crypt = int(lettre_crypt)
count = count+1
print(count)
    # Find the ASCII code of each letter via the decoding computation
ascii1 = (pow(lettre_crypt,d)%n)
    # With chr(ASCII), we get the corresponding character.
print( "lettre :",chr(ascii1),)
compteur = compteur + 1
bloc = bloc+str(lettre_crypt)
print(bloc)
os.system("pause")
|
MrGaming15/decrypt
|
index1.py
|
index1.py
|
py
| 3,380 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
35126198992
|
from unittest.mock import patch
from uuid import UUID, uuid4
import pytest
from pasqal_cloud import SDK, Workload
from pasqal_cloud.errors import (
WorkloadCancellingError,
WorkloadCreationError,
WorkloadFetchingError,
WorkloadResultsDecodeError,
)
from tests.test_doubles.authentication import FakeAuth0AuthenticationSuccess
class TestWorkload:
@pytest.fixture
def workload_with_link_id(self) -> str:
return str(UUID(int=0x2))
@pytest.fixture
def workload_with_invalid_link_id(self) -> str:
return str(UUID(int=0x3))
@pytest.fixture(autouse=True)
@patch(
"pasqal_cloud.client.Auth0TokenProvider",
FakeAuth0AuthenticationSuccess,
)
def init_sdk(self):
self.sdk = SDK(
username="[email protected]",
password="password",
project_id=str(uuid4()),
)
self.workload_id = "00000000-0000-0000-0000-000000000001"
self.backend = "backend_test"
self.workload_type = "workload_type_test"
self.config = {"test1": "test1", "test2": 2}
self.workload_result = {"1001": 12, "0110": 35, "1111": 1}
def test_create_workload(self, mock_request):
workload = self.sdk.create_workload(
backend=self.backend,
workload_type=self.workload_type,
config=self.config,
)
assert workload.id == self.workload_id
assert workload.backend == self.backend
assert workload.workload_type == self.workload_type
assert workload.config == self.config
assert (
mock_request.last_request.url == f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads"
)
assert mock_request.last_request.method == "POST"
def test_create_workload_error(self, mock_request_exception):
with pytest.raises(WorkloadCreationError):
_ = self.sdk.create_workload(
backend=self.backend,
workload_type=self.workload_type,
config=self.config,
)
assert (
mock_request_exception.last_request.url
== f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads"
)
assert mock_request_exception.last_request.method == "POST"
def test_create_workload_and_wait(self, mock_request):
workload = self.sdk.create_workload(
backend=self.backend,
workload_type=self.workload_type,
config=self.config,
wait=True,
)
assert workload.id == self.workload_id
assert workload.backend == self.backend
assert workload.workload_type == self.workload_type
assert workload.config == self.config
assert workload.result == self.workload_result
assert mock_request.last_request.method == "GET"
def test_get_workload(self, mock_request, workload):
workload_requested = self.sdk.get_workload(workload.id)
assert workload_requested.id == self.workload_id
assert (
mock_request.last_request.url == f"{self.sdk._client.endpoints.core}"
f"/api/v2/workloads/{self.workload_id}"
)
def test_get_workload_with_link(
self, mock_request, workload_with_link_id, result_link_endpoint
):
self.sdk.get_workload(workload_with_link_id)
assert mock_request.last_request.url == (
f"{result_link_endpoint}{workload_with_link_id}"
)
def test_get_workload_with_invalid_link(
self, workload_with_invalid_link_id, mock_request
):
with pytest.raises(WorkloadResultsDecodeError):
self.sdk.get_workload(workload_with_invalid_link_id)
assert (
mock_request.last_request.url
== "http://invalid-link/00000000-0000-0000-0000-000000000003"
)
def test_get_workload_error(self, mock_request_exception, workload):
with pytest.raises(WorkloadFetchingError):
_ = self.sdk.get_workload(workload.id)
assert (
mock_request_exception.last_request.url
== f"{self.sdk._client.endpoints.core}"
f"/api/v2/workloads/{self.workload_id}"
)
assert mock_request_exception.last_request.method == "GET"
def test_cancel_workload_self(self, mock_request, workload):
workload.cancel()
assert workload.status == "CANCELED"
assert mock_request.last_request.method == "PUT"
assert (
mock_request.last_request.url == f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads/{self.workload_id}/cancel"
)
def test_cancel_workload_self_error(self, mock_request_exception, workload):
with pytest.raises(WorkloadCancellingError):
workload.cancel()
assert workload.status == "PENDING"
assert mock_request_exception.last_request.method == "PUT"
assert (
mock_request_exception.last_request.url
== f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads/{self.workload_id}/cancel"
)
def test_cancel_workload_sdk(self, mock_request, workload):
client_rsp = self.sdk.cancel_workload(self.workload_id)
assert type(client_rsp) == Workload
assert client_rsp.status == "CANCELED"
assert mock_request.last_request.method == "PUT"
assert (
mock_request.last_request.url == f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads/{self.workload_id}/cancel"
)
def test_cancel_workload_sdk_error(self, mock_request_exception, workload):
with pytest.raises(WorkloadCancellingError):
_ = self.sdk.cancel_workload(self.workload_id)
assert workload.status == "PENDING"
assert mock_request_exception.last_request.method == "PUT"
assert (
mock_request_exception.last_request.url
== f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads/{self.workload_id}/cancel"
)
def test_workload_instantiation_with_extra_field(self, workload):
"""Instantiating a workload with an extra field should not raise an error.
This enables us to add new fields in the API response on the workloads endpoint
without breaking compatibility for users with old versions of the SDK where
        the field is not present in the Workload class.
        """
        workload_dict = workload.dict()  # Workload data expected by the SDK
        # We add an extra field to mimic the API exposing new values to the user
workload_dict["new_field"] = "any_value"
new_workload = Workload(**workload_dict) # this should raise no error
assert (
new_workload.new_field == "any_value"
) # The new value should be stored regardless
|
pasqal-io/pasqal-cloud
|
tests/test_workload.py
|
test_workload.py
|
py
| 6,862 |
python
|
en
|
code
| 11 |
github-code
|
6
|
37136495284
|
from keras.engine.saving import load_model
from argparse import ArgumentParser
import utils
def build_parser():
par = ArgumentParser()
par.add_argument('--word_features_path', type=str,
dest='word_features_path', help='filepath to save/load word features', default='feature_word')
par.add_argument('--img_features_path', type=str,
dest='img_features_path', help='filepath to save/load image features', default='feature_img')
par.add_argument('--word_file_mapping', type=str,
dest='word_file_mapping', help='filepath to save/load file to word mapping', default='index_word')
par.add_argument('--img_file_mapping', type=str,
dest='img_file_mapping', help='filepath to save/load file to image mapping', default='index_img')
par.add_argument('--index_folder', type=str,
dest='index_folder', help='folder to index', default='dataset')
par.add_argument('--glove_path', type=str,
dest='glove_path', help='path to pre-trained GloVe vectors', default='models/glove.6B')
par.add_argument('--model_path', type=str,
dest='model_path', help='path to custom model', default='my_model.hdf5')
return par
def generate_features(index_folder, features_path, file_mapping, loaded_model, glove_path):
features, index = index_images(
index_folder,
features_path,
file_mapping,
loaded_model,
glove_path)
print("Indexed %s images" % len(features))
return features
def index_images(folder, features_path, mapping_path, model, glove_path):
print ("Now indexing images...")
word_vectors = utils.load_glove_vectors(glove_path)
_, _, paths = utils.load_paired_img_wrd(
folder=folder,
word_vectors=word_vectors)
images_features, file_index = utils.generate_features(paths, model)
utils.save_features(features_path, images_features, mapping_path, file_index)
return images_features, file_index
# def build_feature_tree(file_name, features, n_trees=1000, dims=4096):
# feature_index = utils.index_features(features, n_trees, dims)
# utils.save_obj(file_name, feature_index)
# print('feature tree built!')
if __name__ == "__main__":
parser = build_parser()
options = parser.parse_args()
word_features_path = options.word_features_path
img_features_path = options.img_features_path
word_file_mapping = options.word_file_mapping
img_file_mapping = options.img_file_mapping
index_folder = options.index_folder
model_path = options.model_path
glove_path = options.glove_path
custom_model = load_model(model_path)
features = generate_features(index_folder, word_features_path, word_file_mapping, custom_model, glove_path)
vgg_model = utils.load_headless_pretrained_model()
features = generate_features(index_folder, img_features_path, img_file_mapping, vgg_model, glove_path)
|
cindyyao/image_search
|
index.py
|
index.py
|
py
| 2,983 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71191637947
|
# Copyright (c) 2012 Marc-Andre Decoste. All rights reserved.
# Use of this source code is governed by an Apache 2.0 license that can be
# found in the LICENSE file.
import base
import entities
# Assumed import (not in the original): the Validate methods below call db.get(),
# which looks like Google App Engine's datastore API.
from google.appengine.ext import db
# The Birth event marks the beginning of the life of a Person at its birth place.
class Birth(base.Events):
def __init__(self, child, father, mother, place, min_start_time,
max_start_time, min_time_length = None, max_time_length = None):
        super(Birth, self).__init__(min_start_time, max_start_time, min_time_length,
max_time_length)
# The child is recognized by being the first element in the list.
        # Father and Mother should be the only two other Persons in the entities
        # list and can be recognized by their sex.
assert(not self.entities)
self.entities.append(child.key())
if place:
self.entities.append(place.key())
if father:
assert(father.male_sex)
self.entities.append(father.key())
if mother:
            assert(not mother.male_sex)
self.entities.append(mother.key())
self.Validate()
def Validate(self):
# There must be at least one entity for the child and a maximum of 4 to
# include parents and birthplace.
        assert(len(self.entities) > 0 and len(self.entities) <= 4)  # child + optional place, father, mother
child = db.get(self.entities[0])
place = None
father = None
mother = None
        for entity_key in self.entities[1:]:
            entity = db.get(entity_key)
if isinstance(entity, entities.Place):
place = entity
else:
assert(isinstance(entity, entities.Person))
if entity.male_sex:
father = entity
else:
mother = entity
assert(isinstance(child, entities.Person))
assert(place is None or isinstance(place, entities.Place))
assert(father is None or isinstance(father, entities.Person))
assert(mother is None or isinstance(mother, entities.Person))
super(Birth, self).Validate()
class Death(base.Events):
def __init__(self, corpse, place, min_start_time, max_start_time,
min_time_length = None, max_time_length = None):
super(Death, self).__init__(min_start_time, max_start_time, min_time_length,
max_time_length)
        self.entities.append(corpse.key())
        if place:
            self.entities.append(place.key())
def Validate(self):
# There must be at least one entity for the corpse and a maximum of 2 to
# include the deathplace.
        assert(len(self.entities) > 0 and len(self.entities) <= 2)  # corpse + optional deathplace
corpse = db.get(self.entities[0])
        place = None
        if len(self.entities) == 2:
place = db.get(self.entities[1])
assert(isinstance(corpse, entities.Person))
assert(place is None or isinstance(place, entities.Place))
class Marriage():
pass
|
madecoste/livesovertime
|
src/models/events.py
|
events.py
|
py
| 2,837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75113975226
|
from timeit import default_timer as timer
directions = {
"^": (0,1),
"v": (0,-1),
">": (1,0),
"<": (-1,0)
}
def add(a, b):
return (a[0] + b[0], a[1] + b[1])
start = timer()
file = open('input.txt')
seen = {(0,0)}
santa = (0,0)
robo = (0,0)
flip = False
result = 1
for move in file.readlines()[0]:
direction = directions.get(move, (0,0))
curr = direction
if flip:
robo = add(robo, direction)
curr = robo
else:
santa = add(santa, direction)
curr = santa
flip = not flip
if curr not in seen:
result += 1
seen.add(curr)
print("Completed in %fms" % ((timer() - start) * 1000))
print("%d is the result" % result)
|
kmckenna525/advent-of-code
|
2015/day03/part2.py
|
part2.py
|
py
| 636 |
python
|
en
|
code
| 2 |
github-code
|
6
|
14582545322
|
# Visualisation of Parkes beam pattern: Shows position of beams for a given HDF file
# Input: fname (location of HDF dataset)
# V.A. Moss ([email protected])
__author__ = "V.A. Moss"
__date__ = "$18-sep-2018 17:00:00$"
__version__ = "0.1"
import os
import sys
import tables as tb
import numpy as np
from matplotlib import *
import matplotlib
matplotlib.rcParams["interactive"] = True
from numpy import *
from pylab import *
rc('text', usetex=True)
rc('font',**{'family':'serif','serif':['serif'],'size':14})
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage,AnnotationBbox
from matplotlib._png import read_png
import urllib.request, urllib.parse, urllib.error
import datetime
from astropy.io import ascii
# Read the position from the observation record
fname = '2017-09-19_0109-P953_GASS_246.2+39.9+312_0.hdf'
# VLSR
# This function gets the velocity of the observatory for a given position and date/time
def freq2vlsr(ra,dec,fname):
x = datetime.datetime.strptime(fname.split('-P')[0],'%Y-%m-%d_%H%M')
date = x.strftime('%Y%b%d:%H:%M').lower()
path = 'www.narrabri.atnf.csiro.au/cgi-bin/obstools/velo.cgi?radec=%s,%s&velo=0&frame=lsr&type=radio&date=%s&freq1=1420.405752&freq2=&telescope=parkes' % (ra,dec,date)
path1 = path.replace(':','%3A')
path2 = 'http://'+path1.replace(',','%2C')
# Get from online
f = urllib.request.urlopen(path2)
for line in f:
line = line.decode('utf-8')
if 'Observatory velocity' in line:
vel = float(line.split('</td><td>')[1].split()[0])
return vel
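# Example call (hypothetical coordinate strings taken from the observation record):
#   vel = freq2vlsr('16:29:14', '-24:05:48', fname)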
def showmb():
# Make image
sfig = 'beams_all.png'
arr_lena = read_png(sfig)
imagebox = OffsetImage(arr_lena, zoom=0.35)
ab = AnnotationBbox(imagebox, [0.095,0.08],
xybox=(0., 0.),
xycoords='axes fraction',
boxcoords="offset points",
frameon=False
)
gca().add_artist(ab)
# Get the positional information
d = ascii.read('P953 Observation Record - Sheet1.csv')
# Get the position
srcname = fname.split('/')[-1]
src = srcname.split('.hdf')[0]
mask = (d['File'] == srcname)
dsub = d[mask]
ra,dec = dsub['RA'][0],dsub['Dec'][0]
print('Input file: %s\nPosition: %s, %s' % (srcname,ra,dec))
# Open the data file
t = tb.open_file('%s' % fname)
# Setup the figure
figure(figsize=(8,8))
cmap = cm.Spectral_r
# Plot each position traced
alph=0.025
sz = 300
# The 13 multibeam receivers are stored in PyTables columns named mbNN_raj / mbNN_dcj
cols = t.root.scan_pointing.cols
for i in range(13):
    beam = 'mb%02d' % (i + 1)
    scatter(getattr(cols, beam + '_raj')[:], getattr(cols, beam + '_dcj')[:],
            s=sz, marker='o', edgecolor=cm.Spectral(i/12.), facecolor=cm.Spectral(i/12.), alpha=alph)
# Show a legend of the multi-beam colours
showmb()
figsave = '\_'.join(srcname.split('_'))
title(figsave)
grid(True,alpha=0.2)
xlabel('Right Ascension (deg)')
ylabel('Declination (deg)')
savefig('%s_beampos.pdf' % src,bbox_inches='tight',transparent=True)
|
cosmicpudding/ParkesBeamPattern
|
plot_beampattern.py
|
plot_beampattern.py
|
py
| 4,867 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29999440972
|
class Config:
def __init__(self):
self.name=''
self.description=''
self.options=[]
self.persistent=False
self.config_file=''
self.config_directory=''
class Option:
def __init__(self):
self.name=''
self.description=''
self.default_value=''
class ConfigAdvanced:
def __init__(self):
self.indexModule='index'
self.tagModule='tags'
self.indexManager='IndexManager'
self.tagsManager='TagManager'
self.index=('index',self.indexManager)
self.tags=('tags',self.tagsManager)
self.realFiles=('files','FileManager')
self.virtualFiles=('files','VirtualManager')
self.fileHandler=('files','FileHandler')
self.tagsFile='tags.txt'
self.indexFile='index.txt'
self.commentSymbols=["#"]
self.directoryTag="identity"
|
userwiths/file-tagger
|
core/config.py
|
config.py
|
py
| 894 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2908163256
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Union
from supertokens_python.normalised_url_path import NormalisedURLPath
from supertokens_python.querier import Querier
if TYPE_CHECKING:
from .utils import JWTConfig
from .interfaces import CreateJwtResult
from supertokens_python.supertokens import AppInfo
from supertokens_python.recipe.jwt.interfaces import (
CreateJwtResultOk, CreateJwtResultUnsupportedAlgorithm, GetJWKSResult,
RecipeInterface)
from .interfaces import JsonWebKey
class RecipeImplementation(RecipeInterface):
def __init__(self, querier: Querier, config: JWTConfig, app_info: AppInfo):
super().__init__()
self.querier = querier
self.config = config
self.app_info = app_info
async def create_jwt(self, payload: Dict[str, Any], validity_seconds: Union[int, None], user_context: Dict[str, Any]) -> CreateJwtResult:
if validity_seconds is None:
validity_seconds = self.config.jwt_validity_seconds
data = {
'payload': payload,
'validity': validity_seconds,
'algorithm': 'RS256',
'jwksDomain': self.app_info.api_domain.get_as_string_dangerous()
}
response = await self.querier.send_post_request(NormalisedURLPath("/recipe/jwt"), data)
if response['status'] == 'OK':
return CreateJwtResultOk(response['jwt'])
return CreateJwtResultUnsupportedAlgorithm()
async def get_jwks(self, user_context: Dict[str, Any]) -> GetJWKSResult:
response = await self.querier.send_get_request(NormalisedURLPath("/recipe/jwt/jwks"), {})
keys: List[JsonWebKey] = []
for key in response['keys']:
keys.append(JsonWebKey(
key['kty'],
key['kid'],
key['n'],
key['e'],
key['alg'],
key['use']
))
return GetJWKSResult(response['status'], keys)
|
starbillion/supertokens_python
|
supertokens_python/recipe/jwt/recipe_implementation.py
|
recipe_implementation.py
|
py
| 2,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75341512506
|
"""Script to run antsBrainExtraction on meningioma T1-contrast data.
"""
import os.path as op
from nipype import Node, Workflow, DataGrabber, DataSink, MapNode
from nipype.interfaces import ants
# Node to grab data.
grab = Node(DataGrabber(outfields=['t1c']), name='grabber')
grab.inputs.base_directory = op.abspath('data')
grab.inputs.template = '*.nii.gz'
grab.inputs.field_template = {'t1c': '*.nii.gz'}
grab.inputs.sort_filelist = True
# Node to run ants.BrainExtraction.
# Segments the anatomical image and should extract brain.
template_dir = op.abspath('ants_templates/OASIS-30_Atropos_template')
seg = MapNode(ants.BrainExtraction(), iterfield=['anatomical_image'], name='seg')
seg.inputs.dimension = 3
seg.inputs.keep_temporary_files = 1
seg.inputs.brain_template = op.join(template_dir, 'T_template0.nii.gz')
seg.inputs.brain_probability_mask = op.join(template_dir,
'T_template0_BrainCerebellumProbabilityMask.nii.gz')
# Node to save output files. This does not work. Why?
sinker = Node(DataSink(), name='sinker')
sinker.inputs.base_directory = op.abspath('antsBrainExtraction_output')
# Workflow.
wf = Workflow(name='antsBrainExtraction', base_dir='/om/scratch/Wed/jakubk')
wf.connect(grab, 't1c', seg, 'anatomical_image')
wf.connect(seg, 'BrainExtractionBrain', sinker, 'extracted.brain')
wf.connect(seg, 'BrainExtractionMask', sinker, 'extracted.brain_masks')
wf.connect(seg, 'BrainExtractionSegmentation', sinker, 'extracted.seg_full')
wf.connect(seg, 'BrainExtractionCSF', sinker, 'extracted.csf')
wf.connect(seg, 'BrainExtractionGM', sinker, 'extracted.gm')
wf.connect(seg, 'BrainExtractionWM', sinker, 'extracted.wm')
wf.run(plugin='SLURM', plugin_args={'sbatch_args': '--mem=50GB'})
|
kaczmarj/meningioma
|
scripts/run_ants_brainextraction.py
|
run_ants_brainextraction.py
|
py
| 1,750 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5423305185
|
'''
@author:KongWeiKun
@file: follower_crawler.py
@time: 18-2-13 下午3:57
@contact: [email protected]
'''
from multiprocessing import Pool,cpu_count,Lock,Manager
import pandas as pd
import threading
import csv
import requests
from bs4 import BeautifulSoup
import re
try:
from functools import namedtuple
except:
from collections import namedtuple
headers = {
'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36'
}
COLUMNS = ['user','name','position','repositories','stars', 'followers', 'following', 'contributions']
PROFILE = namedtuple('PROFILE', COLUMNS)
Result = Manager().list()
DF = pd.DataFrame(columns=COLUMNS, index=["0"])
lock = threading.Lock() # global resource lock
def _str_2_int(stri):
if 'k' in stri:
return int(float(stri[:-1]) * 1000)
if ',' in stri:
return int(stri.replace(',', ''))
else:
return int(stri)
# Crawl user profile information
def user_crawler(user):
"""crawl user profile
Arguments:
url {string} -- [description]
"""
url = 'https://github.com/{}'.format(user)
values = [None] * len(COLUMNS)
values[COLUMNS.index('user')] = user
try:
html = requests.get(url, headers=headers, timeout=10).text
soup = BeautifulSoup(html, 'lxml')
tag_name = soup.find_all('span', class_='p-name vcard-fullname d-block')
if len(tag_name) > 0:
name = tag_name[0].text
if len(name) > 0:
values[COLUMNS.index('name')] = name
tag_position = soup.find_all('span', class_='p-label')
if len(tag_position) > 0:
position = tag_position[0].text
values[COLUMNS.index('position')] = position
tags_overview = soup.find_all('span', class_='Counter')
repositories = _str_2_int(tags_overview[0].text.replace('\n', '').replace(' ', ''))
stars = _str_2_int(tags_overview[1].text.replace('\n', '').replace(' ', ''))
followers = _str_2_int(tags_overview[2].text.replace('\n', '').replace(' ', ''))
following = _str_2_int(tags_overview[3].text.replace('\n', '').replace(' ', ''))
values[COLUMNS.index('repositories')] = repositories
values[COLUMNS.index('stars')] = stars
values[COLUMNS.index('followers')] = followers
values[COLUMNS.index('following')] = following
tag_contributions = soup.find_all('h2', class_='f4 text-normal mb-2')
try:
contributions = _str_2_int(
tag_contributions[0].text.replace('\n', '').replace(' ', '').replace('contributionsinthelastyear', ''))
except Exception as err:
contributions = _str_2_int(
tag_contributions[0].text.replace('\n', '').replace(' ', '').replace('contributioninthelastyear', ''))
values[COLUMNS.index('contributions')] = contributions
with lock:
print(values)
Result.append(values)
except Exception as e:
print(e)
# Crawl followers
def get_all_followers(user):
"""get all followers of user
Arguments:
user {string} -- [description]
"""
followers_list = []
idx = 0
url = 'https://github.com/{}?page={}&tab=followers'
while True:
idx += 1
page_url = url.format(user, idx)
try:
html = requests.get(page_url, headers=headers, timeout=10).text
if 've reached the end' in html:
break
soup = BeautifulSoup(html, 'lxml')
tag_names = soup.find_all('span', class_='link-gray pl-1')
for name in tag_names:
followers_list.append(name.text)
except Exception as e:
print(e)
return followers_list
def save():
""" 将数据保存至本地
"""
with open("data/result.csv", "w+") as f:
global Result
f_csv = csv.writer(f)
f_csv.writerow(COLUMNS)
f_csv.writerows(Result)
print('data saved')
followers_list = []
def main():
"""main process
"""
main_user = 'miguelgrinberg'
print('Crawling followers lists, wait a moment ...')
followers_list = get_all_followers(main_user)
pool = Pool(processes=cpu_count())
for user in followers_list:
pool.apply_async(user_crawler, args=(user,))
pool.close()
pool.join()
save()
if __name__ == '__main__':
main()
|
Winniekun/spider
|
github/follower_crawler.py
|
follower_crawler.py
|
py
| 4,422 |
python
|
en
|
code
| 139 |
github-code
|
6
|
38046142992
|
from cffi import FFI as _FFI
import numpy as _np
import glob as _glob
import os as _os
__all__ = ['BloscWrapper']
class BloscWrapper:
def __init__(self, plugin_file=""):
this_module_dir = _os.path.dirname(_os.path.realpath(__file__))
# find the C library by climbing the directory tree
while plugin_file == "":
plugin_pattern = _os.path.join(this_module_dir, "*ags_blosc_wrapper.*")
candidate_plugins = _glob.glob(plugin_pattern)
# if found then break
if candidate_plugins:
plugin_file = candidate_plugins[0]
break
# not found and already at root. We're not going to find it
if this_module_dir == "/":
raise ValueError("Cannot find plugin ags_blosc_wrapper")
# go to parent directory and try again
this_module_dir = _os.path.split(this_module_dir)[0]
# specify the C signatures of the foreign functions
self._ffi = _FFI()
self._ffi.cdef("typedef void* ags_BloscWrapper;")
self._ffi.cdef("ags_BloscWrapper ags_BloscWrapper_new();")
self._ffi.cdef("void ags_BloscWrapper_delete(ags_BloscWrapper);")
self._ffi.cdef("size_t ags_BloscWrapper_reserveNeededToCompress(ags_BloscWrapper, size_t);")
self._ffi.cdef("size_t ags_BloscWrapper_reserveNeededToDecompress(ags_BloscWrapper, void*);")
self._ffi.cdef("size_t ags_BloscWrapper_compress(ags_BloscWrapper, void*, size_t, void*, size_t);")
self._ffi.cdef("size_t ags_BloscWrapper_decompress(ags_BloscWrapper, void*, void*, size_t);")
self._cmodule = self._ffi.dlopen(plugin_file)
# allocate a new raw instance
self.blosc_wrapper = self._cmodule.ags_BloscWrapper_new()
def __del__(self):
# free the raw instance
self._cmodule.ags_BloscWrapper_delete(self.blosc_wrapper)
def reserve_needed_to_compress(self, srcsize):
size = self._ffi.cast("size_t", srcsize)
return self._cmodule.ags_BloscWrapper_reserveNeededToCompress(self.blosc_wrapper, size)
def reserve_needed_to_decompress(self, src):
# get raw buffers
src_contiguous = _np.ascontiguousarray(src)
src_raw = src_contiguous.__array_interface__['data'][0]
src_cffi = self._ffi.cast("void*", src_raw)
return self._cmodule.ags_BloscWrapper_reserveNeededToDecompress(self.blosc_wrapper, src_cffi)
def compress(self, src):
# get sizes
srcsize = src.nbytes
dstsize = self.reserve_needed_to_compress(srcsize)
srcsize_cffi = self._ffi.cast("size_t", srcsize)
dstsize_cffi = self._ffi.cast("size_t", dstsize)
# allocate destination
dst = _np.empty(shape=(dstsize,), dtype=_np.uint8)
# get raw buffers
src_contiguous = _np.ascontiguousarray(src)
src_raw = src_contiguous.__array_interface__['data'][0]
src_cffi = self._ffi.cast("void*", src_raw)
dst_contiguous = _np.ascontiguousarray(dst)
dst_raw = dst_contiguous.__array_interface__['data'][0]
dst_cffi = self._ffi.cast("void*", dst_raw)
# perform compression and resize
dstsize = self._cmodule.ags_BloscWrapper_compress(self.blosc_wrapper, src_cffi, srcsize_cffi, dst_cffi, dstsize_cffi)
dst.resize((dstsize,))
return dst
def decompress(self, src):
# get sizes
dstsize = self.reserve_needed_to_decompress(src)
dstsize_cffi = self._ffi.cast("size_t", dstsize)
# allocate destination
dst = _np.empty(shape=(dstsize,), dtype=_np.uint8)
# get raw buffers
src_contiguous = _np.ascontiguousarray(src)
src_raw = src_contiguous.__array_interface__['data'][0]
src_cffi = self._ffi.cast("void*", src_raw)
dst_contiguous = _np.ascontiguousarray(dst)
dst_raw = dst_contiguous.__array_interface__['data'][0]
dst_cffi = self._ffi.cast("void*", dst_raw)
# perform decompression and resize
dstsize = self._cmodule.ags_BloscWrapper_decompress(self.blosc_wrapper, src_cffi, dst_cffi, dstsize_cffi)
dst.resize((dstsize,))
return dst
|
ActivisionGameScience/ags_example_py_wrapper
|
ags_py_blosc_wrapper.py
|
ags_py_blosc_wrapper.py
|
py
| 4,277 |
python
|
en
|
code
| 3 |
github-code
|
6
|
69958393149
|
import typing as T
import asyncio
import logging
import inspect
from functools import lru_cache
from . import types
from . import transport as _transport
from . import errors
from . import stub
from . import utils
from . import spec
logger = logging.getLogger('pjrpc.server')
class Service:
"""Receive request, routing, process and response to server"""
def _method_predicate(self, meth):
return inspect.iscoroutinefunction(meth) or callable(meth)
@lru_cache(maxsize=1024)
def _get_func(self, f_name: str):
for name, func in inspect.getmembers(self, self._method_predicate):
if name == f_name:
return func
raise errors.MethodNotFoundError()
def _check_args(self, args: T.Dict[str, T.Type], func: T.Callable):
#TODO: check default value
annotations = func.__annotations__
for k, v in args.items():
if k in annotations:
if type(v) is not annotations[k]:
raise errors.InvalidParamError()
async def __call__(
self,
request: types.Request,
) -> T.Union[spec.ErrorResponseMessage, spec.SuccessResponseMessage]:
target = self._get_func(request.method)
params = request.params or {}
self._check_args(params, target)
if not inspect.iscoroutinefunction(target):
target = utils.to_async()(target)
ret = await target(**params)
if not isinstance(request, spec.Notification):
return utils.make_response_from_data(
id=request.id,
result=ret,
)
class Server:
def __init__(
self,
app_path: str,
host: str = '127.0.0.1',
port: int = 6969,
compress: bool = False,
):
self._app_cls = utils.load_app_from_string(app_path)
self._host = host
self._port = port
self._stub = stub.Stub(compress)
self._loop = asyncio.get_event_loop()
self._futures = {}
async def connection_handler(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
):
transport = _transport.ServerTransport(reader, writer, interval=2, alive=5)
async def dispatch_request(request):
if isinstance(request, list):
async def batch_request(requests):
app = self._app_cls()
tasks = []
for request in requests:
if isinstance(request, spec.Notification):
self._loop.create_task(app(request))
else:
f = self._loop.create_task(app(request))
tasks.append(f)
if len(tasks) == 0:
return None
                    # gather the task results so a batch call returns a list of responses
                    responses = await asyncio.gather(*tasks)
                    return responses
return await batch_request(request)
return await self._app_cls()(request)
def on_request_done(fut):
err = fut.exception()
if err:
ret = utils.make_response_from_data(
error={'code': err.code, 'message': err.message})
else:
ret = fut.result()
self._loop.create_task(transport.send_message(self._stub.pack(ret)))
async for in_data in transport.messages():
try:
request = self._stub.unpack(in_data)
except errors.ParseError as error:
err_resp = utils.make_response_from_data(
error={'code': error.code, 'message': error.message})
out_data = self._stub.pack(err_resp)
                self._loop.create_task(transport.send_message(out_data))
                continue  # the request could not be parsed, so there is nothing to dispatch
f = self._loop.create_task(dispatch_request(request))
f.add_done_callback(on_request_done)
def protocol_factory(self):
reader = asyncio.StreamReader(limit=1024, loop=self._loop)
protocol = asyncio.StreamReaderProtocol(
reader, self.connection_handler, loop=self._loop)
return protocol
async def start(self):
server = await self._loop.create_server(self.protocol_factory, self._host, self._port)
async with server:
logger.info('Server is starting on port %d ...', self._port)
await server.serve_forever()
|
magiskboy/pjrpc
|
pjrpc/core.py
|
core.py
|
py
| 4,436 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70767464828
|
"""empty message
Revision ID: 4fa0d71e3598
Revises: bdcfc99aeebf
Create Date: 2021-07-31 23:47:02.420096
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4fa0d71e3598'
down_revision = 'bdcfc99aeebf'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('techniques', sa.Column('japanese_names', postgresql.ARRAY(sa.String()), nullable=True))
op.add_column('techniques', sa.Column('english_names', postgresql.ARRAY(sa.String()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('techniques', 'english_names')
op.drop_column('techniques', 'japanese_names')
# ### end Alembic commands ###
|
AbundantSalmon/judo-techniques-bot
|
judo_techniques_bot/migrations/versions/2021-07-31_4fa0d71e3598_.py
|
2021-07-31_4fa0d71e3598_.py
|
py
| 891 |
python
|
en
|
code
| 8 |
github-code
|
6
|
22656887021
|
import os
import sys
import pandas as pd
def programName():
return os.path.basename(sys.argv[0])
if len(sys.argv) == 1:
pileup = sys.stdin
elif len(sys.argv) == 2:
pileup = open(sys.argv[1], "rt")
else:
exit(f"{programName()} [pileup file]\n")
# THE COLUMNS IN THE MPILEUP OUTPUT ARE AS FOLLOWS
# ID
# CHR
# 1-BASED POSITION
# REF BASE (1=A,2=C,3=G,4=T) THIS IS FOR EASE OF DOWNSTREAM PROCESSING
# "A" COUNT
# "C" COUNT
# "G" COUNT
# "T" COUNT
reads = pd.read_csv(
pileup,
sep="\t",
header=0,
quotechar='"',
names=[
"id",
"chr",
"position",
"ref_base",
"a_count",
"c_count",
"g_count",
"t_count",
],
)
def ref_alt_count(row):
if row["ref_base"] == 1:
ref_count = row["a_count"]
alt_count = row[["c_count", "g_count", "t_count"]].max()
elif row["ref_base"] == 2:
ref_count = row["c_count"]
alt_count = row[["a_count", "g_count", "t_count"]].max()
elif row["ref_base"] == 3:
ref_count = row["g_count"]
alt_count = row[["a_count", "c_count", "t_count"]].max()
elif row["ref_base"] == 4:
ref_count = row["t_count"]
alt_count = row[["a_count", "c_count", "g_count"]].max()
return row["id"], ref_count, alt_count
ref_counts = reads.apply(ref_alt_count, axis=1, result_type="expand")
ref_counts.to_csv(sys.stdout, sep="\t", index=False, header=None)
|
ReddyLab/bird-workflow
|
01_mpileups/ref_counts/ref_counts.py
|
ref_counts.py
|
py
| 1,466 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17012330786
|
from flask import Flask, render_template, request, redirect, url_for
from pymongo import MongoClient
client = MongoClient(
"<mongo db cluter url>")
NameListDatabase = client.NameListDatabase
CollectionList = NameListDatabase.CollectionList
app = Flask(__name__)
def getallnames():
namelist = []
names = CollectionList.find({}, {"Name": 1, "_id": 0})
for name in names:
namelist.append(name["Name"])
return namelist
@app.route('/', methods=['POST', 'GET'])
def root():
getallnames()
if request.method == "POST":
return redirect(request.form["Name"])
return render_template('index.html', listofname=getallnames())
@app.route('/<name>/')
def fetchJson(name):
names = list(CollectionList.find({"Name": name}, {"_id": 0}))
nameListInStr = str(names)
if len(names) == 0:
return redirect(url_for("root"))
return nameListInStr
if __name__ == '__main__':
app.run(debug=True)
|
smartkeerthi/Python-MongoDB-Flask-Projects
|
Flask and pymongo/main.py
|
main.py
|
py
| 953 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24370481536
|
import unittest
class TestDataIO(unittest.TestCase):
def test_dataio(self):
from src.io.dataio import DataIO
from src.io.plot3dio import GridIO, FlowIO
# grid object
grid = GridIO('../data/shocks/shock_test.sb.sp.x')
grid.read_grid()
grid.compute_metrics()
# flow object
flow = FlowIO('../data/shocks/shock_test.sb.sp.q')
flow.read_flow()
# data module test
# data = DataIO(grid, flow, location='../data/shocks/particle_data/multi_process_test/')
data = DataIO(grid, flow, location='../data/shocks/particle_data/281nm_time_step_adaptive/',
read_file='../data/shocks/particle_data/281nm_time_step_adaptive/combined_file.npy')
# Increased refinement for better resolution
data.x_refinement = 500
data.y_refinement = 400
data.compute()
if __name__ == '__main__':
unittest.main()
|
kalagotla/project-arrakis
|
test/test_dataio.py
|
test_dataio.py
|
py
| 940 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37446552709
|
from metux.util.task import Task
from os import environ
from copy import copy
from subprocess import call
"""build for apt (docker-buildpackage)"""
class PkgBuildAptTask(Task):
"""[private]"""
def __init__(self, param):
Task.__init__(self, param)
self.target = param['target']
self.conf = param['conf']
self.pkg = param['pkg']
self.statfile = self.target.get_pkg_build_statfile(self.pkg)
def do_run(self):
pkg_name = self.pkg.name
target_name = self.target['target.name']
pool_name = self.target['pool.name']
dckbp_cmd = self.conf.get_dckbp_cmd()
env = copy(environ)
env['DCK_BUILDPACKAGE_TARGET_REPO'] = self.target['target.aptrepo']
env['DCK_BUILDPACKAGE_SOURCE'] = pkg_name
self.log_info('building "'+pkg_name+'" from '+pool_name+' for '+target_name)
if (call([dckbp_cmd, '--target', target_name],
cwd=self.pkg['package.src'],
env=env) != 0):
self.fail("build failed: "+pkg_name)
self.statfile.set(self.pkg.git_repo().get_head_commit())
return True
"""[override]"""
def need_run(self):
return not self.statfile.check(self.pkg.git_repo().get_head_commit())
def alloc(conf, pkg, target):
return conf.cached_task_alloc('build-pkg-apt:'+target['target.name']+':'+pkg.name, PkgBuildAptTask, { 'pkg': pkg, 'target': target })
|
LibreZimbra/librezimbra
|
deb_autopkg/tasks/pkg_build_apt.py
|
pkg_build_apt.py
|
py
| 1,455 |
python
|
en
|
code
| 4 |
github-code
|
6
|
24506022571
|
from mock import Mock, patch, ANY, sentinel
from nose.tools import ok_, eq_, raises, timed
from noderunner.client import Client, Context, Handle
from noderunner.connection import Connection
from noderunner.protocol import Protocol
class TestClient(object):
@patch("noderunner.client.get_sockets")
@patch("noderunner.client.open_process")
@patch("noderunner.client.Connection", spec=Connection)
@patch("noderunner.client.Protocol", spec=Protocol)
def _client(self, proto, con, proc, sock):
sock.return_value = (Mock(), Mock(), Mock())
return Client(), proto, con, proc, sock
def test_ctor(self):
c, proto, con, proc, sock = self._client()
proto.assert_called_once_with(con.return_value, ANY)
con.assert_called_once_with(ANY)
proc.assert_called_once_with(ANY, ANY)
sock.assert_called_once_with()
def test_eval(self):
c, proto, con, proc, sock = self._client()
c.eval(sentinel.code, sentinel.context)
p = proto.return_value
p.request_sync.assert_called_once_with("eval",
code=sentinel.code,
context=sentinel.context)
def test_stop(self):
c, proto, con, proc, sock = self._client()
c.stop()
proc.return_value.terminate.assert_called_once_with()
proto.return_value.stop.assert_called_once_with()
def test_context(self):
c, proto, con, proc, sock = self._client()
c.context(sentinel.name, sentinel.deps)
p = proto.return_value
p.request_sync.assert_called_once_with("mkcontext",
name=sentinel.name,
requirements=sentinel.deps)
def test_get(self):
c, proto, con, proc, sock = self._client()
c.get(sentinel.path, sentinel.context)
p = proto.return_value
p.request_sync.assert_called_once_with("get",
path=sentinel.path,
context=sentinel.context)
def test_set(self):
c, proto, con, proc, sock = self._client()
c.set(sentinel.path, sentinel.val, sentinel.context)
p = proto.return_value
p.request_sync.assert_called_once_with("set",
path=sentinel.path,
value=sentinel.val,
context=sentinel.context)
def test_call(self):
c, proto, con, proc, sock = self._client()
c.call(sentinel.path, sentinel.args, sentinel.context)
p = proto.return_value
p.request_sync.assert_called_once_with("call",
path=sentinel.path,
args=sentinel.args,
context=sentinel.context)
class TestContext(object):
def _context(self, name=sentinel.name):
mck = Mock()
return mck, Context(mck, name)
def test_eval(self):
mck, context = self._context()
context.eval(sentinel.code)
mck.eval.assert_called_once_with(sentinel.code,
context=sentinel.name)
def test_get(self):
mck, context = self._context()
context.get(sentinel.path)
mck.get.assert_called_once_with(ANY, sentinel.name)
def test_set(self):
mck, context = self._context()
context.set(sentinel.path, sentinel.value)
mck.set.assert_called_once_with(ANY,
sentinel.value,
sentinel.name)
def test_call(self):
mck, context = self._context()
context.call(sentinel.path, sentinel.args)
mck.call.assert_called_once_with(ANY,
sentinel.args,
sentinel.name)
def test_objects(self):
mck, context = self._context()
handle = context.objects
eq_(handle._context, context)
class TestHandle(object):
def test_call(self):
ctx = Mock()
ctx.call.return_value = sentinel.rtn
h = Handle(ctx)
eq_(h(sentinel.foo), sentinel.rtn)
ctx.call.assert_called_once_with((sentinel.foo,))
def test_attr_access(self):
ctx = Mock()
h = Handle(ctx)
handle2 = h.foobar
eq_(handle2._path, ["foobar"])
def test_item_access(self):
ctx = Mock()
h = Handle(ctx)
handle2 = h["foobar"]
eq_(handle2._path, ["foobar"])
def test_access_context_stays(self):
ctx = Mock()
h = Handle(ctx)
handle2 = h.foobar
eq_(handle2._context, ctx)
def test_get(self):
ctx = Mock()
ctx.get.return_value = sentinel.get
h = Handle(ctx)
eq_(h.get(), sentinel.get)
ctx.get.assert_called_once_with()
def test_attr_set(self):
ctx = Mock()
h = Handle(ctx)
h.key = sentinel.val
ctx.set.assert_called_once_with("key", sentinel.val)
def test_item_set(self):
ctx = Mock()
h = Handle(ctx)
h["key"] = sentinel.val
ctx.set.assert_called_once_with("key", sentinel.val)
|
williamhogman/noderunner
|
tests/test_client.py
|
test_client.py
|
py
| 5,456 |
python
|
en
|
code
| 6 |
github-code
|
6
|
44648575716
|
from flask_restful import Resource, reqparse
from flask_jwt import jwt_required
from models.item import ItemModel
class Item(Resource):
    parser = reqparse.RequestParser()  # Declares the required request fields (so clients must use exactly these key names)
parser.add_argument('price', type=float, required=True, help="This field cannot be left blank")
parser.add_argument('store_id', type=int, required=True, help="This field cannot be left blank")
@jwt_required()
def get(self, name):
row = ItemModel.find_by_name(name)
if row:
return row.json()
else:
return {'message': 'Item Not Found'}, 404
def post(self, name):
data = Item.parser.parse_args()
if ItemModel.find_by_name(name):
return {'message': 'A item with this name already exists'}, 400
item = ItemModel(name, data['price'], data['store_id'])
try:
#item.insert()
item.save_to_db()
except:
return {'message': 'An error occurred inserting the item'}, 500 # Internal Server Error
return item.json(), 201
@jwt_required()
def delete(self, name):
item = ItemModel.find_by_name(name)
if not item:
return {'message': 'No items exists with this name'}, 404
try:
item.delete()
except:
return {'message': 'An error occured deleting the item'}, 500
return {'message': 'Item Deleted'}, 200
@jwt_required()
def put(self, name):
data = Item.parser.parse_args()
item = ItemModel.find_by_name(name)
if item:
try:
#item = ItemModel(name, data['price'])
#item.update()
item.price = data['price']
item.store_id = data['store_id']
item.save_to_db()
except:
return {'message': 'An error occurred updating the item'}, 500
else:
try:
item = ItemModel(name, data['price'], data['store_id'])
#item.insert()
item.save_to_db()
except:
return {'message': 'An error occurred inserting the item'}, 500
return item.json(), 201
return item.json(), 200
class ItemList(Resource):
@jwt_required()
def get(self):
#connection = sqlite3.connect("data.db")
#cursor = connection.cursor()
#result = cursor.execute("SELECT * from Items")
#items = []
#for row in result:
# items.append({'name': row[1], 'price': row[2]})
#connection.close()
#return {'items': items}, 200
return {'items': [item.json() for item in ItemModel.query.all()]}
|
kgunda2493/test-api
|
resources/item.py
|
item.py
|
py
| 2,774 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36229561780
|
from typing import List
'''
452. Minimum Number of Arrows to Burst Balloons
https://leetcode.cn/problems/minimum-number-of-arrows-to-burst-balloons/
The balloons burst by a single arrow all satisfy: the right end of the leftmost balloon lies to the right of the left end of the rightmost balloon.
A greedy approach works: sort the balloons by their right end.
Track the right endpoint `end` of the balloon that opened the current arrow; once a balloon's left endpoint lies to the right of `end`, this arrow can no longer hit it and a new arrow is needed.
'''
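# Illustrative trace (added): for [[10,16],[2,8],[1,6],[7,12]], sorting by right end gives
# [1,6],[2,8],[7,12],[10,16]; with end=6 the first arrow covers [1,6] and [2,8]; [7,12]
# starts after 6, so a second arrow is needed (end=12), which also covers [10,16] -> answer 2.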
class Solution:
def findMinArrowShots(self, points: List[List[int]]) -> int:
points.sort(key=lambda x: x[1])
res = 1
end = points[0][1]
for st, en in points:
if st > end:
res += 1
end = en
return res
s = Solution()
print(s.findMinArrowShots([[10,16],[2,8],[1,6],[7,12]]))
|
z-w-wang/Leetcode-Problemlist
|
CS-Notes/Greedy/452.py
|
452.py
|
py
| 806 |
python
|
zh
|
code
| 3 |
github-code
|
6
|
73706384186
|
#https://en.wikipedia.org/wiki/UPGMA#Working_example
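# Input format (as read by the __main__ block below): the first line holds n, followed by an
# n x n tab-separated distance matrix, e.g. for a hypothetical n = 3:
#   0   3   5
#   3   0   4
#   5   4   0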
def findMinValue(matrix):
min = float('inf')
node1 = 0
node2 = 0
n = len(matrix)
for i in range(n-1):
for j in range(i+1,n):
if min > matrix[i][j]:
min = matrix[i][j]
node1 = i
node2 = j
return min, node1, node2
def UPGMA(matrix,n):
    # initialization
originalMatrix = matrix[:]
clusters = []
nodeAges = {}
for i in range(n):
clusters.append((0,[i]))
nodeAges[i] = 0
clusterNodeIDs = [i for i in range(n)]
nextNodeID = n
edges = set()
while len(matrix) > 1:
print("The current matrix is")
print(matrix)
min, node1, node2 = findMinValue(matrix)
print("current nodes to eliminate")
print(node1,node2)
nextNodeAge = min/2
nodeAges[nextNodeID] = nextNodeAge
print("current Age")
print(nodeAges)
#updateEdges
edges.add((clusterNodeIDs[node1],nextNodeID))
edges.add((clusterNodeIDs[node2],nextNodeID))
print("the current edges are")
print(edges)
#update clusterNodeID
print("clusterID before update")
print(clusterNodeIDs)
remove1 = clusterNodeIDs[node1]
remove2 = clusterNodeIDs[node2]
clusterNodeIDs.remove(remove1)
clusterNodeIDs.remove(remove2)
clusterNodeIDs.append(nextNodeID)
print("current cluster node ID")
print(clusterNodeIDs)
#update clusters
newCluster = (nextNodeAge,clusters[node1][1] + clusters[node2][1])
remove1 = clusters[node1]
remove2 = clusters[node2]
clusters.remove(remove1)
clusters.remove(remove2)
clusters.append(newCluster)
print("current cluster is")
print(clusters)
#create a list of node to visit (remove the identified nodes)
nodesCurrentMatrix = [i for i in range(len(matrix))]
nodesCurrentMatrix.remove(node1)
nodesCurrentMatrix.remove(node2)
print("nodes to visit")
print(nodesCurrentMatrix)
#initiate new matrix
newMatrix = [[0]*(len(nodesCurrentMatrix)+1) for i in range(len(nodesCurrentMatrix)+1)]
#update the next matrix
for i in range(len(newMatrix)-2):
for j in range(i+1,len(newMatrix)-1):
index1 = nodesCurrentMatrix[i]
index2 = nodesCurrentMatrix[j]
newMatrix[i][j] = matrix[index1][index2]
newMatrix[j][i] = newMatrix[i][j]
#update the next matrix: recalculate the distance to the new cluster
for i in range(len(newMatrix)-1):
j = len(newMatrix) -1
cluster1tomerge = clusters[i][1]
cluster2tomerge = clusters[j][1]
sum = 0
for node1 in cluster1tomerge:
for node2 in cluster2tomerge:
sum = sum + originalMatrix[node1][node2]
average = sum / (len(cluster1tomerge)*len(cluster2tomerge))
newMatrix[i][j] = average
newMatrix[j][i] = newMatrix[i][j]
# matrix[i][len(matrix)-1] =
matrix = newMatrix[:]
nextNodeID += 1
print()
return edges,nodeAges
if __name__ == '__main__':
with open("dataset_10332_8.txt","r") as f:
n = int(f.readline().strip())
matrixInput = f.readlines()
#generate initial Matrix
matrix = []
for i in matrixInput:
rowList = []
for j in i.strip().split("\t"):
rowList.append(int(j))
matrix.append(rowList)
edges, nodeAges = UPGMA(matrix,n)
newedges = set()
for edge in edges:
newedges.add(edge)
newedges.add((edge[1],edge[0]))
newedges= sorted(newedges, key = lambda x:(x[0],x[1]))
print(newedges)
with open("results_UPGMA.txt",'w') as f:
for edge in newedges:
f.write("{}->{}:{:.3f}\n".format(edge[0],edge[1],abs(nodeAges[edge[0]]-nodeAges[edge[1]])))
|
haozeyu24/pythonCodeExamples
|
UPGMA.py
|
UPGMA.py
|
py
| 4,249 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72489561467
|
import pdb
import sys
sys.path.append( '..' )
from copy import copy, deepcopy
import kivy.graphics as kg
from kivy.lang import Builder
from kivy.properties import *
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
#KV Lang files
from pkg_resources import resource_filename
path = resource_filename( __name__, 'labels.kv' )
Builder.load_file( path )
TOP_LEFT, LEFT, BOTTOM_LEFT = 0, 1, 2
TOP, BOTTOM, CENTER = 3, 4, 5
TOP_RIGHT, RIGHT, BOTTOM_RIGHT = 6, 7, 8
class BindedLabel( Label ) :
'''
Standard label with some additions :
- Binded text_size to size ( so you can center text )
- Background color
- Some more user-friendly padding usage
'''
fill_color = ListProperty( [0,0,0,0] )
def __init__( self, **kargs ) :
kargs['valign'] = kargs['valign'] if 'valign' in kargs.keys() else 'middle'
kargs['halign'] = kargs['halign'] if 'halign' in kargs.keys() else 'center'
if 'text' not in kargs.keys() : kargs['text'] = u''
super( BindedLabel, self ).__init__( **kargs )
self.bind( size=self.setter('text_size') )
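# Minimal usage sketch (hypothetical values, not part of the original file):
#   lbl = BindedLabel( text='Speed', halign='left', fill_color=[0.2, 0.2, 0.2, 1] )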
class ResizeableLabel( BindedLabel ) :
'''
User-resizeable label.
'''
hover_color = ListProperty( [0,0,0,1] )
'''
A widget is displayed to show the new size of the label.
It's filled with this color.
'''
root_layout = ObjectProperty( None )
'''
The 'hover' is drawn on the root layout due to possible size mismatch.
You'll need to provide a link to your root layout.
'''
on_new_size = ObjectProperty( None )
'''
Called by on_size method whenever the size of the label changes.
'''
meta = ObjectProperty( None )
'''
Passed as argument to on_new_size, use it as you wish...
'''
min_width = NumericProperty( 50 )
'''
Label minimum width.
'''
_o = ListProperty( [0,0] )
_d = ListProperty( [0,0] )
_hover_size = ListProperty( [0,0] )
_hover_pos = ListProperty( [0,0] )
def __init__( self, **kargs ) :
super( ResizeableLabel, self ).__init__( **kargs )
self._touched = False
self._unique_group = { 'group':'__resizeable_label_%d' % (id(self)) }
def on_touch_down( self, touch ) :
self._touched = False
if ( ( self.pos[0] < touch.pos[0] < self.pos[0]+self.width ) and
( self.pos[1] < touch.pos[1] < self.pos[1]+self.height ) ) :
self._touched = True
self._o = touch.pos
self._pivot = self._get_pivot()
return True
def on_touch_move( self, touch ) :
if self._touched :
self._d = touch.pos
self._hover_size, self._hover_pos = self._get_hover()
if self.root_layout :
self._clear_canvas()
with self.root_layout.canvas :
kg.Color( *self.hover_color, **self._unique_group )
kg.Rectangle(
size=self._hover_size, \
pos=self._hover_pos, \
**self._unique_group
)
return True
def on_touch_up( self, touch ) :
if self._touched :
self._clear_canvas()
self._o = []
if self._hover_size[0] > self.min_width :
self._on_size( self.size, self._hover_size )
return True
def _on_size( self, oldsize, newsize ) :
print( 'Size changed' )
if self.on_new_size : self.on_new_size( oldsize, newsize, self.meta )
self.size = copy( newsize )
def _get_pivot( self ) :
tx, ty = abs(self._o[0]-self.pos[0]), abs(self._o[1]-self.pos[1])
ox, oy = tx/self.size[0], ty/self.size[1]
if ox < 0.33 :
x = 0
elif ox < 0.66 :
x = 3
else :
x = 6
return x +1
"""
if oy > 0.66 :
return x + 0
elif oy > 0.33 :
return x + 1
else :
return x + 2
"""
def _get_hover( self ) :
dx = self._d[0] - self._o[0]
dy = self._d[1] - self._o[1]
if self._pivot == RIGHT :
return [self.size[0]+dx, self.size[1]], self.pos
return self.size, self.pos
def _clear_canvas( self ) :
self.root_layout.canvas.remove_group( self._unique_group['group'] )
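# Minimal usage sketch (hypothetical names, not part of this module): the label needs a
# reference to the root layout so the resize 'hover' rectangle can be drawn on it.
#   label = ResizeableLabel( text='drag to resize', root_layout=my_root_layout, min_width=80 )
#   my_root_layout.add_widget( label )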
|
curzel-it/kivy-material-ui
|
material_ui/flatui/labels.py
|
labels.py
|
py
| 4,448 |
python
|
en
|
code
| 67 |
github-code
|
6
|
13502913819
|
import random
import time
import asyncio
def timer(func):
def _wrapper(*args):
print(time.ctime())
func(*args)
print(time.ctime())
return _wrapper
@timer
def insert_sort(sequence):
i = 1
while i < len(sequence):
if sequence[i] < sequence[i-1]:
d = sequence[i]
sequence[i] = sequence[i-1]
j = i - 1
while d < sequence[j] and j >= 0:
sequence[j+1] = sequence[j]
j -= 1
sequence[j+1] = d
i += 1
@timer
def bi_insert_sort(sequence):
i = 1
while i < len(sequence):
d = sequence[i]
low, high = 0, i-1
while low <= high:
m = int((low + high)//2)
if sequence[m] < d:
low = m+1
else:
high = m-1
j = i - 1
while j >= high:
sequence[j+1] = sequence[j]
j -= 1
sequence[high+1] = d
i += 1
@timer
def shell_sort(sequence):
    step = len(sequence) // 2
    while step > 0:
        # gapped insertion sort: compare and shift elements that are `step` apart
        i = step
        while i < len(sequence):
            d = sequence[i]
            j = i - step
            while j >= 0 and sequence[j] > d:
                sequence[j+step] = sequence[j]
                j -= step
            sequence[j+step] = d
            i += 1
        step = step // 2
@timer
def bubble_sort(sequence):
i, l = 0, len(sequence)
while i < l-1:
j = 0
while j < l-i-1:
if sequence[j] > sequence[j+1]:
sequence[j+1], sequence[j] = sequence[j], sequence[j+1]
j += 1
i += 1
@timer
def quick_sort(sequence):
def _partion(sequence, low, high):
pivot = sequence[low]
while low < high:
while low < high and sequence[high] >= pivot:
high -= 1
sequence[low] = sequence[high]
while low < high and sequence[low] <= pivot:
low += 1
sequence[high] = sequence[low]
sequence[low] = pivot
return low
def _quick_sort(sequence, low, high):
if low < high:
pivotloc = _partion(sequence, low, high)
            _quick_sort(sequence, low, pivotloc-1)
_quick_sort(sequence, pivotloc+1, high)
_quick_sort(l, 0, len(l)-1)
if __name__ == '__main__':
# l = list(range(10000, 0, -1))
# insert_sort(l)
# l = list(range(10000, 0, -1))
# l = list(range(10000, 0, -1))
# bi_insert_sort(l)
l = list(range(3, 0, -1))
quick_sort(l)
|
owhz/SimpleDataStructure
|
sort.py
|
sort.py
|
py
| 2,564 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13295958598
|
import vtk
import numpy as np
import struct
# def save_vf(self, filename):
# """ Write the vector field as .vf file format to disk. """
# if not np.unique(self.resolution).size == 1:
# raise ValueError("Vectorfield resolution must be the same for X, Y, Z when exporting to Unity3D.")
# file_handle = open(filename, 'wb')
# for val in [b'V', b'F', b'_', b'V',
# struct.pack('H', self.resolution[0]),
# struct.pack('H', self.resolution[1]),
# struct.pack('H', self.resolution[2])]:
# file_handle.write(val)
# # Layout data in required order.
# u_stream = self.u.flatten('F')
# v_stream = self.v.flatten('F')
# w_stream = self.w.flatten('F')
# for i in range(u_stream.size):
# file_handle.write(struct.pack('f', v_stream[i]))
# file_handle.write(struct.pack('f', u_stream[i]))
# file_handle.write(struct.pack('f', w_stream[i]))
# file_handle.close()
if __name__ == '__main__':
path = "E:\\VIS22\\Assign3\\Data_Assign3\\Data_Assign3\\"
#input_file_name = "bernard3D_Q.vtk"
input_file_name = "FullHead.mhd"
input_file_name = path + input_file_name
if ".mhd" in input_file_name: #The input file is MetaImageData
input_type = "mhd"
reader = vtk.vtkMetaImageReader()
reader.SetFileName(input_file_name)
reader.Update()
elif ".vtk" in input_file_name: # The input file is VTK
input_type = "vtk"
reader = vtk.vtkDataSetReader()
reader.SetFileName(input_file_name)
reader.Update()
poly = reader.GetOutput()
scalars = poly.GetPointData().GetScalars()
array = np.array(reader.GetOutput().GetPointData().GetScalars())
print(len(array))
print(poly.GetScalarRange()[0])
print(poly.GetScalarRange()[1])
dimension = poly.GetDimensions()
print(dimension)
#print(poly.GetPointData())
ini_file_name = input_file_name + ".raw.ini"
file_handle = open(ini_file_name, 'w')
file_handle.write("dimx:" + str(dimension[0]) +"\n")
file_handle.write("dimy:" + str(dimension[1])+"\n")
file_handle.write("dimz:" +str(dimension[2])+"\n")
file_handle.write("skip:0"+"\n")
file_handle.write("format:int32"+"\n")
file_handle.close()
file_name = input_file_name + ".raw.txt"
file_handle = open(file_name, 'w')
print(array[0])
for i in range(len(array)):
file_handle.write(str(array[i]) +"\n")
file_handle.close()
file_name_raw = input_file_name + ".raw"
file_handle = open(file_name_raw, 'wb')
print(array[0])
for i in range(len(array)):
file_handle.write(struct.pack('i', (int)(array[i])))
file_handle.close()
|
maysie0110/COSC6344-FinalProject
|
write_raw_file.py
|
write_raw_file.py
|
py
| 2,862 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12061356200
|
import tweepy
from textblob import TextBlob
consumer_key = 'EjXTChxrOmEWULyuuJ8iDXdyQ'
consumer_secret = 'NrtHvELXi0i6dtue39icLkrT3rrrUVHKWOlHWWGJm46LQGell5'
access_token = '1425159876-T5yoGiyxFk2sAdsZNjGVLRa94988APPcV4TI7R6'
access_token_secret = 'JsCnvZPbnn93qefEM187dPnUcdCn5pby220IiU3D1aKam'
auth =tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
query = raw_input("Type the query .\n")
#print(query)
public_tweets = api.search(query)
for tweet in public_tweets:
print('------------------------------------------------------------------')
print(tweet.text)
analysis = TextBlob(tweet.text)
print(analysis.sentiment)
print('------------------------------------------------------------------')
|
HirdyaNegi/Senti2weet
|
test.py
|
test.py
|
py
| 803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32907694123
|
# 1 Add the usual reports
from sklearn.metrics import classification_report
y_true = [1, 0, 0, 2, 1, 0, 3, 3, 3]
y_pred = [1, 1, 0, 2, 1, 0, 1, 3, 3]
target_names = ['Class-0', 'Class-1', 'Class-2', 'Class-3']
print(classification_report(y_true, y_pred, target_names=target_names))
# 2 Run the code and see
# Instead of computing these metrics separately, you can directly
# use the preceding function to extract those statistics from your model.
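# 3 For comparison, a minimal sketch of computing the same statistics one at a time
#   (uses the y_true/y_pred defined above):
from sklearn.metrics import precision_score, recall_score, f1_score
print(precision_score(y_true, y_pred, average='weighted'))
print(recall_score(y_true, y_pred, average='weighted'))
print(f1_score(y_true, y_pred, average='weighted'))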
|
IbrahimOued/Python-Machine-Learning-cookbook
|
2 Constructing a Classifier/performance_report.py
|
performance_report.py
|
py
| 447 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41550521554
|
"""
Send a restart signal to a BiblioPixel process running on this
machine.
DEPRECATED: use
.. code-block:: bash
$ kill -hup `bpa-pid`
"""
DESCRIPTION = """
Example: ``$ bp restart``
"""
from .. util.signal_handler import make_command
add_arguments, run = make_command('SIGHUP', ' Default SIGHUP restarts bp.')
|
ManiacalLabs/BiblioPixel
|
bibliopixel/commands/restart.py
|
restart.py
|
py
| 323 |
python
|
en
|
code
| 263 |
github-code
|
6
|
74337793468
|
# Name : Jiazhao Li Unique name: jiazhaol
import numpy as np
from sklearn import preprocessing
import sys
from sklearn import tree
def load_train_data(filename):
SBD_traindata_list = []
with open(filename, 'r') as f:
for line in f:
line = line.strip('\n')
word = line.split(' ')
SBD_traindata_list.append([word[0], word[1], word[2]])
return SBD_traindata_list
def load_test_data(filename):
SBD_testdata_list = []
with open(filename,'r') as f:
for line in f:
line = line.strip('\n')
word = line.split(' ')
SBD_testdata_list.append([word[0], word[1], word[2]])
return SBD_testdata_list
def feature_label(data_list, mode):
feature = []
label = []
index = 0
for pair in data_list:
if pair[2] == 'EOS' or pair[2] == 'NEOS':
# label list
if pair[2] == 'EOS':
label.append(1)
else:
label.append(0)
# label vacab
L = data_list[index][1][:-1]
if index == len(data_list)-1:
R = ' '
else:
R = data_list[index + 1][1]
len_L = int(len(L) < 3)
if L =='':
L_Cap = 0
else:
L_Cap = int(L[0].isupper())
R_Cap = int(R[0].isupper())
# own features
LL_len = int(len(data_list[index-1][1]) > 3)
if index == len(data_list)-2 or index == len(data_list)-1:
RR_len = 0
else:
RR_len = int(len(data_list[index+1][1]) > 3)
L_Cap_num = 0
for l in L :
if l.isupper():
L_Cap_num += 1
L_Cap_num = int(L_Cap_num > 3)
if mode == 'CoreFeature':
feature.append([L, R, len_L, L_Cap, R_Cap])
elif mode == "OwnThree":
feature.append([LL_len, RR_len, L_Cap_num])
elif mode == 'CoreOwn':
feature.append([L, R, len_L, L_Cap, R_Cap, LL_len, RR_len, L_Cap_num])
index += 1
return feature, label
# encode feature vector of
def encode_feature(train_feature,test_feature):
word_dict = {}
index = 2
for pair in train_feature:
if pair[0] not in word_dict:
word_dict[pair[0]] = index
index += 1
if pair[1] not in word_dict:
word_dict[pair[1]] = index
index += 1
for pair in test_feature:
if pair[0] not in word_dict:
word_dict[pair[0]] = index
index += 1
if pair[1] not in word_dict:
word_dict[pair[1]] = index
index += 1
# substitute the feature vetor:
for pair in train_feature:
pair[0] = word_dict[pair[0]]
pair[1] = word_dict[pair[1]]
for pair in test_feature:
pair[0] = word_dict[pair[0]]
pair[1] = word_dict[pair[1]]
Train_len = len(train_feature)
all = train_feature + test_feature
ohe = preprocessing.OneHotEncoder() # Easier to read
ohe.fit(all)
Feature = ohe.transform(all).toarray()
TrainEncode = Feature[:Train_len,:]
TestEncode = Feature[Train_len:, :]
return TrainEncode, TestEncode
def generate_outfile(SBDTestList, test_predict):
with open('SBD.test.out', 'w') as f:
test_predict_cate = []
for label in test_predict:
if label == 1:
test_predict_cate.append('EOS')
else:
test_predict_cate.append('NEOS')
f.write(mode + '\n')
num = 0
for pair in SBDTestList:
if pair[2] == "EOS" or pair[2] == 'NEOS':
f.write(" ".join([pair[0], pair[1], test_predict_cate[num]]))
f.write('\n')
num += 1
else:
f.write(" ".join([pair[0], pair[1], pair[2]]))
f.write('\n')
if __name__ == '__main__':
# train = "SBD.train"
# test = "SBD.test"
train = sys.argv[1]
test = sys.argv[2]
SBDTrainList = load_train_data(train)
SBDTestList = load_test_data(test)
ModeList = ['CoreFeature', "OwnThree", 'CoreOwn']
# ModeList = ['CoreFeature']
for mode in ModeList:
train_feature, train_label = feature_label(SBDTrainList, mode)
test_feature, test_label = feature_label(SBDTestList, mode)
TrainEncode, TestEncode = encode_feature(train_feature, test_feature)
# train the Dicision Tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(TrainEncode, train_label)
train_acc = clf.score(TrainEncode, train_label)
test_acc = clf.score(TestEncode, test_label)
test_predict = clf.predict(TestEncode)
print(mode)
print("train_acc: " + str(train_acc))
print("test_acc: " + str(test_acc))
if mode == 'CoreOwn':
generate_outfile(SBDTestList, test_predict)
|
JiazhaoLi/Assignment
|
EECS595/Assignment1/hw1/SBD.py
|
SBD.py
|
py
| 5,015 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39399051547
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
'''create DataFrame
A DataFrame is a data table, similar to an Excel sheet.
Key points:
1. It is a collection of Series.
2. Differences from a Series:
    2.1 A Series uses a custom index as row labels, i.e. a one-dimensional list.
    2.2 A DataFrame treats a custom index as row labels in the same way a Series does.
    2.3 Besides rows, a DataFrame also provides columns; each column is a Series, so a DataFrame is a collection of Series tied together by its columns.
3. A DataFrame supports fancy indexing and provides an API that makes data handling convenient.
4. Within one DataFrame (table) the data should ideally share the same columns; rows reflect how much data there is, columns reflect its attributes and structure.
5. In a DataFrame every row is one complete record, and each column should hold a single data type, representing one attribute of that record.
'''
# index: datetime list, columns: list, data: ndarray
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
print(df)
# Create the DataFrame from a dict; the columns have different dtypes and missing values are filled automatically:
# data:dict_value, index:auto_create_index, columns:dict_key
data1 = {
'A': 1.,
'B': pd.Timestamp('20130102'),
'C': pd.Series(1, index=list(range(5)), dtype='float32'),
'D': np.array([3] * 5, dtype='int32'),
'E': pd.Categorical(['test', 'train', 'test', 'train', 'train']),
'F': 'foo'
}
df2 = pd.DataFrame(data1)
print(df2)
print(df2.dtypes)
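# Each column of a DataFrame is itself a Series, e.g.:
print(type(df2['C']))  # <class 'pandas.core.series.Series'>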
|
xiongliyu/practice_python
|
pandas/create_dateframe.py
|
create_dateframe.py
|
py
| 1,599 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
3361019377
|
"""
222. 完全二叉树的节点个数
给你一棵 完全二叉树 的根节点 root ,求出该树的节点个数。
完全二叉树 的定义如下:在完全二叉树中,除了最底层节点可能没填满外,其余每层节点数都达到最大值,并且最下面一层的节点都集中在该层最左边的若干位置。若最底层为第 h 层,则该层包含 1~ 2h 个节点。
输入:root = [1,2,3,4,5,6]
输出:6
"""
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
import math
class Solution(object):
def countNodes(self, root):
l = r = root
        hl, hr = 0, 0 # heights going down the left spine and the right spine
while l is not None:
l = l.left
hl += 1
while r is not None:
r = r.right
hr += 1
        # if the two heights are equal, the tree is a perfect binary tree with 2^h - 1 nodes
        if hl == hr:
            return int(math.pow(2, hl)) - 1
        # otherwise fall back to counting recursively, as for an ordinary binary tree
        return 1 + self.countNodes(root.left) + self.countNodes(root.right)
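# Usage sketch (assumes the TreeNode class above is uncommented):
# root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3, TreeNode(6)))
# print(Solution().countNodes(root))  # 6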
|
ustcjiajing/python_test
|
count_nodes.py
|
count_nodes.py
|
py
| 1,198 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
15551833066
|
'''
Given two strings s and t, check if s is a subsequence of t.
A subsequence of a string is a new string that is formed from the original string by deleting some (can be none) of the characters without disturbing
the relative positions of the remaining characters. (i.e., "ace" is a subsequence of "abcde" while "aec" is not).
Example 1:
Input: s = "abc", t = "ahbgdc"
Output: true
Example 2:
Input: s = "axc", t = "ahbgdc"
Output: false
'''
# Two Pointers (left for source, right for target)
# If source[left] == target[right] found a match & move both pointers one step forward.
# source[left] != target[right] no match and move only right pointer on target string
# TC O(T) where T is Target string length, Space O(1)
class Solution(object):
def isSubsequence(self, s, t):
ptr_left, ptr_right = 0, 0
while ptr_left < len(s) and ptr_right < len(t):
if s[ptr_left] == t[ptr_right]:
ptr_left += 1
ptr_right += 1
return ptr_left == len(s)
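# Usage sketch:
# print(Solution().isSubsequence("abc", "ahbgdc"))  # True
# print(Solution().isSubsequence("axc", "ahbgdc"))  # False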
|
ojhaanshu87/LeetCode
|
392_is_subseqence.py
|
392_is_subseqence.py
|
py
| 1,014 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27035685049
|
"""Json module"""
import json
def handler(event, _context):
"""
Lambda Handler
Parameters
----------
event : dict
An event
Returns
-------
dict
The response object
"""
print(f"request: {json.dumps(event)}")
return {
"statusCode": 200,
"headers": {"Content-Type": "application/json"},
"body": json.dumps({ "hello": f"Hello World from Python! Handler at {event['path']}"})
}
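# Local invocation sketch (hypothetical event shape, not part of the deployed handler):
# print(handler({"path": "/hello"}, None))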
|
jhonrocha/aws-cdk-explorations
|
lambda/play-py/main.py
|
main.py
|
py
| 464 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30848964562
|
import math
n=int(input(""))
ar = list(map(int, input().strip().split(' ')))
ar.sort()
ar.reverse()
s4=0
s3=0
s2=0
s1=0
taxi =0
for i in ar:
if(i==4):
s4=s4+1
elif(i==3):
s3=s3+1
elif(i==2):
s2=s2+1
else:
s1=s1+1
taxi = taxi+s4
if(s2%2 == 0):
taxi=taxi + s2/2
else:
taxi=taxi + s2/2+1
if(s1>0):
s1=s1-2
taxi = taxi +s3
if(s1>=s3>=0):
    s1=s1-s3
else:
    # all remaining single passengers already fit in with the groups of three
    s1=0
if(s1>0):
taxi = taxi + math.ceil(s1/4)
print(int(taxi))
|
YashTelkhade/Codeforces-solution
|
Taxi.py
|
Taxi.py
|
py
| 502 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32927804563
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Construct templates and categories for Tekniska museet data.
"""
from collections import OrderedDict
import os.path
import csv
import pywikibot
import batchupload.listscraper as listscraper
import batchupload.common as common
import batchupload.helpers as helpers
from batchupload.make_info import MakeBaseInfo
MAPPINGS_DIR = 'mappings'
IMAGE_DIR = 'Curman'
# stem for maintenance categories
BATCH_CAT = 'Media contributed by Tekniska museet'
BATCH_DATE = '2017-10' # branch for this particular batch upload
LOGFILE = "Tekniska.log"
class TekniskaInfo(MakeBaseInfo):
def load_wd_value(self, qid, props, cache=None):
if cache and qid in cache:
return cache[qid]
data = {}
wd_item = pywikibot.ItemPage(self.wikidata, qid)
wd_item.exists() # load data
for pid, label in props.items():
value = None
claims = wd_item.claims.get(pid)
if claims:
value = claims[0].getTarget()
data[label] = value
if cache:
cache[qid] = data
return data
def __init__(self, **options):
super(TekniskaInfo, self).__init__(**options)
self.batch_cat = "{}: {}".format(BATCH_CAT, BATCH_DATE)
self.commons = pywikibot.Site('commons', 'commons')
self.wikidata = pywikibot.Site('wikidata', 'wikidata')
self.log = common.LogFile('', LOGFILE)
self.photographer_cache = {}
self.category_cache = []
def load_data(self, in_file):
return common.open_and_read_file(in_file, as_json=False)
def generate_content_cats(self, item):
# to do -- generate cats from keywords
item.generate_place_cats()
return [x for x in list(item.content_cats) if x is not None]
def generate_filename(self, item):
id_no = item.id_no
title = item.image_title
provider = "TEKM"
return helpers.format_filename(
title, provider, id_no)
def generate_meta_cats(self, item, cats):
cats = set(item.meta_cats)
cats.add(self.batch_cat)
return list(cats)
def get_original_filename(self, item):
# should be updated if files named with another field
return item.id_no
def load_mappings(self, update_mappings):
concrete_motif_file = os.path.join(MAPPINGS_DIR, 'concrete_motif.json')
concrete_motif_page = 'Commons:Tekniska museet/Curman/mapping title'
geo_file = os.path.join(MAPPINGS_DIR, 'geo.json')
geo_page = 'Commons:Tekniska museet/Curman/mapping location'
keywords_file = os.path.join(MAPPINGS_DIR, 'keywords.json')
keywords_page = 'Commons:Tekniska museet/Curman/mapping amnesord'
if update_mappings:
print("Updating mappings...")
self.mappings['concrete_motif'] = self.get_concrete_motif_mapping(
concrete_motif_page)
common.open_and_write_file(concrete_motif_file, self.mappings[
'concrete_motif'], as_json=True)
self.mappings['geo'] = self.get_geo_mapping(geo_page)
common.open_and_write_file(geo_file, self.mappings[
'geo'], as_json=True)
self.mappings['keywords'] = self.get_keywords_mapping(keywords_page)
common.open_and_write_file(keywords_file, self.mappings[
'keywords'], as_json=True)
else:
self.mappings['concrete_motif'] = common.open_and_read_file(
concrete_motif_file, as_json=True)
self.mappings['geo'] = common.open_and_read_file(
geo_file, as_json=True)
self.mappings['keywords'] = common.open_and_read_file(
keywords_file, as_json=True)
pywikibot.output('Loaded all mappings')
def get_concrete_motif_mapping(self, page):
motifs = {}
page = pywikibot.Page(self.commons, page)
data = listscraper.parseEntries(
page.text,
row_t='User:André Costa (WMSE)/mapping-row',
default_params={'name': '', 'category': '', 'frequency': ''})
for entry in data:
if entry['category'] and entry['name']:
category = entry['category'][0]
name = entry['name'][0]
motifs[name] = category
return motifs
def get_keywords_mapping(self, p):
keywords = {}
page = pywikibot.Page(self.commons, p)
data = listscraper.parseEntries(
page.text,
row_t='User:André Costa (WMSE)/mapping-row',
default_params={'name': '', 'category': '', 'frequency': ''})
for entry in data:
if entry['category'] and entry['name']:
category = entry['category'][0]
name = entry['name'][0]
keywords[name] = category
return keywords
def get_geo_mapping(self, p):
page = pywikibot.Page(self.commons, p)
data = listscraper.parseEntries(
page.text,
row_t='User:André Costa (WMSE)/mapping-row',
default_params={'name': '', 'wikidata': '', 'frequency': ''})
geo_ids = {}
for entry in data:
if entry['wikidata'] and entry['name']:
wikidata = entry['wikidata'][0]
name = entry['name'][0]
if wikidata != '-':
geo_ids[name] = wikidata
# look up data on Wikidata
props = {'P373': 'commonscat'}
geo = {}
for name, qid in geo_ids.items():
geo[name] = self.load_wd_value(
qid, props)
geo["wd"] = qid
return geo
def make_info_template(self, item):
template_name = 'Photograph'
template_data = OrderedDict()
template_data['title'] = item.generate_title()
template_data['description'] = item.generate_description()
template_data['photographer'] = "{{Creator:Sigurd Curman}}"
template_data['department'] = ("Sigurd Curmans arkiv / "
"Tekniska museet (SC-K1-1)")
# template_data['date'] = item.generate_date()
template_data['permission'] = item.generate_license()
template_data['ID'] = item.generate_id()
template_data['source'] = item.generate_source()
return helpers.output_block_template(template_name, template_data, 0)
def process_data(self, raw_data):
d = {}
reader = csv.DictReader(raw_data.splitlines(), dialect='excel-tab')
tagDict = {
"image_title": "Titel",
"id_no": "Identifikationsnr",
"description": "Motiv-beskrivning",
"location": "Avbildade - orter",
"alt_id_no": "Alternativt nummer-Institutionsintern katalog/lista"
}
for r in reader:
rec_dic = {}
for tag in tagDict:
column_name = tagDict[tag]
value = r[column_name]
rec_dic[tag] = value.strip()
id_no = rec_dic["id_no"]
d[id_no] = TekniskaItem(rec_dic, self)
self.data = d
class TekniskaItem(object):
def __init__(self, initial_data, info):
for key, value in initial_data.items():
setattr(self, key, value)
self.wd = {}
self.content_cats = set()
self.meta_cats = set()
self.info = info
self.commons = pywikibot.Site('commons', 'commons')
def generate_geo_cat(self):
cats = self.info.mappings["geo"]
if self.location in cats.keys():
cat = cats[self.location].get("commonscat")
self.content_cats.add(cat)
def generate_place_cats(self):
has_specific_place = False
cats = self.info.mappings["concrete_motif"]
if self.image_title in cats.keys():
concr_cat = cats.get(self.image_title)
self.content_cats.add(concr_cat)
has_specific_place = True
if not has_specific_place:
self.generate_geo_cat()
def generate_description(self):
if self.description:
swedish = "{{{{sv|{}}}}}".format(self.description)
return swedish
def generate_title(self):
return "{{{{sv|{}}}}}".format(self.image_title)
def generate_source(self):
return "{{Tekniska museet cooperation project}}"
def generate_id(self):
return '{{TEKM-link|' + self.id_no + '}}'
def generate_license(self):
return "{{PD-old-70}}"
if __name__ == '__main__':
TekniskaInfo.main()
|
Vesihiisi/TEKM-import
|
info_tekniska.py
|
info_tekniska.py
|
py
| 8,639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73041455547
|
from queue import Queue
class AdjacentMatrixGraph:
def __init__(self, edges, vertexList=None):
self.edges = edges
self.vertexList = vertexList
def eachVertexesMinDist(self):
size = len(self.edges)
dist = [[float('inf') for i in range(0, size)] for j in range(0,size)]
path = [[-1 for i in range(0, size)] for j in range(0,size)]
for i in range(0, size):
for j in range(0, size):
if self.edges[i][j] > 0:
dist[i][j] = self.edges[i][j]
path[i][j] = i
for k in range(0, size):
for i in range(0, size):
if i != k:
for j in range(0, size):
if j != k and i != j and dist[i][k]< float('inf') \
and dist[k][j] < float('inf') and dist[i][k] + dist[k][j] < dist[i][j]:
dist[i][j] = dist[i][k] + dist[k][j]
path[i][j] = path[k][j]
return dist
class AdjacentArrayGraph:
def __init__(self, vertexes):
self.vertexes = vertexes
self.visitCount = {}
for i in vertexes:
self.visitCount[i.verName] = 0
def resetVisitCount(self):
for i in vertexes:
self.visitCount[i.verName] = 0
def depthFirstSearch(self, startAdj):
if self.visitCount[startAdj] == 0 :
print(vertexes[startAdj])
self.visitCount[startAdj] = 1
edge = vertexes[startAdj].next
while edge:
self.depthFirstSearch(edge.verAdj)
edge = edge.next
def depthFirstSearchStack(self, startAdj):
'''Depth first search implemented by stack
Simple and elegant.'''
stack = []
stack.append(startAdj)
while len(stack) > 0:
ver = stack.pop()
if self.visitCount[ver] == 0:
print(vertexes[ver])
self.visitCount[ver] = 1
reverseLink = []
edge = vertexes[ver].next
while edge:
reverseLink.insert(0,edge)
edge = edge.next
for i in reverseLink:
stack.append(i.verAdj)
def depthFirstSearchStack1(self, startAdj):
'''Depth first search implemented by stack
Another implementation,not that good.'''
stack= []
cur = vertexes[startAdj]
while len(stack) > 0 or cur:
while cur and self.visitCount[cur.verName] == 0:
print(cur)
self.visitCount[cur.verName] = 1
stack.append(cur)
edge = cur.next
if edge:
cur = vertexes[edge.verAdj]
else:
cur = None
cur = stack.pop()
# check if all adjacent nodes are visited,
# or else, push it back
if cur:
adj = cur.next
while adj and self.visitCount[adj.verAdj] == 1:
adj = adj.next
if adj:
stack.append(cur)
cur = vertexes[adj.verAdj]
else:
cur = None
def widthFirstSearch(self, startAdj):
self.resetVisitCount()
queue = Queue()
queue.put(vertexes[startAdj], False)
while not queue.empty():
ver = queue.get(False)
if ver and self.visitCount[ver.verName]==0:
print(ver)
self.visitCount[ver.verName] = 1
edge = ver.next
while edge:
if self.visitCount[edge.verAdj] == 0:
queue.put_nowait(vertexes[edge.verAdj])
edge = edge.next
def topologicalSort(self):
indegree = [0 for i in vertexes]
# top points to the top of zero indegree vertex stack
top = -1
for i in vertexes:
edge = i.next
while edge:
indegree[edge.verAdj] += 1
edge = edge.next
        for i in range(len(indegree)):
if indegree[i] == 0:
print(vertexes[i])
# in stack operation
indegree[i] = top
top = i
while top != -1:
# out stack operation
curIdx = top
top = indegree[top]
edge = vertexes[curIdx].next
while edge:
indegree[edge.verAdj] -= 1
if indegree[edge.verAdj] == 0:
print(vertexes[edge.verAdj])
# in stack operation
indegree[edge.verAdj] = top
top = edge.verAdj
edge = edge.next
def topologicalSortWithCircuitDetect(self):
indegree = [0 for i in vertexes]
# top points to the top of zero indegree vertex stack
top = -1
for i in vertexes:
edge = i.next
while edge:
indegree[edge.verAdj] += 1
edge = edge.next
        for i in range(len(indegree)):
if indegree[i] == 0:
# in stack operation
indegree[i] = top
top = i
for i in range(0, len(vertexes)):
if top != -1:
# out stack operation
curIdx = top
top = indegree[top]
print(vertexes[curIdx])
edge = vertexes[curIdx].next
while edge:
indegree[edge.verAdj] -= 1
if indegree[edge.verAdj] == 0:
# in stack operation
indegree[edge.verAdj] = top
top = edge.verAdj
edge = edge.next
else:
raise Exception("there is a circuit")
class VertexNode:
def __init__(self, verName,next = None):
'''Initialization method.
verName is the data of the vertex.
next is pointer to EdgeNod.'''
self.verName = verName
self.next = next
def __str__(self):
return "[verName={},{}]".format(self.verName, self.next is None)
def __hash__(self):
return self.verName.__hash__
class EdgeNode:
def __init__(self, verAdj, weight = -1, next = None ):
'''Initialization method.
verAdj is the verName of adjacent node.
next is pointer to next EdgeNode
weight is the weight of the edge'''
self.verAdj = verAdj
self.next = next
self.weight = weight
def __str__(self):
return "[verAdj={},weight={},{}]".format(self.verAdj, self.weight, self.next is None)
# AOE Graph Example:
# T1 T6
# ^ \ ^ \
# / a3=1 / a10=2
# a0=6 \ a7=9 \
# / v / v
# T0 T4 T8
# \ \ ^ \ ^
# \ 1=4 / a8=8 /
# \ \ a4=1 \ a11=4
# \ v / v /
# \ T2 T7
# \ \ ^
# a2=5 a5=1 /
# \ \ a9=4
# \ v /
# \ T5
# \ ^
# \ /
# \ a6=2
# v /
# T3
# test data is here.
vers = [0, 1, 2, 3, 4, 5, 6, 7, 8]
matrix =[
#[0, 1, 2, 3, 4, 5, 6, 7, 8]
[0, 6, 4, 5, 0, 0, 0, 0, 0], #0
[0, 0, 0, 0, 1, 0, 0, 0, 0], #1
[0, 0, 0, 0, 1, 1, 0, 0, 0], #2
[0, 0, 0, 0, 0, 2, 0, 0, 0], #3
[0, 0, 0, 0, 0, 0, 9, 8, 0], #4
[0, 0, 0, 0, 0, 0, 0, 4, 0], #5
[0, 0, 0, 0, 0, 0, 0, 0, 2], #6
[0, 0, 0, 0, 0, 0, 0, 0, 4], #7
[0, 0, 0, 0, 0, 0, 0, 0, 0] #8
]
aoeMatrixGraph = AdjacentMatrixGraph(matrix, vers)
vertexes = []
edge = EdgeNode(1, 6,
EdgeNode(2, 4,
EdgeNode(3, 5, None)))
vertexes.append(VertexNode(0, edge))
edge = EdgeNode(4, 1, None)
vertexes.append(VertexNode(1, edge))
edge = EdgeNode(4, 1,
EdgeNode(5, 1, None))
vertexes.append(VertexNode(2, edge))
edge = EdgeNode(5, 2, None)
vertexes.append(VertexNode(3, edge))
edge = EdgeNode(6, 9,
EdgeNode(7, 8, None))
vertexes.append(VertexNode(4, edge))
edge = EdgeNode(7, 4, None)
vertexes.append(VertexNode(5, edge))
edge = EdgeNode(8, 2, None)
vertexes.append(VertexNode(6, edge))
edge = EdgeNode(8, 4, None)
vertexes.append(VertexNode(7, edge))
vertexes.append(VertexNode(8, None))
aoeGraph = AdjacentArrayGraph(vertexes)
# test start here
print("depth first search")
aoeGraph.depthFirstSearch(0)
print("depth first search via stack")
aoeGraph.resetVisitCount()
aoeGraph.depthFirstSearchStack(0)
print("depth first search via stack 1")
aoeGraph.resetVisitCount()
aoeGraph.depthFirstSearchStack1(0)
print("width first search")
aoeGraph.widthFirstSearch(0)
print("topological sort")
aoeGraph.topologicalSort()
aoeGraph.topologicalSortWithCircuitDetect()
print("shortest path for each pair of vertexes")
dist = aoeMatrixGraph.eachVertexesMinDist()
print(dist)
|
diojin/doodles-python
|
src/algorithm/data_structure/graph.py
|
graph.py
|
py
| 10,178 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41766786793
|
from collections import deque
s = input().split()
n = int(s[0])
m = int(s[1])
a = list(map(int, input().split()))
result = ['0']*m
d = {}
for i in range(m):
c = None
if a[i] in d:
c = d[a[i]]
else:
c = deque()
d[a[i]] = c
c.append(i)
while True:
found = True
max_p = 0
for i in range(1, n+1):
if i not in d or len(d[i]) == 0:
found = False
break
p = d[i].popleft()
if p > max_p:
max_p = p
if found == False:
break
result[max_p] = '1'
print(''.join(result))
|
gautambp/codeforces
|
1100-B/1100-B-48361896.py
|
1100-B-48361896.py
|
py
| 626 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12198804557
|
from iskanje_v_sirino import Graph
import collections
import winsound
duration = 3000
freq = 440
'''
NxP_start = [
['', '', '', '', ''],
['', '', '', '', ''],
['B', '', '', '', ''],
['A', 'C', 'D', 'E', 'F']
]
NxP_end = [
['', 'C', '', '', ''],
['', 'E', '', '', ''],
['F', 'D', '', '', ''],
['B', 'A', '', '', '']
]
'''
NxP_start = [
['B', '',''],
['A', '', '']
]
NxP_end = [
['', 'B',''],
['', 'A', '']
]
N = len(NxP_start)
P = len(NxP_start[N-1])
#P - number of stacking positions
#N - number of large boxes stacked on top of each other
# p => 1 <= p <= P
# r => 1 <= r <= P
def prestavi(p, r, matrika1):
matrika = matrika1[:]
first_element = ''
delete_i = -1
delete_p_1 = -1
    # if p == r, return the matrix unchanged
if p == r:
return matrika
    # scan column p top-down for the first non-empty cell and store it in first_element
for i in range(0, N):
if matrika[i][p-1] != '':
first_element = matrika[i][p-1]
delete_i = i
delete_p_1 = p-1
break
    # scan column r bottom-up for the first empty cell, place first_element there
    # and clear the element at coordinates i, p-1
for j in range(N-1, -1, -1):
if matrika[j][r-1] == '':
matrika[j][r-1] = first_element
if delete_i > -1 and delete_p_1 > -1:
matrika[delete_i][delete_p_1] = ''
break
return matrika
def izpis(NxP):
for a in NxP:
print(a)
# for dict key = tuple
def tuple_to_list(t):
return [list(i) for i in t]
def list_to_tuple(l):
t = tuple()
for i in l:
t += tuple(i),
return t
def naredi_matriko(matrika):
return [list(i) for i in matrika]
def napolni(graf, start_m, kopija):
start = list_to_tuple(start_m)
for p in range(1, P+1):
for r in range(1, P+1):
kopija = naredi_matriko(start_m)
x = prestavi(p, r, kopija)
tuple_x = list_to_tuple(x)
if tuple_x != start:
graf.add(start, tuple_x)
def BFS(graf, root):
oce_od_elementa = collections.defaultdict(tuple)
vrsta = []
seen = set()
    # add the root
vrsta.append(list_to_tuple(root))
seen.add(str(root))
    kopija = naredi_matriko(root) # copy of start
napolni(graf, root, kopija)
i = 0
while vrsta:
vozlisce = vrsta.pop(0)
for neighbour in graf.get(vozlisce):
if str(neighbour) not in seen:
print(i, ".")
i += 1
kopija_neig = naredi_matriko(neighbour)
napolni(graf, neighbour, kopija_neig)
vrsta.append(neighbour)
seen.add(str(neighbour))
if tuple_to_list(neighbour) == NxP_end:
#winsound.Beep(freq, duration)
return neighbour
def IDDFS(graf, root):
stack = []
while stack:
vozilisce = root
if root == NxP_end:
return root
return
g = Graph()
print(BFS(g, NxP_start))
#g.print()
|
martin0b101/UI
|
robotizirano_skladisce.py
|
robotizirano_skladisce.py
|
py
| 3,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72066928509
|
from flask import Flask, flash, redirect, render_template
from form import LoginForm
app = Flask(__name__)
app.config['SECRET_KEY'] = "secret"
@app.route("/home")
def home():
return "Hello Mines ParisTech"
@app.route("/", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
"""Log in requested for {form.username.data} with passord {form.password.data}"""
## Add function here to check password
return redirect("/home")
return render_template("login.html", form=form)
@app.route("/shutdown")
def shutdown():
raise RuntimeError
if __name__=="__main__":
try:
app.run(debug=False, port=3001)
except RuntimeError:
print("Server closed")
|
basileMarchand/ProgrammeCooperants
|
flask_demo/demo5/app.py
|
app.py
|
py
| 754 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37583094466
|
import pyvista as pv
axes = pv.Axes()
axes.origin
# Expected:
## (0.0, 0.0, 0.0)
#
# Set the origin of the camera.
#
axes.origin = (2.0, 1.0, 1.0)
axes.origin
# Expected:
## (2.0, 1.0, 1.0)
|
pyvista/pyvista-docs
|
version/dev/api/plotting/_autosummary/pyvista-Axes-origin-1.py
|
pyvista-Axes-origin-1.py
|
py
| 190 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6923445505
|
def solve(data, rope):
v = [[0, 0] for _ in range(rope)]
st = set()
for line in data.splitlines():
act, step = line.split(' ')
for _ in range(int(step)):
if act == "D":
v[0][1] += 1
elif act == "U":
v[0][1] -= 1
elif act == "L":
v[0][0] -= 1
else:
v[0][0] += 1
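        # each trailing knot follows the knot directly ahead of it: it moves one step
        # (diagonally when needed) whenever it falls more than one cell behind on either axis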
for i, ((hx, hy), (tx, ty)) in enumerate(zip(v, v[1:])):
if abs(hx - tx) > 1:
tx += 1 if hx > tx else -1
if abs(hy - ty) > 0:
ty += 1 if hy > ty else -1
elif abs(hy - ty) > 1:
ty += 1 if hy > ty else -1
if abs(hx - tx) > 0:
tx += 1 if hx > tx else -1
v[i + 1][0] = tx
v[i + 1][1] = ty
st.add(tuple(v[-1]))
return len(st)
with open("input/day9.txt", "r") as f:
data = f.read()
print(solve(data, 2))
print(solve(data, 10))
|
eglantine-shell/adventofcode
|
2022/py/day9.py
|
day9.py
|
py
| 1,058 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11317226884
|
import pathlib
from setuptools import find_packages, setup
import codecs
import os.path
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="trankit",
version=get_version("trankit/__init__.py"),
description="Trankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/nlp-uoregon/trankit",
author="NLP Group at the University of Oregon",
author_email="[email protected]",
license='Apache License 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
packages=find_packages(),
include_package_data=True,
install_requires=['numpy', 'protobuf', 'requests', 'torch>=1.6.0', 'tqdm>=4.27', 'langid==1.1.6', 'filelock', 'tokenizers>=0.7.0', 'regex != 2019.12.17', 'packaging', 'sentencepiece', 'sacremoses'],
entry_points={
},
)
|
nlp-uoregon/trankit
|
setup.py
|
setup.py
|
py
| 2,223 |
python
|
en
|
code
| 693 |
github-code
|
6
|
27317069924
|
import os
import re
file_list = []
check = os.listdir('G:/flag/flag/')
ret = r'((flag|key|ctf){.*})'
for i in check:
with open('G:/flag/flag/'+i,'r',encoding='utf-8') as f:
a = f.read()
res = re.findall(ret,a)
if res:
print('*'*66)
print('[+]file_name: '+i)
file_list.append('G:/flag/flag/'+i)
else:
continue
for y in file_list:
with open(y,'r',encoding='utf-8') as file:
files = file.read()
print(re.findall(ret,files,re.IGNORECASE))
|
vFREE-1/timu_py
|
海量的TXT.py
|
海量的TXT.py
|
py
| 580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17763553641
|
# From a given set of coin denominations coins[] of size m, find the minimum number of coins required to pay amount n
import sys
def getMinCoins(coins,m,n):
    #create a 1D table storing the minimum coin count for each sum 0..n, initialised with a max value
table = [sys.maxsize] * (n+1)
#for sum 0, 0 coins required therefore assign
table[0] = 0
    #for each sum from 1 to n
for i in range(1,n+1):
        #for each coin -> j is the index of the current coin
for j in range(m):
            #if the current coin does not exceed the amount, look up the sub-result
            if(coins[j] <= i):
                subRes = table[i - coins[j]]
                #if the sub-result plus one coin beats the current count for this sum, update it
if( (subRes != sys.maxsize) and (subRes+1 < table[i]) ):
table[i] = subRes + 1
if(table[n] == sys.maxsize) : return -1
else: return table[n]
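# Worked example: getMinCoins([1, 2, 5], 3, 11) returns 3  (11 = 5 + 5 + 1)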
m = int(input('Enter number of coins'))
coins = [int(i) for i in input('enter set of coins').split()]
n = int(input('Enter amount to pay'))
print(getMinCoins(coins,m,n))
|
aparna0/competitive-programs
|
14coin change probems/2find minimum coins.py
|
2find minimum coins.py
|
py
| 1,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33561062837
|
"""
moving_avg_demo.py
"""
import numpy as np
import scipy as sp
import scipy.signal
import plot
import signal_generator
def moving_average_builder(length):
filt = np.array([1.0/length]*length)
return filt
def moving_average_demo1():
filt = moving_average_builder(5)
sig = signal_generator.sinusoid(128, 0.4*np.pi)
plot.stem(filt, title='Moving Average Filter With 5 Taps')
plot.stem(sig, title='Input Signal')
output = np.convolve(filt, sig, mode='full') # mode can be 'full', 'same', 'valid'
plot.stem(output, title='Output Signal')
ww, hh = scipy.signal.freqz(filt)
plot.mag_phase(hh, xaxis=ww/np.pi)
a = input()
return
if __name__ == '__main__':
moving_average_demo1()
|
Chris93Hall/filtering_presentation
|
moving_avg_demo.py
|
moving_avg_demo.py
|
py
| 730 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42818754446
|
from tkinter import *
from PIL import ImageTk, Image
import string
import random
root = Tk()
root.title("Я люблю BRAWL STARS")
root.geometry("1200x675")
def clicked():
exit = ""
for j in range(3):
n = 5
letters = 0
integers = 0
for i in range(n):
if letters < 3 and integers < 2:
a = random.randint(1,2)
if a == 1:
exit += random.sample(string.ascii_letters, 1)[0]
letters += 1
else:
exit+=str(random.randint(0,9))
integers += 1
elif letters < 3:
exit += random.sample(string.ascii_letters, 1)[0]
else:
exit+=str(random.randint(0,9))
if j == 2:
break
exit+='-'
canvas1.itemconfig(label1_canvas, text=exit.upper())
bg = ImageTk.PhotoImage(Image.open("2D4F4F53-D36C-4213-BB42-CAC30A9DD06D.jpeg"))
canvas1 = Canvas(root, width=1200, height=675)
canvas1.pack(fill="both", expand=True)
canvas1.create_image(0, 0, image=bg, anchor="nw")
btn = Button(root, text="Генерировать ключ", command=clicked)
button1_canvas = canvas1.create_window(950, 550, anchor="nw", window=btn)
label1_canvas = canvas1.create_text(1000, 500, text="Генерация ключа", fill="white",
font=('Arial 25 bold'))
root.mainloop()
|
nelyuboov/Lab-4
|
main (2).py
|
main (2).py
|
py
| 1,483 |
python
|
en
|
code
| null |
github-code
|
6
|
70128470908
|
# 1. scrape the page; take headers from the browser developer console (Network -> Request)
# 2. save the result to a local file
# 3. work with the local data
import json
import requests
from bs4 import BeautifulSoup
import csv
from time import sleep
import random
import local_properties as lp
url = lp.HEALTH_DIET_URL
headers = {
"accept": "*/*",
"user-agent": lp.HEADER_USER_AGENT
}
local_page_file_name = "health_diet.html"
file_categories = "all_categories_dict.json"
# common helper functions
def open_file(name: str):
with open(name) as file:
return file.read()
def open_file_utf8(name: str):
with open(name, encoding="utf-8") as file:
return file.read()
def write_to_file(name: str, data: str):
with open(name, "w") as file:
file.write(data)
def write_to_file_utf8(name: str, data: str):
with open(name, "w", encoding="utf-8") as file:
file.write(data)
def open_json(name: str):
with open(name) as file:
return json.load(file)
def write_to_json(file_name: str, data: dict):
with open(file_name, "w") as file:
json.dump(data, file, indent=4, ensure_ascii=False)
# scrape the web page
def scrap_page():
req = requests.get(url, headers)
src = req.text
return src
# save the scraped page locally
def save_page_to_local(src: str):
write_to_file(local_page_file_name, src)
# read the page data from the local file
def get_local_page():
return open_file(local_page_file_name)
# links to all product categories
def get_all_products_href(src: str):
soup = BeautifulSoup(src, "lxml")
all_products_href = soup.find_all(class_="mzr-tc-group-item-href")
# print(all_products_href)
return all_products_href
# dictionary mapping category names to their links
def get_all_categories(src: str):
all_categories_dict = {}
hrefs = get_all_products_href(src)
for item in hrefs:
item_text = item.text
item_href = "https://health-diet.ru" + item.get("href")
all_categories_dict[item_text] = item_href
return all_categories_dict
def get_product_data():
all_categories = open_json(file_categories)
iteration_count = int(len(all_categories)) - 1
count = 0
print(f"Всего итераций: {iteration_count}")
for category_name, category_href in all_categories.items():
rep = [",", " ", "-", "'"]
for item in rep:
if item in category_name:
category_name = category_name.replace(item, "_")
req = requests.get(url=category_href, headers=headers)
src = req.text
result_file_name = f"data/{count}_{category_name}"
write_to_file_utf8(f"{result_file_name}.html", src)
src = open_file_utf8(f"{result_file_name}.html")
soup = BeautifulSoup(src, "lxml")
        # check that the page actually contains a product table
alert_block = soup.find(class_="uk-alert-danger")
if alert_block is not None:
continue
        # collect the table headers
table_head = soup \
.find(class_="mzr-tc-group-table") \
.find("tr") \
.find_all("th")
product = table_head[0].text
calories = table_head[1].text
proteins = table_head[2].text
fats = table_head[3].text
carbohydrates = table_head[4].text
with open(f"{result_file_name}.csv", "w", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(
(
product,
calories,
proteins,
fats,
carbohydrates
)
)
        # collect the product rows
products_data = soup \
.find(class_="mzr-tc-group-table") \
.find("tbody") \
.find_all("tr")
product_info = []
for item in products_data:
product_tds = item.find_all("td")
title = product_tds[0].find("a").text
calories = product_tds[1].text
proteins = product_tds[2].text
fats = product_tds[3].text
carbohydrates = product_tds[4].text
product_info.append(
{
"Title": title,
"Calories": calories,
"Proteins": proteins,
"Fats": fats,
"Carbohydrates": carbohydrates
}
)
with open(f"{result_file_name}.csv", "a", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(
(
title,
calories,
proteins,
fats,
carbohydrates
)
)
with open(f"{result_file_name}.json", "a", encoding="utf-8") as file:
json.dump(product_info, file, indent=4, ensure_ascii=False)
count += 1
print(f"# Итерация {count}. {category_name} записан...")
iteration_count = iteration_count + 1
if iteration_count == 0:
print("Работа завершена")
break
print(f"Осталось итераций: {iteration_count}")
sleep(random.randrange(2, 4))
if __name__ == '__main__':
# 1 step
# src1 = scrap_page()
# save_page_to_local(src1)
# 2 step
# src2 = get_local_page()
# get_all_products_href(src2)
# 3 step
# src3 = get_local_page()
# categories = get_all_categories(src3)
# write_to_json(file_categories, categories)
# 4 step
get_product_data()
|
ildar2244/EdScraping
|
health_diet.py
|
health_diet.py
|
py
| 6,067 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27615694777
|
"""
Get information about how many adult movies/series etc. there are per
region. Get the top 100 of them from the region with the biggest count to
the region with the smallest one.
Получите информацию о том, сколько фильмов/сериалов для взрослых и т. д. есть на
область, край. Получите 100 лучших из них из региона с наибольшим количеством
область с наименьшим из них.
title.basics.tsv.gz title.akas.tsv.gz
"""
from pyspark import SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.types as t
import pyspark.sql.functions as f
from pyspark.sql import Window
def task5():
spark_session = (SparkSession.builder
.master("local")
.appName("task app")
.config(conf=SparkConf())
.getOrCreate())
schema_title_basics = t.StructType([
t.StructField("tconst", t.StringType(), nullable=True),
t.StructField("titleType", t.StringType(), nullable=True),
t.StructField("primaryTitle", t.StringType(), nullable=True),
t.StructField("originalTitle", t.StringType(), nullable=True),
t.StructField("isAdult", t.StringType(), nullable=True),
t.StructField("startYear", t.IntegerType(), nullable=True),
t.StructField("endYear", t.IntegerType(), nullable=True),
t.StructField("runtimeMinutes", t.IntegerType(), nullable=True),
t.StructField("genres", t.StringType(), nullable=True),
])
schema_title_akas = t.StructType([
t.StructField("titleId", t.StringType(), nullable=False),
t.StructField("ordering", t.StringType(), nullable=False),
t.StructField("title", t.StringType(), nullable=False),
t.StructField("region", t.StringType(), nullable=True),
t.StructField("language", t.StringType(), nullable=True),
t.StructField("types", t.StringType(), nullable=True),
t.StructField("attributes", t.StringType(), nullable=True),
t.StructField("isOriginalTitle", t.StringType(), nullable=True)
])
schema_ratings_basics = t.StructType([
t.StructField("tconst", t.StringType(), nullable=True),
t.StructField("averageRating", t.DoubleType(), nullable=True),
t.StructField("numVotes", t.IntegerType(), nullable=True)
])
file_read_basics = r'.\Data\input\title.basics.tsv.gz'
file_read_akas = r'.\Data\input\title.akas.tsv.gz'
file_read_ratings = r'.\Data\input\title.ratings.tsv.gz'
from_csv_df = spark_session.read.csv(
file_read_basics, header=True, nullValue='null', sep=r'\t', schema=schema_title_basics)
from_csv_df_akas = spark_session.read.csv(
file_read_akas, header=True, nullValue='null', sep=r'\t', schema=schema_title_akas)
from_csv_df_ratings = spark_session.read.csv(
file_read_ratings, header=True, nullValue='null', sep=r'\t', schema=schema_ratings_basics)
temp_df1 = from_csv_df.select("tconst", "isAdult").filter(f.col("isAdult") == 1)
temp_df2 = from_csv_df_akas.select("region", "titleId", "title")\
.filter((f.col("region").isNotNull()) & (f.col("region") != r"\N")).withColumnRenamed("titleId", "tconst")
temp_df3 = temp_df1.join(temp_df2, "tconst")
temp_df4 = temp_df3.join(from_csv_df_ratings.select("averageRating", "tconst"), "tconst")
window = Window.partitionBy("region").orderBy("region")
temp_df4 = temp_df4.withColumn("adult_per_region", f.count(f.col("region")).over(window))
region_min = temp_df4.agg(f.min("adult_per_region")).collect()[0][0]
region_max = temp_df4.agg(f.max("adult_per_region")).collect()[0][0]
temp_dfmin = temp_df4.filter(f.col("adult_per_region") == region_min).orderBy(f.col("averageRating").desc()).limit(100)
temp_dfmax = temp_df4.filter(f.col("adult_per_region") == region_max).orderBy(f.col("averageRating").desc()).limit(100)
from_csv_df_task8 = temp_dfmin.union(temp_dfmax)
#from_csv_df_task8.show(200, truncate=False)
file_write = r'.\Data\output\task08'
from_csv_df_task8.write.csv(file_write, header=True, mode="overwrite")
return 0
|
Tetyana83/spark
|
task5.py
|
task5.py
|
py
| 4,199 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28315455311
|
from typing import Union, Tuple
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from gym import Env
from gym.spaces import Box
from ..agent import Agent
from . import ReplayBuffer
from .actor import Actor
from .critic import Critic
from .polyak_update import polyak_update
class TD3Agent(Agent):
def __init__(self, name, env: Env,
discounting_factor: float = 0.99,
batch_size: int = 32,
buffer_size: int = 50000,
start_learning: int = 1000,
learning_rate_actor: float = 0.0005,
learning_rate_critic: float = 0.001,
polyak_tau: float = 0.01,
hidden_sizes_s: Union[int, Tuple[int, ...]] = 128,
hidden_sizes_a: Union[int, Tuple[int, ...]] = 128,
hidden_sizes_shared: Union[int, Tuple[int, ...]] = 256,
hidden_sizes_actor: Union[int, Tuple[int, ...]] = (128, 128),
policy_noise: float = 0.2,
noise_clip: float = 0.5,
max_grad_norm: float = 0.5,
exploration_noise: float = 0.1,
policy_update_frequency: int = 10,
target_update_frequency: int = 10
):
super().__init__(name, 'TD3', env)
assert isinstance(self._env.action_space, Box), "Action space must be of type Box"
self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self._gamma = discounting_factor
self._memory = ReplayBuffer(buffer_size, self._device)
self.q1 = Critic(self.observation_shape,
self.action_shape,
hidden_sizes_s,
hidden_sizes_a,
hidden_sizes_shared,
self._device)
self.q2 = Critic(self.observation_shape,
self.action_shape,
hidden_sizes_s,
hidden_sizes_a,
hidden_sizes_shared,
self._device)
self.q1_target = Critic(self.observation_shape,
self.action_shape,
hidden_sizes_s,
hidden_sizes_a,
hidden_sizes_shared,
self._device)
self.q2_target = Critic(self.observation_shape,
self.action_shape,
hidden_sizes_s,
hidden_sizes_a,
hidden_sizes_shared,
self._device)
self.pi = Actor(self.observation_shape,
self.action_shape,
hidden_sizes_actor,
self._device)
self.pi_target = Actor(self.observation_shape,
self.action_shape,
hidden_sizes_actor,
self._device)
self.q1_target.load_state_dict(self.q1.state_dict())
self.q2_target.load_state_dict(self.q2.state_dict())
self.pi_target.load_state_dict(self.pi.state_dict())
self.q1_target.train(False)
self.q2_target.train(False)
self.pi_target.train(False)
self._q_optimizer = optim.Adam(list(self.q1.parameters()) + list(self.q2.parameters()), lr=learning_rate_critic)
self._pi_optimizer = optim.Adam(list(self.pi.parameters()), lr=learning_rate_actor)
self._batch_size = batch_size
self._start_learning = max(start_learning, batch_size)
self._policy_noise = policy_noise
self._noise_clip = noise_clip
self._max_grad_norm = max_grad_norm
self._exploration_noise = exploration_noise
self._policy_update_frequency = policy_update_frequency
self._target_update_frequency = target_update_frequency
self._tau = polyak_tau
self._q_loss = torch.Tensor([0.0], device=self._device)
self._pi_loss = torch.Tensor([0.0], device=self._device)
self._a_limits = torch.Tensor(self._env.action_space.low, device=self._device),\
torch.Tensor(self._env.action_space.high, device=self._device)
def find_action(self, observation, in_eval=False):
with torch.no_grad():
a = self.pi(torch.tensor(observation, dtype=torch.float, device=self._device)).detach().numpy()
if not in_eval:
a += np.random.normal(0, self._exploration_noise, size=self.action_shape)
a = a.clip(self._env.action_space.low, self._env.action_space.high)
return a.tolist()
def learn(self, observation, action, reward, next_observation, global_step):
self._memory.put((observation, action, reward, next_observation))
if self._memory.size() > self._start_learning:
s, a, r, s_prime = self._memory.sample(self._batch_size)
with torch.no_grad():
clipped_noise = torch.randn_like(a, device=self._device) * self._policy_noise
clipped_noise = clipped_noise.clamp(-self._noise_clip, self._noise_clip)
a_prime = self.pi_target(s_prime) + clipped_noise
a_prime = a_prime.clamp(*self._a_limits)
qf1_next_target = self.q1_target(s_prime, a_prime)
qf2_next_target = self.q2_target(s_prime, a_prime)
min_qf_next_target = torch.min(qf1_next_target, qf2_next_target)
next_q_value = r + self._gamma * min_qf_next_target
q1_l = F.mse_loss(self.q1(s, a), next_q_value)
q2_l = F.mse_loss(self.q2(s, a), next_q_value)
self._q_loss = 0.5 * (q1_l + q2_l)
# optimize the model
self._q_optimizer.zero_grad()
self._q_loss.backward()
nn.utils.clip_grad_norm_(list(self.q1.parameters()) + list(self.q2.parameters()), self._max_grad_norm)
self._q_optimizer.step()
if (global_step + 1) % self._policy_update_frequency == 0:
self._pi_loss = -self.q1(s, self.pi(s)).mean()
self._pi_optimizer.zero_grad()
self._pi_loss.backward()
nn.utils.clip_grad_norm_(list(self.pi.parameters()), self._max_grad_norm)
self._pi_optimizer.step()
if (global_step + 1) % self._target_update_frequency == 0:
polyak_update(self.q1.parameters(), self.q1_target.parameters(), self._tau)
polyak_update(self.q2.parameters(), self.q2_target.parameters(), self._tau)
polyak_update(self.pi.parameters(), self.pi_target.parameters(), self._tau)
def get_log_dict(self):
return {
'loss/q_loss': self._q_loss.item(),
'loss/pi_loss': self._pi_loss.item()
}
|
schobbejak/QMIX-Active-Wake-Control
|
agent/deep/td3.py
|
td3.py
|
py
| 6,983 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41086983441
|
import sys
max = 1000001
N = int(sys.stdin.readline())
dp = [1000000000] * max
dp[1] = 0
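# dp[i]: minimum number of operations to build i from 1 using +1, *2 and *3
# (equivalent to reducing i to 1 with -1, /2 and /3); filled bottom-up below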
for i in range(1, N):
dp[i+1] = min(dp[i+1], dp[i]+1)
if(i*2 < max):
dp[i*2] = min(dp[i*2], dp[i]+1)
if(i*3 < max):
dp[i*3] = min(dp[i*3], dp[i]+1)
print(dp[N])
|
Ahyun0326/Algorithm_study
|
dp/1로 만들기.py
|
1로 만들기.py
|
py
| 281 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5390280053
|
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
preOrder = [1,2,4,7,3,5,6,8]
midOrder = [4,7,2,1,5,3,8,6]
def BuildTree(preOrder,midOrder):
if len(preOrder) != len(midOrder) or len(preOrder) == 0:
return
if len(preOrder) == len(midOrder) and len(preOrder) == 1:
return TreeNode(preOrder[0])
midIndex = midOrder.index(preOrder[0])
left = BuildTree(preOrder[1 : midIndex + 1],midOrder[0:midIndex])
right = BuildTree(preOrder[midIndex + 1:], midOrder[midIndex + 1:])
root = TreeNode(preOrder[0])
root.left = left
root.right = right
return root
root = BuildTree(preOrder, midOrder)
result = []
def DFS(root):
if not root:
return
result.append(root.val)
DFS(root.left)
DFS(root.right)
DFS(root)
print(result)
# below: some iterative in-order traversal code (prints the in-order successor of target)
def midOrder(root,target):
if not root:
return
stack = []
res = []
flag = False
while stack or root:
while root:
stack.append(root)
root = root.left
if stack:
root = stack.pop()
res.append(root.val)
if flag:
print(root.val)
flag = False
if root.val == target:
flag = True
root = root.right
print(res)
midOrder(root,4)
|
JarvisFei/leetcode
|
剑指offer代码/数据结构/面试题7:重建二叉树.py
|
面试题7:重建二叉树.py
|
py
| 1,406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26436874912
|
#mandelbrot by KB for CS550
#inspired by work done with wikipedia example code
from PIL import Image
import random
from PIL import ImageFilter
#set image size
imgx = 500
imgy = 500
xa, xb = -0.75029467235117, -0.7478726919928045
ya, yb = 0.06084172052354717, 0.06326370066585434
image = Image.new("RGB",(imgx,imgy))
#for all the pixels in the image
for Py in range(imgy):
yS= ((yb-ya)/(imgy-1)) * Py + (ya)
for Px in range(imgx):
#divide all the pixels into sections between -2 and 2
xS = ((xb-xa)/(imgx-1))* Px + (xa)
x = 0
y = 0
iteration = 0
#set maximum number of iterations
max_iteration = 256
while (x*x + y*y <= 2) and iteration < max_iteration:
#calculations based on wikihow
xtemp = x*x - y*y + xS
y = 2*x*y + yS
iteration += 1
x = xtemp
# color shades based on iteration
colorR = iteration
colorG = (iteration*50)%256
colorB = 256- iteration
image.putpixel((Px,Py),(colorR, colorG, colorB))
imageedits = image.filter(ImageFilter.CONTOUR)
imageedits.save("mandelbrot2.png", "PNG")
|
gbroady19/CS550
|
mandelbrot2.py
|
mandelbrot2.py
|
py
| 1,058 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10543655506
|
from datetime import datetime, time, timedelta
import iso8601
import logging
import pytz
import requests
import sys
from django.conf import settings
from django.core.cache import cache
from django.shortcuts import render
logger = logging.getLogger(__name__)
uk_tz = pytz.timezone('Europe/London')
utc_tz = pytz.utc
def rss_reader(request):
'''
HTTP GET the required RSS feed
    and render it for inclusion in a widget
'''
rss_url = request.GET.get('url','')
current_key = "rss_reader_current!{0}".format(rss_url)
lng_key = "rss_reader_lng!{0}".format(rss_url)
rss_xml = cache.get(current_key)
# If we got a value from the cache, use that
if rss_xml is not None:
logger.info('Cache hit for %s', current_key)
    # Otherwise, retrieve the feed from its source
else:
logger.info('Cache miss for %s', current_key)
rss_xml = ''
try:
r = requests.get(rss_url)
r.raise_for_status()
# https://stackoverflow.com/questions/35042216/requests-module-return-json-with-items-unordered
rss_xml = r.text
except:
logger.error("Error retrieving rss feed for %s: %s %s",
rss_url,
sys.exc_info()[0],
sys.exc_info()[1])
# Whatever happens, cache what we got so we don't keep hitting the API
finally:
cache.set(current_key, rss_xml, timeout=600)
# Try to parse whatever we've got. if that works, cache it
# as the 'last known good' version for ever
try:
cache.set(lng_key, rss_xml, timeout=None)
except:
logger.error("Error cacheing current rss feed for %s: %s %s",
rss_url,
sys.exc_info()[0],
sys.exc_info()[1])
logger.info("rss feed %s was: '%s'", title, rss_xml)
# Fall back to the LNG version, if that's available
lng_data = cache.get(lng_key)
if lng_data is not None:
logger.info('Cache hit for %s', lng_key)
rss_xml = lng_data
else:
logger.info('Cache miss for %s', lng_key)
#rss_xml = "debug"
return render(request, 'smartpanel/rss_reader.html', { "rss_xml": rss_xml }
)
|
SmartCambridge/tfc_web
|
tfc_web/smartpanel/views/widgets/rss_reader.py
|
rss_reader.py
|
py
| 2,242 |
python
|
en
|
code
| 3 |
github-code
|
6
|
14493893608
|
# -*- coding: utf-8 -*- #
'''
--------------------------------------------------------------------------
# File Name: PATH_ROOT/train.py
# Author: JunJie Ren
# Version: v1.0
# Created: 2021/06/14
# Description: — — — — — — — — — — — — — — — — — — — — — — — — — — —
    --> DD signal recognition (interpretable) series code <--
    -- Main training program; ports the earlier TensorFlow signal
       recognition code to PyTorch and organizes it as a project
    -- TODO: the train() code still needs to be modularized, especially
       the metric logging and the dataset handling
    — — — — — — — — — — — — — — — — — — — — — — — — — — —
# Module called: <0> PATH_ROOT/configs.py
    <1> PATH_ROOT/dataset/RML2016.py
    <2> PATH_ROOT/networks/MsmcNet.py
    <3> PATH_ROOT/utils/strategy.py;plot.py
    <4> PATH_ROOT/dataset/ACARS.py
    — — — — — — — — — — — — — — — — — — — — — — — — — — —
# Function List: <0> train():
    -- Main training routine: learning-rate scheduling, log recording and
       convergence-curve plotting; validates every n (1) epochs and keeps
       the model with the best validation performance
    <1> eval():
    -- Evaluates the current model on the test set
    — — — — — — — — — — — — — — — — — — — — — — — — — — —
# Class List: None
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# History:
    | <author> | <version> | <time> | <desc>
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    <0> | JunJie Ren | v1.0 | 2020/06/14 | Re-implemented the earlier Keras code in PyTorch
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    <1> | JunJie Ren | v1.1 | 2020/07/09 | Added the ACARS training program option
--------------------------------------------------------------------------
'''
import os
import time
import torch
import numpy as np
import torch.nn as nn
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from configs import cfgs
from dataset.RML2016 import RMLDataset, loadNpy
from dataset.ACARS import ACARSDataset, loadNpy_acars
from networks.MsmcNet import MsmcNet_RML2016, MsmcNet_ACARS
from utils.strategy import step_lr, accuracy
from utils.plot import draw_curve
def train():
    ''' Main training routine for signal modulation classification '''
# model
if cfgs.model == "MsmcNet_RML2016":
model = MsmcNet_RML2016(num_classes=cfgs.num_classes)
elif cfgs.model == "MsmcNet_ACARS":
model = MsmcNet_ACARS(num_classes=cfgs.num_classes)
else :
print('ERROR: No model {}!!!'.format(cfgs.model))
print(model)
    '''model = torch.nn.DataParallel(model) # reserved for multi-GPU use'''
model.cuda()
# Dataset
if cfgs.dataset_name == "RML2016.04c":
x_train, y_train, x_test, y_test = loadNpy(
cfgs.train_path,
cfgs.test_path,
cfgs.process_IQ
)
Dataset = RMLDataset
elif cfgs.dataset_name == "ACARS":
x_train, y_train, x_test, y_test = loadNpy_acars(
cfgs.train_path_x,
cfgs.train_path_y,
cfgs.test_path_x,
cfgs.test_path_y,
cfgs.process_IQ
)
Dataset = ACARSDataset
else :
print('ERROR: No Dataset {}!!!'.format(cfgs.model))
# BUG,BUG,BUG,FIXME
transform = transforms.Compose([
# transforms.ToTensor()
# waiting add
])
# Train data
    train_dataset = Dataset(x_train, y_train, transform=transform)   # RML2016.10a dataset
dataloader_train = DataLoader(train_dataset, \
batch_size=cfgs.batch_size, \
num_workers=cfgs.num_workers, \
shuffle=True, \
drop_last=False)
# Valid data
valid_dataset = Dataset(x_test, y_test, transform=transform)
dataloader_valid = DataLoader(valid_dataset, \
batch_size=cfgs.batch_size, \
num_workers=cfgs.num_workers, \
shuffle=True, \
drop_last=False)
# log
if not os.path.exists('./log'):
os.makedirs('./log')
log = open('./log/log.txt', 'a')
log.write('-'*30+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))+'-'*30+'\n')
log.write('model:{}\ndataset_name:{}\nnum_classes:{}\nnum_epoch:{}\nlearning_rate:{}\nsignal_len:{}\niter_smooth:{}\n'.format(
cfgs.model, cfgs.dataset_name, cfgs.num_classes, cfgs.num_epochs,
cfgs.lr, cfgs.signal_len, cfgs.iter_smooth))
# load checkpoint
if cfgs.resume:
model = torch.load(os.path.join('./checkpoints', cfgs.checkpoint_name))
# loss
    criterion = nn.CrossEntropyLoss().cuda()      # cross-entropy loss
# train
sum = 0
train_loss_sum = 0
train_top1_sum = 0
max_val_acc = 0
train_draw_acc = []
val_draw_acc = []
lr = cfgs.lr
for epoch in range(cfgs.num_epochs):
ep_start = time.time()
# adjust lr
# lr = half_lr(cfgs.lr, epoch)
lr = step_lr(epoch, lr)
# optimizer FIXME
# optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=0.0002)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=lr, betas=(0.9, 0.999), weight_decay=0.0002)
model.train()
top1_sum = 0
for i, (signal, label) in enumerate(dataloader_train):
input = Variable(signal).cuda().float()
target = Variable(label).cuda().long()
output = model(input) # inference
            loss = criterion(output, target)         # compute the cross-entropy loss
optimizer.zero_grad()
            loss.backward()                          # backpropagate
optimizer.step()
            top1 = accuracy(output.data, target.data, topk=(1,))  # top-1 classification accuracy
train_loss_sum += loss.data.cpu().numpy()
train_top1_sum += top1[0]
sum += 1
top1_sum += top1[0]
if (i+1) % cfgs.iter_smooth == 0:
print('Epoch [%d/%d], Iter [%d/%d], lr: %f, Loss: %.4f, top1: %.4f'
%(epoch+1, cfgs.num_epochs, i+1, len(train_dataset)//cfgs.batch_size,
lr, train_loss_sum/sum, train_top1_sum/sum))
log.write('Epoch [%d/%d], Iter [%d/%d], lr: %f, Loss: %.4f, top1: %.4f\n'
%(epoch+1, cfgs.num_epochs, i+1, len(train_dataset)//cfgs.batch_size,
lr, train_loss_sum/sum, train_top1_sum/sum))
sum = 0
train_loss_sum = 0
train_top1_sum = 0
train_draw_acc.append(top1_sum/len(dataloader_train))
epoch_time = (time.time() - ep_start) / 60.
if epoch % cfgs.valid_freq == 0 and epoch < cfgs.num_epochs:
# eval
val_time_start = time.time()
val_loss, val_top1 = eval(model, dataloader_valid, criterion)
val_draw_acc.append(val_top1)
val_time = (time.time() - val_time_start) / 60.
print('Epoch [%d/%d], Val_Loss: %.4f, Val_top1: %.4f, val_time: %.4f s, max_val_acc: %4f'
%(epoch+1, cfgs.num_epochs, val_loss, val_top1, val_time*60, max_val_acc))
print('epoch time: {}s'.format(epoch_time*60))
if val_top1[0].data > max_val_acc:
max_val_acc = val_top1[0].data
print('Taking snapshot...')
if not os.path.exists('./checkpoints'):
os.makedirs('./checkpoints')
torch.save(model, '{}/{}'.format('checkpoints', cfgs.checkpoint_name))
log.write('Epoch [%d/%d], Val_Loss: %.4f, Val_top1: %.4f, val_time: %.4f s, max_val_acc: %4f\n'
%(epoch+1, cfgs.num_epochs, val_loss, val_top1, val_time*60, max_val_acc))
draw_curve(train_draw_acc, val_draw_acc)
log.write('-'*40+"End of Train"+'-'*40+'\n')
log.close()
# validation
def eval(model, dataloader_valid, criterion):
sum = 0
val_loss_sum = 0
val_top1_sum = 0
model.eval()
for ims, label in dataloader_valid:
input_val = Variable(ims).cuda().float()
target_val = Variable(label).cuda()
output_val = model(input_val)
loss = criterion(output_val, target_val)
top1_val = accuracy(output_val.data, target_val.data, topk=(1,))
sum += 1
val_loss_sum += loss.data.cpu().numpy()
val_top1_sum += top1_val[0]
avg_loss = val_loss_sum / sum
avg_top1 = val_top1_sum / sum
return avg_loss, avg_top1
if __name__ == "__main__":
train()
|
jjRen-xd/PyOneDark_Qt_GUI
|
app/train.py
|
train.py
|
py
| 9,258 |
python
|
en
|
code
| 2 |
github-code
|
6
|
6178538714
|
"""
Implement class ``SkyDictionary``, useful for marginalizing over sky
location.
"""
import collections
import itertools
import numpy as np
import scipy.signal
from scipy.stats import qmc
from cogwheel import gw_utils
from cogwheel import utils
class SkyDictionary(utils.JSONMixin):
"""
Given a network of detectors, this class generates a set of
samples covering the sky location isotropically in Earth-fixed
coordinates (lat, lon).
The samples are assigned to bins based on the arrival-time delays
between detectors. This information is accessible as dictionaries
``delays2inds_map``, ``delays2genind_map``.
Antenna coefficients F+, Fx (psi=0) and detector time delays from
geocenter are computed and stored for all samples.
"""
def __init__(self, detector_names, *, f_sampling: int = 2**13,
nsky: int = 10**6, seed=0):
self.detector_names = tuple(detector_names)
self.nsky = nsky
self.f_sampling = f_sampling
self.seed = seed
self._rng = np.random.default_rng(seed)
self.sky_samples = self._create_sky_samples()
self.fplus_fcross_0 = gw_utils.get_fplus_fcross_0(self.detector_names,
**self.sky_samples)
geocenter_delays = gw_utils.get_geocenter_delays(
self.detector_names, **self.sky_samples)
self.geocenter_delay_first_det = geocenter_delays[0]
self.delays = geocenter_delays[1:] - geocenter_delays[0]
self.delays2inds_map = self._create_delays2inds_map()
discrete_delays = np.array(list(self.delays2inds_map))
self._min_delay = np.min(discrete_delays, axis=0)
self._max_delay = np.max(discrete_delays, axis=0)
# (n_det-1,) float array: _sky_prior := d(Omega) / (4pi d(delays))
self._sky_prior = np.zeros(self._max_delay - self._min_delay + 1)
for key, inds in self.delays2inds_map.items():
self._sky_prior[key] = (
self.f_sampling ** (len(self.detector_names) - 1)
* len(inds) / self.nsky)
# (n_det-1) array of generators that yield sky-indices
self.ind_generators = np.full(self._max_delay - self._min_delay + 1,
iter(()))
for key, inds in self.delays2inds_map.items():
self.ind_generators[key] = itertools.cycle(inds)
def resample_timeseries(self, timeseries, times, axis=-1,
window=('tukey', .1)):
"""
Resample a timeseries to match the SkyDict's sampling frequency.
The sampling frequencies of the SkyDict and ``timeseries`` must
be multiples (or ``ValueError`` is raised).
Parameters
----------
timeseries: array_like
The data to resample.
times: array_like
Equally-spaced sample positions associated with the signal
data in `timeseries`.
axis: int
The axis of timeseries that is resampled. Default is -1.
window: string, float, tuple or None
Time domain window to apply to the timeseries. If not None,
it is passed to ``scipy.signal.get_window``, see its
documentation. By default a Tukey window with alpha=0.1 is
applied, to mitigate ringing near the edges
(scipy.signal.resample uses FFT methods that assume that the
signal is periodic).
Return
------
resampled_timeseries, resampled_times
A tuple containing the resampled array and the corresponding
resampled positions.
"""
if window:
shape = [1 for _ in timeseries.shape]
shape[axis] = timeseries.shape[axis]
timeseries = timeseries * scipy.signal.get_window(
window, shape[axis]).reshape(shape)
fs_ratio = self.f_sampling * (times[1] - times[0])
if fs_ratio != 1:
timeseries, times = scipy.signal.resample(
timeseries, int(len(times) * fs_ratio), times, axis=axis)
if not np.isclose(1 / self.f_sampling, times[1] - times[0]):
raise ValueError(
'`times` is incommensurate with `f_sampling`.')
return timeseries, times
def get_sky_inds_and_prior(self, delays):
"""
Parameters
----------
delays: int array of shape (n_det-1, n_samples)
Time-of-arrival delays in units of 1 / self.f_sampling
Return
------
sky_inds: tuple of ints of length n_physical
Indices of self.sky_samples with the correct time delays.
sky_prior: float array of length n_physical
Prior probability density for the time-delays, in units of
s^-(n_det-1).
physical_mask: boolean array of length n_samples
Some choices of time of arrival at detectors may not
correspond to any physical sky location, these are flagged
``False`` in this array. Unphysical samples are discarded.
"""
# First mask: are individual delays plausible? This is necessary
# in order to interpret the delays as indices to self._sky_prior
physical_mask = np.all((delays.T >= self._min_delay)
& (delays.T <= self._max_delay), axis=1)
# Submask: for the delays that survive the first mask, are there
# any sky samples with the correct delays at all detector pairs?
sky_prior = self._sky_prior[tuple(delays[:, physical_mask])]
submask = sky_prior > 0
physical_mask[physical_mask] *= submask
sky_prior = sky_prior[submask]
# Generate sky samples for the physical delays
generators = self.ind_generators[tuple(delays[:, physical_mask])]
sky_inds = np.fromiter(map(next, generators), int)
return sky_inds, sky_prior, physical_mask
def _create_sky_samples(self):
"""
Return a dictionary of samples in terms of 'lat' and 'lon' drawn
isotropically by means of a Quasi Monte Carlo (Halton) sequence.
"""
u_lat, u_lon = qmc.Halton(2, seed=self._rng).random(self.nsky).T
samples = {}
samples['lat'] = np.arcsin(2*u_lat - 1)
samples['lon'] = 2 * np.pi * u_lon
return samples
def _create_delays2inds_map(self):
"""
Return a dictionary mapping arrival time delays to sky-sample
indices.
Its keys are tuples of ints of length (n_det - 1), with time
delays to the first detector in units of 1/self.f_sampling.
Its values are list of indices to ``self.sky_samples`` of
samples that have the corresponding (discretized) time delays.
"""
# (ndet-1, nsky)
delays_keys = zip(*np.rint(self.delays * self.f_sampling).astype(int))
delays2inds_map = collections.defaultdict(list)
for i_sample, delays_key in enumerate(delays_keys):
delays2inds_map[delays_key].append(i_sample)
return delays2inds_map
|
2lambda123/cogwheel1
|
cogwheel/likelihood/marginalization/skydict.py
|
skydict.py
|
py
| 7,143 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20823393672
|
from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import MySQLConnector
import re, md5
app = Flask(__name__)
app.secret_key = "MySessionSecretKey1"
mysql = MySQLConnector( app, "the_wall")
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
@app.route( "/" )
def lr():
    # session['user_id'] = False
    if session.get('user_id'):  # .get() avoids a KeyError when no user is logged in
        return redirect( "/wall" )
return render_template( "index.html" )
# VIEW MESSAGES AND COMMENTS
@app.route( "/wall" )
def wall():
    if not session.get('user_id'):
return render_template( "index.html" )
query = "SELECT first_name, id FROM users WHERE id = :id"
q_p = { 'id': session['user_id'] }
user = {}
user = mysql.query_db( query, q_p )[0]
query = "SELECT first_name, last_name, message, DATE_FORMAT(messages.created_at, '%M %d, %Y') AS message_date, messages.id, user_id FROM messages JOIN users ON users.id = messages.user_id ORDER BY messages.created_at DESC"
messages = mysql.query_db( query )
query = "SELECT users.first_name, users.last_name, comments.message_id, comment, DATE_FORMAT(comments.created_at, '%M %d, %Y') AS comment_date FROM comments JOIN users ON comments.user_id = users.id ORDER BY comments.created_at ASC"
comments = mysql.query_db( query )
return render_template( "wall.html", user = user, messages = messages, comments = comments )
# POST A MESSAGE TO START A DISCUSSION
@app.route( "/post_message", methods = ['POST'] )
def post_message():
query = "INSERT INTO messages( message, user_id, created_at, updated_at ) VALUES( :message, :user_id, NOW(), NOW() )"
q_p = {
'message': request.form['message'],
'user_id': session['user_id']
}
mysql.query_db( query, q_p )
flash( "Your message has been posted" )
return redirect( "/wall" )
# POST A COMMENT IN RESPONSE TO A MESSAGE
@app.route( "/post_comment/<message_id>", methods = ['POST'])
def post_comment( message_id ):
query = "INSERT INTO comments( comment, user_id, message_id, created_at, updated_at ) VALUES( :comment, :user_id,:message_id, NOW(), NOW() )"
q_p = {
'comment': request.form['comment'],
'user_id': session['user_id'],
'message_id': message_id
}
mysql.query_db( query, q_p )
return redirect( "/wall" )
# DELETE MESSAGE
@app.route( "/delete_message" )
def delete_message():
flash ("delete command received!")
return redirect( "/wall" )
# LOGIN
@app.route( "/authorization", methods = ["POST"] )
def authorization():
# EMAIL VALIDATION
if not email_regex.match( request.form['email'] ):
flash( "Invalid email" )
else:
query = "SELECT * FROM users WHERE users.email = :email LIMIT 1"
q_p = { 'email': request.form['email'] }
user = mysql.query_db( query, q_p )
if not user:
flash( "Email " + request.form['email'] + " is not registered with any user" )
else:
pw_h = md5.new( request.form['pw'] ).hexdigest()
if user[0]['password'] != pw_h: # PASSWORD VALIDATION
flash( "Wrong password" )
else: # SUCCESSFUL LOGIN
session['user_id']= user[0]['id']
return redirect( "/wall" )
return redirect( "/" )
# SIGN UP
@app.route( "/signup", methods = ["POST"] )
def signup():
error = False
# FORM INPUT VALIDATIONS
# VALIDATE FIRST NAME
if len( request.form['first_name'] ) < 2: # NAME LENGTH
error = True
flash( "First name is too short" )
elif not str.isalpha( str( request.form['first_name'] ) ): # NAME CONVENTIONS
error = True
flash( "Invalid characters in the first name" )
# VALIDATE LAST NAME
if len( request.form['last_name'] ) < 2: # NAME LENGTH
error = True
flash( "Last name is too short" )
elif not str.isalpha( str( request.form['last_name'] ) ): # NAME CONVENTIONS
error = True
flash( "Invalid characters in the last name" )
# VALIDATE EMAIL
if not email_regex.match( request.form['email'] ): # EMAIL CONVENTIONS
error = True
flash( "Invalid email" )
else: # CHECK IF EMAIL IS ALREADY IN USE
# email = request.form['email']
query = "SELECT email FROM users WHERE users.email = :email LIMIT 1"
q_p = { 'email': request.form['email'] }
existing_email = mysql.query_db( query, q_p )
if existing_email:
error = True
flash( "Email " + request.form['email'] + " is already in use" )
# VALIDATE PASSWORD CONVENTIONS AND REPEAT
if len( str( request.form['pw'] ) ) < 8:
error = True
flash( "Password should be at least 8 characters long")
elif request.form['pw'] != request.form['rpt_pw']:
error = True
flash( "Repeat password does not match")
if error:
return redirect( "/" )
else: # ADD NEW USER INTO THE DATABASE
query = "INSERT INTO users( first_name, last_name, email, password, created_at, updated_at ) VALUES( :first_name, :last_name, :email, :pw_h, NOW(), NOW() )"
q_p = {
'first_name': request.form['first_name'],
'last_name': request.form['last_name'],
'email': request.form['email'],
'pw_h': md5.new( request.form['pw'] ).hexdigest()
}
mysql.query_db( query, q_p )
flash( "Your user account has been saved" )
# FETCH THE NEW USER ID FROM THE DATABASE FOR SESSION LOGIN
query = "SELECT id FROM users WHERE email = :email LIMIT 1"
q_p = { 'email': request.form['email'] }
session['user_id']= mysql.query_db( query, q_p )[0]['id']
return redirect( "/wall" )
@app.route( "/logout", methods = ["POST"])
def logout():
session['user_id'] = False
return redirect( "/" )
app.run( debug = True )
|
ruslanvs/The_Wall
|
server.py
|
server.py
|
py
| 5,933 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43370134393
|
""" Tests for :module:`statics.markdown`."""
import unittest
__all__ = ["TestMarkdownItem"]
class TestMarkdownItem(unittest.TestCase):
def createFile(self, content):
import tempfile
f = tempfile.NamedTemporaryFile()
f.write(content)
f.flush()
return f
def test_it(self):
from statics.markdown import MarkdownItem
f = self.createFile("some markdown document.")
item = MarkdownItem("name", f.name)
self.assertEqual(item.name, "name")
self.assertEqual(item.metadata(), {})
self.assertEqual(item.content(), "<p>some markdown document.</p>")
    def test_with_metadata(self):
from statics.markdown import MarkdownItem
f = self.createFile("Title: A Title\nList: Value1\n\tValue2\n\ncontent")
item = MarkdownItem("name", f.name)
self.assertEqual(item.name, "name")
self.assertEqual(item.metadata(), {"title": "A Title",
"list": ["Value1", "Value2"]})
self.assertEqual(item.content(), "<p>content</p>")
|
andreypopp/statics
|
statics/tests/test_markdown.py
|
test_markdown.py
|
py
| 1,089 |
python
|
en
|
code
| 2 |
github-code
|
6
|
44844122583
|
import torch
import numpy as np
class KBinsDiscretizer:
# simplified and modified version of KBinsDiscretizer from sklearn, see:
# https://github.com/scikit-learn/scikit-learn/blob/7e1e6d09b/sklearn/preprocessing/_discretization.py#L21
def __init__(self, dataset, num_bins=100, strategy="uniform"):
self.strategy = strategy
self.n_bins = num_bins
self.feature_dim = dataset.shape[-1]
# compute edges for binning
self.bin_edges = self.__find_bin_edges(dataset) # [feature_dim, num_bins]
self.bin_centers = (self.bin_edges[:, 1:] + self.bin_edges[:, :-1]) * 0.5
# for beam search, to be in the same device (for speed)
self.bin_centers_torch = torch.from_numpy(self.bin_centers)
def __find_bin_edges(self, X):
if self.strategy == "uniform":
mins, maxs = X.min(axis=0), X.max(axis=0)
bin_edges = np.linspace(mins, maxs, self.n_bins + 1).T
elif self.strategy == "quantile":
quantiles = np.linspace(0, 100, self.n_bins + 1)
bin_edges = np.percentile(X, quantiles, axis=0).T
else:
raise RuntimeError("Unknown strategy, should be uniform or quatile.")
return bin_edges
def encode(self, X, subslice=None):
if X.ndim == 1:
X = X[None]
if subslice is None:
bin_edges = self.bin_edges
else:
start, end = subslice
bin_edges = self.bin_edges[start:end]
# See documentation of numpy.isclose for an explanation of ``rtol`` and ``atol``.
rtol = 1.0e-5
atol = 1.0e-8
        Xt = np.zeros_like(X, dtype=np.int64)  # integer bin indices
for jj in range(X.shape[1]):
# Values which are close to a bin edge are susceptible to numeric
# instability. Add eps to X so these values are binned correctly
# with respect to their decimal truncation.
eps = atol + rtol * np.abs(X[:, jj])
Xt[:, jj] = np.digitize(X[:, jj] + eps, bin_edges[jj][1:])
np.clip(Xt, 0, self.n_bins - 1, out=Xt)
return Xt
def decode(self, Xt, subslice=None):
if Xt.ndim == 1:
Xt = Xt[None]
if subslice is None:
bin_centers = self.bin_centers
else:
start, end = subslice
bin_centers = self.bin_centers[start:end]
X = np.zeros_like(Xt, dtype=np.float64)
for jj in range(Xt.shape[1]):
X[:, jj] = bin_centers[jj, np.int_(Xt[:, jj])]
return X
def expectation(self, probs, subslice=None):
if probs.ndim == 1:
probs = probs[None]
# probs: [batch_size, num_dims, num_bins]
# bins: [1, num_dims, num_bins]
if torch.is_tensor(probs):
bin_centers = self.bin_centers_torch.unsqueeze(0)
        else:
            bin_centers = np.expand_dims(self.bin_centers, 0)
if subslice is not None:
start, end = subslice
bin_centers = bin_centers[:, start:end]
assert probs.shape[1:] == bin_centers.shape[1:]
# expectation: [batch_size, num_dims]
exp = (probs * bin_centers).sum(axis=-1)
return exp
def to(self, device):
self.bin_centers_torch = self.bin_centers_torch.to(device)
def eval(self):
return self
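# A minimal usage sketch of the discretizer above; the array shape, bin count and
# seed are arbitrary example values chosen here for illustration only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.normal(size=(1000, 3))              # 1000 samples, 3 features
    disc = KBinsDiscretizer(data, num_bins=50, strategy="uniform")
    tokens = disc.encode(data[:5])                 # integer bin indices, shape (5, 3)
    approx = disc.decode(tokens)                   # corresponding bin centers, shape (5, 3)
    print(tokens)
    print(np.abs(approx - data[:5]).max())         # quantization error, bounded by the bin width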
|
Howuhh/faster-trajectory-transformer
|
trajectory/utils/discretization.py
|
discretization.py
|
py
| 3,344 |
python
|
en
|
code
| 90 |
github-code
|
6
|
70994868668
|
from django import template
register = template.Library()
#background: -webkit-gradient(linear, 0% 0%, 0% 100%, from({{ COLOR_H1_BACK_STOP }}), to({{ COLOR_H1_BACK_START }}));
#background: -webkit-linear-gradient(top, {{ COLOR_H1_BACK_START }}, {{ COLOR_H1_BACK_STOP }});
#background: -moz-linear-gradient(top, {{ COLOR_H1_BACK_START }}, {{ COLOR_H1_BACK_STOP }});
#background: -ms-linear-gradient(top, {{ COLOR_H1_BACK_START }}, {{ COLOR_H1_BACK_STOP }});
#background: -o-linear-gradient(top, {{ COLOR_H1_BACK_START }}, {{ COLOR_H1_BACK_STOP }});
@register.simple_tag
def columned(num):
S='-moz-column-count:'+str(num)+';\n'
S+='-webkit-column-count:'+str(num)+';\n'
S+='column-count:'+str(num)+';'
return S
#def background_gradient(style,start,stop):
# gradient='linear-gradient('+style+','+start+','+stop+')'
@register.simple_tag
def background_gradient(style,*args):
colors=",".join(args);
gradient='linear-gradient('+style+','+colors+')'
S='background: '+gradient+';\n'
    # inverted with respect to the others; this handles style=top, change it if something else is needed
#S+='background: -webkit-gradient(linear, 0% 0%, 0% 100%, from('+stop+'), to('+start+'));'
for i in ["webkit","moz","ms","o"]:
S+='background: -'+i+'-'+gradient+';\n'
return S
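# Usage sketch, assuming the tag library is loaded under this module's name and the
# colour values are placeholders chosen for illustration:
#   {% load css_tags %}
#   <h1 style="{% background_gradient 'top' '#ffffff' '#cccccc' %}">...</h1>
# which expands to the plain `background: linear-gradient(...)` rule plus the
# -webkit/-moz/-ms/-o prefixed variants built above.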
@register.simple_tag
def border_radius(radius):
S='border-radius: '+radius+';'
for i in ["webkit","moz"]:
S+='\n-'+i+'-border-radius: '+radius+';'
return S
@register.simple_tag
def box_shadow(shadow):
S='box-shadow: '+shadow+';'
for i in ["webkit","moz"]:
S+='\n-'+i+'-box-shadow: '+shadow+';'
return S
@register.simple_tag
def border_radius_pos(pos,radius):
S=''
if pos in ["top","left","top-left"]:
S+='border-top-left-radius: '+radius+';\n'
S+='-moz-border-radius-topleft: '+radius+';\n'
S+='-webkit-bordertop-left-radius: '+radius+';\n'
if pos in ["top","right","top-right"]:
S+='border-top-right-radius: '+radius+';\n'
S+='-moz-border-radius-topright: '+radius+';\n'
S+='-webkit-bordertop-right-radius: '+radius+';\n'
if pos in ["bottom","left","bottom-left"]:
S+='border-bottom-left-radius: '+radius+';\n'
S+='-moz-border-radius-bottomleft: '+radius+';\n'
S+='-webkit-borderbottom-left-radius: '+radius+';\n'
if pos in ["bottom","right","bottom-right"]:
S+='border-bottom-right-radius: '+radius+';\n'
S+='-moz-border-radius-bottomright: '+radius+';\n'
S+='-webkit-borderbottom-right-radius: '+radius+';\n'
return S
@register.simple_tag
def text_rotation(degree):
S='transform: rotate('+degree+'deg);'
for i in ["webkit","ms"]:
S+='\n-'+i+'-transform: rotate('+degree+'deg);'
return S
@register.simple_tag
def icon_file_manager_levels(levels,step):
levels=int(levels)
step=float(step)
S=""
    S+=", ".join(map(lambda x: ".iconlevel"+str(x),range(0,levels)))
S+=" {\n"
S+="vertical-align: bottom;\n"
S+="font-size: 1.1em;\n"
S+="}\n\n"
for n in range(1,levels):
        S+=".iconlevel"+str(n)+" {\n"
S+="padding-left: %2.2fem;\n" % (n*step)
S+="}\n\n"
return S
|
chiara-paci/santaclara-css
|
santaclara_css/templatetags/css_tags.py
|
css_tags.py
|
py
| 3,207 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27924886180
|
# Code with a regex that assigns 0/1 depending on how the epidemic situation is developing
import re
import json
import os
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'Covid_dict.json')
countgooddyn = 0
countbaddyn = 0
sample_json = ''
with open("data1.json", "r", encoding="utf-8") as file:
sample_json+=file.read()
glossary = json.loads(sample_json)
print(len(glossary))
for date in glossary:
if len(glossary[date][0]) == 1:
countries = glossary[date][0]
text = glossary[date][1]
if re.findall(r'[Мм]иновал|[Оо]слабл[а-я]+|[Сс]нят[а-я]+|[Уу]пад[а-я]+|[Сс]ниж[а-я]+|[Вв]ыходит|[Сс]мягч[а-я]+|[Пп]ад[а-я]*|[Зз]амедл[а-я]+|[Уу]был[а-я]+|[Сс]нима[а-я]+', text):
for country in countries:
countries[country]["dyn"] = 1
countgooddyn += 1
if re.findall(r'[Пп]ик[а]|[Вв]спышк[а-я]|[Пп]ревы[а-я]+|[Уу]велич[а-я]+|[А-Яа-я]+?рекорд[а-я]+|[Уу]худш[а-я]+|[Р-р][ао]ст[а-я]+|[Зз]акры[а-я]+|[Вв]в[ео]д[а-я]т([а-я]+)?|[Мм]аксим[а-я]+|[Вв]ы?рост[а-я]+|[Пп]рирост[а-я]|[Сс]кач[а-я]+|более|снова|[Уу]сил[а-я]+|выросло', text):
for country in countries:
countries[country]["dyn"] = 0
countbaddyn += 1
print(glossary[date][0])
with open ('Country_and_coord_and_dynFULL.json', 'w', encoding="utf-8") as file:
    json.dump(glossary, file, ensure_ascii=False)
|
stefikh/map_COVID
|
code/4_dynamic_good_or_bad.py
|
4_dynamic_good_or_bad.py
|
py
| 1,709 |
python
|
ru
|
code
| 1 |
github-code
|
6
|
25170385254
|
# Django imports
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
# Folder imports
from .utils.sky import quick_flight_search
from .models import *
from apps.authentication.models import Profile
from apps.trips.models import *
# Other imports
from datetime import datetime, date, timedelta
from dotenv import load_dotenv
import os
# URL: flights/partials/add_flight
# HTTP Method: GET
# Description: Intermediate screen to select way to add flight
def add_flight(request):
flight_direction = request.GET.get('flight_direction')
trip_id = request.GET.get('trip_id')
context = {'flight_direction': flight_direction, 'trip_id': trip_id, 'popup_title': f'Add an {flight_direction} flight'}
return render(request, 'partials/add_flight.html', context)
# URL: flights/partials/enter_flight
# HTTP Method: GET
# Description: Facilitates the manual entry of flight information
def enter_flight(request):
trip_id = request.GET.get('trip_id')
flight_direction = request.GET.get('flight_direction')
trip = get_object_or_404(Trip, id=trip_id)
if flight_direction == "outbound":
earliest_destination = trip.destination_set.order_by('order').first()
departure_airports = Airport.objects.all()
arrival_interrailairports = InterrailAirport.objects.filter(city=earliest_destination.city)
# Get arrival airports as Airport objects
arrival_airports = []
for airport in arrival_interrailairports:
arrival_airports.append(airport.airport)
        # Take 1 day off the minimum date for outbound flights to allow for long journeys
min_date = str(trip.start_date - timedelta(days=1))
else:
last_destination = trip.destination_set.order_by('order').last()
departure_interrailairports = InterrailAirport.objects.filter(city=last_destination.city)
# Get departure airports as Airport objects
departure_airports = []
for airport in departure_interrailairports:
departure_airports.append(airport.airport)
arrival_airports = Airport.objects.all()
min_date = str(last_destination.end_date)
context = {'popup_title': 'Enter Flight', 'departure_airports': departure_airports, 'arrival_airports': arrival_airports, 'flight_direction': flight_direction,
'min_date': min_date}
return render(request, 'partials/enter_flight.html', context)
# URL: flight/partials/search_flight
# HTTP Method: GET
# Description: Allows search to be created for given flight criteria
def search_flight(request):
# Check API key can be found
load_dotenv()
skyscanner_key = os.getenv('skyscanner_api_key')
if skyscanner_key:
key_found = True
else:
key_found = False
# Get trip and flight direction from get request
trip = get_object_or_404(Trip, id=request.GET.get('trip_id'))
flight_direction = request.GET.get('flight_direction')
# If outbound flight, find the earliest destination's start date and find a flight to that destination on that date
if flight_direction == "outbound":
earliest_destination = trip.destination_set.order_by('order').first()
departure_airports = Airport.objects.filter(country = Profile.objects.get(user=request.user).nationality).order_by('name')
arrival_interrailairports = InterrailAirport.objects.filter(city=earliest_destination.city)
# Get arrival airports as Airport objects
arrival_airports = []
for airport in arrival_interrailairports:
arrival_airports.append(airport.airport)
# If inbound flight, find the last destination's end date and find a flight from that destination on that date
else:
last_destination = trip.destination_set.order_by('order').last()
departure_interrailairports = InterrailAirport.objects.filter(city=last_destination.city)
# Get departure airports as Airport objects
departure_airports = []
for airport in departure_interrailairports:
departure_airports.append(airport.airport)
arrival_airports = Airport.objects.filter(country = Profile.objects.get(user=request.user).nationality).order_by('name')
context = {'popup_title': 'Flight Search', 'departure_airports': departure_airports, 'arrival_airports': arrival_airports, 'trip_id': trip.id, 'flight_direction': flight_direction,
'key_found': key_found}
return render(request, 'partials/search_flight.html', context)
# URL: flight/partials/search_results
# HTTP Method: GET
# Description: Displays flight search criteria
def search_results(request):
# Get trip id, direction and direct flights flag from parameters
trip = get_object_or_404(Trip, id=request.GET.get('trip_id'))
flight_direction = request.GET.get('flight_direction')
if request.GET.get('direct_flights') == 'on':
direct = True
else:
direct = False
# Get airport objects from IDs
departure_airport = get_object_or_404(Airport, id = request.GET.get('departure_airport'))
destination_airport = get_object_or_404(Airport, id = request.GET.get('arrival_airport'))
# If outbound flight configure dates as trip start date
if flight_direction == "outbound":
earliest_destination = trip.destination_set.order_by('order').first()
session_token, direct_flights, connecting_flights = quick_flight_search("GBP", departure_airport.iata_code, destination_airport.iata_code, earliest_destination.start_date.year, earliest_destination.start_date.month, earliest_destination.start_date.day, direct)
# If inbound flight configure dates as trip end date
else:
last_destination = trip.destination_set.order_by('order').last()
session_token, direct_flights, connecting_flights = quick_flight_search("GBP", departure_airport.iata_code, destination_airport.iata_code, last_destination.start_date.year, last_destination.start_date.month, last_destination.start_date.day, direct)
context = {'direct_flights': direct_flights, 'connecting_flights': connecting_flights, 'flight_direction': flight_direction, 'departure_airport': departure_airport, 'destination_airport': destination_airport,
'popup_title': f'{departure_airport} - {destination_airport}', 'trip_id': trip.id}
return render(request, 'partials/search_results.html', context)
|
sc19jwh/COMP3931
|
apps/flights/views.py
|
views.py
|
py
| 6,392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73831992187
|
import os
os.environ['OPENCV_IO_MAX_IMAGE_PIXELS'] = pow(2, 40).__str__()
import sys
import copy
from pathlib import Path
from collections import Counter
import numpy as np
import pandas as pd
import cv2
import bioformats.formatreader
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.utilities.zmq
import cellprofiler_core.utilities.java
#from cellprofiler_core.setting.subscriber import LabelSubscriber
#from cellprofiler_core.setting.range import IntegerRange
def _clahe(image):
#-----Reading the image-----------------------------------------------------
if not isinstance(image, np.ndarray):
image = cv2.imread(image, 1)
#-----Converting image to LAB Color model-----------------------------------
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
#-----Splitting the LAB image to different channels-------------------------
l, a, b = cv2.split(lab)
#-----Applying CLAHE to L-channel-------------------------------------------
clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8,8))
cl = clahe.apply(l)
#-----Merge the CLAHE enhanced L-channel with the a and b channel-----------
limg = cv2.merge((cl,a,b))
#-----Converting image from LAB Color model to RGB model--------------------
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
#_____END_____#
#return cl
return final
def clahe(image, iter=5, return_gray=True):
"""
Enhance local contrast with CLAHE algorithm
Parameters
--------------
image: fn, np.ndarray
image file name or np.ndarray representing image
iter: int
how many times to enhance
"""
while iter:
image = _clahe(image)
iter -= 1
if return_gray:
image = np.dot(image[..., :3], [0.2989, 0.5870, 0.1140])
image = image.astype(int)
return image
def blur_detect(image, channel='g', chunk_size=3, method='laplacian', top_svd=30,
outfile=None, show_in_rgb=None, show_in_grey=None):
"""
Calculte blur values with stepwise slide chunks for RGB image
Parameters
------------------------------
image: np.ndarray, image
image matrix with three channels
channel: {'r', 'g', 'b'}, default g
which channel to be used
chunk_size: int
pixel number for each chunk
method: {'laplacian', 'svd'}, default laplacian
which method to calculate blur value
top_svd: int
top N svd used for svd method
outfile: str
write the blur matrix into file
show_in_rgb: str
display the blur value in rgb image
show_in_grey: str
display the blur value in grey image
"""
# background was detected as blur region
# I need to segmentate tissue region firstly
# here I used color masking for segmentation on green channel
b, g, r = cv2.split(image)
# detect based on green channel
light = 10
dark = 255
if channel == 'r':
channel = r
elif channel == 'g':
channel = g
elif channel == 'b':
channel = b
mask = cv2.inRange(channel, light, dark)
kernel = np.ones((10, 10), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
blur_image = np.zeros(shape=image.shape, dtype=np.uint8)
for (x, y), value in np.ndenumerate(mask):
if value == 0:
continue
chunk = image[x:x+chunk_size, y:y+chunk_size]
# small value indicate blur region
if method == 'laplacian':
blur_value = cv2.Laplacian(chunk, cv2.CV_64F).var()
elif method == 'svd':
            u, sigma, vt = np.linalg.svd(chunk)
blur_value = sum(sigma[:top_svd]) / sum(sigma)
blur_image[x, y] = blur_value
if outfile:
np.savetxt(outfile, blur_image, fmt='%d')
if show_in_rgb:
blur_rgb_image = cv2.applyColorMap(blur_image, cv2.COLORMAP_JET)
cv2.imwrite(show_in_rgb, blur_rgb_image)
if show_in_grey:
black = np.zeros(shape=image.shape, dtype=np.uint8)
blur_mask = np.where(blur_image < 30, mask, black)
cv2.imwrite(show_in_grey, blur_mask)
return blur_image
def _pycellprofilter(image, name='DNA', cpi=None, saved_object='IdentifySecondaryObjects'):
print(cellprofiler_core.preferences.__is_headless)
# load pipeline from cpi file
print('load pipeline from {}'.format(cpi))
pipeline = cellprofiler_core.pipeline.Pipeline()
pipeline.load(cpi)
# get modules list
modules = pipeline.modules()
# setup image_set
image_set = cellprofiler_core.image.ImageSet(0, {'name':name}, name)
if isinstance(image, np.ndarray) and len(image.shape) == 2:
x = image
else:
x = cv2.imread(str(image), 0)
x[x > 230] = 230
image_x = cellprofiler_core.image.Image(x, path_name=image.parent, file_name=image.name)
image_set.add(name, image_x)
# init workspace
object_set = cellprofiler_core.object.ObjectSet()
measurements = cellprofiler_core.measurement.Measurements()
workspace = cellprofiler_core.workspace.Workspace(
pipeline,
modules,
image_set,
object_set,
measurements,
[image_set]
)
for module in modules:
sys.stdout.write(f'... {module.module_name}\n')
module.run(workspace)
objects = workspace.object_set.get_objects(saved_object)
try:
celloutlines = workspace.image_set.get_image('CellOutlines')
except:
sys.stderr.write('cell outlines not get\n')
celloutlines = None
return objects, celloutlines
def pycellprofiler(image, save_prefix=None, return_image=True,
cpi='./default.cppipe',
image_name='DNA',
saved_object='IdentifySecondaryObjects',
outdir='./outdir', tmpdir='./tmpdir', ):
outdir, tmpdir = Path(outdir), Path(tmpdir)
if not outdir.exists():
outdir.mkdir(parents=True, exist_ok=True)
objects = None
try:
#cellprofiler_core.preferences.set_headless()
cellprofiler_core.preferences.set_temporary_directory(outdir)
cellprofiler_core.preferences.set_default_output_directory(outdir)
cellprofiler_core.utilities.java.start_java()
sys.stdout.write('Starting cellprofiler identify ...\n')
objects, celloutlines = _pycellprofilter(
image,
name=image_name,
cpi=cpi,
saved_object=saved_object
)
sys.stdout.write('Cell objects and outlines generated\n')
except Exception as err:
sys.stderr.write('***Error: {}\n'.format(err))
finally:
cellprofiler_core.utilities.zmq.join_to_the_boundary()
bioformats.formatreader.clear_image_reader_cache()
cellprofiler_core.utilities.java.stop_java()
if objects is None:
return
sys.stdout.write('Saving labled cells ...\n')
mask = objects.segmented
b, g, r = cv2.split(celloutlines.pixel_data)
if save_prefix is not None:
mask_file = str(outdir / f'{save_prefix}_mask.txt')
np.savetxt(mask_file, mask, fmt='%d')
boundary_file = str(outdir / f'{save_prefix}_boundary.txt')
np.savetxt(boundary_file, b, fmt='%d')
if return_image:
image = img_outliner(image, boundary=b)
return mask, b, image
else:
return mask, b
def boundary_detect(mask, image, save_prefix='cell'):
import skimage.segmentation
image = cv2.imread(str(image))
outlines = skimage.segmentation.mark_boundaries(
image,
mask,
color=(1, 0, 0),
mode='inner',
)
b, g, r = cv2.split(outlines)
    if save_prefix:
        np.savetxt(f'{save_prefix}.boundary.txt', b, fmt='%d')
    image = img_outliner(image, boundary=b,
            save=f'{save_prefix}.celloutlines.png'
            )
return b
def img_outliner(image, boundary, save='celloutlines.png'):
if isinstance(image, str):
image = cv2.imread(image)
mask = np.isin(boundary, [1])
image[mask] = (255, 0, 0)
if save:
cv2.imwrite(save, image)
return image
def getfootprint(struc, a, b=None):
from skimage.morphology import (
square,
rectangle,
diamond,
disk,
octagon,
star)
struc_lib = {
'square': square,
'rectangle': rectangle,
'diamond': diamond,
'disk': disk,
'octagon': octagon,
'star': star
}
morph = struc_lib[struc]
if struc in ['rectangle', 'octagon']:
if b is None:
sys.stderr.write('two args required\n')
sys.exit()
return morph(a, b)
else:
if b is not None:
sys.stderr.write('only one arg required\n')
sys.exit()
return morph(a)
class Stoarr:
def __init__(self, matrix):
if isinstance(matrix, str):
if matrix.endswith('.txt'):
matrix = np.loadtxt(matrix)
elif matrix.endswith(('.tif', '.png')):
matrix = cv2.imread(matrix, cv2.IMREAD_UNCHANGED)
self.matrix = matrix.astype(int)
def to_triplet(self, name='mask'):
import scipy.sparse
mtx= scipy.sparse.csc_matrix(self.matrix)
mtx = mtx.tocoo()
tmp = []
for x, y, mask in zip(mtx.row, mtx.col, mtx.data):
tmp.append([x, y, int(mask)])
triplet = pd.DataFrame(tmp, columns=['x', 'y', name])
return triplet
def binning(self, bin_size):
sys.stdout.write('binning ... ')
sys.stdout.flush()
triplet = self.to_triplet()
triplet['xbin'] = (triplet.x / bin_size).astype(int) * bin_size
triplet['ybin'] = (triplet.y / bin_size).astype(int) * bin_size
triplet['bin'] = triplet.xbin.astype(str) + '_' + triplet.ybin.astype(str)
index = [(-i, x) for i, x in enumerate(triplet['bin'].unique())]
index = pd.DataFrame(index, columns=['N', 'bin'])
triplet = triplet.merge(index, how='left', on='bin')
matrix = np.zeros(shape=self.matrix.shape, dtype=int)
matrix[triplet['x'], triplet['y']] = triplet['N']
sys.stdout.write('done\n')
return Stoarr(matrix)
def to_binary(self):
obj = copy.deepcopy(self)
mask = np.isin(obj.matrix, [0], invert=True)
obj.matrix[mask] = 1
return obj
def subtract(self, other):
sys.stdout.write('subtracting ... ')
sys.stdout.flush()
obj = copy.deepcopy(self)
obj = obj.to_binary()
other = other.to_binary()
obj.matrix = obj.matrix - other.matrix
sys.stdout.write('done\n')
return obj
def intersection(self, other, label_area_cutoff=0.3):
"""intersection of label mask and binary mask
* mask: binary matrix
* label_area_cutoff: labels with greater area will be dropped
"""
sys.stdout.write('intersection ... ')
sys.stdout.flush()
obj = copy.deepcopy(self)
if isinstance(other, Stoarr):
other = other.to_binary()
values = np.unique(obj.matrix)
if len(values) == 2:
mask = cv2.bitwise_and(obj.matrix, other.matrix)
mask = np.invert(mask.astype(bool))
else:
binary = self.to_binary()
mask = cv2.bitwise_and(binary.matrix, other.matrix)
mask = np.invert(mask.astype(bool))
orig_counter = Counter(obj.matrix.flatten())
filter_part = obj.matrix[mask]
filter_counter = Counter(filter_part.flatten())
filter_labels = []
for label, pixels in filter_counter.items():
if label == 0:
continue
ratio = pixels / orig_counter[label]
if ratio < label_area_cutoff:
continue
filter_labels.append(label)
filter_labels = list(set(filter_labels))
mask = np.isin(obj.matrix, filter_labels)
obj.matrix[mask] = 0
sys.stdout.write('{} labels removed\n'.format(len(filter_labels)))
return obj
def relabel(self, label_map=None):
if label_map is None:
unique_labels, labels = np.unique(self.matrix, return_inverse=True)
matrix = labels.reshape(self.matrix.shape)
#obj = Mask(matrix)
#obj.unique_labels = unique_labels
#obj.labels = labels
return Stoarr(matrix)
else:
triplet = self.to_triplet()
triplet = triplet.merge(label_map, how='left',
left_on='mask', right_index=True)
matrix = np.zeros(shape=self.matrix.shape, dtype=int)
matrix[triplet['x'], triplet['y']] = triplet['mask_y']
return Stoarr(matrix)
def retrieve(self):
if not self.unique_labels and not self.labels:
return
matrix = self.unique_labels[self.labels]
matrix = matrix.reshape(self.shape)
obj = Stoarr(matrix)
return obj
def minimum_filter(self, footprint='octagon', ksize=(4, 4), iterations=2):
sys.stdout.write('minimum filter ... ')
sys.stdout.flush()
obj = copy.deepcopy(self)
obj.matrix = obj.matrix.astype(np.uint8)
#obj.matrix = cv2.applyColorMap(
# obj.matrix,
# cv2.COLORMAP_JET
# )
try:
n, m = ksize
except:
n = ksize
m = None
footprint = getfootprint(footprint, n, m)
obj.matrix = cv2.erode(
obj.matrix,
kernel=footprint,
iterations=iterations
)
#cv2.imwrite('blur.png', obj.matrix)
sys.stdout.write('done\n')
return obj
def filter_by_matrix(self, on=None, min_value=None, max_value=None,
draw=False, prefix=None):
"""label mask method
* on: filter by minimum value of the input matrix
"""
sys.stdout.write('filter by matrix ... ')
sys.stdout.flush()
obj = copy.deepcopy(self)
triplet = obj.to_triplet()
ref = on.to_triplet()
triplet = triplet.merge(ref, how='left', on=('x', 'y'))
triplet = triplet.fillna(0)
medians = triplet.groupby('mask_x')['mask_y'].median()
medians = medians.to_frame()
if draw:
fig = self.relabel(medians)
cv2.imwrite(f'{prefix}.median.png', fig.matrix)
        filter_labels = []
        if min_value:
            filter_labels = medians[medians['mask_y'] < min_value].index.values
        if max_value:
            filter_labels = medians[medians['mask_y'] > max_value].index.values
mask = np.isin(obj.matrix, filter_labels)
obj.matrix[mask] = 0
sys.stdout.write('{} labels removed\n'.format(len(filter_labels)))
return obj
def filter_by_diameter(self, min_size=1, max_size=None):
"""label mask method
* min_size: max circo radius
"""
sys.stdout.write('filter by diameter ... ')
sys.stdout.flush()
from skimage.measure import regionprops
obj = copy.deepcopy(self)
#obj.matrix = obj.matrix.astype(np.uint8)
filter_labels = []
regions = regionprops(obj.matrix)
for index, props in enumerate(regions):
if props.minor_axis_length <= 8 and (props.minor_axis_length * 5
<= props.major_axis_length):
# abnormity cell with large aspect ratio
filter_labels.append(props.label)
continue
if props.area > 1000 or props.area < 6:
# extreme large cell caused by non-detected blur region
# extreme small cell original segmentation fault
filter_labels.append(props.label)
continue
if props.extent < 0.3:
filter_labels.append(props.label)
continue
if props.minor_axis_length < min_size:
# extreme thin cell
filter_labels.append(props.label)
continue
if max_size and props.major_axis_length > max_size:
# extreme fat cell
filter_labels.append(props.label)
continue
mask = np.isin(obj.matrix, filter_labels)
obj.matrix[mask] = 0
sys.stdout.write('{} labels removed\n'.format(len(filter_labels)))
return obj
def merge(self, other, how='left'):
sys.stdout.write('merge mix labels ... ')
sys.stdout.flush()
if how == 'left':
obj = copy.deepcopy(self)
mask1 = obj.to_binary()
mask2 = copy.deepcopy(other)
elif how == 'right':
obj = copy.deepcopy(other)
mask1 = obj.to_binary()
mask2 = copy.deepcopy(self)
else:
pass
intersection = cv2.bitwise_and(mask1.matrix, mask2.matrix)
mask2.matrix[intersection] = 0
obj.matrix += mask2.matrix
sys.stdout.write('done\n')
return obj
def save(self, prefix='out'):
np.savetxt(f'{prefix}.mask.txt', self.matrix, fmt='%d')
return
def overlayoutlines(self, image=None, prefix=None):
sys.stdout.write('draw outlines ... ')
sys.stdout.flush()
import skimage.io
import skimage.segmentation
if isinstance(image, str):
image = skimage.io.imread(image)
outlines = skimage.segmentation.mark_boundaries(
image,
self.matrix,
color=(1, 0, 0),
mode='inner',
)
b, g, r = cv2.split(outlines)
sys.stdout.write('{} labels\n'.format(len(np.unique(self.matrix))))
mask = np.isin(b, [1])
image[mask] = 255
if prefix:
np.savetxt(f'{prefix}.outlines.txt', b, fmt='%d')
cv2.imwrite(f'{prefix}.outlines.png', image)
return b, image
def thres_mask(image, out_prefix=None):
image = cv2.imread(image, 0)
_, th = cv2.threshold(image, 20, 255, cv2.THRESH_BINARY)
if out_prefix:
        cv2.imwrite(f'{out_prefix}.mask.tif', th)
return th
def mixture_seg(cell_mask, tissue_mask, blur_mask, image=None, prefix='out',):
cell_mask = Stoarr(cell_mask)
tissue_mask = Stoarr(tissue_mask)
blur_mask = Stoarr(blur_mask)
blur_mask = blur_mask.minimum_filter(
footprint='octagon',
ksize=(7, 4)
)
orig_cell_mask = cell_mask.intersection(
tissue_mask,
label_area_cutoff=0.3
)
cell_mask = orig_cell_mask.filter_by_matrix(
on=blur_mask,
max_value=90,
draw=True,
prefix=prefix
)
cell_mask = cell_mask.filter_by_diameter(
min_size=3,
max_size=None,
)
tissue_mask = orig_cell_mask.subtract(cell_mask)
bin_mask = tissue_mask.binning(
bin_size=20
)
mix_mask = cell_mask.merge(
bin_mask,
how='left'
)
mix_mask.save(prefix=prefix)
outlines, image = mix_mask.overlayoutlines(
image=image,
prefix=prefix
)
return outlines, image
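# A minimal driver sketch for the pipeline above; the file names below are
# hypothetical placeholders for the labelled cell mask, binary tissue mask,
# blur-value matrix and registered stain image produced earlier in this workflow.
if __name__ == '__main__':
    outlines, overlay = mixture_seg(
        'cell_mask.txt',         # labelled cell mask (e.g. from pycellprofiler)
        'tissue_mask.txt',       # binary tissue mask (e.g. from thres_mask)
        'blur_mask.txt',         # per-pixel blur values (e.g. from blur_detect)
        image='registered.tif',  # stain image on which to draw the outlines
        prefix='sample',
    )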
|
BGI-Qingdao/4D-BioReconX
|
Preprocess/cellsegmentation/objseg.py
|
objseg.py
|
py
| 19,716 |
python
|
en
|
code
| 4 |
github-code
|
6
|
17815024172
|
#!/usr/bin/env python3
"""Tool to update Conan dependencies to the latest"""
import argparse
import json
import os
import re
import subprocess
def main():
"""
Read Conan dependencies, look for updates, and update the conanfile.py with updates
"""
parser = argparse.ArgumentParser()
parser.add_argument("--repo", help="Repo name of the package to update", required=True)
command_args = parser.parse_args()
fullpath = os.path.join(os.getcwd(), command_args.repo)
with open(os.path.join(fullpath, "conanfile.py"),
"r", encoding="utf-8", newline="") as conan_file:
conan_file_content = conan_file.read()
packages = []
package_strings = re.findall(r'requires\("(.*?)/(.*?)@', conan_file_content)
for package_string in package_strings:
package = {
"name": package_string[0],
"version": package_string[1],
}
packages.append(package)
for package in packages:
conan_inspect_output = subprocess.run("conan inspect . --format json",
cwd=f"conan-recipes/recipes/{package['name']}",
shell=True, check=True, stdout=subprocess.PIPE)
conan_inspect_json = json.loads(conan_inspect_output.stdout.decode("utf-8"))
package["latest_version"] = conan_inspect_json["version"]
old_package = f"{package['name']}/{package['version']}"
new_package = f"{package['name']}/{package['latest_version']}"
if old_package != new_package and old_package in conan_file_content:
conan_file_content = conan_file_content.replace(old_package, new_package)
print("Replace:")
print(f" {old_package}")
print("With:")
print(f" {new_package}")
print()
with open(os.path.join(fullpath, "conanfile.py"),
"w", encoding="utf-8", newline="") as conan_file:
conan_file.write(conan_file_content)
if __name__ == "__main__":
main()
|
ssrobins/tools
|
update_conan_packages.py
|
update_conan_packages.py
|
py
| 2,066 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25125596863
|
# Implement a "Date" class whose constructor accepts a date as a string in the "day-month-year" format.
# Implement two methods in the class. The first, decorated with @classmethod, must extract the day, month and
# year and convert them to the numeric type. The second, decorated with @staticmethod, must validate the day,
# month and year (for example, the month must be between 1 and 12). Test the resulting structure on real data.
import re
class Data:
def __init__(self, data):
self.data = data
def __str__(self):
return f'{self.data}'
@classmethod
def convert(cls, data):
instance = cls(cls.validator(data))
return instance
@staticmethod
def validator(data):
pattern = re.compile(r'(?P<day>\d{2})-(?P<month>\d{2})-(?P<year>\d+)$')
result = pattern.match(data)
        if not result:
            raise ValueError('Invalid date')
        result = result.groupdict()
        for key in result.keys():
            result[key] = int(result[key])
        if result['day'] < 1 or result['day'] > 31:
            raise ValueError('Invalid day')
        if result['month'] < 1 or result['month'] > 12:
            raise ValueError('Invalid month')
        if result['year'] < 1 or result['year'] > 5000:
            raise ValueError('Year must be within the allowed range (1-5000)')
return result
date = Data('05-11-2021')
print(date)
my_date = Data.convert('05-11-2021')
print(my_date)
# print()
# my_date = Data.convert('35-11-2021')
# print(my_date)
|
RombosK/GB_1824
|
Kopanev_Roman_DZ_11/dz_11_1.py
|
dz_11_1.py
|
py
| 1,983 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
41533682153
|
from typing import List
class Solution:
    def minStartValue(self, nums: List[int]) -> int:
        # turn nums into running prefix sums in place
        for i in range(1, len(nums)):
            nums[i] = nums[i] + nums[i-1]
        # the start value must lift the lowest prefix sum to at least 1
        if min(nums) < 0:
            return -1 * min(nums) + 1
        else:
            return 1
|
dani7514/Competitive-Programming-
|
1413-minimum-value-to-get-positive-step-by-step-sum/1413-minimum-value-to-get-positive-step-by-step-sum.py
|
1413-minimum-value-to-get-positive-step-by-step-sum.py
|
py
| 283 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6501962901
|
from flask import request
from mobile_endpoint.backends.manager import get_dao
from mobile_endpoint.case.case_processing import process_cases_in_form
from mobile_endpoint.extensions import requires_auth
from mobile_endpoint.form.form_processing import create_xform, get_instance_and_attachments, get_request_metadata
from mobile_endpoint.views import ota_mod
from mobile_endpoint.views.response import get_open_rosa_response
@ota_mod.route('/receiver/<domain>', methods=['POST'])
@requires_auth
def form_receiver(domain):
return _receiver(domain, backend='sql')
@ota_mod.route('/couch-receiver/<domain>', methods=['POST'])
@requires_auth
def couch_receiver(domain):
return _receiver(domain, backend='couch')
@ota_mod.route('/mongo-receiver/<domain>', methods=['POST'])
@requires_auth
def mongo_receiver(domain):
return _receiver(domain, backend='mongo')
def _receiver(domain, backend):
dao = get_dao(backend)
instance, attachments = get_instance_and_attachments(request)
request_meta = get_request_metadata(request)
request_meta['domain'] = domain
xform_lock = create_xform(instance, attachments, request_meta, dao)
with xform_lock as xform:
case_result = None
if xform.doc_type == 'XFormInstance':
case_result = process_cases_in_form(xform, dao)
dao.commit_atomic_submission(xform, case_result)
return get_open_rosa_response(xform, None, None)
|
dimagi/mobile-endpoint
|
prototype/mobile_endpoint/views/receiver.py
|
receiver.py
|
py
| 1,434 |
python
|
en
|
code
| 0 |
github-code
|
6
|