seq_id (stringlengths 7–11) | text (stringlengths 156–1.7M) | repo_name (stringlengths 7–125) | sub_path (stringlengths 4–132) | file_name (stringlengths 4–77) | file_ext (stringclasses 6) | file_size_in_byte (int64 156–1.7M) | program_lang (stringclasses 1) | lang (stringclasses 38) | doc_type (stringclasses 1) | stars (int64 0–24.2k, nullable) | dataset (stringclasses 1) | pt (stringclasses 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---
31333928448
|
#!/usr/bin/env python3
import atexit
import copy
import datetime
import json
import os
import re
import sys
import threading
import botocore
from flask import Flask
from prometheus_client import make_wsgi_app, Gauge
from pyemvue import PyEmVue
from pyemvue.enums import Scale, Unit
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.serving import run_simple
poller_thread = threading.Thread()
minutesInAnHour = 60
wattsInAKw = 1000.0
USAGE_WATTS = Gauge('per_min_usage_total', 'Total usage for channel in watts.', labelnames=['channel', 'channel_num', 'device_name', 'device_gid'], unit="watt")
EXCLUDED_CHANNELS = ['Balance', 'TotalUsage', ]
devices = {}
def log(level, msg):
now = datetime.datetime.utcnow()
print('{} | {} | {}'.format(now, level.ljust(5), msg), flush=True)
def debug(msg):
    log("DEBUG", msg)
def info(msg):
log("INFO", msg)
def error(msg):
log("ERROR", msg)
def die():
global poller_thread
error('Caught exit signal')
try:
poller_thread.cancel()
except Exception as e:
pass
info('Shutting down.')
sys.exit(0)
def handle_exit(signum, frame):
die()
# get usage for each device
def get_usage_data(device_names, device):
device_name = device_names.get(device.device_gid, 'Unknown Device')
info(f'Device: #{device.device_gid} "{device_name}" has {len(device.channels.items())} channels.')
# Recurse thru the various channels, gathering rosebuds
for number, channel in device.channels.items():
if number in EXCLUDED_CHANNELS:
debug(f'Excluding data from channel "{number}".')
continue
if channel.nested_devices:
for gid, dev in channel.nested_devices.items():
debug(f'Recursing into nested devices for channel "{number}".')
get_usage_data(device_names, dev)
kwhUsage = channel.usage
if kwhUsage is not None:
channel_label = re.sub(r'_+', '_', re.sub(r'[^a-z0-9_]', '_', channel.name.lower(), flags=re.I | re.M))
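# channel.usage is the kWh consumed during the one-minute window:
# kWh/min * 60 = average kW, * 1000 = watts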
watts = wattsInAKw * minutesInAnHour * kwhUsage
USAGE_WATTS.labels(channel_label, number, device_name, device.device_gid).set(watts)
info(f'Channel #{number} - {channel.name} recorded as {channel_label}.')
# Thread to poll emporia for data
def poll_emporia(vue=None, retry_login=False, devices=None, poll_interval=60):
global poller_thread
# retry login if needed
if retry_login:
try:
info('logging in')
vue.login(username=os.environ.get('VUE_USERNAME'), password=os.environ.get('VUE_PASSWORD'))
info('successfully logged in')
except Exception as e:
error(f'Exception occurred during login: {e}')
info(f'skipping run and trying again in {poll_interval} seconds')
poller_thread = threading.Timer(poll_interval, poll_emporia, kwargs={"vue": vue, "retry_login": True, "devices": devices, "poll_interval": poll_interval})
poller_thread.start()
return
try:
device_list = vue.get_devices()
info(f'found {len(device_list)} devices.')
# give the system time to catch up with data
timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=15)
device_names = dict(
filter(lambda x: x[1],
map(lambda x:(x.device_gid, x.device_name), device_list)))
# get the usage
device_usage = vue.get_device_list_usage(list(map(lambda d: d.device_gid, device_list)), timestamp, scale=Scale.MINUTE.value, unit=Unit.KWH.value)
if not device_usage:
    info(f'No usage data returned; retrying in {poll_interval} seconds.')
    poller_thread = threading.Timer(poll_interval, poll_emporia, kwargs={"vue": vue, "retry_login": False, "devices": devices, "poll_interval": poll_interval})
    poller_thread.start()
    return
for gid, device in device_usage.items():
get_usage_data(device_names, device)
info(f'Finished polling run; next run in {poll_interval} seconds.')
poller_thread = threading.Timer(poll_interval, poll_emporia, kwargs={"vue": vue, "retry_login": False, "devices": devices, "poll_interval": poll_interval})
poller_thread.start()
except Exception as e:
error(f'Exception occurred: {e}')
info('restarting poll with login retry after 5s.')
poller_thread = threading.Timer(5, poll_emporia, kwargs={"vue": vue, "retry_login": True, "devices": devices, "poll_interval": poll_interval})
poller_thread.start()
return
def create_app(devices):
global poller_thread
app = Flask(__name__)
poll_interval = 60
vue = PyEmVue()
info(f'Launching first poll.')
poller_thread = threading.Timer(1, poll_emporia, kwargs={"vue": vue, "retry_login": True, "devices": devices, "poll_interval": poll_interval})
poller_thread.start()
atexit.register(die)
return app
deviceFilename = os.environ.get('VUE_DEVICES_FILE')
if deviceFilename:
try:
with open(deviceFilename) as deviceFile:
devices = json.load(deviceFile)
except FileNotFoundError:
info(f'No device list file found at {deviceFilename}')
try:
app = create_app(devices.get('devices', {}))
except Exception:
error('Unable to log in - check VUE_USERNAME/VUE_PASSWORD')
sys.exit(-2)
# add /metrics prom dumper
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
'/metrics': make_wsgi_app()
})
|
thebaron/vueprom
|
src/vueprom.py
|
vueprom.py
|
py
| 5,230 |
python
|
en
|
code
| 2 |
github-code
|
6
|
74309693947
|
from turtle import Screen
from pong_paddle import Paddle
from ball_class import Ball
from scoreboard import Score
import time
screen = Screen()
screen.bgcolor("black")
screen.title("Pong Game")
screen.setup(width=1000, height=800)
screen.tracer(0)
r_paddle = Paddle((450, 0))
l_paddle = Paddle((-450, 0))
ball = Ball()
score = Score()
screen.listen()
screen.onkey(r_paddle.move_up, "Up")
screen.onkey(r_paddle.move_down, "Down")
screen.onkey(l_paddle.move_up, "w")
screen.onkey(l_paddle.move_down, "s")
# Game LOOP
while True:
time.sleep(ball.move_speed)
screen.update()
ball.move()
if ball.ycor() > 380 or ball.ycor() < -380:
ball.bounce()
if (ball.distance(r_paddle) < 60 and ball.xcor() > 370) or (ball.distance(l_paddle) < 60 and ball.xcor() < -370):
ball.bounce_paddle()
if ball.xcor() > 450:
score.l_point()
ball.reset_ball()
if ball.xcor() < -450:
score.r_point()
ball.reset_ball()
screen.exitonclick()
|
shuklaritvik06/PythonProjects
|
Day - 22/main.py
|
main.py
|
py
| 996 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42579723690
|
import os
import git
import datetime
import argparse
class was_it_rufus:
"""
A class that collects and reports git status information for a repository.
...
Methods
-------
git_status():
    Prints a summary of the git status.
"""
def __init__(self, git_directory):
"""
Constructs all the necessary attributes for the Rufus object.
Attributes
----------
repo : git.Repo
    repo object used for the various status queries
active_branch : str
    name of the currently checked-out branch
local_changes : bool
    whether the working tree has uncommitted changes
recent_commit : bool
    whether the HEAD commit is less than a week old
blame_rufus : bool
    whether Rufus authored the HEAD commit
"""
self.repo = git.Repo(git_directory)
self.active_branch = self.repo.active_branch.name
self.local_changes = self.repo.is_dirty()
self.recent_commit = (datetime.datetime.now().date() - self.repo.head.commit.authored_datetime.date()) < datetime.timedelta(
weeks=1)
self.blame_rufus = self.repo.head.commit.author.name == "Rufus"
def git_status(self):
"""
Prints the details of git status.
Returns
-------
None
"""
print("active branch: ", self.active_branch)
print("local changes: ", self.local_changes)
print("recent commit: ", self.recent_commit)
print("blame Rufus: ", self.blame_rufus)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Was it rufus?')
parser.add_argument('git_directory', type=str, help='name of the git repo directory')
args = parser.parse_args()
#checks if the parsed argument is a directory or not
if os.path.isdir(args.git_directory):
rufus_obj = was_it_rufus(args.git_directory)
rufus_obj.git_status()
else:
print(f"{args.git_directory} is not a valid directory")
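# Example invocation (the path is a placeholder):
# python main.py /path/to/some/git/repo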
|
srirammura/was_it_rufus
|
main.py
|
main.py
|
py
| 2,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2518936352
|
from Getnum import getnum
def maxcha(alist):
n = len(alist)
num_min = min(alist)
num_max = max(alist)
if num_min == num_max:
    return 0
tong = [[] for _ in range(n + 1)]  # n + 1 buckets
print(tong)
for j in alist:
if j == num_max:
tong[n].append(j)
else:
index = (j-num_min)*(n+1)//(num_max-num_min)
tong[index].append(j)
print(tong)
# scan the buckets in order: the maximum gap is always between the max of one
# non-empty bucket and the min of the next non-empty bucket
best = 0
prev_max = None
for bucket in tong:
    if not bucket:
        continue
    if prev_max is not None:
        best = max(best, min(bucket) - prev_max)
    prev_max = max(bucket)
return best
if __name__ == '__main__':
a = getnum(10)
print(maxcha(a))
|
youyuebingchen/Algorithms
|
qiyue_alg/03_找差值最大值.py
|
03_找差值最大值.py
|
py
| 825 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21529376169
|
from .base_view import ClassView
def get_model_value(instance, field):
try:
value = getattr(instance, field)
except Exception:
if field.find('__') > 0:
fields = field.split('__')
elif field.find('.') > 0:
fields = field.split('.')
else:
raise  # no separator found, re-raise the original error
value = instance
for field in fields:
value = getattr(value, field)
if value is None:
return None
return value
class ModelListView(ClassView):
fields = None
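# e.g. fields = ['id', ('user.username', 'username')]; nested attributes may use '.' or '__'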
has_pagination = True
default_per_page = 10
def __init__(self, request):
ClassView.__init__(self, request)
get = request.GET.get
self.per_page = get("per_page", self.default_per_page)
self.per_page = int(self.per_page)
self.page = get("page", 1)
self.page = int(self.page)
self.search = get("search", "")
self.sort_param = get("sort", None)
self.data = {}
self.json_string = None
self.validate_fields()
def validate_fields(self):
fields = self.fields
if not isinstance(fields, (list, tuple)):
raise Exception("Fields format is not valid.")
new_fields = []
for field in fields:
if type(field) in (list, tuple):
if len(field) > 2:
raise Exception("Fields format is not valid.")
field, json_key = field
else:
field, json_key = field, field
new_fields.append((field, json_key))
self.fields = new_fields
def get_query(self):
raise NotImplementedError()
def to_json(self, record):
fields = self.fields
data = {}
for field, json_key in fields:
fn = getattr(self, 'render_' + field, None)
if fn is not None:
value = fn(record)
else:
value = get_model_value(record, field)
data[json_key] = value
return data
# noinspection PyBroadException
def sort(self, query):
sort = self.sort_param
if not sort:
return query
try:
param, asc = sort.split("|")
if asc != "asc":
param = "-" + param
return query.order_by(param)
except Exception:
return query
def get_count(self, query):
from django.db.models.query import QuerySet
if isinstance(query, QuerySet):
return query.count()
return len(query)
def process(self, request):
query = self.get_query()
fields = [a for a, b in self.fields]
if len(fields) > 0:
query = query.only(*fields)
query = self.sort(query)
if self.has_pagination:
data = self.paginate(query)
else:
data = [self.to_json(record) for record in query]
self.add("status", True)
self.add('total', self.get_count(query))
self.add("data", data)
def paginate(self, query):
from django.core.paginator import Paginator
if not self.has_pagination:
return
paginator = Paginator(query, self.per_page)
page = paginator.page(self.page)
records = page.object_list
data = [self.to_json(record) for record in records]
self.add("last_page", paginator.num_pages)
self.add("from", page.start_index())
self.add("current_page", self.page)
self.add("per_page", self.per_page)
self.add("to", page.end_index())
return data
|
sajithak52/store-django-app
|
myproject/base_class/views/list_view.py
|
list_view.py
|
py
| 3,646 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5001531967
|
from datetime import datetime
from django import template
from tag.models import Tag
register = template.Library()
@register.inclusion_tag('toptags.html')
def toptags():
tags = Tag.objects.all().order_by('-followers_count')[:5]
return {'tags': tags}
@register.inclusion_tag('trendingtags.html')
def trending_tags():
today = datetime.now()
tags = Tag.objects.filter(create__year=today.year, create__month=today.month).order_by('-followers_count')[:5]
return {'tags': tags}
@register.inclusion_tag('mytags.html',takes_context=True)
def mytags(context):
request = context['request']
tags = request.user.profile.follow_tags.all()
return {'tags': tags}
|
duonghau/hoidap
|
tag/templatetags/tag_template.py
|
tag_template.py
|
py
| 745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30984123610
|
""" apps/docs/urls.py """
from django.urls import path
from . import views
app_name = 'docs'
urlpatterns = [
path('', views.index, name='index'),
path('overview/', views.overview, name='overview'),
path('what_is_an_ordo/', views.what_is_an_ordo, name='what_is_an_ordo'),
path('create_an_account/', views.create_an_account, name='create_an_account'),
path('create_a_calendar/', views.create_a_calendar, name='create_a_calendar'),
path('populate_your_calendar/', views.populate_your_calendar,
name='populate_your_calendar'),
path('create_an_ordo/', views.create_an_ordo, name='create_an_ordo'),
path('for_developers/', views.for_developers, name='for_developers'),
]
|
BrRoman/ordomatic
|
ordomatic/apps/docs/urls.py
|
urls.py
|
py
| 709 |
python
|
en
|
code
| 3 |
github-code
|
6
|
2572453901
|
"""Домашнее задание.
Написать функцию вычисляющую метрику пользователей лифта.
ОПИСАНИЕ СИТУАЦИИ
Допустим у нас есть 10-ти этажное здание, в котором есть один лифт вместимостью 10 человек.
На каждом этаже есть кнопка вызова лифта. Когда человеку нужно попасть с этажа Х на этаж У, он нажимает кнопку вызова
лифта, ждёт когда лифт приедет, заходит в лифт, нажимает на кнопку этажа У, и когда лифт приезжает на нужный
этаж - выходит. В час-пик лифт может долго не приезжать, если это происходит, то человек может пойти пешком по лестнице
на нужный ему этаж.
АБ ЭКСПЕРИМЕНТ
Мы хотим изменить алгоритм работы лифта и с помощью АБ теста оценить как это повлияет на время, затрачиваемое людьми
на перемещение между этажами. В качестве метрики будем использовать некоторую статистику от эмпирической функции
распределения затрачиваемого времени пользователей.
МЕТРИКА ЭКСПЕРИМЕНТА
Затрачиваемое время T определим так:
- если человек дождался лифта, то T = t2 - t1, где t2 - время прибытия на целевой этаж, а t1 - время вызова лифта.
- если человек не дождался лифта и пошёл пешком, то T = 2 * (t2 - t1), где t2 - время когда решил пойти пешком, t1 -
время вызова лифта.
ОТКУДА ДАННЫЕ
Данные генерируются с помощью эмуляции работы лифта и случайного генерирования пользователей.
Для простоты время разбито на интервалы по 10 секунд.
В каждый интервал времени лифт может совершить одно из 3 действий: спуститься на 1 этаж вниз, подняться на 1 этаж
вверх, произвести посадку/высадку на текущем этаже.
В каждый интервал времени с некоторой вероятностью генерируются люди, которые обладают следующими свойствами:
- текущий этаж, с которого хочет уехать;
- целевой этаж, куда хочет приехать;
- начальное время, когда начал ждать лифт;
- максимальное время ожидания, если не зайдёт в лифт до этого момента, то пойдёт пешком.
ОПИСАНИЕ ДАННЫХ
У нас есть табличка с логами лифта, в которой есть следующие атрибуты:
- time - время в секундах.
- action - состояние лифта в следующие 10 секунд. OPEN - стоит открытый, UP - едет вверх, DOWN - едет вниз.
- user_out - количество вышедших человек
- user_in - количество вошедших человек
- user_total - количество человек в лифте
- floor - текущий этаж
- calls - список вызовов. Вызов описывается парой значений - время вызова и этаж, на который был вызван лифт.
- orders - список заказов, на какие этажи нажимали пользователи, зашедшие в лифт. Аналогично содержит список пар -
время заказа и целевой этаж.
ЗАДАНИЕ
Нужно написать функцию, которая принимает на вход таблицу pd.DataFrame с логами лифта и возвращает множество значений
метрик пользователей. Метрика описана выше в разделе МЕТРИКА ЭКСПЕРИМЕНТА.
ПРИМЕР
Рассмотрим кусок данных. Тут пользователь вызвал лифт при t1=10, и доехал на нужный этаж при t2=40, значение
метрики для него будет равно t2 - t1 = 30.
time | action | user_out | user_in | user_total | floor | calls | orders
--------------------------------------------------------------------------------------
0 | open | 0 | 0 | 0 | 1 | [] | []
10 | up | 0 | 0 | 0 | 1 | [(10, 2)] | []
20 | open | 0 | 1 | 1 | 2 | [] | [(20, 1)]
30 | down | 0 | 0 | 0 | 2 | [] | [(20, 1)]
40 | open | 1 | 0 | 0 | 1 | [] | []
ОЦЕНИВАНИЕ
По данным из вашей функции и по истинным значениям метрики будут построены эмпирические функция распределения.
Далее будет вычислено максимальное отличие между полученными ЭФР (аналогично статистике критерия Колмогорова).
Чем меньше отличие D, тем выше балл.
- D <= 0.1 - 10 баллов
- D <= 0.13 - 9 баллов
- D <= 0.16 - 8 баллов
- D <= 0.19 - 7 баллов
и так далее с шагом 0.03.
БОНУСНЫЕ БАЛЛЫ
Топ-5 участников с лучшими результатами получат бонусные баллы.
1 место - 3 балла
2 место - 2 балла
3-5 места - 1 балл
"""
import pandas as pd
import ast
def calculate_metrics(data: pd.DataFrame):
"""Вычисляет значения метрики пользователей.
data - таблица с логами лифта
return - список значений метрики
"""
ts = []
for i in range(data.shape[0]-1):
row_prev = data.iloc[i, :]
row_cur = data.iloc[i+1, :]
prev_calls = ast.literal_eval(row_prev["calls"])
for el in prev_calls:
cur_calls = ast.literal_eval(row_cur["calls"])
if el not in cur_calls:
if el[1] == row_cur["floor"] and row_cur["action"] == "open" and row_cur["user_in"] >= 1:
orders = ast.literal_eval(row_cur["orders"])
for ordr in orders:
if ordr[0] == row_cur["time"]:
order = ordr
break
t_order = row_cur["time"] - el[0]
for j in range(i+1, data.shape[0]):
cur_new_row = data.iloc[j, :]
cur_orders = ast.literal_eval(cur_new_row["orders"])
if order not in cur_orders and cur_new_row["action"] == "open" and cur_new_row["user_out"] >= 1:
t_lift = cur_new_row["time"] - order[0]
break
t = t_order + t_lift
ts.append(t)
else:
t = 2 * (row_cur["time"] - el[0])
ts.append(t)
return ts
|
LiliaMilutina/OzonMasters
|
AB-testing/task4.py
|
task4.py
|
py
| 8,025 |
python
|
ru
|
code
| 1 |
github-code
|
6
|
75276539706
|
import numpy as np
import pandas as pd
import torch
def testing(group_test, y_test, model):
rmse = 0
j = 1
result = []
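# assumes 100 engine units in the test set (as in NASA C-MAPSS FD001), keyed 1..100 in group_test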
while j <= 100:
x_test = group_test.get_group(j).to_numpy()
data_predict = 0
for t in range(x_test.shape[0]): # iterate to the end of each sequence
if t == 0:
continue
elif t == x_test.shape[0] - 1: # for last one row append a zero padding
X_test = np.append(x_test[t - 1:, 2:], [np.zeros(14)], axis=0)
else:
X_test = x_test[t - 1:t + 2, 2:]
X_test_tensors = torch.Tensor(X_test)
X_test_tensors_final = X_test_tensors.reshape((1, 1, X_test_tensors.shape[0], X_test_tensors.shape[1]))
test_predict = model(X_test_tensors_final, t)
data_predict = test_predict.data.numpy()[-1]
# block for linearly decreasing the RUL after each iteration
if data_predict - 1 < 0:
data_predict = 0
else:
data_predict -= 1
result.append(data_predict)
rmse += np.power((data_predict - y_test.to_numpy()[j - 1]), 2)
j += 1
rmse = np.sqrt(rmse / 100)
result = y_test.join(pd.DataFrame(result))
result = result.sort_values('RUL', ascending=False)
return rmse, result
|
jiaxiang-cheng/PyTorch-Transformer-for-RUL-Prediction
|
testing.py
|
testing.py
|
py
| 1,420 |
python
|
en
|
code
| 140 |
github-code
|
6
|
9434937105
|
import random
import datetime
import urllib
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.files.storage import default_storage
from django.core.files.base import File
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User # NOQA
from fusionbox.blog.models import Blog
word_list = [
'john', 'intense', 'lucky', 'solid', 'hot', 'clever', 'amusing',
'wicked', 'damp', 'sticky', 'warm', 'courteous', 'young', 'slow',
'selfish', 'great', 'vigorous', 'glamorous', 'clean', 'placid',
'enthusiastic', 'instinctive', 'wild', 'hurt', 'tricky',
'diplomatic', 'sympathetic', 'painstaking', 'raspy', 'proud',
'thoughtful', 'delicious', 'itchy', 'cute', 'debtor', 'trip',
'france', 'cone', 'missile', 'statistic', 'equipment', 'push',
'fine', 'antarctica', 'apparel', 'meteorology', 'tsunami', 'head',
'balance', 'fowl', 'spoon', 'croissant', 'library', 'purchase',
'staircase', 'wasp', 'carnation', 'cannon', 'bronze', 'glass',
'kendo', 'cello', 'taiwan', 'shape', 'cauliflower', 'green',
'run', 'scarf', 'tower', 'regret', 'disgust', 'roof', 'hen',
'law',
]
tags = ['broccoli', 'violin', 'disintermediate', 'infomediaries', '"compelling synergy"']
names = ['John', 'Patrick', 'Alberto', 'Bertha', 'Claudette', 'Arlene', 'Vince']
def random_text(nwords, choices=word_list):
words = []
got_words = 0
while got_words < nwords:
word = random.choice(choices)
if got_words % 10 == 0 and got_words != 0:
word += '.'
if got_words % 50 == 0 and got_words != 0:
words.append('\n\n\n')  # paragraph break
words.append(word)
got_words += 1
return ' '.join(words)
def random_image(word='unicorn'):
tmpfile, header = urllib.urlretrieve('http://placenoun.com/' + urllib.quote_plus(word))
name = random_text(3)
return default_storage.save(name, File(open(tmpfile), name=name))
class Command(BaseCommand):
help = "Creates some random blogs"
option_list = BaseCommand.option_list + (
make_option('--images',
action='store_true',
default=False,
help='Include some random images'),
)
def handle(self, *args, **options):
author = User.objects.create(
first_name=random_text(1, names),
last_name=random_text(1, names),
username=random_text(3).replace(' ', ''),
email="%s@%s.com" % (random_text(2), random_text(1)),
)
for i in range(25):
body = random_text(500)
title_first = random_text(1)
title = title_first + ' ' + random_text(4)
Blog.objects.create(
title=title,
seo_title='Blog ' + title,
seo_keywords=random_text(5),
seo_description=body[:40],
author=author,
summary=body[:40],
body=body,
tags=random_text(2, tags),
is_published=True,
publish_at=datetime.datetime.now() - datetime.timedelta(days=random.randint(1, 1000)),
created_at=datetime.datetime.now() - datetime.timedelta(days=random.randint(1, 1000)),
image=random_image(title_first) if options['images'] and random.randint(0, 3) == 0 else None,
)
|
fusionbox/django-fusionbox-blog
|
fusionbox/blog/management/commands/seed_blogs.py
|
seed_blogs.py
|
py
| 3,513 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73532195387
|
from otree.api import Currency as c, currency_range
from . import views
from ._builtin import Bot
from .models import Constants
class PlayerBot(Bot):
def play_round(self):
yield (views.Demographics, {
'q_country': 'BS',
'q_age': 24,
'q_gender': 'Male'})
yield (views.CognitiveReflectionTest, {
'crt_bat': 10,
'crt_widget': 5,
'crt_lake': 48
})
for value in [
self.player.crt_bat,
self.player.q_country,
self.player.payoff
]:
assert value is not None
|
dimaba/svotree
|
tests.py
|
tests.py
|
py
| 619 |
python
|
en
|
code
| 7 |
github-code
|
6
|
39688498284
|
# Time: O(V+E)
# Space: O(V+E)
import collections
from typing import List
class Solution:
def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
num_graph = collections.defaultdict(list)
for (x,y),val in zip(equations, values):
num_graph[x].append([y, val])
num_graph[y].append([x,1/val])
# print(num_graph)
op = []
for x,y in queries:
# print("Inside", x, y)
if x not in num_graph or y not in num_graph:
op.append(-1.0)
continue
seen = {x}
count = self.dfs(num_graph, x, y, seen)
op.append(count)
return op
def dfs(self, num_graph, x, y, seen):
# print(x,y, seen)
if x==y:
return 1.0
for neigh, conv_val in num_graph[x]:
if neigh not in seen:
seen.add(neigh)
ret_val = self.dfs(num_graph, neigh, y, seen)
if ret_val>-1.0:
return ret_val*conv_val
return -1.0
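# Quick check (LeetCode 399 style; "x" is absent from the graph):
# Solution().calcEquation([["a","b"],["b","c"]], [2.0, 3.0], [["a","c"],["x","x"]])
# -> [6.0, -1.0]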
|
cmattey/leetcode_problems
|
Python/lc_399_evaluate_division.py
|
lc_399_evaluate_division.py
|
py
| 1,085 |
python
|
en
|
code
| 4 |
github-code
|
6
|
39225944668
|
from ddl.tensorflow.cpp_backend import CPPBackend
class Communicator:
"""
A class representing a communicator (communication domain).
"""
# cache for the world communicator shared by all processes
__world = None
def __init__(self, communicator_id: int):
"""
@param communicator_id: actually an integer; it is passed into the c_api, where the C
backend converts it back into a pointer. The Python side can treat it as a plain integer.
"""
self.__id = communicator_id
# lazily initialised
self.__rank = None
self.__size = None
@property
def id(self):
return self.__id
@property
def rank(self) -> int:
if self.__rank is None:
self.__rank = CPPBackend.c_api().communicator_rank(self.id)
return self.__rank
@property
def size(self) -> int:
if self.__size is None:
self.__size = CPPBackend.c_api().communicator_size(self.id)
return self.__size
def split_communicator(self, color: int, key: int = None) -> 'Communicator':
"""
Split this communicator by colour. All processes in the communicator must take part,
each supplying its own colour; processes with the same colour end up in the same new communicator.
@param color: the colour
@param key: controls this process's rank in the new communicator; ranks are assigned in key
order, from 0 up to the new communicator's size - 1. If None, this process's rank in the
current communicator is used as the key.
@return: the new communicator this process belongs to
"""
if key is None:
key = self.rank
return Communicator(
CPPBackend.c_api().split_communicator(self.id, color, key))
@classmethod
def world(cls) -> 'Communicator':
"""
Returns the communicator containing all processes.
@return: the world communicator
"""
if cls.__world is None:
cls.__world = cls(CPPBackend.c_api().world_communicator())
return cls.__world
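# Usage sketch (assumes the C++ backend has been initialised elsewhere):
# world = Communicator.world()
# sub = world.split_communicator(color=world.rank % 2)  # split into even/odd ranks
# print(sub.rank, sub.size)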
|
LYL232/Experiment-Distributed-Deep-Learning
|
src/py/ddl/tensorflow/communicator.py
|
communicator.py
|
py
| 1,892 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
40106131495
|
from .auto import Auto
import gettext
def russian(text):
text = text.replace("usage:",
"использование:")
text = text.replace("show this help message and exit",
"показывает это сообщение и выходит")
text = text.replace("error:",
"ошибка:")
text = text.replace("the following arguments are required",
"требуются следующие аргументы")
text = text.replace("argument ",
"аргумент ")
text = text.replace("invalid choice",
"недопустимый вариант")
text = text.replace("choose from ",
"выберите из следующих ")
return text
gettext.gettext = russian
import argparse  # imported after the gettext patch so argparse picks up the Russian messages
gettext.bindtextdomain("argparse", "")
gettext.textdomain("argparse")
DESCRIPTION = """Система сборки Auto"""
def cli(function):
"""
Decorator that builds a command-line utility from a function
:param function: - the result of the configure(**kwargs) decorator
Usage:
~~~~~~~~
::
@cli
@configure(**kwargs)
def test():
...
All targets become available for execution via
*python source.py -h*
"""
manager: Auto = function()
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser._positionals.title = "Позиционные аргументы"
parser._optionals.title = "Опции"
choices = manager.targets.keys()
parser.add_argument("target", type=str, help="цель сборки", choices=choices)
namespace = parser.parse_args()
manager.execute(namespace.target)
|
Papr1ka/config
|
practice4/auto/cli.py
|
cli.py
|
py
| 1,875 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
5308151620
|
# n = int(input())
# s = input()
# ans = 0
# bonus = 0
# for i in range(len(s)):
# if s[i] == 'O':
# bonus += 1
# ans += i + bonus
# elif s[i] == 'X':
# bonus = 0
# print(ans)
n, s = input(), input()
score, bonus = 0, 0
for idx, ox in enumerate(s):
if ox == 'O':
score, bonus = score+idx+1+bonus, bonus+1
else:
bonus = 0
print(score)
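# e.g. s = "OOXOO": 1 + 3 + 0 + 4 + 6 = 14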
|
louisuss/Algorithms-Code-Upload
|
Python/Baekjoon/17389.py
|
17389.py
|
py
| 395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23932718939
|
import os
import numpy as np
import pandas as pd
from variables import csv_path, label_encode, file_name, cutoff
from sklearn.utils import shuffle
def preprocess_data(csv_path):
df = pd.read_csv(csv_path)
df = df.copy()
df = df.dropna(axis=0, how='any')
df['label'] = df.apply(y2indicator, axis=1)
del df['species']
df = shuffle(df)
df.to_csv(file_name, encoding='utf-8')
def y2indicator(x):
species = x['species']
return label_encode[species]
def get_data():
if not os.path.exists(file_name):
preprocess_data(csv_path)
df = pd.read_csv(file_name)
Xdata = df.copy()[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']].to_numpy()
Ydata = df.copy()[['label']].to_numpy()
train_set = int(cutoff * len(df))
Xtrain, Xtest = Xdata[:train_set], Xdata[train_set:]
Ytrain, Ytest = Ydata[:train_set], Ydata[train_set:]
return Xtrain, Xtest, Ytrain, Ytest
def one_hot_encode(Ydata):
N = len(Ydata)
num_classes = 3
y = np.eye(num_classes)
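# indexing the identity matrix with integer labels yields one-hot rows, e.g. 2 -> [0., 0., 1.]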
return y[Ydata]
|
1zuu/Pytroch-Examples
|
IrishClassifier/util.py
|
util.py
|
py
| 1,061 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32055141770
|
import sqlite3
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
def initiate_db():
cursor.execute('CREATE TABLE IF NOT EXISTS books(name text primary key, author text, year integer, read integer)')
connection.commit()
def add_db(book, author, year):
try:
cursor.execute("INSERT INTO books VALUES(?, ?, ?, 0)", (book, author, year))
connection.commit()
return (f"Added '{book}' {author} {year} to the book store.")
except sqlite3.IntegrityError:
return f"BOOK: '{book}' by {author} already exists in the database."
def retrive_db():
cursor.execute('SELECT * from books')
books_db = [{"BOOK": row[0], "AUTHOR": row[1], "YEAR": str(row[2]), "READ": row[3] } for row in cursor.fetchall()]
if not books_db:
return [{"BOOK": 0, "AUTHOR": 0, "YEAR": 0, "READ": 0}]
return books_db
def _check_book(book_info):
books_found = [book for book in retrive_db() if book_info in book.values()]
return books_found
def mark_db(book):
if [book_name for book_name in _check_book(book) if book_name["BOOK"] == book]:
cursor.execute("UPDATE books SET read = 1 WHERE name = ?", (book,))
connection.commit()
return f"Marked Book:'{book}' as 'Read' "
return f"Book '{book}'' doesn't exist in Book Store"
def delete_db(book):
if _check_book(book) and _check_book(book)[0]["BOOK"] == book:
cursor.execute("DELETE FROM books WHERE name = ?",(book,))
connection.commit()
return f"'{book}' is removed from database"
return f"Book '{book}' doesn't exist in Book Store"
|
minnalisa/book_shelf
|
database2.py
|
database2.py
|
py
| 1,624 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15420800470
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import urllib.request
# 132 male celebrities
man_list = [
'장근석',
'유아인',
'유동근',
'이서진',
'송일국',
'최재성',
'장혁',
'김민종',
'지창욱',
'주진모',
'안성기',
'이순재',
'신영균',
'이정재',
'공유',
'이영하',
'권상우',
'이승기',
'김우빈',
'최수종',
'강석우',
'차승원',
'이민호',
'차인표',
'소지섭',
'유승호',
'박근형',
'송중기',
'송승헌',
'고수',
'현빈',
'남궁원',
'김수현',
'강신성',
'배용준',
'강동원',
'조인성',
'정우성',
'원빈',
'장동건',
'강다니엘',
'지민',
'백현',
'뷔',
'정국',
'디오',
'찬열',
'진',
'슈가',
'카이',
'수호',
'세훈',
'첸',
'시우민',
'제이홉',
'레이',
'RM ',
'김우석',
'박지훈',
'옹성우',
'닉쿤',
'탑',
'서강준',
'김선호',
'차은우',
'김현중',
'엘',
'박형식',
'임시완',
'김재중',
'최시원',
'정용화',
'황민현',
'동해',
'강인',
'태민',
'남태현',
'우지',
'예성',
'지코',
'정지훈',
'양요섭',
'온유',
'은혁',
'마크',
'루카스',
'태양',
'박해진',
'김범',
'박유천',
'김준수',
'이준기',
'헨리',
'이홍기',
'박보검',
'준호',
'김지석',
'김강우',
'이상엽',
'박서준',
'이선균',
'정일우',
'변요한',
'이준',
'지성',
'최강창민',
'유노윤호',
'이동욱',
'이지훈',
'우도환',
'김래원',
'장기용',
'남주혁',
'박시후',
'주지훈',
'서인국',
'윤계상',
'유연석',
'조승우',
'정해인',
'하석진',
'이제훈',
'규현',
'윤두준',
'키',
'윤시윤',
'신성록',
'안재현',
'옥택연',
'하정우',
'류준열',
'조정석'
]
count = 0
driver = webdriver.Chrome('C:/Users/grand/Desktop/ML/CycleGAN/data_utils/chromedriver.exe')
driver.get('https://www.google.co.kr/imghp?hl=ko&tab=wi&ogbl')
for man in man_list:
try:
elem = driver.find_element_by_name("q") # Get Search Bar
elem.send_keys('{} 얼굴 사진'.format(man)) # Type search words
elem.send_keys(Keys.RETURN) # Press Enter
time.sleep(3)
try:
images = driver.find_elements_by_css_selector(".rg_i.Q4LuWd")
except:
print('{} Passed'.format(man))
continue
if len(images) == 0 :
print('{} No Images'.format(man))
continue
time.sleep(1)
inner_count = 0
for image in images:
try:
image.click()
time.sleep(3)
imgUrl = driver.find_element_by_xpath("/html/body/div[2]/c-wiz/div[3]/div[2]/div[3]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img").get_attribute('src')
if imgUrl[:4] != 'http':
continue
else:
urllib.request.urlretrieve(imgUrl, 'C:/Users/grand/Desktop/ML/CycleGAN/data/man/image_{}.jpg'.format(str(count)))
inner_count += 1
count += 1
if inner_count == 25 :
break
except:
continue
elem = driver.find_element_by_name("q")
elem.clear()
except:
print('{} Passed'.format(man))
continue
|
myoons/CycleGAN-Gender-Changer
|
data_utils/Korean_Crawling/man_crawling.py
|
man_crawling.py
|
py
| 4,090 |
python
|
en
|
code
| 6 |
github-code
|
6
|
73907153148
|
import tkinter as tk
import os
from style import fnt, ACTIVE_BG, BG, FG, ACCENT
class Option(tk.Checkbutton):
def __init__(self, parent, filename):
# Pull option content from file
with open(filename, 'r') as f:
self.content = f.readlines()
# Grab description of option from first line in file
title = self.content[0].replace('%', '').strip()
self.state = tk.IntVar()
super().__init__(parent, fg=FG, font=fnt(10), highlightbackground=BG,
activebackground=ACTIVE_BG, activeforeground=FG,
bg=BG, selectcolor=ACTIVE_BG, text=title, variable=self.state)
def make(self):
# If the option is selected, return content
if self.state.get():
return ''.join(self.content)
return ''
class Selector(tk.Frame):
def __init__(self, parent, folder):
super().__init__(parent, bg=BG)
title = tk.Label(self, font=fnt(10), text=f' Select {folder} ', fg=FG, bg=ACCENT)
title.pack(anchor=tk.W)
# Frame to hold Options
option_frame = tk.Frame(self, bg=BG, borderwidth=2, relief=tk.RIDGE)
option_frame.pack(fill=tk.X)
# Get path to the folder this module resides in
folder_path = os.path.join(os.path.dirname(__file__), folder)
# Create all option checkbuttons, providing an absolute path to each file
self.options = [Option(option_frame, os.path.join(folder_path, filename))
for filename in os.listdir(folder_path)]
# Place all checkbuttons
for option in self.options:
    option.pack(anchor=tk.W)
def make(self):
# Get the result of this selector as a string
return ''.join([option.make() for option in self.options])
|
johnathan-coe/TexInit
|
widgets.py
|
widgets.py
|
py
| 1,828 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72699110267
|
import os
import requests
class RegistryHandler(object):
get_repos_url = '/v2/_catalog'
get_tags_url = '/v2/{repo}/tags/list'
get_digests_url = '/v2/{repo}/manifests/{tag}'
delete_digest_url = '/v2/{repo}/manifests/{digest}'
def __init__(self, host):
self.host = host
def get_repos(self):
url = f'{self.host}{self.get_repos_url}'
res = requests.get(url).json()
return res['repositories']
def get_tags(self, repo):
url = f'{self.host}{self.get_tags_url.format(repo=repo)}'
res = requests.get(url).json()
return res['tags']
def get_digest(self, repo, tag):
headers = {"Accept": "application/vnd.docker.distribution.manifest.v2+json"}
url = f'{self.host}{self.get_digests_url.format(repo=repo, tag=tag)}'
resp = requests.get(url, headers=headers)
return resp.headers['Docker-Content-Digest']
def delete_digest(self, repo, digest):
url = f'{self.host}{self.delete_digest_url.format(repo=repo, digest=digest)}'
requests.delete(url)
if __name__ == '__main__':
rh = RegistryHandler('http://10.204.112.43:5001')
repos = rh.get_repos()
for repo in repos:
tags = rh.get_tags(repo)
if not tags:
continue
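# sort dotted tags by zero-padding each version component ('1.2.10' -> '000100020010'),
# then keep everything except the newest tag for deletion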
delete_tags = sorted(
filter(lambda tag: '.' in tag, tags),
key=lambda tag: ''.join([f'{int(n):04d}' for n in tag.split('.')])
)[:-1]
for tag in delete_tags:
try:
digest = rh.get_digest(repo, tag)
rh.delete_digest(repo, digest)
except Exception as e:
print(f'{repo}:{tag} delete fail: {e}')
os.system("docker exec `docker ps | grep registry | awk '{print $1}'` registry garbage-collect /etc/docker/registry/config.yml")
os.system("systemcel restart docker `docker ps | grep registry | awk '{print $1}'`")
# docker exec -it $ registry sh -c 'registry garbage-collect /etc/docker/registry/config.yml'
# curl -I -H "Accept: application/vnd.docker.distribution.manifest.v2+json" 10.204.114.43:5001/v2/$ImageName/manifests/$tag
|
zzyy8678/stady_python
|
delete_regestry.py
|
delete_regestry.py
|
py
| 2,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26038424036
|
from __future__ import annotations
from typing import ClassVar
from pants.core.util_rules.environments import EnvironmentField
from pants.engine.target import (
COMMON_TARGET_FIELDS,
BoolField,
Dependencies,
DictStringToStringField,
IntField,
MultipleSourcesField,
SpecialCasedDependencies,
StringField,
StringSequenceField,
Target,
ValidNumbers,
)
from pants.util.strutil import help_text
class AdhocToolDependenciesField(Dependencies):
pass
class AdhocToolRunnableField(StringField):
alias: ClassVar[str] = "runnable"
required = True
help = help_text(
lambda: f"""
Address to a target that can be invoked by the `run` goal (and does not set
`run_in_sandbox_behavior=NOT_SUPPORTED`). This will be executed along with any arguments
specified by `{AdhocToolArgumentsField.alias}`, in a sandbox with that target's transitive
dependencies, along with the transitive dependencies specified by
`{AdhocToolExecutionDependenciesField.alias}`.
"""
)
class AdhocToolOutputFilesField(StringSequenceField):
alias: ClassVar[str] = "output_files"
required = False
default = ()
help = help_text(
lambda: f"""
Specify the output files to capture, relative to the value of
`{AdhocToolWorkdirField.alias}`.
For directories, use `{AdhocToolOutputDirectoriesField.alias}`. At least one of
`{AdhocToolOutputFilesField.alias}` and `{AdhocToolOutputDirectoriesField.alias}` must be
specified.
Relative paths (including `..`) may be used, as long as the path does not ascend further
than the build root.
"""
)
class AdhocToolOutputDirectoriesField(StringSequenceField):
alias: ClassVar[str] = "output_directories"
required = False
default = ()
help = help_text(
lambda: f"""
Specify full directories (including recursive descendants) of output to capture, relative
to the value of `{AdhocToolWorkdirField.alias}`.
For individual files, use `{AdhocToolOutputFilesField.alias}`. At least one of
`{AdhocToolOutputFilesField.alias}` and `{AdhocToolOutputDirectoriesField.alias}` must be
specified.
Relative paths (including `..`) may be used, as long as the path does not ascend further
than the build root.
"""
)
class AdhocToolOutputDependenciesField(AdhocToolDependenciesField):
supports_transitive_excludes = True
alias: ClassVar[str] = "output_dependencies"
help = help_text(
lambda: f"""
Any dependencies that need to be present (as transitive dependencies) whenever the outputs
of this target are consumed (including as dependencies).
See also `{AdhocToolExecutionDependenciesField.alias}` and
`{AdhocToolRunnableDependenciesField.alias}`.
"""
)
class AdhocToolExecutionDependenciesField(SpecialCasedDependencies):
alias: ClassVar[str] = "execution_dependencies"
required = False
default = None
help = help_text(
lambda: f"""
The execution dependencies for this command.
Dependencies specified here are those required to make the command complete successfully
(e.g. file inputs, packages compiled from other targets, etc), but NOT required to make
the outputs of the command useful. Dependencies that are required to use the outputs
produced by this command should be specified using the
`{AdhocToolOutputDependenciesField.alias}` field.
If this field is specified, dependencies from `{AdhocToolOutputDependenciesField.alias}`
will not be added to the execution sandbox.
See also `{AdhocToolOutputDependenciesField.alias}` and
`{AdhocToolRunnableDependenciesField.alias}`.
"""
)
class AdhocToolRunnableDependenciesField(SpecialCasedDependencies):
alias: ClassVar[str] = "runnable_dependencies"
required = False
default = None
help = help_text(
lambda: f"""
The runnable dependencies for this command.
Dependencies specified here are those required to exist on the `PATH` to make the command
complete successfully (interpreters specified in a `#!` command, etc). Note that these
dependencies will be made available on the `PATH` with the name of the target.
See also `{AdhocToolOutputDependenciesField.alias}` and
`{AdhocToolExecutionDependenciesField.alias}`.
"""
)
class AdhocToolSourcesField(MultipleSourcesField):
# We solely register this field for codegen to work.
alias: ClassVar[str] = "_sources"
uses_source_roots = False
expected_num_files = 0
class AdhocToolArgumentsField(StringSequenceField):
alias: ClassVar[str] = "args"
default = ()
help = help_text(
lambda: f"Extra arguments to pass into the `{AdhocToolRunnableField.alias}` field."
)
class AdhocToolStdoutFilenameField(StringField):
alias: ClassVar[str] = "stdout"
default = None
help = help_text(
lambda: f"""
A filename to capture the contents of `stdout` to. Relative paths are
relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths
start at the build root.
"""
)
class AdhocToolStderrFilenameField(StringField):
alias: ClassVar[str] = "stderr"
default = None
help = help_text(
lambda: f"""
A filename to capture the contents of `stderr` to. Relative paths are
relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths
start at the build root.
"""
)
class AdhocToolTimeoutField(IntField):
alias: ClassVar[str] = "timeout"
default = 30
help = "Command execution timeout (in seconds)."
valid_numbers = ValidNumbers.positive_only
class AdhocToolExtraEnvVarsField(StringSequenceField):
alias: ClassVar[str] = "extra_env_vars"
help = help_text(
"""
Additional environment variables to provide to the process.
Entries are strings in the form `ENV_VAR=value` to use explicitly; or just
`ENV_VAR` to copy the value of a variable in Pants's own environment.
"""
)
class AdhocToolLogOutputField(BoolField):
alias: ClassVar[str] = "log_output"
default = False
help = "Set to true if you want the output logged to the console."
class AdhocToolWorkdirField(StringField):
alias: ClassVar[str] = "workdir"
default = "."
help = help_text(
"""
Sets the working directory for the process.
Values are relative to the build root, except in the following cases:
* `.` specifies the location of the `BUILD` file.
* Values beginning with `./` are relative to the location of the `BUILD` file.
* `/` or the empty string specifies the build root.
* Values beginning with `/` are also relative to the build root.
"""
)
class AdhocToolNamedCachesField(DictStringToStringField):
alias = "experimental_named_caches"
help = help_text(
"""
Named caches to construct for the execution.
See https://www.pantsbuild.org/docs/reference-global#named_caches_dir.
The keys of the mapping are the directory name to be created in the named caches dir.
The values are the name of the symlink (relative to the sandbox root) in the sandbox which
points to the subdirectory in the named caches dir.
NOTE: The named caches MUST be handled with great care. Processes accessing the named caches
can be run in parallel, and can be cancelled at any point in their execution (and
potentially restarted). That means that _every_ operation modifying the contents of the cache
MUST be concurrency and cancellation safe.
"""
)
class AdhocToolOutputRootDirField(StringField):
alias: ClassVar[str] = "root_output_directory"
default = "/"
help = help_text(
"""
Adjusts the location of files output by this target, when consumed as a dependency.
Values are relative to the build root, except in the following cases:
* `.` specifies the location of the `BUILD` file.
* Values beginning with `./` are relative to the location of the `BUILD` file.
* `/` or the empty string specifies the build root.
* Values beginning with `/` are also relative to the build root.
"""
)
class AdhocToolTarget(Target):
alias: ClassVar[str] = "adhoc_tool"
core_fields = (
*COMMON_TARGET_FIELDS,
AdhocToolRunnableField,
AdhocToolArgumentsField,
AdhocToolExecutionDependenciesField,
AdhocToolOutputDependenciesField,
AdhocToolRunnableDependenciesField,
AdhocToolLogOutputField,
AdhocToolOutputFilesField,
AdhocToolOutputDirectoriesField,
AdhocToolSourcesField,
AdhocToolTimeoutField,
AdhocToolExtraEnvVarsField,
AdhocToolWorkdirField,
AdhocToolOutputRootDirField,
AdhocToolStdoutFilenameField,
AdhocToolStderrFilenameField,
EnvironmentField,
)
help = help_text(
lambda: f"""
Execute any runnable target for its side effects.
Example BUILD file:
{AdhocToolTarget.alias}(
{AdhocToolRunnableField.alias}=":python_source",
{AdhocToolArgumentsField.alias}=[""],
{AdhocToolExecutionDependenciesField.alias}=[":scripts"],
{AdhocToolOutputDirectoriesField.alias}=["results/"],
{AdhocToolOutputFilesField.alias}=["logs/my-script.log"],
)
shell_sources(name="scripts")
"""
)
# ---
# `system_binary` target
# ---
class SystemBinaryNameField(StringField):
alias: ClassVar[str] = "binary_name"
required = True
help = "The name of the binary to find."
class SystemBinaryExtraSearchPathsField(StringSequenceField):
alias: ClassVar[str] = "extra_search_paths"
default = ()
help = help_text(
"""
Extra search paths to look for the binary. These take priority over Pants' default
search paths.
"""
)
class SystemBinaryFingerprintPattern(StringField):
alias: ClassVar[str] = "fingerprint"
required = False
default = None
help = help_text(
"""
A regular expression which will be used to match the fingerprint outputs from
candidate binaries found during the search process.
"""
)
class SystemBinaryFingerprintArgsField(StringSequenceField):
alias: ClassVar[str] = "fingerprint_args"
default = ()
help = help_text(
"Specifies arguments that will be used to run the binary during the search process."
)
class SystemBinaryFingerprintDependenciesField(AdhocToolRunnableDependenciesField):
alias: ClassVar[str] = "fingerprint_dependencies"
help = help_text(
"""
Specifies any runnable dependencies that need to be available on the `PATH` when the binary
is run, so that the search process may complete successfully. The name of the target must
be the name of the runnable dependency that is called by this binary.
"""
)
class SystemBinaryTarget(Target):
alias: ClassVar[str] = "system_binary"
core_fields = (
*COMMON_TARGET_FIELDS,
SystemBinaryNameField,
SystemBinaryExtraSearchPathsField,
SystemBinaryFingerprintPattern,
SystemBinaryFingerprintArgsField,
SystemBinaryFingerprintDependenciesField,
)
help = help_text(
lambda: f"""
A system binary that can be run with `pants run` or consumed by `{AdhocToolTarget.alias}`.
Pants will search for binaries with name `{SystemBinaryNameField.alias}` in the search
paths provided, as well as default search paths. If
`{SystemBinaryFingerprintPattern.alias}` is specified, each binary that is located will be
executed with the arguments from `{SystemBinaryFingerprintArgsField.alias}`. Any binaries
whose output does not match the pattern will be excluded.
The first non-excluded binary will be the one that is resolved.
"""
)
|
pantsbuild/pants
|
src/python/pants/backend/adhoc/target_types.py
|
target_types.py
|
py
| 12,321 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
35282991136
|
import subprocess
import argparse
import datetime
import json
import time
def get_options():
parser = argparse.ArgumentParser(
description='Provision a Kubernetes cluster in GKE.')
parser.add_argument(
'-c', '--cluster', type=str, default=None,
help='K8s cluster to configure'
)
parser.add_argument(
'-i', '--image', type=str, default='',
help='Base distro OS image used in nodes.'
)
parser.add_argument(
'-z', '--zone', type=str, default=None,
help='Zone where the GPU cluster is running in.'
)
args = parser.parse_args()
return args
def run_cmd(cmd):
output = ''
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Error running command: {}".format(cmd))
return output
def wait_for_gpus(cluster_name, timeout=datetime.timedelta(minutes=15)):
''' Wait until nodes are available in GPU cluster. '''
cmd = [
'kubectl', 'get', 'nodes',
'-l', 'cloud.google.com/gke-nodepool={}-gpu-pool'.format(cluster_name),
'-o=json'
]
end_time = datetime.datetime.now() + timeout
print('Waiting for GPUs to be ready ', end='')
while datetime.datetime.now() <= end_time:
output = run_cmd(cmd)
items = json.loads(output.decode('UTF-8')).get("items", [])
for i in items:
gpus = int(i['status']['capacity'].get('nvidia.com/gpu', '0'))
if gpus > 0:
print('OK')
return
print('.', end='')
time.sleep(10)
if __name__ == '__main__':
opts = get_options()
print('Getting credentials for cluster ...')
run_cmd(['gcloud', 'container', 'clusters', 'get-credentials', opts.cluster, '--zone', opts.zone])
print('Enabling Application CRD...')
app_crd_path = 'https://raw.githubusercontent.com/GoogleCloudPlatform/marketplace-k8s-app-tools/master/crd/app-crd.yaml'
run_cmd(['kubectl', 'apply', '-f', app_crd_path])
print('Enabling GPUs in GPU cluster...')
nv_daemonset = 'https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/nvidia-driver-installer/cos/daemonset-preloaded.yaml'
run_cmd(['kubectl', 'apply', '-f', nv_daemonset])
wait_for_gpus(opts.cluster)
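# Example invocation (cluster name and zone are placeholders):
# python finalize.py --cluster my-gpu-cluster --zone us-central1-a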
|
NVIDIA/nvindex-cloud
|
provision/gke/finalize.py
|
finalize.py
|
py
| 2,346 |
python
|
en
|
code
| 10 |
github-code
|
6
|
3625906365
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import DetailView, View, UpdateView, ListView, TemplateView
from django.urls import reverse_lazy
from django.contrib import messages
from django.http import Http404
from .models import Checkout
from .forms import UpdateCheckoutForm
from products.models import Product
from accounts.models import Account
from django.shortcuts import render_to_response
from django.template import RequestContext
# class CheckoutView(DetailView):
# template_name = 'accounts/profile.html'
# success_url = reverse_lazy('orders:checkout')
# queryset = ''
#
# def get_object(self, **kwargs):
# id = self.kwargs['id']
# product = get_object_or_404(Product, id=id, publish=True)
# checkout = Checkout.objects.create(
# user=self.request.user,
# name=product.name,
# price=product.price,
# quantity=product.quantity,
# discount=product.discount,
# )
# return get_object_or_404(Product, id=id, publish=True)
#
# def get_context_data(self, **kwargs):
# context = super(CheckoutView, self).get_context_data(**kwargs)
# context['title'] = 'Profile'
# context['orders'] = Checkout.objects.filter(user=self.request.user)
# return context
# add to cart form
class CheckoutView(View):
template_name = 'orders/checkout.html'
def post(self, request, id):
id = self.kwargs['id']
qs = Checkout.objects.filter(product_id=id, status='waiting')
if qs.exists():
messages.success(request, 'You cannot add this product because it has already been added!')
return redirect('orders:checkout')
else:
product = get_object_or_404(Product, id=id, publish=True)
checkout = Checkout.objects.create(
user=self.request.user,
product_id=id,
name=product.name,
slug=product.slug,
price=product.price,
quantity=1,
discount=product.discount,
image=product.image,
)
messages.success(request, 'Successfully Added!')
return redirect('orders:checkout')
# all orders page that submitted
class OrdersView(ListView):
template_name = 'orders/orders.html'
queryset = ''
def get_context_data(self, **kwargs):
context = super(OrdersView, self).get_context_data(**kwargs)
context['title'] = 'Pending Orders'
if self.request.user.is_authenticated:
context['orders'] = Checkout.objects.filter(user=self.request.user, status='pending')
else:
raise Http404
return context
# all orders page that submitted & accepted
class AcceptedOrdersView(ListView):
template_name = 'orders/orders.html'
queryset = ''
def get_context_data(self, **kwargs):
context = super(AcceptedOrdersView, self).get_context_data(**kwargs)
context['title'] = 'Accepted Orders'
if self.request.user.is_authenticated:
context['orders'] = Checkout.objects.filter(user=self.request.user, status='accepted')
else:
raise Http404
return context
# all orders page that submitted & rejected
class RejectedOrdersView(ListView):
template_name = 'orders/orders.html'
queryset = ''
def get_context_data(self, **kwargs):
context = super(RejectedOrdersView, self).get_context_data(**kwargs)
context['title'] = 'Rejected Orders'
if self.request.user.is_authenticated:
context['orders'] = Checkout.objects.filter(user=self.request.user, status='rejected')
else:
raise Http404
return context
# checkout page
class CheckoutOrderView(ListView):
template_name = 'orders/checkout.html'
queryset = ''
def get_context_data(self, **kwargs):
context = super(CheckoutOrderView, self).get_context_data(**kwargs)
context['title'] = 'Cart'
if self.request.user.is_authenticated:
qs = Checkout.objects.filter(user=self.request.user, status='waiting')
context['orders'] = qs
orders = qs
total = 0
for order in orders:
total += order.price * order.quantity
context['total'] = total
else:
raise Http404
return context
# # update order page
# class CheckoutUpdateView(UpdateView):
# form_class = UpdateCheckoutForm
# model = Checkout
# template_name = 'orders/update_order.html'
# success_url = reverse_lazy('orders:checkout')
#
# # def get_success_url(self):
# # return reverse('orders:checkout')
#
# def get_context_data(self, **kwargs):
# context = super(CheckoutUpdateView, self).get_context_data(**kwargs)
# context['title'] = 'Update Order {}'.format(Checkout.objects.filter(id=self.kwargs['pk']).first().name)
# return context
# update order page
class CheckoutUpdateView(View):
template_name = 'orders/checkout.html'
def post(self, request, pk):
quantity = int(request.POST['quantity'])
product_id = request.POST['product_id']
available = Product.objects.filter(id=product_id).first()
if quantity > available.quantity:
messages.success(request, 'Quantity exceeds the available quantity ({}) for product: {}'.format(available.quantity, available.name))
return redirect('orders:checkout')
if quantity < 1:
    messages.success(request, 'Quantity cannot be less than 1 for product: {}'.format(available.name))
return redirect('orders:checkout')
qs = Checkout.objects.filter(id=pk, status='waiting')
if qs.exists() and qs.count() == 1:
product_quantity = qs.first()
product_quantity.quantity = quantity
product_quantity.save()
messages.success(request, 'Successfully updated!')
return redirect('orders:checkout')
# delete order
class OrderDeleteView(View):
template_name = 'orders/checkout.html'
def post(self, request, id):
username = self.request.user
if username is None:
raise Http404
else:
qs = Checkout.objects.filter(id=id)
if qs.exists() and qs.count() == 1:
order = qs.first()
order.delete()
return redirect('orders:checkout')
# Buy orders
class BuyOrdersView(View):
template_name = 'orders/orders.html'
def post(self, request):
username = self.request.user
if username is None:
raise Http404
else:
user_id = Account.objects.filter(user=username).first()
account = Account.objects.filter(user=username)
qs = Checkout.objects.filter(user=username, status='waiting')
if account.exists():
user = account.first()
if user.gender is None \
or user.country is None \
or user.region is None \
or user.address1 is None \
or user.phone_number1 is None \
or user.phone_number2 is None:
messages.success(request, 'Add your information first to complete the purchase!')
return redirect('accounts:update', pk=user_id.id)
if qs.exists():
for order in qs:
order.status = 'pending'
order.save()
product = Product.objects.filter(id=order.product_id).first()
product.quantity -= order.quantity
product.number_of_sales += 1
product.save()
return redirect('orders:thank')
class BuyThankView(TemplateView):
template_name = "orders/thank.html"
def get_context_data(self, **kwargs):
context = super(BuyThankView, self).get_context_data(**kwargs)
context['title'] = 'Thank You'
return context
def handler404(request):
response = render_to_response('404.html', {}, context_instance=RequestContext(request))
response.status_code = 404
return response
|
tegarty/E-Commerce_django
|
orders/views.py
|
views.py
|
py
| 8,371 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37176821074
|
import os
def clear_screen():
os.system('cls')
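# Records in School.csv are assumed to be semicolon-separated:
# id;name;date_birth;sch_class;mid_ball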
def recording_file(data):
with open('School.csv', 'w', encoding='utf-8') as file:
for line in data:
file.write(line)
def add_contact(name, date_birth, sch_class, mid_ball):
with open('School.csv','r', encoding='utf-8') as f:
data = f.readlines()
last_line = data[-1]
new_line = []
id = str(int(last_line.split(';')[0]) + 1)
new_line = [id, name, date_birth, sch_class, mid_ball]
res = ';'.join(new_line)+'\n'
data.append(res)
return data
def dell_contact(num):
with open('School.csv','r', encoding='utf-8') as f:
data = f.readlines()
with open('School.csv', 'w', encoding='utf-8') as file:
for line in data:
id = int(line.split(';')[0])
if id != num:
file.write(line)
def find_contact(name, date_birth, sch_class):
tip = [name, date_birth, sch_class]
with open('School.csv','r', encoding='utf-8') as f:
data = f.readlines()
res = []
for el in tip:
if el:
for line in data:
if el in line:
res.append(line)
return(res)
def edit_contact(num, name, date_birth, sch_class, mid_ball):
tip = [name, date_birth, sch_class, mid_ball]
with open('School.csv', 'r', encoding='utf-8') as f:
    data = f.readlines()
for i, line in enumerate(data):
    id = int(line.split(';')[0])
    if id != num:
        continue
    lst = line.split(';')
    # replace only the fields that were actually supplied (0 means "keep as is")
    for ind, el in enumerate(tip):
        if el:
            lst[ind + 1] = el
    data[i] = ';'.join(lst)
return data
|
Svetabtr/Homework_Python
|
hometask_8/modul_work.py
|
modul_work.py
|
py
| 2,049 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27213133585
|
import sys
M = int(sys.stdin.readline().rstrip())
S = set()
for i in range(M):
line = sys.stdin.readline().rstrip().split()
command = line[0]
if command == 'all':
        S = set(range(1, 21))
elif command == 'empty':
S = set()
else:
x = int(line[1])
if command == 'add':
if x not in S:
S.add(x)
elif command == 'remove':
if x in S:
S.remove(x)
elif command == 'check':
if x in S:
print(1)
else:
print(0)
elif command == 'toggle':
if x in S:
S.remove(x)
else:
S.add(x)
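# An equivalent bit-mask sketch (a common speed-up for this problem, since
# the universe is fixed to 1..20; illustrative, not part of the original):
# S = 0
# S |= 1 << x          # add
# S &= ~(1 << x)       # remove
# print((S >> x) & 1)  # check
# S ^= 1 << x          # toggle
# S = (1 << 21) - 2    # all (bits 1..20 set)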
|
hammii/Algorithm
|
BAEKJOON_python/11723_집합.py
|
11723_집합.py
|
py
| 762 |
python
|
en
|
code
| 2 |
github-code
|
6
|
37373909261
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 08:08:17 2017
@author: ivyONS
"""
IVY_AUTHORISATION = ''  # please enter your credentials
DEFAULT_CONFIG = {
"subBuildingName":{
"pafSubBuildingNameBoost":1.5,
"lpiSaoTextBoost":1.5,
"lpiSaoStartNumberBoost":1.0,
"lpiSaoStartSuffixBoost":1.0,
"lpiSaoPaoStartSuffixBoost": 0.5
},
"subBuildingRange": {
"lpiSaoStartNumberBoost":1.0,
"lpiSaoStartSuffixBoost":1.0,
"lpiSaoEndNumberBoost":1.0,
"lpiSaoEndSuffixBoost":1.0,
"lpiSaoStartEndBoost":0.1
},
"buildingName":{
"lpiPaoStartSuffixBoost":3.5,
"pafBuildingNameBoost":2.5,
"lpiPaoTextBoost":2.5,
"lpiPaoStartNumberBoost":2.5,
"lpiSaoPaoStartSuffixBoost": 0.5
},
"buildingNumber":{
"pafBuildingNumberBoost":3.0,
"lpiPaoStartNumberBoost":3.5,
"lpiPaoEndNumberBoost":0.1
},
"buildingRange":{
"lpiPaoStartNumberBoost":2.0,
"lpiPaoStartSuffixBoost":2.0,
"lpiPaoEndNumberBoost":2.0,
"lpiPaoEndSuffixBoost":2.0,
"pafBuildingNumberBoost":0.1,
"lpiPaoStartEndBoost":0.1
},
"streetName":{
"pafThoroughfareBoost":2.0,
"pafWelshThoroughfareBoost":2.0,
"pafDependentThoroughfareBoost":0.5,
"pafWelshDependentThoroughfareBoost":0.5,
"lpiStreetDescriptorBoost":2.0
},
"townName":{
"pafPostTownBoost":1.0,
"pafWelshPostTownBoost":1.0,
"lpiTownNameBoost":1.0,
"pafDependentLocalityBoost":0.5,
"pafWelshDependentLocalityBoost":0.5,
"lpiLocalityBoost":0.5,
"pafDoubleDependentLocalityBoost":0.2,
"pafWelshDoubleDependentLocalityBoost":0.2
},
"postcode":{
"pafPostcodeBoost":1.0,
"lpiPostcodeLocatorBoost":1.0,
"postcodeInOutBoost":0.5,
"postcodeOutBoost":0.8,
"postcodeInBoost":0.3
},
"organisationName":{
"pafOrganisationNameBoost":1.0,
"lpiOrganisationBoost":1.0,
"lpiPaoTextBoost":1.0,
"lpiLegalNameBoost":1.0,
"lpiSaoTextBoost":0.5
},
"departmentName":{
"pafDepartmentNameBoost":1.0,
"lpiLegalNameBoost":0.5
},
"locality":{
"pafPostTownBoost":0.2,
"pafWelshPostTownBoost":0.2,
"lpiTownNameBoost":0.2,
"pafDependentLocalityBoost":0.6,
"pafWelshDependentLocalityBoost":0.6,
"lpiLocalityBoost":0.6,
"pafDoubleDependentLocalityBoost":0.3,
"pafWelshDoubleDependentLocalityBoost":0.3
},
"fallback" :{
"fallbackQueryBoost":0.5,
"fallbackPafBoost":1.0,
"fallbackLpiBoost":1.0,
"fallbackPafBigramBoost":0.4,
"fallbackLpiBigramBoost":0.4,
"fallbackMinimumShouldMatch":"-40%",
"bigramFuzziness": "0"
},
"nisra" : {
"partialNiBoostBoost":1.1,
"partialEwBoostBoost":0.5,
"partialAllBoost":0.8,
"fullFallBackNiBoost":1.0,
"fullFallBackBigramNiBoost":0.4
},
"excludingDisMaxTieBreaker":0.0,
"includingDisMaxTieBreaker":0.5,
"topDisMaxTieBreaker":1.0,
"paoSaoMinimumShouldMatch":"-45%",
"organisationDepartmentMinimumShouldMatch":"30%",
"mainMinimumShouldMatch":"-40%"
}
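# A hedged sketch of how such a parameter set might be exercised against the
# address-matching API (the endpoint URL and payload key below are assumptions,
# not part of this file):
# import json, requests
# r = requests.post(ASSUMED_API_URL,  # hypothetical endpoint
#                   headers={"Authorization": IVY_AUTHORISATION},
#                   json={"configOverwrite": json.dumps(DEFAULT_CONFIG)})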
|
ONSdigital/address-index-data
|
DataScience/Analytics/beta/default_param.py
|
default_param.py
|
py
| 3,430 |
python
|
en
|
code
| 18 |
github-code
|
6
|
34777830541
|
# Set up logging
import sys
import logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
level=logging.WARNING,
)
logger = logging.getLogger(__name__)
from typing import Optional
from dataclasses import dataclass, field
import os
import json
from contextlib import nullcontext
from alive_progress import alive_bar
from transformers.hf_argparser import HfArgumentParser
from transformers.models.auto import AutoConfig, AutoTokenizer, AutoModelForSeq2SeqLM
from seq2seq.utils.pipeline import ConversationalText2SQLGenerationPipeline, Text2SQLGenerationPipeline, Text2SQLInput, ConversationalText2SQLInput
from seq2seq.utils.picard_model_wrapper import PicardArguments, PicardLauncher, with_picard
from seq2seq.utils.dataset import DataTrainingArguments
@dataclass
class PredictionOutputArguments:
"""
Arguments pertaining to execution.
"""
model_path: str = field(
default="tscholak/cxmefzzi",
metadata={"help": "Path to pretrained model"},
)
cache_dir: Optional[str] = field(
default="/tmp",
metadata={"help": "Where to cache pretrained models and data"},
)
db_path: str = field(
default="database",
metadata={"help": "Where to to find the sqlite files"},
)
inputs_path: str = field(default="data/dev.json", metadata={"help": "Where to find the inputs"})
output_path: str = field(
default="predicted_sql.txt", metadata={"help": "Where to write the output queries"}
)
device: int = field(
default=0,
metadata={
"help": "Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU. A non-negative value will run the model on the corresponding CUDA device id."
},
)
conversational: bool = field(default=False, metadata={"help": "Whether or not the inputs are conversations"})
def main():
# See all possible arguments by passing the --help flag to this program.
parser = HfArgumentParser((PicardArguments, PredictionOutputArguments, DataTrainingArguments))
picard_args: PicardArguments
prediction_output_args: PredictionOutputArguments
data_training_args: DataTrainingArguments
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
picard_args, prediction_output_args, data_training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
picard_args, prediction_output_args, data_training_args = parser.parse_args_into_dataclasses()
if os.path.isfile(prediction_output_args.output_path):
raise RuntimeError("file `{}` already exists".format(prediction_output_args.output_path))
# Initialize config
config = AutoConfig.from_pretrained(
prediction_output_args.model_path,
cache_dir=prediction_output_args.cache_dir,
max_length=data_training_args.max_target_length,
num_beams=data_training_args.num_beams,
num_beam_groups=data_training_args.num_beam_groups,
diversity_penalty=data_training_args.diversity_penalty,
)
# Initialize tokenizer
tokenizer = AutoTokenizer.from_pretrained(
prediction_output_args.model_path,
cache_dir=prediction_output_args.cache_dir,
use_fast=True,
)
# Initialize Picard if necessary
with PicardLauncher() if picard_args.launch_picard else nullcontext(None):
# Get Picard model class wrapper
if picard_args.use_picard:
model_cls_wrapper = lambda model_cls: with_picard(
model_cls=model_cls, picard_args=picard_args, tokenizer=tokenizer
)
else:
model_cls_wrapper = lambda model_cls: model_cls
# Initialize model
model = model_cls_wrapper(AutoModelForSeq2SeqLM).from_pretrained(
prediction_output_args.model_path,
config=config,
cache_dir=prediction_output_args.cache_dir,
)
if prediction_output_args.conversational:
conversational_text2sql(model, tokenizer, prediction_output_args, data_training_args)
else:
text2sql(model, tokenizer, prediction_output_args, data_training_args)
def get_pipeline_kwargs(
model, tokenizer: AutoTokenizer, prediction_output_args: PredictionOutputArguments, data_training_args: DataTrainingArguments
) -> dict:
return {
"model": model,
"tokenizer": tokenizer,
"db_path": prediction_output_args.db_path,
"prefix": data_training_args.source_prefix,
"normalize_query": data_training_args.normalize_query,
"schema_serialization_type": data_training_args.schema_serialization_type,
"schema_serialization_with_db_id": data_training_args.schema_serialization_with_db_id,
"schema_serialization_with_db_content": data_training_args.schema_serialization_with_db_content,
"device": prediction_output_args.device,
}
def text2sql(model, tokenizer, prediction_output_args, data_training_args):
    # Initialize generation pipeline
pipe = Text2SQLGenerationPipeline(**get_pipeline_kwargs(model, tokenizer, prediction_output_args, data_training_args))
with open(prediction_output_args.inputs_path) as fp:
questions = json.load(fp)
with alive_bar(len(questions)) as bar:
for question in questions:
try:
outputs = pipe(inputs=Text2SQLInput(question["question"],question["db_id"]))
output = outputs[0]
query = output["generated_text"]
except Exception as e:
logger.error(e)
query = ""
logger.info("writing `{}` to `{}`".format(query, prediction_output_args.output_path))
bar.text(query)
bar()
with open(prediction_output_args.output_path, "a") as fp:
fp.write(query + "\n")
def conversational_text2sql(model, tokenizer, prediction_output_args, data_training_args):
    # Initialize generation pipeline
pipe = ConversationalText2SQLGenerationPipeline(
**get_pipeline_kwargs(model, tokenizer, prediction_output_args, data_training_args)
)
with open(prediction_output_args.inputs_path) as fp:
conversations = json.load(fp)
length = sum(len(conversation["interaction"]) for conversation in conversations)
with alive_bar(length) as bar:
for conversation in conversations:
utterances = []
for turn in conversation["interaction"]:
utterances.extend((utterance.strip() for utterance in turn["utterance"].split(sep="|")))
try:
outputs = pipe(
inputs=ConversationalText2SQLInput(list(utterances),
db_id=conversation["database_id"])
)
output = outputs[0]
query = output["generated_text"]
except Exception as e:
logger.error(e)
query = ""
logger.info("writing `{}` to `{}`".format(query, prediction_output_args.output_path))
bar.text(query)
bar()
with open(prediction_output_args.output_path, "a") as fp:
fp.write(query + "\n")
with open(prediction_output_args.output_path, "a") as fp:
fp.write("\n")
if __name__ == "__main__":
main()
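# Typical invocations (paths are illustrative; flags follow the dataclass
# fields defined above):
#   python seq2seq/prediction_output.py configs/prediction.json
#   python seq2seq/prediction_output.py --model_path tscholak/cxmefzzi --db_path database --inputs_path data/dev.json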
|
ServiceNow/picard
|
seq2seq/prediction_output.py
|
prediction_output.py
|
py
| 7,647 |
python
|
en
|
code
| 299 |
github-code
|
6
|
45254772596
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 20 23 20:40:00
@author: kirsh012
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import joblib
from sklearn.model_selection import PredefinedSplit, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.utils import class_weight
### Load custom modules
import nn_models as nnm
import dataprocessing as dp
import visualization as viz
### Load the data for comparison
data, varnames, target = dp.load_data_nn('1-sf', sensor='both', dlh=0, keep_SH=False, return_target=True)
# Split data into time oriented chunks
train_idx, test_idx, val_idx = nnm.split_data_cv_indx(data,target)
train_labels = target[train_idx]
test_labels = target[test_idx]
val_labels = target[val_idx]
print("Train labels shape: ", train_labels.shape)
print("Test labels shape: ", test_labels.shape)
print("Val labels shape: ", val_labels.shape)
train_data = data[train_idx,:]
test_data = data[test_idx,:]
val_data = data[val_idx,:]
print("Train shape: ", train_data.shape)
print("Test shape: ", test_data.shape)
print("Val shape: ", val_data.shape)
# Use indices to make PredefinedSplit for hyperparameter optimization
train_idx = np.full( (train_data.shape[0],) , -1, dtype=int)
val_idx = np.full( (val_data.shape[0], ) , 0, dtype=int)
test_fold = np.append(train_idx, val_idx)
print(test_fold.shape)
ps = PredefinedSplit(test_fold)
print(ps)
combined_train_data = np.vstack((train_data, val_data))
combined_train_labels = np.vstack((train_labels.reshape(-1,1), val_labels.reshape(-1,1))).ravel()
print("Combined train data shape: ", combined_train_data.shape)
print("Combined labels shape:", combined_train_labels)
param_grid = {
'n_estimators': [100, 200, 500, 1000, 2000, 5000],
'max_depth': [5, 10, 15, 20, 25, None]
}
# Compute the class weights
train_weights = class_weight.compute_class_weight(class_weight='balanced',
classes=np.unique(combined_train_labels), y=combined_train_labels)
train_weights = {i: weight for i, weight in enumerate(train_weights)}
from pathlib import Path
filename = Path("tuned_rf_model_weighted.pkl")
### Save the model
if not filename.exists():
print("Running the hyperparameter testing model...")
clf = GridSearchCV(RandomForestClassifier(class_weight=train_weights), param_grid=param_grid, scoring='roc_auc_ovr_weighted', cv = ps, verbose=3)
clf.fit(combined_train_data, combined_train_labels)
joblib.dump(clf, filename)
else:
clf = joblib.load(filename)
# Print hyperparameter results
print("Report: \n", pd.DataFrame(clf.cv_results_))
print("Best inner loop score: ", clf.best_score_)
print("Best parameters: ", clf.best_params_)
# Predict on the train, test, and validation splits
train_preds = clf.predict_proba(train_data)
test_preds = clf.predict_proba(test_data)
val_preds = clf.predict_proba(val_data)
### Visualize Performance
# Return AU-ROC
fpr_test, tpr_test, thresh_test = roc_curve(test_labels, test_preds[:,1])
fpr_train, tpr_train, thresh_train = roc_curve(train_labels, train_preds[:,1])
fpr_val, tpr_val, thresh_val = roc_curve(val_labels, val_preds[:,1])
# Return AU-PRC
ppr_test, rec_test, pthresh_test = precision_recall_curve(test_labels, test_preds[:,1])
ppr_train, rec_train, pthresh_train = precision_recall_curve(train_labels, train_preds[:,1])
ppr_val, rec_val, pthresh_val = precision_recall_curve(val_labels, val_preds[:,1])
viz.plot_roc_curve(tpr_train, fpr_train, tpr_val, fpr_val, tpr_test, fpr_test, title = "RandomForest AU-ROC")
viz.plot_prc_curve(rec_train, ppr_train, rec_val, ppr_val, rec_test, ppr_test, title = "RandomForest AU-PRC")
plt.show()
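# The imported (but so far unused) `auc` helper can summarize the curves
# above, e.g. (recall from precision_recall_curve is monotonic decreasing,
# which sklearn's auc accepts):
print("Test AU-ROC: ", auc(fpr_test, tpr_test))
print("Test AU-PRC: ", auc(rec_test, ppr_test))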
|
tk27182/masters-thesis
|
Code/run_test_randomforest.py
|
run_test_randomforest.py
|
py
| 3,799 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14505164780
|
import typing
from qittle.http.client import ABCHTTPClient, AiohttpClient
from .abc import ABCSessionManager
class SessionManager(ABCSessionManager):
def __init__(self, http_client: typing.Optional[typing.Type[ABCHTTPClient]] = None):
self.http_client = http_client or AiohttpClient
self._active_session: typing.Optional[ABCHTTPClient] = None
async def __aenter__(self) -> ABCHTTPClient:
self._active_session = self.http_client()
return self._active_session
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._active_session.close()
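# A minimal usage sketch (illustrative):
# import asyncio
#
# async def main():
#     async with SessionManager() as client:
#         ...  # `client` is an ABCHTTPClient instance, closed on exit
#
# asyncio.run(main())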
|
cyanlabs-org/qittle
|
qittle/http/session/manager.py
|
manager.py
|
py
| 621 |
python
|
en
|
code
| 8 |
github-code
|
6
|
13276813866
|
import sys
from os.path import join
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QListWidgetItem, QWidget
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
import sqlite3
from functools import partial
class EditInfo(QWidget):
def __init__(self, id) -> None:
super().__init__()
uic.loadUi(join("task5", "addEditCoffeeForm.ui"), self)
self.id = id
self.edit.clicked.connect(self.update)
def update(self):
db = sqlite3.connect(join("task5", "coffee.sqlite"))
cur = db.cursor()
try:
name = self.bookTitle.text()
elem1 = self.bookAuthor.text()
elem2 = self.bookReleaseYear.text()
desc = self.bookGenre.text()
for i in [name, elem1, elem2, desc]:
if not (bool(i) and bool(i.strip())):
raise ValueError
for w in i:
if w.isdigit():
raise ValueError
cs = float(self.costl.text())
vl = float(self.volumel.text())
except ValueError:
self.errors.setText("Введены некорректные данные!!!")
else:
cur.execute(f'''
UPDATE coffee SET name_of_the_variety = ?, degree_of_roasting = ?,
ground_or_in_grains = ?,
taste_description = ?, cost = ?, packing_volume = ?
WHERE id = ?''',
[name, elem1, elem2, desc, cs, vl, self.id])
db.commit()
db.close()
def addNewCoffeeSort(self):
# req = INSERT INTO coffee (name_of_the_variety, degree_of_roasting, ground_or_in_grains, taste_description, cost, packing_volume)
# VALUES ("1", "1", "1", "1", 1, 1)
db = sqlite3.connect(join("task5", "coffee.sqlite"))
cur = db.cursor()
try:
name = self.bookTitle.text()
elem1 = self.bookAuthor.text()
elem2 = self.bookReleaseYear.text()
desc = self.bookGenre.text()
for i in [name, elem1, elem2, desc]:
if not (bool(i) and bool(i.strip())):
raise ValueError
for w in i:
if w.isdigit():
raise ValueError
cs = float(self.costl.text())
vl = float(self.volumel.text())
        except ValueError:
            self.errors.setText("Invalid data entered!!!")
        else:
            # Run the INSERT only when validation succeeded (the original
            # body sat inside the except block, so nothing was ever saved).
            cur.execute('''
            INSERT INTO coffee (name_of_the_variety, degree_of_roasting, ground_or_in_grains, taste_description, cost, packing_volume)
            VALUES (?, ?, ?, ?, ?, ?)''',
                        [name, elem1, elem2, desc, cs, vl])
            db.commit()
        db.close()
class AddNewInfo(QWidget):
def __init__(self) -> None:
super().__init__()
uic.loadUi(join("task5", "addEditCoffeeForm2.ui"), self)
self.add.clicked.connect(self.addNewItem)
def addNewItem(self):
db = sqlite3.connect(join("task5", "coffee.sqlite"))
cur = db.cursor()
try:
name = self.bookTitle.text()
elem1 = self.bookAuthor.text()
elem2 = self.bookReleaseYear.text()
desc = self.bookGenre.text()
for i in [name, elem1, elem2, desc]:
if not (bool(i) and bool(i.strip())):
raise ValueError
for w in i:
if w.isdigit():
raise ValueError
cs = float(self.costl.text())
vl = float(self.volumel.text())
except ValueError:
self.errors.setText("Введены некорректные данные!!!")
else:
cur.execute(f'''
INSERT INTO coffee (name_of_the_variety, degree_of_roasting, ground_or_in_grains, taste_description, cost, packing_volume)
VALUES (?, ?, ?, ?, ?, ?)''',
[name, elem1, elem2, desc, cs, vl])
db.commit()
db.close()
class MyWidget(QMainWindow):
def __init__(self) -> None:
super().__init__()
uic.loadUi(join("task5", "main.ui"), self)
self.btn.clicked.connect(self.search)
self.addNewCoffe.clicked.connect(self.addSomeInfo)
def search(self):
self.listWidget.clear()
# searchText = self.lineEdit.text()
# db = sqlite3.connect(join("QT_Standalone", "task7", "books.db"))
db = sqlite3.connect(join("task5", "coffee.sqlite"))
cur = db.cursor()
res = cur.execute(
f'''SELECT * FROM coffee''')
for elem in res:
# print(elem)
btn = QPushButton(f"{elem[1]}(нажми для большей информации)", self)
clickFunc = partial(self.some, elem[0], elem[1],
elem[2], elem[3], elem[4], elem[5], elem[6])
btn.clicked.connect(clickFunc)
item = QListWidgetItem()
item.setSizeHint(btn.sizeHint())
self.listWidget.addItem(item)
self.listWidget.setItemWidget(item, btn)
def some(self, id, name, author, year, genre, cost, volume):
self.pop = EditInfo(id)
        # the required input fields are below
self.pop.bookAuthor.setText(author)
self.pop.bookTitle.setText(name)
self.pop.bookGenre.setText(genre)
self.pop.bookReleaseYear.setText(str(year))
self.pop.costl.setText(str(cost))
self.pop.volumel.setText(str(volume))
self.pop.show()
def addSomeInfo(self):
self.pop2 = AddNewInfo()
self.pop2.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
# print(eval("9!"))
sys.exit(app.exec_())
|
QBoff/Moscow-Kiper
|
task5/main.py
|
main.py
|
py
| 5,993 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10139749320
|
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from .forms import (CreateCourseForm,
CreateCourseRegistrationForm,
CreateDepartmentForm,
CreateRegistrationForm)
from .models import Course, CourseRegistration, Department, Registration
import datetime
from django.utils import timezone
class CreateCourseView(LoginRequiredMixin, View):
"""
View for admin to create new Course.
"""
template_name = 'academicInfo/create_course.html'
create_course_form = CreateCourseForm
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
create_course_form = self.create_course_form()
return render(request, self.template_name, {'create_course_form' : create_course_form})
else:
return redirect('home')
def post(self, request, *args, **kwargs):
create_course_form = CreateCourseForm(request.POST)
if create_course_form.is_valid():
course = create_course_form.save()
course.save()
return redirect('view_course')
return render(request, self.template_name, {'create_course_form' : create_course_form})
class CreateCourseRegistrationView(LoginRequiredMixin, View):
"""
View for admin to add Course to the Registration.
"""
template_name = 'academicInfo/create_course_registration.html'
create_course_registration_form = CreateCourseRegistrationForm
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
create_course_registration_form = self.create_course_registration_form()
return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
else:
return redirect('home')
def post(self, request, *args, **kwargs):
create_course_registration_form = CreateCourseRegistrationForm(request.POST)
if create_course_registration_form.is_valid():
# Add course to registration only if this course is not added already
# in this registration.
course_registration = create_course_registration_form.save(commit=False)
# Check if the registration has already started.
if course_registration.registration.startTime <= timezone.now():
create_course_registration_form.add_error('registration',
'The registration has already started, you cannot add course to it now.')
return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
courses_in_registration = course_registration.registration.courseregistration_set.all()
similar_course_registration = courses_in_registration.filter(course=course_registration.course,
semester=course_registration.semester)
# Check if course is not already present in the same registration and semester.
if len(similar_course_registration) == 0:
course_registration.save()
return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form,
'success': 'Successfully added course to the registration.'})
else:
create_course_registration_form.add_error('course', 'This course is already added in this semester.')
create_course_registration_form.add_error('semester', 'This semester already has this course.')
return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
class CreateDepartmentView(LoginRequiredMixin, View):
"""
View for admin to add new Department.
"""
template_name = 'academicInfo/create_department.html'
create_department_form = CreateDepartmentForm
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
create_department_form = self.create_department_form()
return render(request, self.template_name, {'create_department_form' : create_department_form})
else:
return redirect('home')
def post(self, request, *args, **kwargs):
create_department_form = CreateDepartmentForm(request.POST)
# Check if Department with same name does not already exist.
if create_department_form.is_valid():
department = create_department_form.save()
department.save()
return redirect('view_department')
else:
return render(request, self.template_name, {'create_department_form' : create_department_form})
class CreateRegistrationView(LoginRequiredMixin, View):
"""
View for admin to create new Registration.
"""
template_name = 'academicInfo/create_registration.html'
create_registration_form = CreateRegistrationForm
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
create_registration_form = self.create_registration_form()
return render(request, self.template_name, {'create_registration_form' : create_registration_form})
else:
return redirect('home')
def post(self, request, *args, **kwargs):
create_registration_form = CreateRegistrationForm(request.POST)
# Check if the Registration form is valid.
if create_registration_form.is_valid():
days = int(request.POST['days'])
hours = int(request.POST['hours'])
minutes = int(request.POST['minutes'])
# Check if duration is 0 or not.
if days + hours + minutes == 0:
# Duration cannot be 0.
return render(request, self.template_name, {'create_registration_form' : create_registration_form,
'error' : 'Duration cannot be 0.'})
startTime = create_registration_form.cleaned_data['startTime']
duration = datetime.timedelta(days=days, hours=hours, minutes=minutes)
endTime = startTime + duration
registration = Registration.objects.create(name=create_registration_form.cleaned_data['name'],
startTime=startTime,
duration=duration,
endTime=endTime)
registration.save()
return redirect('registration')
return render(request, self.template_name, {'create_registration_form' : create_registration_form})
class RegistrationsView(View):
"""
View for everyone to view all the registrations.
"""
template_name = 'academicInfo/registration.html'
def get(self, request, *args, **kwargs):
time = timezone.now()
future_registrations = Registration.objects.filter(startTime__gt=time).order_by('startTime')
present_registrations = Registration.objects.filter(
endTime__gt=time
).exclude(startTime__gt=time).order_by('endTime')
past_registrations = Registration.objects.filter(endTime__lt=time)
return render(request, self.template_name, {'future_registrations': future_registrations,
'present_registrations': present_registrations,
'past_registrations': past_registrations})
class LiveRegistrationView(LoginRequiredMixin, View):
"""
View for student to register and unregister from live registrations.
"""
template_name = 'academicInfo/live_registration.html'
def get(self, request, *args, **kwargs):
# Render this page only for the students.
if hasattr(request.user, 'student'):
registration = get_object_or_404(Registration, pk=self.kwargs['registration_id'])
time = timezone.now()
# Check if registration is currently live.
if registration.startTime < time and registration.endTime > time:
student = request.user.student
                # Show courses in either the student's current semester or the next one.
courses_in_registration = registration.courseregistration_set.all()
course_registration = courses_in_registration.filter(
semester__gt=student.get_student_semester
).exclude(semester__gt=student.get_student_semester+1)
return render(request, self.template_name, {'course_registration' : course_registration,
'student_courses' : student.courseregistration_set.all()})
else:
return redirect('registration')
else:
return redirect('home')
def post(self, request, *args, **kwargs):
# Only students should be allowed to register.
if hasattr(request.user, 'student'):
course_registration = get_object_or_404(CourseRegistration,
pk=request.POST['course_registration_id'])
registration = course_registration.registration
currTime = timezone.now()
student = request.user.student
semester = student.get_student_semester
# If student wants to register for the course.
if 'Register' in request.POST:
if (currTime > registration.startTime and
currTime < registration.endTime and
course_registration.semester in range(semester, semester+2)):
if (not student in course_registration.students.all() and
course_registration.remaining_seats > 0):
course_registration.students.add(student)
return redirect(reverse('live_registration',
kwargs={'registration_id' : registration.id}))
else:
return redirect('home')
# If student wants to unregister from the course.
elif 'UnRegister' in request.POST:
if (currTime > registration.startTime and
currTime < registration.endTime and
student in course_registration.students.all()):
course_registration.students.remove(student)
return redirect(reverse('live_registration',
kwargs={'registration_id' : registration.id}))
else:
return redirect('home')
class DepartmentsView(LoginRequiredMixin, View):
"""
View for admin to see departments and add new department.
"""
template_name = 'academicInfo/departments.html'
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
departments = Department.objects.all()
return render(request, self.template_name, {'departments' : departments})
else:
return redirect('home')
class CourseView(LoginRequiredMixin, View):
"""
View for admin to see Courses and add new Course.
"""
template_name = 'academicInfo/courses.html'
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
courses = Course.objects.all()
return render(request, self.template_name, {'courses' : courses})
else:
return redirect('home')
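# A hedged sketch of the URLconf these views expect (URL names inferred from
# the redirect()/reverse() calls above; the actual paths may differ):
# urlpatterns = [
#     path('registrations/', RegistrationsView.as_view(), name='registration'),
#     path('registrations/<int:registration_id>/', LiveRegistrationView.as_view(), name='live_registration'),
#     path('courses/', CourseView.as_view(), name='view_course'),
#     path('departments/', DepartmentsView.as_view(), name='view_department'),
# ]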
|
shreygoel7/Pinocchio
|
Pinocchio/academicInfo/views.py
|
views.py
|
py
| 12,563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39732651900
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 18 12:44:53 2022
@author: mathaes
"""
import copy
class HexBoard:
"""
The HexBoard class represents a state of a game of Hex on an
n x n hexagonal-parallelogram board.
The two players are represented by X and O.
One player plays in the left-right (across columns) and
the other in the top-bottom (across rows). These can
be changed but the default is that O plays first, in left-right direction.
"""
EMPTY_TOKEN = '-'
X_TOKEN = 'X'
O_TOKEN = 'O'
def __init__(self,
size=11,
first_player = O_TOKEN,
lr_player = O_TOKEN):
self.size = size
self.first_player = first_player
self.lr_player = lr_player
self.label = '0'
self.clear()
def __repr__(self):
strrep = ''
for row in range(0, self.size):
if row == int(self.size / 2):
strrep += self.lr_player
strrep += ' ' * (row)
else:
strrep += ' ' * (row + 1)
for col in range(0, self.size):
curValue = self.getTileValue(row, col)
strrep += curValue
if col < self.size - 1:
strrep += ' '
strrep += '\n'
        if self.label is not None:
strrep += f"({self.label})\n"
return strrep
def __copy__(self):
clone = HexBoard()
clone.size = self.size
clone.first_player = self.first_player
clone.lr_player = self.lr_player
clone.board = copy.deepcopy(self.board)
clone.label = self.label
return clone
def copy(self):
return copy.copy(self)
def getTileValue(self, row, col):
if row < 0 or col < 0 or row >= self.size or col >= self.size:
raise Exception(f"Coordinate ({row},{col}) is out of range for this board.")
return self.board[row][col]
def playX(self, row, col):
if self.getNextPlayer() != self.X_TOKEN:
raise Exception("playX called but next player is not X")
cur = self.getTileValue(row, col)
if not HexBoard.EMPTY_TOKEN == cur:
raise Exception(f"Coordinate ({row},{col}) is {cur} (not unset).")
self.board[row][col] = HexBoard.X_TOKEN
def playO(self, row, col):
if self.getNextPlayer() != self.O_TOKEN:
raise Exception("playX called but next player is not X")
cur = self.getTileValue(row, col)
if not HexBoard.EMPTY_TOKEN == cur:
raise Exception(f"Coordinate ({row},{col}) is {cur} (not unset).")
self.board[row][col] = HexBoard.O_TOKEN
def getSelfPlayer(self):
"""
Determine which player played last for the current board state.
Returns
-------
        Either X_TOKEN or O_TOKEN, unless the board is empty,
        in which case it returns EMPTY_TOKEN.
"""
xcount = self.countXTiles()
ocount = self.countOTiles()
if self.first_player == HexBoard.O_TOKEN:
if xcount == ocount:
return HexBoard.X_TOKEN
elif xcount == ocount-1:
return HexBoard.O_TOKEN
else:
raise Exception(f"Bad board state: O goes first but there are {ocount} O tiles and {xcount} X tiles")
elif self.first_player == HexBoard.X_TOKEN:
if xcount == ocount:
return HexBoard.O_TOKEN
elif ocount == xcount-1:
return HexBoard.X_TOKEN
else:
raise Exception(f"Bad board state: X goes first but there are {ocount} O tiles and {xcount} X tiles")
return HexBoard.EMPTY_TOKEN
def getNextPlayer(self):
"""
Determine which player plays next from the current board state.
Returns
-------
        Either X_TOKEN or O_TOKEN, unless the game is over,
        in which case it returns EMPTY_TOKEN.
"""
if self.isGameOver():
return HexBoard.EMPTY_TOKEN
cur = self.getSelfPlayer()
if cur == HexBoard.O_TOKEN:
return HexBoard.X_TOKEN
elif cur == HexBoard.X_TOKEN:
return HexBoard.O_TOKEN
return HexBoard.EMPTY_TOKEN
def clear(self):
self.board = []
for n in range(0, self.size):
self.board.append([HexBoard.EMPTY_TOKEN] * self.size)
def getTileCoordinatesOfValue(self, value):
coords = []
for i in range(0, self.size):
for j in range(0, self.size):
cur = self.getTileValue(i, j)
if cur == value:
coords.append([i, j])
return coords
def countXTiles(self):
return self.countTiles(HexBoard.X_TOKEN)
def countOTiles(self):
return self.countTiles(HexBoard.O_TOKEN)
def countEmptyTiles(self):
return self.countTiles(HexBoard.EMPTY_TOKEN)
def countTiles(self, value):
count = 0
for i in range(0, self.size):
for j in range(0, self.size):
cur = self.getTileValue(i, j)
if cur == value:
count += 1
return count
def getXTiles(self):
return self.getTileCoordinatesOfValue(HexBoard.X_TOKEN)
def getOTiles(self):
return self.getTileCoordinatesOfValue(HexBoard.O_TOKEN)
def getEmptyTiles(self):
return self.getTileCoordinatesOfValue(HexBoard.EMPTY_TOKEN)
    @staticmethod
    def adjacent(tile1, tile2):
rowDiff = tile1[0] - tile2[0]
colDiff = tile1[1] - tile2[1]
rowAdjacent = rowDiff == 0 and abs(colDiff) == 1
colAdjacent = colDiff == 0 and abs(rowDiff) == 1
diagAdjacent = abs(rowDiff) == 1 and rowDiff + colDiff == 0
return rowAdjacent or colAdjacent or diagAdjacent
    @staticmethod
    def adjacentIndexes(tile, tileArray):
indexes = []
for n in range(0, len(tileArray)):
if HexBoard.adjacent(tile, tileArray[n]):
indexes.append(n)
return indexes
def getConnectedOBlocks(self):
return HexBoard.getBlocks(self.getOTiles())
def getConnectedXBlocks(self):
return HexBoard.getBlocks(self.getXTiles())
    @staticmethod
    def getBlocks(tiles):
"""
Given a set of tiles, return them partitioned into
blocks of adjacent tiles
Parameters
----------
tiles : list
A list of tile coordinates
Returns
-------
blocks : list of lists
A list coordinate-list blocks, where each block
is a group of connected coordinates (by hexagonal adjacency)
"""
blocks = []
while len(tiles) > 0:
nextTile = tiles.pop(0)
block = HexBoard.recursiveAdjacencies(nextTile, tiles)
block.append(nextTile)
block.sort()
blocks.append(block)
return blocks
    @staticmethod
    def recursiveAdjacencies(tile, tileArray):
adjacencies = []
adjixs = HexBoard.adjacentIndexes(tile, tileArray)
if len(adjixs) == 0:
return []
adjixs.sort()
adjixs.reverse()
subtiles = []
for ix in adjixs:
subtile = tileArray.pop(ix)
subtiles.append(subtile)
adjacencies.extend(subtiles)
for sub in subtiles:
more = HexBoard.recursiveAdjacencies(sub, tileArray)
if len(more) > 0:
adjacencies.extend(more)
return adjacencies
def isXWin(self):
check_lr = self.lr_player == HexBoard.X_TOKEN
xblocks = self.getConnectedXBlocks()
for xblock in xblocks:
if check_lr:
if self.block_spans_lr(xblock):
return True
else:
if self.block_spans_tb(xblock):
return True
return False
def isOWin(self):
check_lr = self.lr_player == HexBoard.O_TOKEN
oblocks = self.getConnectedOBlocks()
for oblock in oblocks:
if check_lr:
if self.block_spans_lr(oblock):
return True
else:
if self.block_spans_tb(oblock):
return True
return False
def block_spans_lr(self, block):
# we assume the block is connected, so we
# just look for an element with 0 first entry
# and one with size-1 first entry
foundLeft = False
foundRight = False
for coord in block:
if coord[0] == 0:
foundLeft = True
elif coord[0] == self.size - 1:
foundRight = True
if foundLeft and foundRight:
return True
return False
def block_spans_tb(self, block):
# we assume the block is connected, so we
        # just look for an element with 0 second entry
        # and one with size-1 second entry
foundTop = False
foundBottom = False
for coord in block:
if coord[1] == 0:
foundTop = True
elif coord[1] == self.size - 1:
foundBottom = True
if foundTop and foundBottom:
return True
return False
def isGameOver(self):
if self.countEmptyTiles() == 0:
return True
return self.isXWin() or self.isOWin()
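# A minimal usage sketch:
if __name__ == '__main__':
    board = HexBoard(size=5)
    board.playO(2, 2)  # O moves first by default
    board.playX(0, 0)
    print(board)
    print("next:", board.getNextPlayer(), "game over:", board.isGameOver())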
|
ccorbell/gametheory
|
hex/hexboard.py
|
hexboard.py
|
py
| 9,928 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28158074215
|
"""Destroy unused AMIs in your AWS account.
Usage:
ami_destroyer.py <requiredtag> [options]
Arguments:
<requiredtag> Tag required for an AMI to be cleaned up in the form tag:NameOfTag
Options:
    --retain=<retain>     Number of images to retain, newest to oldest [default: 2]
--regions=<regions> A comma-separated list of AWS Regions to run against [default: us-east-1]
--help Show this help string
--dryrun List the AMIs that'll be destroyed by this script
"""
import sys
import logging
from operator import itemgetter
from docopt import docopt
import boto3
import botocore.exceptions as botoex
_LOGGER = logging.Logger("ami-destroyer")
def setup_logging():
_LOGGER.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s : %(levelname)s - %(message)s')
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
def get_account_id():
acctid = sts.get_caller_identity().get('Account')
_LOGGER.info("Retrieving Account ID: {}".format(acctid))
return acctid
def get_curated_images(tagname, accountid):
_LOGGER.info("Retrieving AMIs with {}".format(tagname))
return ec2.images.filter(
Owners=[accountid],
Filters=[
{
'Name':tagname,
'Values':['True']
}
]
)
def sort_curated_images(curatedimages):
_LOGGER.info("Sorting tagged AMIs into a nice dictionary of lists of dictionaries")
sortedimages = {}
for i in curatedimages:
for tag in i.tags:
if tag['Key'] == 'Name':
iname = tag['Value']
break
else:
iname = "nonametag"
if iname not in sortedimages:
sortedimages[iname] = []
sortedimages[iname].append({
'creation_date': i.creation_date,
'ami_id': i.image_id,
'snapshot_id': i.block_device_mappings[0]['Ebs']['SnapshotId']
})
sortedimages[iname] = sorted(
sortedimages[iname],
key=itemgetter('creation_date'),
reverse=True
)
return sortedimages
def prune_sorted_images(images, retain):
for family in images:
_LOGGER.info(
"Found {} tagged images for type {}. Retaining the latest {}".format(
len(images[family]),
family,
retain
)
)
images[family] = images[family][retain:]
if not images[family]:
_LOGGER.info("No images to prune for {}".format(family))
return images
def destroy_ami(ami_id, family, dryrun):
try:
ec2.Image(ami_id).deregister(DryRun=dryrun)
_LOGGER.info("Family: {} - Deregistered {}".format(family, ami_id))
except botoex.ClientError as e:
_LOGGER.warning("{} - {}".format(ami_id, e))
def destroy_snapshot(snapshot_id, family, dryrun):
try:
ec2.Snapshot(snapshot_id).delete(DryRun=dryrun)
_LOGGER.info("Family: {} - Deleted {}".format(family, snapshot_id))
except botoex.ClientError as e:
_LOGGER.warning("{} - {}".format(snapshot_id, e))
def run(tag, retain, dryrun):
acctid = get_account_id()
curatedimages = get_curated_images(tag, acctid)
sortedimages = sort_curated_images(curatedimages)
if sortedimages:
        prunedimages = prune_sorted_images(sortedimages, retain)
for family in prunedimages:
if prunedimages[family]:
for ami in prunedimages[family]:
destroy_ami(ami['ami_id'], family, dryrun)
destroy_snapshot(ami['snapshot_id'], family, dryrun)
else:
_LOGGER.error("No tagged images to prune")
if __name__ == '__main__':
args = docopt(__doc__)
requiredtag = args['<requiredtag>']
dryrun = args['--dryrun']
numretain = int(args['--retain'])
regions = args['--regions']
setup_logging()
for r in regions.split(','):
_LOGGER.info("##### Running cleanup for region {} #####".format(r))
ec2 = boto3.resource('ec2', region_name=r)
sts = boto3.client('sts')
run(requiredtag, numretain, dryrun)
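# Example dry run, following the usage string at the top of this file
# (tag name illustrative):
#   python ami_destroyer.py tag:AMICleanup --retain=3 --regions=us-east-1,us-west-2 --dryrun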
|
crielly/amidestroyer
|
amidestroyer.py
|
amidestroyer.py
|
py
| 4,275 |
python
|
en
|
code
| 5 |
github-code
|
6
|
40686886293
|
import time
import unittest
import s1ap_types
from integ_tests.s1aptests import s1ap_wrapper
from integ_tests.s1aptests.s1ap_utils import MagmadUtil, SpgwUtil
class TestAttachNwInitiatedDetachFail(unittest.TestCase):
"""
S1AP Integration test for Failed Network Initiated Detach
"""
def setUp(self):
"""Initialize s1ap wrapper and spgw utility
"""
self._s1ap_wrapper = s1ap_wrapper.TestWrapper(
stateless_mode=MagmadUtil.stateless_cmds.ENABLE,
)
self._spgw_util = SpgwUtil()
def tearDown(self):
"""Clean up utilities and sctp connection
"""
self._s1ap_wrapper.cleanup()
def test_attach_nw_initiated_detach_fail(self):
"""
The test case validates retransmission of Detach Request after MME
restarts
Step 1: UE attaches to network
Step 2: Send request to delete default bearer, since deletion is
invoked for default bearer, MME initiates detach procedure
        Step 3: MME starts timer T3422 to receive Detach Accept message
Step 4: S1AP tester ignores and does not send Detach Accept
"""
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
print(
"********************** Running End to End attach for ",
"UE id ",
req.ue_id,
)
# Now actually complete the attach
attach = self._s1ap_wrapper._s1_util.attach(
req.ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
print("Sleeping for 5 seconds")
time.sleep(5)
print(
"********************** Deleting default bearer for IMSI",
"".join([str(i) for i in req.imsi]),
)
# Delete default bearer
self._spgw_util.delete_bearer(
"IMSI" + "".join([str(i) for i in req.imsi]),
attach.esmInfo.epsBearerId,
attach.esmInfo.epsBearerId,
)
# Receive NW initiated detach request
        # Wait for timer T3422 to expire 5 times
for _ in range(5):
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.UE_NW_INIT_DETACH_REQUEST.value
print("**************** Received NW initiated Detach Req")
time.sleep(6)
if __name__ == "__main__":
unittest.main()
|
magma/magma
|
lte/gateway/python/integ_tests/s1aptests/test_attach_nw_initiated_detach_fail.py
|
test_attach_nw_initiated_detach_fail.py
|
py
| 2,615 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
4222865674
|
#!usr/bin/python
import os
import SimpleITK as sitk
import numpy as np
import scipy.ndimage.interpolation
import skimage.exposure
import skimage.filters
import skimage.transform
path="//Users//zhangyuwei//Desktop//test"
ShrinkFactor = 4
for i in os.walk(path):
for j in range(len(i[2])):
if os.path.splitext(i[2][j])[1] == ".gz" :
print(os.path.splitext(i[2][j])[0])
nifti_file = sitk.ReadImage(os.path.join(i[0],i[2][j]))
mask_img = sitk.BinaryThreshold(nifti_file, 80, 5000)
mask_filename = "globalmask_" + os.path.splitext(i[2][j])[0] + ".gz"
output_filename = "N4ITKcorrected_" + os.path.splitext(i[2][j])[0] + ".gz"
output_biasname1 = "bias_in_" + os.path.splitext(i[2][j])[0] + ".gz"
output_biasname2 = "bias_out_" + os.path.splitext(i[2][j])[0] + ".gz"
sitk.WriteImage(mask_img, mask_filename)
nifti_shape = sitk.GetArrayFromImage(nifti_file)
nifti_shape = nifti_shape.shape
# Call and initialize an N4 corrector instance.
            corrector = sitk.N4BiasFieldCorrectionImageFilter()
            # These are setter methods; plain attribute assignment (as the
            # original did) silently has no effect. The list-valued setters
            # take one entry per fitting level / per image dimension.
            corrector.SetMaximumNumberOfIterations([50])
            corrector.SetNumberOfHistogramBins(128)
            corrector.SetSplineOrder(10)
            corrector.SetConvergenceThreshold(0.001)
            corrector.SetNumberOfControlPoints([8] * nifti_file.GetDimension())
            print("> Initializing Complete!")
if ShrinkFactor > 1 :
shrinked_img = sitk.Shrink(nifti_file, [ShrinkFactor] * nifti_file.GetDimension())
shrinked_mask = sitk.Shrink(mask_img, [ShrinkFactor] * nifti_file.GetDimension())
shrinked_img = sitk.Cast(shrinked_img, sitk.sitkFloat32)
#shrinked_mask = sitk.Cast(shrinked_mask, sitk.sitkFloat32)
print("> Starting Execution...")
corrected_img = corrector.Execute(shrinked_img, shrinked_mask)
print("> Execution Complete!")
# Estimate the bias field of corrected image
re_corrected = corrector.Execute(corrected_img, shrinked_mask)
print("> Corrected Bias Estimation Complete!")
corrected_img = sitk.GetArrayFromImage(corrected_img)
corrected_img[corrected_img == 0] = 0.001
re_corrected = sitk.GetArrayFromImage(re_corrected)
re_corrected[re_corrected == 0] = 0.001
shrinked_img = sitk.GetArrayFromImage(shrinked_img)
# Generate biasfield
shrinked_bias = shrinked_img / corrected_img
corrected_bias = corrected_img / re_corrected
# Output
output_bias = scipy.ndimage.zoom(shrinked_bias, np.array(nifti_shape) / shrinked_bias.shape)
output_bias2 = scipy.ndimage.zoom(corrected_bias, np.array(nifti_shape) / shrinked_bias.shape)
output_img = sitk.GetArrayFromImage(nifti_file) / output_bias
output_bias = sitk.GetImageFromArray(output_bias)
output_bias2 = sitk.GetImageFromArray(output_bias2)
output_img = sitk.Cast(sitk.GetImageFromArray(output_img), sitk.sitkUInt16)
sitk.WriteImage(output_img, output_filename)
sitk.WriteImage(output_bias, output_biasname1)
sitk.WriteImage(output_bias2, output_biasname2)
print("> Save Complete!")
else:
source_img = sitk.Shrink(nifti_file, [ShrinkFactor] * nifti_file.GetDimension())
mask_img = sitk.Shrink(mask_img, [ShrinkFactor] * mask_img.GetDimension())
source_img = sitk.Cast(source_img, sitk.sitkFloat32)
output_img = corrector.Execute(source_img, mask_img)
output_img = sitk.Cast(output_img, sitk.sitkUInt16)
sitk.WriteImage(output_img, output_filename)
#biasfield_img = source_img / output_img
#biasfield_img[biasfield_img < 0.5] = 0.5
|
20zzyw/Radiomic-Toolbox
|
N4ITK_instance.py
|
N4ITK_instance.py
|
py
| 4,354 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73590162109
|
# Filename : 02-添加子类属性.py
# Date : 2018/8/2
"""
对象属性的继承:是通过继承init方法来继承的对象属性
给当前类添加对象属性:重写init方法,如果需要保留父类的对象属性,需要使用
super()去调用父类的init方法
多态:同一个事物有多种形态,子类继承父类的方法,可以对方法进行重写,
一个方法就有多种形态(多态的表现)
类的多态:继承产生多态
"""
class Person:
def __init__(self, name='', age=2):
self.name = name
self.age = age
class Staff(Person):
    # __init__ parameters: allow certain attributes to be assigned when the object is created
def __init__(self, name):
super().__init__(name)
self.salary = 0
if __name__ == '__main__':
    s1 = Person('wd', 12)
    print(s1.name, s1.age)
# Exercise
"""
Declare a Human class with attributes: name, age, gender, height.
Name, gender and age must be assignable when a Human object is created.
Then create a Student class inheriting from Human, with all of Human's
attributes plus a student id, score and phone number.
Name, age and phone number must be assignable when a Student object is created.
"""
class Human:
def __init__(self, name, age=0, sex='男'):
self.name = name
self.height = 0
self.age = age
self.sex = sex
class Student(Human):
    def __init__(self, name, age, tel):
        # super() already supplies self; passing it explicitly was a bug
        super().__init__(name, age)
        self.score = 0
        self.id_num = 0
        self.tel = tel
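# A small demo (values illustrative):
if __name__ == '__main__':
    stu = Student('Xiao Ming', 18, '13812345678')
    print(stu.name, stu.age, stu.tel)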
|
gilgameshzzz/learn
|
day14Python对象3/02-添加子类属性.py
|
02-添加子类属性.py
|
py
| 1,547 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
5114407056
|
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.interpolate import interp1d
import astropy.units as u
import astropy.coordinates as coord
from astropy.cosmology import FlatLambdaCDM
from astropy.constants import M_sun
import tensorflow as tf
from flowpm import utils as ut
import copy
import time
from sys import argv,exit,byteorder
from field_util import Field
class FGPA(object):
def __init__(self, fn, box_size = 512*u.Mpc, box_res = 256, origin = [3550*u.Mpc, -256*u.Mpc, -256*u.Mpc],
tau_0 = 1, T_0 = 1, beta = 1.6, gamma = 1.6, from_npy = True):
if from_npy:
self.dark = self.read_from_npy(fn)
else:
self.header, self.gas, self.dark, self.star = self.tipsy_read(fn)
self.auxilinary(box_size, box_res)
self.set_origin(origin)
self.set_FGPA_param(tau_0, T_0, beta, gamma)
def set_FGPA_param(self, tau_0 = None, T_0 = None, beta = None, gamma = None):
if tau_0 is not None:
self.tau_0 = tau_0 # TBD
if T_0 is not None:
self.T_0 = T_0 # TBD
if beta is not None:
self.beta = beta
if gamma is not None:
self.gamma = gamma
return
def set_origin(self, orig_pos):
'''
        Set an offset (move the origin to the given [x0, y0, z0]).
        Remember to attach units!
'''
        self.x0, self.y0, self.z0 = orig_pos
return
def auxilinary(self, box_size, box_res, N_particles = None):
# settings of the simulation
self.box_size = box_size # unit: Mpc
self.box_res = box_res # unit: none
self.res = box_size/box_res # unit: Mpc
if N_particles is None:
self.n_den = self.header['N'] / self.box_size**3
else:
self.n_den = N_particles / self.box_size**3
#Do general cosmo things
self.cosmo = FlatLambdaCDM(H0=100, Om0=0.315)
self.mass_res = self.cosmo.critical_density0.to(u.M_sun/u.Mpc**3)*(self.box_size)**3/self.box_res**3*self.cosmo.Om(0.)
#get speed of light in km/s
self.ckms = 299792
self.velkms = (self.box_size/8677.2079486362706)*self.ckms
def read_from_npy(self, fn, costco_style = True):
'''
use this function to read reduced data, and recover the data to tipsy style.
'''
dark = np.load(fn)
self.auxilinary(box_size = 512*u.Mpc, box_res = 256, N_particles = len(dark))
dark = pd.DataFrame(dark,columns=dark.dtype.names)
# basically what we do in the rev function
dark['x'] = dark['x'] / self.box_size - 0.5
dark['y'] = dark['y'] / self.box_size - 0.5
dark['z'] = dark['z'] / self.box_size - 0.5
dark['x'].units = None
dark['y'].units = None
dark['z'].units = None
dark['vx'] = dark['vx'] / self.velkms
dark['vy'] = dark['vy'] / self.velkms
dark['vz'] = dark['vz'] / self.velkms
dark['vx'].units = None
dark['vy'].units = None
dark['vz'].units = None
if costco_style:
dark['x'], dark['y'], dark['z'] = dark['y'], dark['x'], dark['z'] # ?
dark['vx'], dark['vy'], dark['vz'] = dark['vy'], dark['vx'], dark['vz'] # dont forget the vel data!
return dark
def tipsy_read(self, fn, costco_style = True):
tipsy = open(fn, 'rb')
header_type = np.dtype([('time', '>f8'),('N', '>i4'), ('Dims', '>i4'), ('Ngas', '>i4'), ('Ndark', '>i4'), ('Nstar', '>i4'), ('pad', '>i4')])
gas_type = np.dtype([('mass','>f4'), ('x', '>f4'),('y', '>f4'),('z', '>f4'), ('vx', '>f4'),('vy', '>f4'),('vz', '>f4'),
('rho','>f4'), ('temp','>f4'), ('hsmooth','>f4'), ('metals','>f4'), ('phi','>f4')])
dark_type = np.dtype([('mass','>f4'), ('x', '>f4'),('y', '>f4'),('z', '>f4'), ('vx', '>f4'),('vy', '>f4'),('vz', '>f4'),
('eps','>f4'), ('phi','>f4')])
star_type = np.dtype([('mass','>f4'), ('x', '>f4'),('y', '>f4'),('z', '>f4'), ('vx', '>f4'),('vy', '>f4'),('vz', '>f4'),
('metals','>f4'), ('tform','>f4'), ('eps','>f4'), ('phi','>f4')])
header = np.fromfile(tipsy,dtype=header_type,count=1)
header = dict(zip(header_type.names,header[0]))
gas = np.fromfile(tipsy,dtype=gas_type,count=header['Ngas'])
dark = np.fromfile(tipsy,dtype=dark_type,count=header['Ndark'])
star = np.fromfile(tipsy,dtype=star_type,count=header['Nstar'])
if byteorder == 'little':
gas = gas.byteswap().newbyteorder('=')
dark = dark.byteswap().newbyteorder('=')
star = star.byteswap().newbyteorder('=')
gas = pd.DataFrame(gas,columns=gas.dtype.names)
dark = pd.DataFrame(dark,columns=dark.dtype.names) # here is the raw data
# in raw_data:
# x - RA
# y - DEC
# z - red
# what we want:
# x - red
# y - RA
# z - DEC
if costco_style:
dark['x'], dark['y'], dark['z'] = dark['y'], dark['x'], dark['z'] # ?
dark['vx'], dark['vy'], dark['vz'] = dark['vy'], dark['vx'], dark['vz'] # dont forget the vel data!
star = pd.DataFrame(star,columns=star.dtype.names)
tipsy.close()
return header, gas, dark, star
def process_dark(self, dark = None):
# for painting, keep the particles in [0, 512]^3 box
# this function write in-situ results, so be careful
# only use if you want to ensure the particles are in reasonable positions
if dark is None:
dark = copy.deepcopy(self.dark)
dark['x'] = (dark['x']+0.5) * self.box_size + self.x0
dark['y'] = (dark['y']+0.5) * self.box_size + self.y0
dark['z'] = (dark['z']+0.5) * self.box_size + self.z0
dark['x'].units = u.Mpc
dark['y'].units = u.Mpc
dark['z'].units = u.Mpc
dark['vx'] = dark['vx'] * self.velkms
dark['vy'] = dark['vy'] * self.velkms
dark['vz'] = dark['vz'] * self.velkms
dark['vx'].units = u.km * u.s**-1
dark['vy'].units = u.km * u.s**-1
dark['vz'].units = u.km * u.s**-1
dark['mass'] = self.mass_res.value
dark['mass'].units= M_sun
return dark
def process_dark_rev(self, dark_processed):
'''
recover the input field (to raw format)
'''
dark_processed['x'] = (dark_processed['x']-self.x0) / self.box_size - 0.5
dark_processed['y'] = (dark_processed['y']-self.y0) / self.box_size - 0.5
dark_processed['z'] = (dark_processed['z']-self.z0) / self.box_size - 0.5
dark_processed['x'].units = None
dark_processed['y'].units = None
dark_processed['z'].units = None
dark_processed['vx'] = dark_processed['vx'] / self.velkms
dark_processed['vy'] = dark_processed['vy'] / self.velkms
dark_processed['vz'] = dark_processed['vz'] / self.velkms
dark_processed['vx'].units = None
dark_processed['vy'].units = None
dark_processed['vz'].units = None
return dark_processed
def particle_paint(self, nc, weight_col = None):
'''
nc: # of cells along any direction
        raw_part_data: *raw* particle data from the simulation (x, y, z, \\in [-0.5, 0.5])
weight_col: pick one col in raw_data as weight; default value is 1 for all the particles
'''
mesh = tf.zeros([1, nc, nc, nc], dtype = float)
dark_raw = self.dark
dark_pos = tf.convert_to_tensor([dark_raw['x'], dark_raw['y'], dark_raw['z']], dtype = float)
dark_pos = tf.transpose((dark_pos + 0.5) * nc)
dark_pos = tf.expand_dims(dark_pos, axis = 0) # [1, partN, 3]
partN = dark_pos.shape[1]
n_den = partN / nc**3
if weight_col is None:
weight = tf.ones([1, partN])
else:
weight = tf.convert_to_tensor(dark_raw[weight_col])
weight = tf.expand_dims(weight, axis = 0)
return ut.cic_paint(mesh, dark_pos, weight = weight) / n_den
def particle_paint_clip_with_real_coord(self, real_coord_start, real_coord_end, dl, dark_raw = None, weight_col = None, smooth = False):
'''
        Translate a real-space box to a box in [-0.5, 0.5]^3, then run particle_paint_clip.
        Please add units to all the parameters.
Issue: get things wrong here
'''
if dark_raw is None:
dark_raw = self.dark
rcs_x, rcs_y, rcs_z = real_coord_start
rce_x, rce_y, rce_z = real_coord_end
box_scale = round((self.box_size/dl).value)
nc = np.round([((rce_x-rcs_x)/dl).value,
((rce_y-rcs_y)/dl).value,
((rce_z-rcs_z)/dl).value]).astype(int)
offset = np.round([((rcs_x-self.x0)/dl).value,
((rcs_y-self.y0)/dl).value,
((rcs_z-self.z0)/dl).value]).astype(int)
n_den = self.n_den * dl**3 # global density per grid
field_data = self.particle_paint_clip(nc, offset, box_scale, n_den, dark_raw = dark_raw, weight_col = weight_col)[0] # [nx, ny, nz] tensor
field = Field(rcs_x, rcs_y, rcs_z, dl, field_data)
if smooth:
field.smooth(dl)
return field
def particle_paint_clip(self, nc, offset, box_scale, n_den, dark_raw = None, weight_col = None):
'''
nc: [nx, ny, nz]
the shape/physical-scale of mesh.
offset: [ox, oy, oz]
[0,0,0] position of the mesh; default value ox=0, oy=0, oz=0
box_scale: the full scale of the simulation - box coord: [0, box_scale]^3
        # raw_part_data: *raw* particle data from the simulation (x, y, z, \\in [-0.5, 0.5])
weight_col: pick one col in raw_data as weight; default value is 1 for all the particles
        This function cuts out a box [ox, ox+nx] * [oy, oy+ny] * [oz, oz+nz] with field values.
auto periodical condition(?)
'''
if dark_raw is None:
dark_raw = self.dark
nx, ny, nz = nc
ox, oy, oz = offset
mesh = tf.zeros([1, nx, ny, nz], dtype = float)
# remove particles out of the boundary
dark_clip = dark_raw[(dark_raw['x']<=(ox+nx)/box_scale-0.5) & (dark_raw['x']>= ox/box_scale-0.5) &
(dark_raw['y']<=(oy+ny)/box_scale-0.5) & (dark_raw['y']>= oy/box_scale-0.5) &
(dark_raw['z']<=(oz+nz)/box_scale-0.5) & (dark_raw['z']>= oz/box_scale-0.5)]
dark_pos = tf.convert_to_tensor([(dark_clip['x']+ 0.5)*box_scale - ox,
(dark_clip['y']+ 0.5)*box_scale - oy,
(dark_clip['z']+ 0.5)*box_scale - oz], dtype = float)
assert (np.max(dark_pos[0]) <= nx) & (np.max(dark_pos[1]) <= ny) & (np.max(dark_pos[2]) <= nz), print(np.max(dark_pos[0]), np.max(dark_pos[1]), np.max(dark_pos[2]))
assert (np.min(dark_pos[0]) >= 0 ) & (np.min(dark_pos[1]) >= 0 ) & (np.min(dark_pos[2]) >= 0 )
dark_pos = tf.transpose(dark_pos)
dark_pos = tf.expand_dims(dark_pos, axis = 0) # [1, partN, 3]
partN = dark_pos.shape[1]
if weight_col is None:
weight = tf.ones([1, partN])
else:
weight = tf.convert_to_tensor(dark_clip[weight_col])
weight = tf.expand_dims(weight, axis = 0)
paint = ut.cic_paint(mesh, dark_pos, weight = weight) / n_den
return paint
def RSD_catalog(self, real_coord_start, real_coord_end, dl):
'''
return a particle catalog with RSDed pos
'''
opd_clip = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl)
fvx_clip = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl, weight_col = 'vx')
fvy_clip = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl, weight_col = 'vy')
fvz_clip = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl, weight_col = 'vz')
part_new_pos = self.field_to_part_pos(opd_clip)
# generate a new particle catalog
part_new = pd.DataFrame(part_new_pos[0], columns = ['x', 'y', 'z'])
opd_tensor = tf.expand_dims(opd_clip.field_data, axis = 0)
fvx_tensor = tf.expand_dims(fvx_clip.field_data, axis = 0)
fvy_tensor = tf.expand_dims(fvy_clip.field_data, axis = 0)
fvz_tensor = tf.expand_dims(fvz_clip.field_data, axis = 0)
# flowpm.cic_readout requires a compatible mesh and coord ([nx, ny, nz] grid - [0, n*]^3 coord)
part_pos_grid = self.mesh_to_part_pos(tf.expand_dims(opd_clip.field_data, axis = 0))
part_new['opd'] = ut.cic_readout(opd_tensor, part_pos_grid)[0]
part_new['vx'] = ut.cic_readout(fvx_tensor, part_pos_grid)[0]
part_new['vy'] = ut.cic_readout(fvy_tensor, part_pos_grid)[0]
part_new['vz'] = ut.cic_readout(fvz_tensor, part_pos_grid)[0]
# convert to real space
part_new = self.process_dark(part_new)
# RSD
# TODO: consider the effect of yz distance
part_new['red_real'] = self.z_from_dist(part_new['x'] * u.Mpc)
part_new['red_rs'] = part_new['red_real'] + part_new['vx'] / self.ckms
part_new['x_red'] = self.z_to_dist(part_new['red_rs'])
return part_new
def FGPA_eval(self, mesh_catalog):
# calculate tau and T for each particle
# TODO: find out characterized tau_0 & T_0
mesh_catalog['tau'] = self.tau_0 * mesh_catalog['opd']**self.beta
mesh_catalog['T'] = self.T_0 * mesh_catalog['opd']**self.gamma
return mesh_catalog
def raw_tau_map(self, real_coord_start, real_coord_end, dl):
# generate the particle catalog
part_new = self.RSD_catalog(real_coord_start, real_coord_end, dl)
# add tau / T to the catalog
part_new = self.FGPA_eval(part_new)
# this one in the RS space
part_new_new = pd.DataFrame(part_new[['y', 'z', 'vx', 'vy', 'vz', 'tau', 'T']], columns = ['y', 'z', 'vx', 'vy', 'vz', 'tau', 'T'])
part_new_new['x'] = part_new['x_red'] # not sure why this doesn't work
# recover the raw_field
part_new_new = self.process_dark_rev(part_new_new)
# paint the final result
tau_field = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl,
dark_raw = part_new_new, weight_col = 'tau', smooth = False)
return tau_field
def tau_map(self, real_coord_start, real_coord_end, dl, z_comp = 2.30, F_obs = 0.8447):
        # derive A_norm by matching the calculated mean flux to the observed value F_obs
self.set_FGPA_param(tau_0=1)
raw_tau_field = self.raw_tau_map(real_coord_start, real_coord_end, dl)
        # construct the clip coordinates
        # hardcoded +/-0.05 redshift window around z_comp; crude, but works here
clip_start = copy.deepcopy(real_coord_start)
clip_start[0] = self.cosmo.comoving_distance(z_comp - 0.05)
clip_end = copy.deepcopy(real_coord_end)
clip_end[0] = self.cosmo.comoving_distance(z_comp + 0.05)
test_tau_field = raw_tau_field.clip_with_coord(clip_start, clip_end)
        # no Newton iteration, just interpolation
tau_list = test_tau_field.field_data
tau_list = tau_list[~np.isnan(tau_list)]
l = []
for A in np.arange(0, 0.5, 0.001):
l.append(np.mean(np.exp(-tau_list * A)))
A_func = interp1d(l, np.arange(0, 0.5, 0.001))
A_norm = A_func(F_obs)
tau_field = copy.deepcopy(raw_tau_field)
tau_field.field_data = tau_field.field_data * A_norm
return tau_field
def trans_map(self, real_coord_start, real_coord_end, dl):
tau_field = self.tau_map(real_coord_start, real_coord_end, dl)
trans_field = copy.deepcopy(tau_field)
trans_field.field_data = np.exp(-trans_field.field_data)
trans_field.field_data = trans_field.field_data / np.mean(trans_field.field_data)
return trans_field
def field_to_part_pos(self, field, tensor = True):
        '''
        convert the grid points of `field` into normalized particle positions
        (box coordinates in [-0.5, 0.5]) relative to the full simulation box
        '''
nx, ny, nz = field.field_data.shape
part_mesh = np.meshgrid(np.arange(nx), np.arange(ny), np.arange(nz))
part_mesh = np.reshape(part_mesh, [3, nx*ny*nz]).T.astype(float)
part_mesh[:, 0] = (part_mesh[:, 0]*field.dl + field.x0 - self.x0) / self.box_size - 0.5
part_mesh[:, 1] = (part_mesh[:, 1]*field.dl + field.y0 - self.y0) / self.box_size - 0.5
part_mesh[:, 2] = (part_mesh[:, 2]*field.dl + field.z0 - self.z0) / self.box_size - 0.5
if tensor:
part_mesh = tf.convert_to_tensor(part_mesh, dtype = float)
part_mesh = tf.expand_dims(part_mesh, 0)
return part_mesh
def mesh_to_part_pos(self, mesh):
'''
        Note this function is incomplete: it converts the mesh into positions
        [0, 0, 0] ... [nx, ny, nz] regardless of the mesh's physical position.
        Obsolete.
'''
_, nx, ny, nz = mesh.shape
part_mesh = np.meshgrid(np.arange(nx), np.arange(ny), np.arange(nz))
part_mesh = np.reshape(part_mesh, [3, nx*ny*nz]).T.astype(float)
part_mesh = tf.convert_to_tensor(part_mesh, dtype = float)
part_mesh = tf.expand_dims(part_mesh, 0)
return part_mesh
def z_from_dist(self, distance):
        dummyred = np.linspace(0., 10., 10000)
        dummydist = self.cosmo.comoving_distance(dummyred)
        res = np.interp(distance, dummydist, dummyred)
        return res
def z_to_dist(self, red):
return self.cosmo.comoving_distance(red).value
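# --- Usage sketch (illustrative; not from the original file) ---
# Assuming `sim` is an instance of the enclosing class, with cosmology,
# box metadata (x0, y0, z0, box_size, n_den) and the raw particle table
# already loaded, a normalized transmission map over a sub-box could be
# built roughly like this:
#
#   start = [sim.x0, sim.y0, sim.z0]                 # hypothetical sub-box corner
#   end = [sim.x0 + 100 * u.Mpc, sim.y0 + 50 * u.Mpc, sim.z0 + 50 * u.Mpc]
#   trans = sim.trans_map(start, end, 1 * u.Mpc)     # Field with exp(-tau), mean-normalized
#   data = trans.field_data                          # 3D array of transmitted flux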
|
pointeee/preheat2022_public
|
FGPA.py
|
FGPA.py
|
py
| 18,340 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38454129872
|
n = int(input())
L = [list(map(int,input().split())) for _ in range(n)]
papers = [0,0,0] #-1,0,1
def same(A):
ref = A[0][0]
for row in A:
for i in row:
if i != ref:
return 9
return ref
def cut(y,x,n):
global papers
num = same([L[i][x:x+n] for i in range(y,y+n)])
if num != 9:
papers[num+1] += 1
return
for i in range(3):
for j in range(3):
cut(y+i*(n//3), x+j*(n//3), n//3)
cut(0,0,n)
for i in papers:
print(i)
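# Illustrative run (hypothetical input): for n = 3 and a uniform grid of
# zeros, same() returns 0 at the top level, so a single "0" paper is cut.
#
#   Input:     Output:
#   3          0
#   0 0 0      1
#   0 0 0      0
#   0 0 0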
|
LightPotato99/baekjoon
|
recursive/papernum.py
|
papernum.py
|
py
| 520 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2578089451
|
from collections import defaultdict
def func(nums1, nums2):
    totals = defaultdict(int)  # id -> summed value
    while nums1:
        np1 = nums1.pop()
        totals[np1[0]] += np1[1]
    while nums2:
        np2 = nums2.pop()
        totals[np2[0]] += np2[1]
    return sorted([[key, value] for key, value in totals.items()])
nums1 = [[2,4],[3,6],[5,5]]
nums2 = [[1,3],[4,3]]
print(func(nums1, nums2))
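# A compact alternative sketch using collections.Counter; note that func()
# above consumes its inputs via pop(), so fresh input lists are assumed,
# and Counter yields (id, value) tuples rather than [id, value] lists:
#
#   from collections import Counter
#   merged = sorted((Counter(dict(nums1)) + Counter(dict(nums2))).items())
#   # [(1, 3), (2, 4), (3, 6), (4, 3), (5, 5)]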
|
mayo516/Algorithm
|
주리머/2-2w/wc/(성공) 6362. Merge Two 2D Arrays by Summing Values.py
|
(성공) 6362. Merge Two 2D Arrays by Summing Values.py
|
py
| 409 |
python
|
en
|
code
| null |
github-code
|
6
|
25798704755
|
#! python3
import sys
import win32api
from PyQt5.QtWidgets import QApplication, QWidget, \
QToolTip, QPushButton, QMessageBox, QDesktopWidget, \
QMainWindow, QAction, QMenu, QStatusBar
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtCore import QCoreApplication
'''
# procedural style
app = QApplication(sys.argv)
w = QWidget()
w.resize(300, 150)
w.setFixedSize(700, 400)
x, y = win32api.GetSystemMetrics(0), win32api.GetSystemMetrics(1)
w.move((x - 250) / 2, (y - 150) / 2)
w.setWindowTitle('Convert images to jpg 1920*1080')
w.show()
sys.exit(app.exec_())
'''
x, y = 0, 0
class Example(QMainWindow):
def __init__(self):
        super().__init__()  # call the parent class constructor
self.initUI()
def initUI(self):
global x,y
QToolTip.setFont(QFont('SansSerif', 10))
self.setToolTip('This is a <b>QWidget</b> widget')
btn = QPushButton('Quit', self)
btn.setToolTip('This is a <b>QPushButton</b> widget')
        # btn.clicked.connect(QCoreApplication.instance().quit)  # click the button to quit the app
        btn.clicked.connect(QApplication.instance().exit)  # click the button to exit the app
btn.resize(btn.sizeHint())
btn.move(50, 50)
exitAct:QAction = QAction(QIcon('46.jpg'), 'Exit', self)
exitAct.setShortcut('Ctrl+Q')
exitAct.setStatusTip('Exit application')
exitAct.triggered.connect(QApplication.instance().quit)
menuBar = self.menuBar()
fileMenu = menuBar.addMenu('File')
fileMenu.addAction(exitAct)
self.toolbar = self.addToolBar('Exit')
self.toolbar.addAction(exitAct)
# subMenu add to FileMenu
subMenu = QMenu('Import', self)
subAct = QAction('Import file', self)
subMenu.addAction(subAct)
fileMenu.addMenu(subMenu)
#checkMenu
viewSBar:QAction = QAction('View statusbar', self, checkable=True)
viewSBar.setStatusTip('View statusbar')
viewSBar.setChecked(True)
viewSBar.triggered.connect(self.toggleMenu)
fileMenu.addAction(viewSBar)
# x, y = win32api.GetSystemMetrics(0), win32api.GetSystemMetrics(1)
# self.setGeometry((x-700)/2, (y-400)/2, 700, 400)
self.center()
self.setFixedSize(700, 400)
        self.setWindowTitle('Convert images to jpg 1080')
self.statusbar = self.statusBar()
self.statusbar.showMessage('Ready')
self.setWindowIcon(QIcon('46.jpg'))
self.show()
def center(self):
qr = self.frameGeometry()#get self RECT
cp = QDesktopWidget().availableGeometry().center()#
qr.moveCenter(cp)
self.move(qr.topLeft())
    # closeEvent is called automatically when the widget is about to close
    # (virtual function override)
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', 'Are you sure to quit',
QMessageBox.Yes|QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
    # virtual function override
def contextMenuEvent(self, event):
cmenu = QMenu(self)
newAct = cmenu.addAction('New')
opnAct = cmenu.addAction('Open')
quitAct = cmenu.addAction('Quit')
#which Action
print(type(event), event.pos())
action = cmenu.exec_(self.mapToGlobal(event.pos()))
if action == quitAct:
QApplication.instance().quit()
def toggleMenu(self, state):
print(state)
if state:
self.statusbar.show()
else:
self.statusbar.hide()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
JcobCN/PyLearn
|
pyQt5.py
|
pyQt5.py
|
py
| 3,718 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28156199284
|
from functools import partial
from pathlib import Path
from typing import Dict, Any, Callable, Tuple, Optional, Sequence
import PIL
import imageio
import numpy as np
import torch
from PIL import Image
from torch import Tensor
from torch.nn import Module, Tanh, Parameter
from torch.nn.functional import grid_sample, l1_loss, mse_loss
from torch.utils.tensorboard import SummaryWriter
from thre3d_atom.networks.dense_nets import SkipMLP, SkipMLPConfig
from thre3d_atom.networks.network_interface import Network
from thre3d_atom.networks.shared.layers import (
PositionalEncodingsEmbedder,
PixelwiseNorm,
)
from thre3d_atom.utils.constants import NUM_COLOUR_CHANNELS
from thre3d_atom.utils.imaging_utils import (
adjust_dynamic_range,
to8b,
mse2psnr,
get_2d_coordinates,
)
from thre3d_atom.utils.logging import log
class FeatureGrid2D(Module):
def __init__(
self,
height: int,
width: int,
feature_dims: int,
tunable: bool = True,
device: torch.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
),
) -> None:
super().__init__()
# state of the object:
self._height = height
self._width = width
self._feature_dims = feature_dims
self._tunable = tunable
self.features = torch.empty(
(1, feature_dims, height, width), device=device, requires_grad=True
)
torch.nn.init.xavier_uniform_(self.features)
if self._tunable:
self.features = Parameter(self.features)
@classmethod
def from_feature_tensor(cls, feature_tensor: Tensor) -> Any:
_, feature_dims, height, width = feature_tensor.shape
# initialize a random feature_grid
feature_grid = cls(
height=height,
width=width,
feature_dims=feature_dims,
tunable=False,
device=feature_tensor.device,
)
        # use the given feature_tensor as its features:
feature_grid.features = feature_tensor
return feature_grid
def extra_repr(self) -> str:
return (
f"grid_dims: {self.features.shape[2:]}, "
f"feature_dims: {self.features.shape[1]}, "
f"tunable: {self._tunable}"
)
def get_save_info(self) -> Dict[str, Any]:
return {
"conf": {
"height": self._height,
"width": self._width,
"feature_dims": self._feature_dims,
"tunable": self._tunable,
},
"state_dict": self.state_dict(),
}
def forward(self, coords: Tensor) -> Tensor:
"""coords should be of shape => [N x 2], and be in the range [-1, 1]"""
sam_vals = grid_sample(
# note the convention difference between the image and sample coordinates
self.features.permute(0, 1, 3, 2),
coords[None, None, ...],
mode="bilinear",
align_corners=False,
)
return sam_vals.permute(0, 2, 3, 1)[0, 0, ...]
class ImageDecoderMLP(Network):
# noinspection PyUnresolvedReferences
def __init__(
self,
mlp: SkipMLP,
feature_dims: int = 32,
feature_embedding_dims: int = 0,
use_local_coords: bool = False,
local_coords_embedding_dims: int = 0,
normalize_features: bool = False,
) -> None:
super().__init__()
self._mlp = mlp
self._feature_dims = feature_dims
self._feature_embedding_dims = feature_embedding_dims
self._use_local_coords = use_local_coords
self._local_coords_embedding_dims = local_coords_embedding_dims
self._normalize_features = normalize_features
# objects of modification:
self._normalizer = PixelwiseNorm()
self._feature_embedder = PositionalEncodingsEmbedder(
input_dims=self._feature_dims, emb_dims=self._feature_embedding_dims
)
self._local_coords_embedder = PositionalEncodingsEmbedder(
input_dims=2, emb_dims=self._local_coords_embedding_dims
)
@property
def input_shape(self) -> Sequence[Tuple[int, ...]]:
return self._mlp.input_shape
@property
def output_shape(self) -> Sequence[Tuple[int, ...]]:
return self._mlp.output_shape
@property
def feature_dims(self) -> int:
return self._feature_dims
@property
def use_local_coords(self) -> bool:
return self._use_local_coords
def get_save_info(self) -> Dict[str, Any]:
return {
"conf": {
"feature_dims": self._feature_dims,
"feature_embedding_dims": self._feature_embedding_dims,
"use_local_coords": self._use_local_coords,
"local_coords_embedding_dims": self._local_coords_embedding_dims,
"normalize_features": self._normalize_features,
},
"mlp": self._mlp.get_save_info(),
"state_dict": self.state_dict(),
}
def load_weights(self, weights: Dict[str, Any]) -> None:
self._mlp.load_state_dict(weights["mlp"]["state_dict"])
def forward(self, x: Tensor) -> Tensor:
if self._use_local_coords:
features, local_coords = (
x[..., : self._feature_dims],
x[..., self._feature_dims :],
)
else:
features, local_coords = x, torch.zeros(size=(x.shape[0], 0))
embedded_features = self._feature_embedder(features)
embedded_local_coords = self._local_coords_embedder(local_coords)
normalized_features = self._normalizer(features)
if self._use_local_coords:
feats = (
normalized_features if self._normalize_features else embedded_features
)
mlp_input = torch.cat([feats, embedded_local_coords], dim=-1)
else:
mlp_input = (
normalized_features if self._normalize_features else embedded_features
)
return self._mlp(mlp_input)
def get_default_image_decoder_mlp(
feature_dims: int = 32,
feature_embedding_dims: int = 0,
use_local_coords: bool = False,
local_coords_embedding_dims: int = 0,
normalize_features: bool = False,
) -> ImageDecoderMLP:
feat_inp_dims = feature_dims + (2 * feature_dims * feature_embedding_dims)
lc_inp_dims = 2 + (2 * 2 * local_coords_embedding_dims)
if use_local_coords:
mlp_input_dims = feat_inp_dims + lc_inp_dims
elif normalize_features:
mlp_input_dims = feature_dims
else:
mlp_input_dims = feat_inp_dims
mlp_config = SkipMLPConfig(
input_dims=mlp_input_dims,
layer_depths=[256],
output_dims=NUM_COLOUR_CHANNELS,
skips=[False],
use_equalized_learning_rate=True,
out_activation_fn=Tanh(),
)
return ImageDecoderMLP(
SkipMLP(mlp_config),
feature_dims=feature_dims,
feature_embedding_dims=feature_embedding_dims,
use_local_coords=use_local_coords,
local_coords_embedding_dims=local_coords_embedding_dims,
normalize_features=normalize_features,
)
def decode_coords_with_fg_and_mlp(
coords: Tensor,
feature_grid: FeatureGrid2D,
decoder_mlp: ImageDecoderMLP,
image_resolution: Tuple[int, int],
) -> Tensor:
"""decodes the coords tensor into RGB pixel values"""
orig_shape = coords.shape
coords = coords.reshape(-1, orig_shape[-1])
image_height, image_width = image_resolution
local_coords = adjust_dynamic_range(coords, drange_in=(-1, 1), drange_out=(0, 1))
local_coords[..., 0] *= image_height
local_coords[..., 1] *= image_width
local_coords = local_coords - torch.floor(local_coords)
decoded_features = feature_grid(coords)
decoder_input = (
torch.cat([decoded_features, local_coords], dim=-1)
if decoder_mlp.use_local_coords
else decoded_features
)
return decoder_mlp(decoder_input).reshape(*orig_shape[:-1], -1)
class ImageModel:
def __init__(
self,
image_height: int,
image_width: int,
feature_dims: int = 32,
decoder_mlp_maker: Callable[[], Network] = get_default_image_decoder_mlp,
device: torch.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
),
verbose_creation: bool = True,
) -> None:
self._image_height = image_height
self._image_width = image_width
self._feature_dims = feature_dims
self._device = device
        # compute the height and width of the feature-grid so as to keep the
        # number of parameters in the image and the parametric model the same:
self._setup_feature_dims()
# create a feature grid object (note that these are kept public):
self.feature_grid = FeatureGrid2D(
self._feature_height, self._feature_width, feature_dims, device=device
)
self.decoder_mlp = decoder_mlp_maker().to(self._device)
# print info related to the Feature Grid and the Decoder MLP:
if verbose_creation:
log.info(f"Created Feature grid: {self.feature_grid}")
log.info(f"Created Decoder MLP: {self.decoder_mlp}")
@property
def image_resolution(self) -> Tuple[int, int]:
return self._image_height, self._image_width
@staticmethod
def compute_feature_grid_dims(
image_resolution: Tuple[int, int], feature_dims: int
) -> Tuple[int, int]:
image_height, image_width = image_resolution
aspect_ratio = image_width / image_height
total_image_params = image_width * image_height * NUM_COLOUR_CHANNELS
needed_params = total_image_params / feature_dims
feature_grid_height = int(np.ceil(np.sqrt(needed_params / aspect_ratio)))
feature_grid_width = int(aspect_ratio * feature_grid_height)
return feature_grid_height, feature_grid_width
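    # Worked example (illustrative): for a 512x512 RGB image with feature_dims=32,
    # total_image_params = 512 * 512 * 3 = 786432 and needed_params = 24576, so the
    # grid is ceil(sqrt(24576 / 1.0)) = 157 rows by int(1.0 * 157) = 157 columns;
    # 157 * 157 * 32 = 788768 grid parameters, close to the image's own count.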
def _setup_feature_dims(self) -> None:
self._feature_height, self._feature_width = self.compute_feature_grid_dims(
image_resolution=(self._image_height, self._image_width),
feature_dims=self._feature_dims,
)
@staticmethod
def _shuffle_tensor_2d(tensor_2d: Tensor) -> Tensor:
""" shuffles a 2D Tensor of shape [N x C]"""
return tensor_2d[torch.randperm(len(tensor_2d))]
def _infinite_data_loader(self, data: Tensor, batch_size: int) -> Tensor:
while True:
data = self._shuffle_tensor_2d(data)
for batch_index in range(0, len(data), batch_size):
data_batch = data[batch_index : batch_index + batch_size]
if data_batch.shape[0] == batch_size:
yield data_batch
else:
break
@staticmethod
def _check_log_condition(
current_step: int, frequency_step: int, start_step: int, end_step: int
) -> bool:
return (
current_step % frequency_step == 0
or current_step == start_step
or current_step == end_step
)
def get_save_info(self) -> Dict[str, Any]:
return {
"conf": {
"image_height": self._image_height,
"image_width": self._image_width,
"feature_dims": self._feature_dims,
},
"feature_grid": self.feature_grid.get_save_info(),
"decoder_mlp": self.decoder_mlp.get_save_info(),
}
def render(
self,
render_resolution: Optional[Tuple[int, int]] = None,
chunk_size: int = 64 * 1024,
) -> Tensor:
height, width = (
(self._image_height, self._image_width)
if render_resolution is None
else render_resolution
)
# create a coordinates mesh-grid:
coords = get_2d_coordinates(height, width)
# flatten the coordinates and bring them on the GPU:
flat_coords = coords.reshape(-1, coords.shape[-1]).to(self._device)
# decode all the coordinates into pixel values chunk by chunk:
decoded_image = []
with torch.no_grad():
for chunk_index in range(0, len(flat_coords), chunk_size):
coord_chunk = flat_coords[chunk_index : chunk_index + chunk_size]
decoded_image.append(
decode_coords_with_fg_and_mlp(
coord_chunk,
self.feature_grid,
self.decoder_mlp,
self.image_resolution,
)
)
decoded_image = torch.cat(decoded_image, dim=0)
decoded_image = decoded_image.reshape(height, width, -1)
decoded_image = adjust_dynamic_range(
decoded_image.cpu(),
drange_in=(-1, 1),
drange_out=(0, 1),
slack=True,
)
return decoded_image
def train(
self,
training_image: PIL.Image.Image,
num_iterations: int = 10000,
batch_size: int = 8192,
learning_rate: float = 0.003,
lr_decay_steps: int = 5000,
feedback_frequency: int = 1000,
loss_feedback_frequency: int = 10,
testing_frequency: int = 1000,
save_frequency: int = 2000,
output_dir: Path = Path(__file__).parent.absolute() / "logs",
) -> None:
# load the training image and create a dataset of pixel_coordinates -> pixel RGB values:
image_np = np.array(training_image).astype(np.float32) / 255
if len(image_np.shape) < 3:
image_np = np.tile(image_np[..., None], (1, 1, 3))
image_np = image_np[..., :3] # in case of > 3 channel images
real_feedback_image = image_np
# bring the pixel range to (-1, 1) for training
image_np = adjust_dynamic_range(image_np, drange_in=(0, 1), drange_out=(-1, 1))
# make sure the training image is compatible with the ImageModel
assert (
self._image_height == image_np.shape[0]
and self._image_width == image_np.shape[1]
), (
f"The provided training image with size ({image_np.shape[:-1]}) is incompatible with the Image-Model's"
f"image size ({self._image_height, self._image_width})"
)
image_coords = get_2d_coordinates(self._image_height, self._image_width)
coord_rgb_image = torch.cat(
[
image_coords.to(self._device),
torch.from_numpy(image_np).to(self._device),
],
dim=-1,
)
training_data = coord_rgb_image.reshape(-1, coord_rgb_image.shape[-1])
training_data_loader = iter(
self._infinite_data_loader(training_data, batch_size=batch_size)
)
# setup optimizer:
optimizer = torch.optim.Adam(
params=[
{"params": self.feature_grid.parameters(), "lr": learning_rate},
{"params": self.decoder_mlp.parameters(), "lr": learning_rate},
],
betas=(0, 0.99),
)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
# setup output directories
# fmt: off
model_dir = output_dir / "saved_models"
logs_dir = output_dir / "training_logs"
tensorboard_dir = logs_dir / "tensorboard"
render_dir = logs_dir / "rendered_output"
for directory in (model_dir, logs_dir, tensorboard_dir,
render_dir):
directory.mkdir(exist_ok=True, parents=True)
# fmt: on
# create the tensorboard directory:
tensorboard_writer = SummaryWriter(tensorboard_dir)
# log the real image for feedback:
log.info(f"Logging real feedback image")
imageio.imwrite(
render_dir / f"1__real_log.png",
to8b(real_feedback_image),
)
log.info(f"!! Beginning Training !!")
for num_iter in range(1, num_iterations + 1):
# load the next batch of data:
data_batch = next(training_data_loader)
coords, gt_rgb = (
data_batch[..., :-NUM_COLOUR_CHANNELS],
data_batch[..., -NUM_COLOUR_CHANNELS:],
)
# forward pass and compute the loss
pred_rgb = decode_coords_with_fg_and_mlp(
coords,
self.feature_grid,
self.decoder_mlp,
self.image_resolution,
)
loss = l1_loss(pred_rgb, gt_rgb)
# perform single step of optimization
optimizer.zero_grad()
loss.backward()
optimizer.step()
# verbose logging per iteration:
loss_value = loss.item()
psnr_value = mse2psnr(mse_loss(pred_rgb, gt_rgb).item())
# tensorboard summaries feedback (logged every iteration)
for summary_name, summary_value in (
("loss", loss_value),
("psnr", psnr_value),
):
if summary_value is not None:
tensorboard_writer.add_scalar(
summary_name, summary_value, global_step=num_iter
)
# console loss feedback log
if self._check_log_condition(
num_iter, loss_feedback_frequency, 1, num_iterations
):
loss_info_string = (
f"Global Iteration: {num_iter} "
f"Loss: {loss_value: .5f} "
f"PSNR: {psnr_value: .5f} "
)
log.info(loss_info_string)
# step the learning rate schedulers
if num_iter % lr_decay_steps == 0:
lr_scheduler.step()
new_lrs = [param_group["lr"] for param_group in optimizer.param_groups]
log_string = f"Adjusted learning rate | learning rate: {new_lrs} "
log.info(log_string)
# save the rendered feedback
if self._check_log_condition(
num_iter, feedback_frequency, 1, num_iterations
):
imageio.imwrite(
render_dir / f"render_log_{num_iter}.png",
to8b(self.render().numpy()),
)
# obtain and log test metrics
if self._check_log_condition(
num_iter, testing_frequency, 1, num_iterations
):
log.info(f"Computing test score ...")
test_psnr = mse2psnr(
mse_loss(
self.render(),
torch.from_numpy(real_feedback_image),
).item()
)
log.info(f"Full image PSNR: {test_psnr: .5f}")
tensorboard_writer.add_scalar(
"full_image_psnr", test_psnr, global_step=num_iter
)
# save the model
if self._check_log_condition(num_iter, save_frequency, 1, num_iterations):
torch.save(
self.get_save_info(),
model_dir / f"model_iter_{num_iter}.pth",
)
# save the final model
torch.save(self.get_save_info(), model_dir / f"model_final.pth")
log.info("!! Training complete !!")
def load_trained_image_model(
model_path: Path, device: torch.device, verbose_creation: bool = True
) -> ImageModel:
loaded_model = torch.load(model_path)
if verbose_creation:
log.info(f"loaded trained model from: {model_path}")
img_mod = ImageModel(
**loaded_model["conf"],
device=device,
verbose_creation=verbose_creation,
decoder_mlp_maker=partial(
get_default_image_decoder_mlp,
**loaded_model["decoder_mlp"]["conf"],
),
)
img_mod.feature_grid.load_state_dict(loaded_model["feature_grid"]["state_dict"])
img_mod.decoder_mlp.load_weights(loaded_model["decoder_mlp"])
return img_mod
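# --- Usage sketch (illustrative; file paths are hypothetical) ---
# A minimal end-to-end run with the classes above might look like:
#
#   from pathlib import Path
#   from PIL import Image
#
#   image = Image.open("photo.png")                  # hypothetical input image
#   width, height = image.size
#   model = ImageModel(image_height=height, image_width=width)
#   model.train(image, num_iterations=2000, output_dir=Path("./logs"))
#   recon = model.render()                           # (H, W, 3) tensor in [0, 1]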
|
akanimax/3inGAN
|
projects/thre3ingan/singans/image_model.py
|
image_model.py
|
py
| 20,218 |
python
|
en
|
code
| 3 |
github-code
|
6
|
36107263574
|
import torch
def unpack_data(results):
pix = results['input']
hand_pix = results['hand_pix']
fake_fish_depth = results['fake_fish_depth']
heatmap = results['heatmap']
heatmap_true = results['heatmap_true']
heatmap_reprojected = results['heatmap_reprojected']
joint = results['joint']
return pix, hand_pix, fake_fish_depth, heatmap, heatmap_true, heatmap_reprojected, joint
|
KAIST-HCIL/DeepFisheyeNet
|
run/pipeline/helper.py
|
helper.py
|
py
| 407 |
python
|
en
|
code
| 27 |
github-code
|
6
|
31791424883
|
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='bhamcal',
version='0.1',
license='GPL 3',
python_requires='>=3',
author='Justin Chadwell',
author_email='[email protected]',
url='https://github.com/jedevc/bhamcal',
description='A timetable extractor for University of Birmingham',
long_description=long_description,
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
install_requires=[
'click',
'colorama',
'beautifulsoup4',
'pytz',
'google-api-python-client',
'google-auth-httplib2',
'google-auth-oauthlib'
],
extras_require={
'browser': ['selenium']
},
entry_points={
'console_scripts': [
'bhamcal=bhamcal:main'
]
},
)
|
jedevc/bhamcal
|
setup.py
|
setup.py
|
py
| 881 |
python
|
en
|
code
| 12 |
github-code
|
6
|
74543276668
|
# Let's solve this using the count method
s='110010101001'
answer=[]
num=0
cnt=0
zero=0
while s != '1':
ones = s.count('1')
zero += len(s)-ones
s=bin(ones)[2:]
cnt+=1
# while(True):
# if(s=='1'):
# break
# # strip out the zeros
# new_s=''
# for i in s:
# if (i=='1'):
# new_s+=i
# else:
# zero+=1
# c=len(new_s)
# s=bin(c)[2:]
# # print('new',s,new_s)
# cnt+=1
# if(cnt>10):
# break
answer.append(cnt)
answer.append(zero)
print(answer)
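# Trace for s = '110010101001' (12 bits, six 1s):
#   round 1: zero += 6, s = bin(6)[2:] = '110'
#   round 2: zero += 1, s = bin(2)[2:] = '10'
#   round 3: zero += 1, s = bin(1)[2:] = '1'   -> loop ends
# so the script prints [3, 8]: 3 rounds, 8 zeros removed in total.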
|
JiHwonChoi/Algorithm
|
code_challenge01_05.py
|
code_challenge01_05.py
|
py
| 584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74416652349
|
# This program displays five random numbers
# from the range 1 to 100.
import random
def main():
for count in range(5):
        # Generate a random number.
number = random.randint(1, 100)
        # Display the generated number.
print(number)
# Call the main() function.
main()
|
JeanneBM/Python
|
Owoce Programowania/R05/25. Random_numbers2.py
|
25. Random_numbers2.py
|
py
| 317 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
42677098996
|
from pwn import *
# conn = remote('20.197.63.174', 3331)
conn = process(['python3', 'game.py'])
def send_replacements(lst):
global conn
n = len(lst)
confirm = [b'y'] * n
confirm[-1] = b'n'
for i in range(n):
conn.sendline(lst[i])
conn.sendline(confirm[i])
def play_level(lst, level=0):
global conn
conn.sendline(b'y')
send_replacements(lst)
conn.recvuntil(b'Level passed!\n')
# level 1
play_level([
b'hacking => CSeC'
], level=1)
# level 2
play_level([
b'hi => bye',
b'asc => crash'
], level=2)
# level 3
play_level([
b'qq => q'
], level=3)
# level 4
play_level([
b'qq => d', b'qd => d', b'dq => d', b'dd => d',
b'd => quackquack'
], level=4)
conn.recvuntil(b'Game: ')
flag1 = conn.recvline().strip().decode()
print(flag1)
# level 5
play_level([
b'*0 => *aR', b'*1 => *bR', # init left
b'R0$ => La$', b'R1$ => Lb$', # init right
b'a0L => aaR', b'a1L => abR', b'b0L => baR', b'b1L => bbR', # left turnback
b'R0a => Laa', b'R1a => Lba', b'R0b => Lab', b'R1b => Lbb', # right turnback
b'0L => L0', b'1L => L1', # move left
b'R0 => 0R', b'R1 => 1R', # move right
b'L => p', b'aR => p', b'bR => p', # base case palindrome
b'apa => p', b'bpb => p', # palindrome reduction from center
b'apb => q', b'bpa => q', b'aqa => q', b'bqb => q', b'aqb => q', b'bqa => q', # not_palindrome reduction
b'*p$ => palindrome', b'*q$ => not_palindrome' # final answer
], level=5)
conn.recvuntil(b'flag: ')
flag2 = conn.recvline().strip().decode()
print(flag2)
conn.close()
|
neelaryan2/CTFs
|
ctf/2021/iitbctf/swap_game/solve.py
|
solve.py
|
py
| 1,567 |
python
|
en
|
code
| 0 |
github-code
|
6
|
60843869
|
# -*- coding:utf-8 -*-
import time
import pickle
from .utils import Logger
class Scheduler(object):
spider = None
def __init__(self, crawler):
self.settings = crawler.settings
self.logger = Logger.from_crawler(crawler)
if self.settings.getbool("CUSTOM_REDIS"):
from custom_redis.client import Redis
else:
from redis import Redis
self.redis_conn = Redis(self.settings.get("REDIS_HOST"),
self.settings.getint("REDIS_PORT"))
self.queue_name = None
self.queues = {}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def open(self, spider):
self.spider = spider
self.queue_name = self.settings.get(
"TASK_QUEUE_TEMPLATE", "%s:request:queue") % spider.name
spider.set_redis(self.redis_conn)
def enqueue_request(self, request):
request.callback = getattr(
request.callback, "__name__", request.callback)
request.errback = getattr(
request.errback, "__name__", request.errback)
self.redis_conn.zadd(
self.queue_name,
pickle.dumps(request),
-int(request.meta["priority"]))
self.logger.debug("Crawlid: %s, url: %s added to queue. " % (
request.meta['crawlid'], request.url))
def next_request(self):
self.logger.debug(
"length of queue %s is %s" % (
self.queue_name, self.redis_conn.zcard(self.queue_name)))
item = None
if self.settings.getbool("CUSTOM_REDIS"):
item = self.redis_conn.zpop(self.queue_name)
else:
pipe = self.redis_conn.pipeline()
pipe.multi()
pipe.zrange(self.queue_name, 0, 0).zremrangebyrank(
self.queue_name, 0, 0)
result, _ = pipe.execute()
if result:
item = result[0]
if item:
request = pickle.loads(item)
request.callback = request.callback and getattr(
self.spider, request.callback)
request.errback = request.errback and getattr(
self.spider, request.errback)
return request
def close(self, reason):
self.logger.info("Closing Spider: %s. " % self.spider.name)
def has_pending_requests(self):
return False
class SingleTaskScheduler(Scheduler):
def __init__(self, crawler):
super(SingleTaskScheduler, self).__init__(crawler)
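        # note: the "%s" placeholder below is never interpolated, and
        # Scheduler.open() will overwrite queue_name from the template anyway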
self.queue_name = "%s:single:queue"
def has_pending_requests(self):
return self.redis_conn.zcard(self.queue_name) > 0
|
ShichaoMa/structure_spider
|
structor/scheduler.py
|
scheduler.py
|
py
| 2,682 |
python
|
en
|
code
| 29 |
github-code
|
6
|
24132755409
|
#!/usr/bin/env python
import argparse
def filter_sam( out_fn, in_fn, chromosome):
with open(out_fn, 'w') as donor_out:
for line in open(in_fn, 'r'):
if line.startswith("@SQ"):
if "SN:{}\t".format(chromosome) in line:
donor_out.write(line)
elif line.startswith("@"):
donor_out.write(line)
else:
fields = line.strip('\n').split('\t')
if fields[2] == chromosome:
donor_out.write(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Filter specified chromosome')
parser.add_argument('--target', help='target filename')
parser.add_argument('--source', help='source filename')
parser.add_argument('--chromosome', help='name of chromosome')
args = parser.parse_args()
filter_sam(args.target, args.source, args.chromosome)
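# Example invocation (hypothetical filenames):
#   python filter_sam.py --source input.sam --target chr20.sam --chromosome 20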
|
supernifty/reference-bias
|
bin/filter_sam.py
|
filter_sam.py
|
py
| 832 |
python
|
en
|
code
| 2 |
github-code
|
6
|
811313586
|
# Inserting a Node Into a Sorted Doubly Linked List
# Given a reference to the head of a sorted doubly linked list and an integer `data`,
# create a new DoublyLinkedListNode with that value and insert it at the proper
# location to maintain the sort order.
class DoublyLinkedListNode:
    def __init__(self, data=0, prev=None, next=None):
        self.data = data
        self.prev = prev
        self.next = next
class Solution:
    @staticmethod
    def sortedInsert(head, data):
current = head
newNode = DoublyLinkedListNode(data)
# If the node is to be inserted at
# the beginning of the doubly linked list
if head.data >= newNode.data:
newNode.next = head
newNode.next.prev = newNode
head = newNode
else:
current = head
# Locate the node after which
# the new node is to be inserted
while current.next and current.next.data < newNode.data:
current = current.next
# Make the appropriate links
newNode.next = current.next
# If the new node is not inserted
# at the end of the list
if current.next:
newNode.next.prev = newNode
newNode.prev = current
current.next = newNode
return head
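if __name__ == '__main__':
    # Minimal self-check (illustrative, not part of the original exercise):
    # build the sorted list 1 <-> 3 <-> 4, insert 2, and print values in order.
    head = DoublyLinkedListNode(1)
    head.next = DoublyLinkedListNode(3, prev=head)
    head.next.next = DoublyLinkedListNode(4, prev=head.next)
    head = Solution.sortedInsert(head, 2)
    node = head
    while node:
        print(node.data)  # prints 1, 2, 3, 4 on separate lines
        node = node.next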
|
Saima-Chaity/Leetcode
|
LinkedList/insertingANodeIntoASortedDoublyLinkedList.py
|
insertingANodeIntoASortedDoublyLinkedList.py
|
py
| 1,344 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1425636890
|
# -*- coding: utf-8 -*-
# import needed modules
from requests import get
from csv import writer, reader
from datetime import date
import sys
# %% define function for export/save data to *.csv file
def write_csv(data, filepath):
with open(filepath, 'w', newline='') as csv_file:
write = writer(csv_file)
for element in data:
write.writerow(element.split(","))
# %% check arguments passed by the user via sys.argv
if len(sys.argv) == 3:
api_key = sys.argv[1]
user_input_date = sys.argv[2]
elif len(sys.argv) == 2:
api_key = sys.argv[1]
user_input_date = str(date.today())
elif len(sys.argv) < 2:
print("\nPodano za mało argumentów za pomocą 'sys.argv'.\n"
"Program do działania wymaga dwóch lub trzech argumentów:\n"
"rain_forecast.py << klucz API >> << data w formacie YYYY-MM-DD >>\n"
"lub\n"
"rain_forecast.py << klucz API >>.\n\n"
"Działanie programu zakończone.")
sys.exit()
else:
print("\nPodano za dużo argumentów za pomocą 'sys.argv'.\n"
"Program do działania wymaga dwóch lub trzech argumentów:\n"
"rain_forecast.py << klucz API >> << data w formacie YYYY-MM-DD >>\n"
"lub\n"
"rain_forecast.py << klucz API >>.\n\n"
"Działanie programu zakończone.")
sys.exit()
if len(user_input_date) != 10:
print("\nNieprawidłowy format daty.\n"
"Prawidłowy format to YYYY-MM-DD, np. 2022-10-10.\n\n"
"Działanie programu zakończone.")
sys.exit()
# %% variables to downlad data from API
url = "https://weatherbit-v1-mashape.p.rapidapi.com/forecast/daily"
# coordinates for Poznań
latitude = 52.40692
longitude = 16.92993
querystring = {"lat": latitude, "lon": longitude}
headers = {
"X-RapidAPI-Key": f"{api_key}",
"X-RapidAPI-Host": "weatherbit-v1-mashape.p.rapidapi.com"
}
# %% variables to open/read data from *.csv file
FILEPATH = "checked_days.csv"
csv_file = reader(open(FILEPATH))
lines_from_csv = list(csv_file)
dictionary_CSV = {}
# %% load data from *.csv file to dict
for element in lines_from_csv:
dictionary_CSV[f"{element[0]}"] = {
'precip': float(element[1]),
'snow': float(element[2])
}
# %% main 'if/elif/else' statetments
if user_input_date in dictionary_CSV.keys():
    if (dictionary_CSV[f'{user_input_date}']['precip'] > 0 and
            dictionary_CSV[f'{user_input_date}']['snow'] > 0):
        print(f"\nOn {user_input_date} there will be sleet "
              "(rain and snow) in Poznań :(")
    elif dictionary_CSV[f'{user_input_date}']['precip'] > 0:
        print(f"\nOn {user_input_date} it will rain in Poznań! "
              "Take an umbrella with you :)")
    elif dictionary_CSV[f'{user_input_date}']['snow'] > 0:
        print(f"\nOn {user_input_date} it will snow in Poznań! "
              "Wear something warm :)")
    else:
        print(f"\nOn {user_input_date} it will not rain in Poznań! "
              "Have a nice day :)")
else:
print("\nPobieram dane z API.")
dictionary_API = {}
r = get(url, headers=headers, params=querystring)
response = r.json()
weather_forecast_data = response['data']
for day in weather_forecast_data:
dictionary_API[f"{day['datetime']}"] = {
'precip': day['precip'],
'snow': day['snow']
}
if user_input_date in dictionary_API.keys():
        if (dictionary_API[f'{user_input_date}']['precip'] > 0 and
                dictionary_API[f'{user_input_date}']['snow'] > 0):
            print(f"\nOn {user_input_date} there will be sleet "
                  "(rain and snow) in Poznań :(")
        elif dictionary_API[f'{user_input_date}']['precip'] > 0:
            print(f"\nOn {user_input_date} it will rain in Poznań! "
                  "Take an umbrella with you :)")
        elif dictionary_API[f'{user_input_date}']['snow'] > 0:
            print(f"\nOn {user_input_date} it will snow in Poznań! "
                  "Wear something warm :)")
        else:
            print(f"\nOn {user_input_date} it will not rain in Poznań! "
                  "Have a nice day :)")
    else:
        print(f"\nI don't know whether it will rain in Poznań "
              f"on {user_input_date}!")
# write/save data to *.csv file
list_for_write_csv = []
    for day in dictionary_API:
        string = (f"{day},{dictionary_API[day]['precip']},"
                  f"{dictionary_API[day]['snow']}")
list_for_write_csv.append(string)
write_csv(list_for_write_csv, FILEPATH)
# %%
|
filrat2/rain_forecast
|
rain_forecast.py
|
rain_forecast.py
|
py
| 4,691 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
36229690900
|
from typing import Optional
'''
1373. Maximum Sum BST in Binary Tree
dfs
Accumulate subtree sums while checking the BST property along the way.
Once a subtree is not a BST, return the +/-520520 sentinels directly.
'''
null = None
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def maxSumBST(self, root: Optional[TreeNode]) -> int:
res = 0
def dfs(node):
nonlocal res
if node is None:
return 0, 520520, -520520
sum_left, min_left, max_left = dfs(node.left)
sum_right, min_right, max_right = dfs(node.right)
if min_left == -520520 or min_right == -520520 or max_left == 520520 or max_right == 520520:
return 0, -520520, 520520
if max_left >= node.val or min_right <= node.val:
return 0, -520520, 520520
sum1 = sum_left + sum_right + node.val
res = max(res, sum1)
return sum1, min(node.val, min_left), max(node.val, max_right)
_, _, _ = dfs(root)
return res
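def build_tree(values):
    # helper (not in the original): builds a binary tree from a LeetCode-style
    # level-order list so the demo call below actually runs; None marks a gap
    if not values or values[0] is None:
        return None
    root = TreeNode(values[0])
    queue = [root]
    i = 1
    while queue and i < len(values):
        node = queue.pop(0)
        if i < len(values) and values[i] is not None:
            node.left = TreeNode(values[i])
            queue.append(node.left)
        i += 1
        if i < len(values) and values[i] is not None:
            node.right = TreeNode(values[i])
            queue.append(node.right)
        i += 1
    return root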
s = Solution()
print(s.maxSumBST(build_tree([1,4,3,2,4,2,5,null,null,null,null,null,null,4,6])))
|
z-w-wang/Leetcode-Problemlist
|
DailyProblem/Tree/1373.2023-05-20.py
|
1373.2023-05-20.py
|
py
| 1,207 |
python
|
en
|
code
| 3 |
github-code
|
6
|
1336912689
|
#!/usr/bin/env python3
from http.server import ThreadingHTTPServer
from os.path import dirname, realpath
from .httpHandler import HTTPApiHandler
from .fileCacher import cacheFile
from ..query import QueryHandler
def startServer(port):
HTTPApiHandler.queryHandler = QueryHandler()
filesPath = dirname(realpath(__file__)) + "/files"
cacheFile(None, filesPath + "/404.html", "text/html")
cacheFile("/404_styles.css", filesPath + "/404_styles.css", "text/css")
cacheFile("/404.png", filesPath + "/404.png", "image/png")
cacheFile("/index.html", filesPath + "/index.html", "text/html")
cacheFile("/rest-caller.js", filesPath + "/rest-caller.js", "text/javascript")
cacheFile("/styles.css", filesPath + "/styles.css", "text/css")
cacheFile("/roboto.css", filesPath + "/roboto.css", "text/css")
cacheFile("/Roboto-Regular.ttf", filesPath + "/Roboto-Regular.ttf", "application/octet-stream")
cacheFile("/Roboto-Medium.ttf", filesPath + "/Roboto-Medium.ttf", "application/octet-stream")
httpd = ThreadingHTTPServer(('', port), HTTPApiHandler)
try:
print("Server started")
httpd.serve_forever()
except Exception as e:
print("\nException occurred, stopping: {}".format(e))
except KeyboardInterrupt:
print("\nGracefully stopping server...")
httpd.server_close()
print("Server stopped")
if __name__ == "__main__":
startServer(8080)
|
wheelerd/uni-chatbot
|
stockbot/web_frontend/__main__.py
|
__main__.py
|
py
| 1,427 |
python
|
en
|
code
| 2 |
github-code
|
6
|
27321029603
|
"""
Kela Kanta data preprocessing
Reads Kela Kanta data, applies the preprocessing steps below and writes the result to files.
- remove extra linebreaks
- remove empty lines
- transform base16 ints to base10 ints
- parse dates
- replace "," with "." as a decimal point
Note: running this script on ePouta takes several hours.
Speed could be improved with e.g. multiprocessing if needed.
Input files:
- 107_522_2021_LM_<YYYY>.csv.finreg_IDs (11 files)
- 107_522_2021_LT_<YYYY>.csv.finreg_IDs (11 files)
Output files:
- prescriptions_<YYYY>_<YYYY-MM-DD>.csv (11 files)
- deliveries_<YYYY>_<YYYY-MM-DD>.csv (11 files)
"""
import os
import re
import pandas as pd
from datetime import datetime
from functools import partial
from finregistry_data.config import KELA_KANTA_INPUT_DIR, KELA_KANTA_OUTPUT_DIR
def read_prescription_data(filepath):
"""Read drug prescriptions data in chunks"""
hash_to_int = partial(int, base=16)
hash_cols = [
"CDA_ID_MD5HASH",
"CDA_SET_ID_MD5HASH",
"DOC_GROUP_MD5HASH",
"CDA_ADDENDUM_REF_MD5HASH",
"CDA_RPLC_ID_MD5HASH",
"PRO_PERSON_REG_MD5HASH",
"ORGANIZATION_OID_MD5HASH",
]
date_cols = ["CREATION_DATE"]
dtypes = {
"PATIENT_ID": str,
"DOC_TYPE_CODE": float,
"DOC_VERSION": float,
"DRUG_NAME_C": str,
"DOSE_QUANTITY_TEXT": str,
"ATC_CODE": str,
"PURPOSE_OF_USE": str,
"DOSAGE_INSTRUCTIONS": str,
"ITERATION_CODE": float,
"TYPE_1_AMOUNT": str,
"TYPE_1_SIZE": str,
"TYPE_2_AMOUNT": str,
"TYPE_2_SIZE_UNIT": str,
"TYPE_3_TIME": str,
"TYPE_3_UNIT": str,
"PRODUCT_CODE": float,
"DOSE_DISTRIBUTION": str,
"PREPARATION_TYPE_CODE": float,
"RESEPTISTATUS": str,
"LAAKEMUOTOKOODI": float,
"ERIKOISALA_CODE": str,
"MED_EXCHANGE_BAN": str,
"RENEWAL_BAN": str,
}
chunks = pd.read_csv(
filepath,
sep=";",
engine="python",
encoding="utf-8",
encoding_errors="ignore",
on_bad_lines="warn",
converters=dict.fromkeys(hash_cols, hash_to_int),
parse_dates=date_cols,
dtype=dtypes,
chunksize=10000,
)
return chunks
def read_delivery_data(filepath):
"""Read drug delivery data in chunks"""
hash_to_int = partial(int, base=16)
hash_cols = [
"CDA_ID_MD5HASH",
"DOC_GROUP_MD5HASH",
"CDA_ADDENDUM_REF_MD5HASH",
"CDA_RPLC_ID_MD5HASH",
]
date_cols = ["CREATION_DATE"]
dtypes = {
"PATIENT_ID": str,
"DRUG_NAME_C": str,
"DOSE_QUANTITY_TEXT": str,
"ATC_CODE": str,
"MED_EXCHANGED": str,
"DIS_AMOUNT_CALC_TXT": str,
"DIS_AMT_VALUE": str,
"DIS_AMOUNT_TXT": str,
"DIS_AMT_UNIT": str,
"PRODUCT_CODE1": str,
"DOSE_DISTRIBUTION": str,
"PREPARATION_TYPE_CODE": float,
"RESEPTISTATUS": str,
"LAAKEMUOTOKOODI": float,
"DELIVERY_FEE": float,
}
chunks = pd.read_csv(
filepath,
sep=";",
engine="python",
encoding="utf-8",
encoding_errors="ignore",
on_bad_lines="warn",
decimal=",",
converters=dict.fromkeys(hash_cols, hash_to_int),
parse_dates=date_cols,
dtype=dtypes,
chunksize=10000,
)
return chunks
def get_output_filepath(input_filepath):
"""Get output filepath from input filepath."""
input_filename = os.path.basename(input_filepath)
today = datetime.today().strftime("%Y-%m-%d")
pattern = r"^107_522_2021_(.{2})_(\d{4})\.csv\.finreg_IDs"
filetype, year = re.findall(pattern, input_filename, re.IGNORECASE)[0]
filetype = "prescriptions" if filetype == "LM" else "deliveries"
output_filename = filetype + "_" + year + "_" + today + ".csv"
output_path = KELA_KANTA_OUTPUT_DIR / output_filename
return output_path
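# Example (illustrative): "107_522_2021_LM_2015.csv.finreg_IDs" maps to
# "prescriptions_2015_<YYYY-MM-DD>.csv" under KELA_KANTA_OUTPUT_DIR,
# where <YYYY-MM-DD> is the date the script is run.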
def write_chunk_to_csv(chunk, output_filepath):
"""Writes chunk to csv"""
chunk.to_csv(
output_filepath,
mode="a",
header=not os.path.exists(output_filepath),
index=False,
sep=";",
)
if __name__ == "__main__":
# Preprocess drug prescriptions
prescription_files = KELA_KANTA_INPUT_DIR.glob("107_522_2021_LM_*")
for prescription_file in prescription_files:
print(prescription_file)
output_filepath = get_output_filepath(prescription_file)
chunks = read_prescription_data(prescription_file)
for chunk in chunks:
chunk = chunk.replace("\n", " ", regex=True)
write_chunk_to_csv(chunk, output_filepath)
# Preprocess drug deliveries
delivery_files = KELA_KANTA_INPUT_DIR.glob("107_522_2021_LT_*")
for delivery_file in delivery_files:
print(delivery_file)
output_filepath = get_output_filepath(delivery_file)
chunks = read_delivery_data(delivery_file)
for chunk in chunks:
chunk = chunk.replace("\n", " ", regex=True)
write_chunk_to_csv(chunk, output_filepath)
|
dsgelab/finregistry-data
|
finregistry_data/registries/kela_kanta.py
|
kela_kanta.py
|
py
| 5,134 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8665817734
|
import os
import boto3
from elasticsearch import Elasticsearch
from unittest import TestCase
from me_articles_drafts_delete import MeArticlesDraftsDelete
from tests_util import TestsUtil
class TestMeArticlesDraftsDelete(TestCase):
dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:4569/')
elasticsearch = Elasticsearch(
hosts=[{'host': 'localhost'}]
)
@classmethod
def setUpClass(cls):
TestsUtil.set_all_tables_name_to_env()
os.environ['DOMAIN'] = 'example.com'
cls.article_info_table = cls.dynamodb.Table('ArticleInfo')
cls.article_content_table = cls.dynamodb.Table('ArticleContent')
cls.article_content_edit_table = cls.dynamodb.Table('ArticleContentEdit')
cls.article_history_table = cls.dynamodb.Table('ArticleHistory')
cls.tag_table = cls.dynamodb.Table('Tag')
def setUp(self):
TestsUtil.delete_all_tables(self.dynamodb)
article_info_items = [
{
'article_id': 'draftId00001',
'user_id': 'test01',
'status': 'draft',
'tags': ['a', 'b', 'c'],
'eye_catch_url': 'https://' + os.environ['DOMAIN'] + '/00001.png',
'sort_key': 1520150272000000,
'version': 2
},
{
'article_id': 'draftId00002',
'user_id': 'test01',
'status': 'public',
'sort_key': 1520150272000000,
'version': 2
}
]
TestsUtil.create_table(self.dynamodb, os.environ['ARTICLE_INFO_TABLE_NAME'], article_info_items)
def tearDown(self):
TestsUtil.delete_all_tables(self.dynamodb)
def assert_bad_request(self, params):
        me_articles_drafts_delete = MeArticlesDraftsDelete(params, {}, dynamodb=self.dynamodb)
        response = me_articles_drafts_delete.main()
self.assertEqual(response['statusCode'], 400)
def test_main_ok(self):
params = {
'pathParameters': {
'article_id': 'draftId00001'
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
response = MeArticlesDraftsDelete(params, {}, dynamodb=self.dynamodb).main()
self.assertEqual(response['statusCode'], 200)
article_info = self.article_info_table.get_item(Key={'article_id': params['pathParameters']['article_id']})['Item']
self.assertEqual(article_info['status'], 'delete')
def test_main_ng_with_public_article(self):
params = {
'pathParameters': {
                'article_id': 'draftId00002'
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
response = MeArticlesDraftsDelete(params, {}, dynamodb=self.dynamodb).main()
self.assertEqual(response['statusCode'], 400)
def test_validation_with_no_article_id(self):
params = {
'queryStringParameters': {},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
self.assert_bad_request(params)
def test_validation_article_id_max(self):
params = {
'queryStringParameters': {
'article_id': 'A' * 13,
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
self.assert_bad_request(params)
def test_validation_article_id_min(self):
params = {
'queryStringParameters': {
'article_id': 'A' * 11,
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
self.assert_bad_request(params)
|
AlisProject/serverless-application
|
tests/handlers/me/articles/drafts/delete/test_me_articles_drafts_delete.py
|
test_me_articles_drafts_delete.py
|
py
| 4,930 |
python
|
en
|
code
| 54 |
github-code
|
6
|
34199270716
|
import time
import json
import requests
import datetime
import math
import sys
from pprint import pprint
s_time = time.time()
path = sys.argv[0].replace('c_timer.py', '')
# the borehole pumps 4 m3/h (4000 l/h) over 10 sotkas (1000 m2), i.e. 4 l/m2 or 4 mm per hour
with open(path+'json/const_zones.json') as f:
rf = json.load(f)
pprint(rf)
zones = rf['zones']
start_h_watering = rf['start_h_watering']
'''
{
"zones": [
{"name": "z1", # номер/имя зоны полива
"gpio": "14", # gpio esp zone
"norm": 3, # норма суточного полива мм/м2
"http_esp": "http://192.168.0.54:84"}, # http_esp, возможно подлл. нескольких esp
{"name": "z2",
"gpio": "12",
"norm": 3,
"http_esp": "http://192.168.0.54:84"}],
"start_h_watering": 19 # час начала полива
}
'''
city_openweather = 'Kiev'
api_openweather = 'your_api_key'
# current weather: http://api.openweathermap.org/data/2.5/weather?q=Kiev&APPID=your_api_key&units=metric
# weather forecast: http://api.openweathermap.org/data/2.5/forecast?q=Kiev&APPID=your_api_key&units=metric
data_weather = requests.get('http://api.openweathermap.org/data/2.5/forecast?q='+city_openweather+'&APPID='+api_openweather+'&units=metric')
weather = data_weather.json()['list']
today = datetime.datetime.today().strftime('%Y-%m-%d %H:%M')
print(today)
cur_time = round(time.time())
print(cur_time)
# sum the precipitation forecast for the next 24 hours
day_rain = 0
for d in weather:
if d['dt'] <= (cur_time+24*60*60) and 'rain' in d:
print(d['rain'])
if '3h' in d['rain']: day_rain += d['rain']['3h']
print("day_rain = ", day_rain)
rf.update({"day_rain":day_rain})
temp_time = 0
for z in rf['zones']:
watering = z["norm"] - day_rain
if watering > 0:
        h = round(watering/4-watering/4*100%25/100, 2)  # required watering time in hours, floored to the nearest quarter hour
start_time = round(start_h_watering+temp_time, 2)
end_time = round(start_h_watering+temp_time+h, 2)
temp_time += h
z.update({"watering": [start_time, end_time]})
rf.update({"date":today})
pprint(rf)
with open(path+'json/timer.json', 'w') as outfile:
json.dump(rf, outfile)
print("--- %s seconds ---" % (time.time() - s_time))
|
sdfim/watering
|
c_timer.py
|
c_timer.py
|
py
| 2,563 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
42340475091
|
from coroutine import coroutine
@coroutine
def grep(pattern):
print('looking for %s pattern' % pattern)
try:
while True:
line = (yield)
if pattern in line:
print(line)
except GeneratorExit: # run when g.close() is called
print('Going away, Good bye')
if __name__ == "__main__":
g = grep('python')
g.send('Yeah, but no, but yeah, but no')
g.send('A series of tubes"')
g.send('python generators rock!')
g.close()
    # throwing into the already-closed generator re-raises the exception here
    g.throw(RuntimeError, "You're hosed")
|
danny-94/coroutines
|
grep_close.py
|
grep_close.py
|
py
| 569 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21618000912
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
class check_box_single():
def __init__(self):
self.driver = webdriver.Chrome('./chromedriver')
self.driver.implicitly_wait(10)
self.driver.get("https://www.seleniumeasy.com/test/basic-checkbox-demo.html")
self.driver.maximize_window()
def push_button(self):
button = self.driver.find_element_by_xpath("//*[@id=\"isAgeSelected\"]")
self.driver.execute_script("arguments[0].click();", button)
def extract_text(self):
elements = self.driver.find_element(By.ID, 'txtAge')
msg2 = str(elements.text)
return msg2
def validation(self, msg2):
if msg2:
            print('We have output')
if 'Success' in msg2:
return True
else:
return False
else:
return False
def msj_check(driver):
button = driver.find_element_by_xpath("//*[@id=\"isAgeSelected\"]")
driver.execute_script("arguments[0].click();", button)
elements = driver.find_element(By.ID, 'txtAge')
msg2 = str(elements.text)
if msg2:
        print('We have output')
if 'Success' in msg2:
            print('It contains "Success"')
print(msg2)
else:
print("Afisare incorecta!")
else:
print ('False')
ob = check_box_single()
ob.push_button()
msg2 = ob.extract_text()
print(ob.validation(msg2))
|
CorozelEmanuel/Luxoft2021-proiect1
|
Ceausu Ionut Marian/Selenium1/exemple/checkboxsingle.py
|
checkboxsingle.py
|
py
| 1,572 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27175032500
|
from .base_source import BaseSource
import numpy as np
class LineSource(BaseSource):
def __init__(self, source_params):
super().__init__(source_params)
self.x = source_params['x_start']
self.y1 = source_params['y_start']
self.y2 = source_params['y_end']
if self.function == 'gaussian':
self.amplitude = source_params['amplitude']
self.t0 = source_params.get('t0', 0)
self.frequency_center = float(source_params['frequency_center'])
self.frequency_width = float(source_params['frequency_width'])
self.sigma_f = self.frequency_width / (2.0 * np.sqrt(2 * np.log(2)))
self.sigma_t = 1 / (2 * np.pi * self.sigma_f)
self.omega = 2.0 * np.pi * self.frequency_center
elif self.function == 'sinusoidal':
self.frequency = float(source_params['frequency'])
self.omega = 2.0 * np.pi * self.frequency
else:
raise ValueError(f"Unsupported function type: {self.function}")
def update_source(self, time, dt, ez):
if self.function == 'gaussian':
ez[self.x, self.y1:self.y2] = self.amplitude * np.exp(-((time - self.t0) ** 2) / (2 * self.sigma_t ** 2)) * \
np.sin(self.omega * (time-self.t0))
elif self.function == 'sinusoidal':
ez[self.x, self.y1:self.y2] = np.sin(self.omega * time)
else:
raise ValueError(f"Unsupported function type: {self.function}")
def __str__(self):
base_str = super().__str__()
if self.function == 'gaussian':
return f"{base_str}, source_x: {self.source_x}, source_y: {self.source_y}, amplitude: {self.amplitude}, t0: {self.t0}, frequency center: {self.frequency_center}, frequency width: {self.frequency_width}"
elif self.function == 'sinusoidal':
return f"{base_str}, x: {self.x}, y1: {self.y1}, y2: {self.y2}, frequency: {self.frequency}, omega: {self.omega}"
|
vlrmzz/tfdtd
|
simulator/sources/line_source.py
|
line_source.py
|
py
| 2,013 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75226368188
|
class Calci():
def __init__(self, tokens):
self._tokens = tokens
self._current = tokens[0]
    def expression(self):
        eq_result = self.term()
        # handle + and - in one loop so mixed chains like "1-2+3" parse fully
        while self._current in ('+', '-'):
            if self._current == '+':
                self.next()
                eq_result += self.term()
            else:
                self.next()
                eq_result -= self.term()
        return eq_result
    def factor(self):
        eq_result = None
        if self._current.isdigit():
            eq_result = float(self._current)
            self.next()
        elif self._current == '(':
            self.next()
            eq_result = self.expression()
            self.next()
        return eq_result
def next(self):
self._tokens = self._tokens[1:]
self._current = self._tokens[0] if len(self._tokens) > 0 else None
    def term(self):
        eq_result = self.factor()
        # handle * and / in one loop so mixed chains like "8/2*4" parse fully
        while self._current in ('*', '/'):
            if self._current == '*':
                self.next()
                eq_result *= self.factor()
            else:
                self.next()
                eq_result /= self.factor()
        return eq_result
if __name__ == '__main__':
while True:
print("please enter your calculation expression")
my_list = list(input('> '))
tokens = []
real_tokens = []
for i in range(len(my_list)):
if my_list[i].isdigit() and len(tokens) > 0 and tokens[-1].isdigit():
tokens[-1] += my_list[i]
else:
tokens.append(my_list[i])
for ext_space in tokens:
if ext_space != " ":
real_tokens.append(ext_space)
        print(Calci(real_tokens).expression())
|
prashanth612/sparkcentralcalculator
|
Calulcator.py
|
Calulcator.py
|
py
| 1,701 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2516918892
|
import netCDF4 as netCDF
from extraction_utils import basic, getCoordinateVariable
import json
import matplotlib.pyplot as plt
import decimal
import numpy as np
import traceback
class ImageStats(object):
"""docstring for ImageStats"""
def __init__(self, filename, variable):
super(ImageStats, self).__init__()
self.filename = filename
self.variable = variable
def process(self):
#print "running basic processing on %s" % self.filename
# create three arrays, 1D lat 1D lon 2D data
#print "processing image"
netcdf_file = netCDF.Dataset(self.filename, "r")
#variable = np.ma.masked_array(netcdf_file.variables[self.variable])
variable = np.ma.array(netcdf_file.variables[self.variable][:])
lats = getCoordinateVariable(netcdf_file, "Lat")
lons = getCoordinateVariable(netcdf_file, "Lon")
time_dim_index = netcdf_file.variables[self.variable].dimensions.index('time')
if 'depth' in netcdf_file.variables[self.variable].dimensions:
depth_dim_index = netcdf_file.variables[self.variable].dimensions.index('depth')
var_list = []
lat_list = []
lon_list = []
#print variable.shape
if(len(variable.shape) > 3 ):
#print "hmmmm"
#print variable.shape
#print variable
#print np.nanmean(variable, axis=time_dim_index).shape
var_list = [[float(x) if not np.isinf(x) and not np.isnan(x) else None for x in y ] for y in np.nanmean(variable, axis=time_dim_index)[0]]
#print var_list
lat_list = [float(x) for x in lats]
lon_list = [float(x) for x in lons]
elif(len(variable.shape) > 2 ):
#print variable.shape
#print variable
#print np.nanmean(variable, axis=time_dim_index)
var_list = [[float(x) if not np.isinf(x) and not np.isnan(x) else None for x in y ] for y in np.nanmean(variable, axis=time_dim_index)]
#var_list = [[float(x) for x in y] for y in variable[0]]
#print var_list
lat_list = [float(x) for x in lats]
lon_list = [float(x) for x in lons]
else:
var_list = [list(x) for x in variable]
lat_list = [float(x) for x in lats]
lon_list = [float(x) for x in lons]
#print len(lat_list)
#print len(lon_list)
#print len(var_list)
#print len(var_list[0])
#print lat_list
_ret = {}
_ret['vars'] = ['Data','Latitudes','Longitudes']
_ret['data'] = []
_ret['data'].append(var_list)
_ret['data'].append(lat_list)
_ret['data'].append(lon_list)
#print json.dumps(_ret )
return json.dumps(_ret )
#netcdf_variable = netcdf_file[variable]
|
pmlrsg/GISportal
|
plotting/data_extractor/analysis_types/image_stats.py
|
image_stats.py
|
py
| 2,466 |
python
|
en
|
code
| 71 |
github-code
|
6
|
6484276041
|
import os
import shutil
import time
import configparser
from PIL import Image
STEP = 10
# from lib.utils import Id2name
# INPUT_DIR = r"data/MH_01_easy/mav0/cam0/data"
# OUTPUT_DIR = r"./imgs"
config = configparser.ConfigParser()
config.read("config.ini", encoding="utf-8")
INPUT_DIR = config["DEFAULT"]["SIMULATOR_IMG_DIR"]
OUTPUT_DIR = config["DEFAULT"]["IMGS_FROM_SERVER"]
imgs = os.listdir(INPUT_DIR)
imgs.sort()
# for i in range (len(os.listdir(INPUT_DIR))):
# shutil.copy("{}/output{}.jpg".format(INPUT_DIR, i), "{}/output{}.jpg".format(OUTPUT_DIR, i))
# time.sleep(0.5)
##for i in range(140, len(os.listdir(INPUT_DIR))):
# for i in range(140, 300):
# img = Id2name(i)
# shutil.copy("{}/{}".format(INPUT_DIR, img), "{}/output{}.jpg".format(OUTPUT_DIR, i-1))
# time.sleep(0.25)
# for i in range(0, 600):
# shutil.copy("{}/img{}.jpg".format(INPUT_DIR, i), "{}/output{}.jpg".format(OUTPUT_DIR, i))
# time.sleep(0.25)
### EuRoC Machine Hall
for i in range(0, len(imgs), STEP):  # iterate only over existing images; the fixed 1000000 bound raised IndexError
    img = imgs[i]
# shutil.copy("{}/{}".format(INPUT_DIR, img), "{}/output{}.jpg".format(OUTPUT_DIR, i))
im = Image.open("{}/{}".format(INPUT_DIR, img))
rgb_im = im.convert("RGB")
rgb_im.save("{}/{}.jpg".format(OUTPUT_DIR, img[:-4])) # jpg
time.sleep(1)
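# Expected config.ini shape (illustrative values, taken from the commented-out
# paths above; adjust to your setup):
#   [DEFAULT]
#   SIMULATOR_IMG_DIR = data/MH_01_easy/mav0/cam0/data
#   IMGS_FROM_SERVER = ./imgs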
|
franioli/COLMAP_SLAM
|
simulator.py
|
simulator.py
|
py
| 1,283 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26776312630
|
"""Apply Perl::Critic tool and gather results."""
import argparse
import logging
import subprocess
from typing import List, Optional
from statick_tool.issue import Issue
from statick_tool.package import Package
from statick_tool.tool_plugin import ToolPlugin
class PerlCriticToolPlugin(ToolPlugin):
"""Apply Perl::Critic tool and gather results."""
def get_name(self) -> str:
"""Get name of tool."""
return "perlcritic"
    def gather_args(self, args: argparse.ArgumentParser) -> None:  # an ArgumentParser is what add_argument below requires
"""Gather arguments."""
args.add_argument(
"--perlcritic-bin",
dest="perlcritic_bin",
type=str,
help="perlcritic binary path",
)
def get_file_types(self) -> List[str]:
"""Return a list of file types the plugin can scan."""
return ["perl_src"]
def process_files(
self, package: Package, level: str, files: List[str], user_flags: List[str]
) -> Optional[List[str]]:
"""Run tool and gather output."""
perlcritic_bin = "perlcritic"
if self.plugin_context and self.plugin_context.args.perlcritic_bin is not None:
perlcritic_bin = self.plugin_context.args.perlcritic_bin
flags = ["--nocolor", "--verbose=%f:::%l:::%p:::%m:::%s\n"]
flags += self.get_user_flags(level)
        try:
            output = subprocess.check_output(
                [perlcritic_bin] + flags + files,
                stderr=subprocess.STDOUT,
                universal_newlines=True,
            )  # the stray .join(" ") here discarded the output (str.join treats the string as a separator)
except subprocess.CalledProcessError as ex:
output = ex.output
if ex.returncode != 2:
logging.warning("perlcritic failed! Returncode = %d", ex.returncode)
logging.warning("%s exception: %s", self.get_name(), ex.output)
return []
except OSError as ex:
logging.warning("Couldn't find %s! (%s)", perlcritic_bin, ex)
return []
logging.debug("%s", output)
return output.splitlines()
def parse_output(
self, total_output: List[str], package: Optional[Package] = None
) -> List[Issue]:
"""Parse tool output and report issues."""
issues: List[Issue] = []
# Load the plugin mapping if possible
warnings_mapping = self.load_mapping()
for line in total_output:
split_line = line.strip().split(":::")
# Should split into five segments, anything less is invalid.
if len(split_line) < 5:
continue
cert_reference = None
if split_line[2].replace("::", "__") in warnings_mapping:
cert_reference = warnings_mapping[split_line[2].replace("::", "__")]
issues.append(
Issue(
split_line[0],
split_line[1],
self.get_name(),
split_line[2],
split_line[4],
split_line[3],
cert_reference,
)
)
return issues
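# Illustrative output line in the --verbose format configured above
# (file:::line:::policy:::message:::severity; values here are hypothetical):
#   lib/Foo.pm:::42:::RegularExpressions::RequireExtendedFormatting:::Use /x with regex:::3
# parse_output() splits each line on ':::' and looks the policy name up in the
# loaded warnings mapping to attach an optional CERT reference.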
|
sscpac/statick
|
statick_tool/plugins/tool/perlcritic_tool_plugin.py
|
perlcritic_tool_plugin.py
|
py
| 3,117 |
python
|
en
|
code
| 66 |
github-code
|
6
|
73029609789
|
from django.db import models
class ShowManager(models.Manager):
def basic_validator(self, postData):
errors = {}
if len(postData['show_title']) < 2:
errors["show_title"] = "Show title should be at least 2 characters!"
        if len(postData['show_network']) < 3:
            errors["show_network"] = "Network should be at least 3 characters!"
        if len(postData['show_description']) < 10:
            errors["show_description"] = "Description should be at least 10 characters!"
return errors
class Show(models.Model):
title = models.CharField(max_length=255)
network = models.CharField(max_length=30)
release_date = models.CharField(max_length=10, null=True)
description = models.TextField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = ShowManager()
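# Usage sketch (hypothetical view code): run the validator against request.POST
# before creating a Show:
#   errors = Show.objects.basic_validator(request.POST)
#   if errors:
#       ...  # re-render the form with the error messages
#   else:
#       Show.objects.create(title=request.POST['show_title'], ...)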
|
Jgomez1996/deployment_test
|
shows_app/models.py
|
models.py
|
py
| 904 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27535046003
|
from common import *
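# Assumed to be provided by `common` (not shown here): Mydb (a small DB wrapper
# with .select/.update/.cur), getapi (fetches a URL and returns parsed JSON),
# and the `lnglat` / `location` geocoder URL templates (the status/result
# fields suggest a Baidu Maps-style API).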
def updateaddr():
rent = Mydb()
items = ['id', 'title', 'addr', 'area', 'city']
table = 'rent58'
updatelnglat = 0
updateaddr = 0
rent.select(table, items, None)
records = rent.cur.fetchall()
for record in records:
id, title, addr, area, city = record
condition = dict(id=id)
taddr = title.split()[0]
addr = area+addr
        # try the title-derived address first, then the full address; a distinct
        # name avoids shadowing the `area` column unpacked from the record above
        for candidate in (taddr, addr):
            url = lnglat % (candidate, city)
result = getapi(url)
if result['status'] == 0:
result = result['result']['location']
lng = result['lng']
lat = result['lat']
rent.update(table, dict(lnglat=str(lng)+','+str(lat)), condition)
updatelnglat += 1
#update address
url = location % (lng, lat)
result = getapi(url)
addr = result['result']['formatted_address']
if addr:
rent.update(table, dict(addr=addr), condition)
updateaddr += 1
break
elif result['status'] == 302:
print(result['message'])
exit()
print('updated %s records of lng and lat, %s records of address' % (updatelnglat, updateaddr))
if __name__ == '__main__':
updateaddr()
|
flwwsg/rent58
|
updateaddrs.py
|
updateaddrs.py
|
py
| 1,085 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41373296282
|
#!/usr/bin/env python
from graph import DiGraph, before_after_calculations
import os
import json
import threading
import random
from datetime import date
def read_graphml_files():
with open('tmp/interesting_graphs.txt') as fh:
lines = [line.rstrip() for line in fh]
return [(line, DiGraph.from_graphml(f"graphml/{line}")) for line in lines]
def process_graph(name, G, results):
print(f"-- Processing graph {name}")
    rand_ports = [random.randrange(1, 65536) for _ in range(50)]  # upper bound is exclusive; 65536 covers the full port range
G_simulation, G_attack, stats = G.simulate_traffic(amount_of_rules=50, rand_ports=rand_ports)
print("Finished processing")
results.append(
(
before_after_calculations(
f"{name} simulated",
f"{name} simulated + attack",
G_simulation,
G_attack
),
stats
)
)
def main():
graphs = read_graphml_files()
thread_list = []
results = []
for name, graph in graphs:
thread = threading.Thread(target=process_graph, args=(name, graph, results))
thread_list.append(thread)
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
filename = f"networks_simulation_results_{date.today()}.json"
    # Check whether the file already exists
i = 0
while os.path.isfile(filename):
i += 1
filename = f"networks_simulation_results_{date.today()}.{i}.json"
# results = [process_graph(name, graph) for name, graph in graphs]
with open(f"tmp/{filename}", 'a') as fh:
fh.write(json.dumps(results))
if __name__ == "__main__":
main()
|
mkapra/graph_measurements_segmentation
|
simulate_networks.py
|
simulate_networks.py
|
py
| 1,686 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29279214986
|
from setuptools import setup, find_packages
VERSION = '0.0.19'
DESCRIPTION = 'ParkingLot is a Python service imitating a parking-lot system.'
LONG_DESCRIPTION = 'The service indicates whether a vehicle is allowed to enter the parking lot.'
# Setting up
setup(
name="parkinglot",
version=VERSION,
author="PsychoRover",
description=DESCRIPTION,
long_description_content_type="text/markdown",
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=['psycopg2', 'ocrspace'],
keywords=['python', 'parking', 'lot', 'moon', 'parkinglot'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
|
PsychoRover/parking-lot
|
setup.py
|
setup.py
|
py
| 935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28985728032
|
import sys, re, os, io, codecs
from collections import defaultdict  # the public module, not the private _collections accelerator
def loadModelfile():
modelfile = sys.argv[1]
features = defaultdict()
#load model file into features
with open(modelfile,"r", encoding = "ISO-8859-1") as modelhandler:
model = modelhandler.readlines()
for cldata in model:
cldata = cldata.strip()
cldata = cldata.split(" ")
clname = cldata[0]
cldata = cldata[1:]
weights = defaultdict(float)
for ft in cldata:
ft = ft.split("=|")
weights[ft[0]] = float(ft[1])
features[clname] = weights
input_stream = io.TextIOWrapper(sys.stdin.buffer,encoding = "ISO-8859-1")
calculate_accuracy(input_stream,features)
def calculate_accuracy(lines,features):
for line in lines:
output=""
line = line.strip()
line = " ".join(line.split())
data = line.split(" ")
length = len(data)
bos = "*BOS*"
eos = "*EOS*"
prevpos = bos
prev2pos= bos
prevclass= bos
prev2class=bos
i=0
while(i<length):
word = data[i]
pos = word.rfind("/")
postag = word[pos+1:]
crnt = word[:pos]
if(i==length-1):
next =eos
nextpos=eos
else:
nextdata = data[i+1]
rpos = nextdata.rfind("/")
nextpos = nextdata[rpos+1:]
next = nextdata[:rpos]
wshape = wordshape(crnt)
ftarray = ["crnt|"+crnt, "crntpos|"+postag, "prevpos|"+prevpos,
"prevcls|"+prevclass, "prev2pos|"+prev2pos,"prev2cls|"+prev2class,
"next|"+next, "nextpos|"+nextpos, "wshape|"+wshape]
scores = defaultdict()
for key in features.keys():
score = calculateScore(ftarray,features[key])
scores[key]=score
predClass = max(scores.keys(), key=(lambda key: scores[key] ))
if(next == eos):
prev2pos = bos
prevpos= bos
prevclass = bos
prev2class = bos
else:
prev2pos = prevpos
prev2class = prevclass
prevpos = postag
prevclass = predClass
output+=word+"/"+predClass+" "
#calculate Fscore and accuracy
i+=1
output=output.strip()+ os.linesep
sys.stdout.write(output)
sys.stdout.flush()
def wordshape(word):
wshape = word
wshape = re.sub("[A-Z]+","A",wshape)
wshape = re.sub("[a-z]+","a",wshape)
wshape = re.sub("[0-9]+","0",wshape)
wshape = re.sub("[^A-Za-z0-9]+","_",wshape)
return wshape
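# Example: wordshape("Hello-2024") -> "Aa_0"
# (runs of uppercase -> 'A', lowercase -> 'a', digits -> '0', other chars -> '_')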
def calculateScore(data,weightVector):
score = 0.0
for ft in data:
if(ft in weightVector.keys()):
score+=weightVector[ft]
return score
loadModelfile()
|
chandrashekar-cv/POS-Tagging
|
ner/netag.py
|
netag.py
|
py
| 3,029 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72908983868
|
import os
import testinfra.utils.ansible_runner
import pytest
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
@pytest.mark.parametrize("name", [
("apt-transport-https"),
("software-properties-common"),
("unattended-upgrades"),
("mailutils"),
("bsd-mailx"),
])
def test_default_packages(host, name):
pkg = host.package(name)
assert pkg.is_installed
def test_default_config_files_present(host):
f = host.file("/etc/apt/apt.conf.d/50unattended-upgrades")
assert f.exists
assert f.is_file
|
ddrugeon/ansible-pi-bootstrap
|
roles/unattended-upgrades/molecule/default/tests/test_install_mandatory_tools.py
|
test_install_mandatory_tools.py
|
py
| 607 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12667959383
|
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from modelo import (
FuncionalidadRecetas,
FuncionalidadSemanas,
FuncionalidadConfiguracion,
)
from pdf import crear
from modelo import Recetas, Configuracion
import tkinter
class VistaApp:
def ventana_volver(ventana):
ventana.destroy()
VistaApp.ventana_principal()
def ventana_principal():
        #########################################
        # MAIN WINDOW
        ##########################################
ventana = Tk()
ventana.title("Comedere Club")
ventana.config(width=230, height=220)
ventana.resizable(width=FALSE, height=FALSE)
ventana.iconphoto(False, tkinter.PhotoImage(file="trebol.ico"))
dec1 = Label(ventana, width=60, bg="olive drab2")
dec1.place(x=0, y=210)
### LABELS
        # TITLE
lbl_titulo = Label(
ventana,
text="🍀 Comedere Club",
width=19,
font=("Segoe Print", 14),
bg="olive drab2",
)
lbl_titulo.place(x=0, y=10)
        ### BUTTONS
        # BUTTON TO OPEN THE RECIPES WINDOW
btn_recetas = Button(ventana, text="Recetas", bd=5, font=("Gadugi", 14))
btn_recetas.place(x=67, y=60)
btn_recetas.config(command=lambda: VistaApp.ventana_recetas(ventana))
        # BUTTON TO OPEN THE WEEKLY MENU
btn_semana = Button(ventana, text="Menu Semanal", bd=5, font=("Gadugi", 12))
btn_semana.place(x=48, y=120)
btn_semana.config(command=lambda: VistaApp.ventana_semanas(ventana))
        # BUTTON TO OPEN THE SETTINGS WINDOW (the original comment repeated "weekly menu" by copy-paste)
btn_configuracion = Button(
ventana, text="Configuración", bd=2, font=("Gadugi", 8)
)
btn_configuracion.place(x=7, y=180)
btn_configuracion.config(
command=lambda: VistaApp.ventana_configuracion(ventana)
)
ventana.mainloop()
    ####################################
    # RECIPES WINDOW
    ####################################
def ventana_recetas(ventana_prin):
ventana_prin.destroy()
        ## RECIPES WINDOW
ventana = Tk()
ventana.title("Comedere Club")
ventana.protocol("WM_DELETE_WINDOW", lambda: VistaApp.ventana_volver(ventana))
ventana.config(width=490, height=400)
ventana.resizable(width=FALSE, height=FALSE)
ventana.iconphoto(False, tkinter.PhotoImage(file="trebol.ico"))
### LABELS
        # TITLE
lbl_titulo = Label(
ventana,
text="🍀 Comedere Club / Recetas",
width=33,
font=("Segoe Print", 18),
bg="olive drab2",
)
lbl_titulo.place(x=0, y=5)
        # TOTAL NUMBER OF SAVED RECIPES
lbl_recetas = Label(ventana, font=("Gadugi", 12), bg="olive drab2")
lbl_recetas.place(x=15, y=369)
        # SHOW THE TOTAL NUMBER OF SAVED RECIPES
def mostrar_total():
resultado = 0
for x in Recetas.select():
resultado = resultado + 1
lbl_recetas.config(text="Recetas guardadas: " + str(resultado) + " 🍀")
lbl_recetas.after(800, mostrar_total)
mostrar_total()
        # "LAST TIME" DATE FORMAT
lbl_formato = Label(ventana, text="dd/mm/aaaa", font=("Gadugi", 8))
lbl_formato.place(x=400, y=165)
        # LABEL FOR THE DELETE FIELD
lbl_borrar = Label(ventana, text="ID de la receta ▲", font=("Gadugi", 8))
lbl_borrar.place(x=381, y=245)
        ### ENTRIES
        # ENTRY TO ADD RECIPES
entry_agregar = Entry(ventana, width=24, bg="snow3")
entry_agregar.place(x=15, y=105)
        # ENTRY TO SEARCH RECIPES
entry_buscar = Entry(ventana, width=24, bg="snow3")
entry_buscar.place(x=330, y=105)
        # ENTRY FOR DELETION
entry_borrar = Entry(ventana, width=4, bg="snow3")
entry_borrar.place(x=450, y=223)
        ### BUTTONS
        # BUTTON TO ADD RECIPES
btn_agregar = Button(ventana, text="Agregar recetas", bd=5, font=("Gadugi", 12))
btn_agregar.place(x=24, y=60)
btn_agregar.config(
command=lambda: FuncionalidadRecetas.agregar_receta(
str.lower(entry_agregar.get()), entry_agregar
)
)
        # BUTTON TO SEARCH RECIPES
btn_buscar = Button(ventana, text="Buscar...", bd=5, font=("Gadugi", 12))
btn_buscar.place(x=370, y=60)
btn_buscar.config(
command=lambda: FuncionalidadRecetas.buscar_recetas(
str.lower(entry_buscar.get()),
recetas.get(),
ult_vez.get(),
entry_borrar,
treeview,
)
)
        # BUTTON TO DELETE RECIPES
btn_borrar = Button(ventana, text="Borrar", bd=5, font=("Gadugi", 12))
btn_borrar.place(x=380, y=205)
btn_borrar.config(
command=lambda: FuncionalidadRecetas.borrar_receta(
str.lower(entry_borrar.get()), entry_borrar, treeview
)
)
        # BUTTON TO SHOW RECIPES
btn_mostrar = Button(ventana, text="Mostrar Recetas", bd=5, font=("Gadugi", 10))
btn_mostrar.place(x=375, y=290)
btn_mostrar.config(
command=lambda: FuncionalidadRecetas.mostrar_recetas(treeview)
)
        # BACK BUTTON
btn_volver = Button(ventana, text="Volver", bd=2, font=("Gadugi", 10))
btn_volver.place(x=440, y=370)
btn_volver.config(command=lambda: VistaApp.ventana_volver(ventana))
### CHECK BOX
recetas = IntVar()
check_recetas = Checkbutton(
ventana,
variable=recetas,
onvalue=1,
offvalue=0,
text="Receta",
command=lambda: checks(check_ult_vez, recetas.get()),
)
check_recetas.place(x=380, y=125)
ult_vez = IntVar()
check_ult_vez = Checkbutton(
ventana,
variable=ult_vez,
onvalue=1,
offvalue=0,
text="Ultima vez",
command=lambda: checks(check_recetas, ult_vez.get()),
)
check_ult_vez.place(x=380, y=145)
def checks(check, var):
check.config(state=DISABLED)
if var == 0:
check.config(state=NORMAL)
        # TREEVIEW TO DISPLAY SAVED RECIPES
style = ttk.Style(ventana)
style.theme_use("clam")
style.configure("Treeview", foreground="white")
treeview = ttk.Treeview(ventana)
treeview["columns"] = ("ID", "Receta", "Ultima vez hecha")
treeview.column("#0", width=0, stretch=NO)
treeview.column(
"ID",
width=40,
anchor=CENTER,
)
treeview.column(
"Receta",
width=233,
anchor=CENTER,
)
treeview.column(
"Ultima vez hecha",
width=80,
anchor=CENTER,
)
treeview.heading("#0", text="", anchor=CENTER)
treeview.heading("ID", text="ID", anchor=CENTER)
treeview.heading("Receta", text="Receta", anchor=CENTER)
treeview.heading("Ultima vez hecha", text="Ultima vez hecha", anchor=CENTER)
treeview.place(x=15, y=130)
ventana.mainloop()
    ##############################################
    # WEEKS WINDOW
    ##############################################
def ventana_semanas(ventana_prin):
ventana_prin.destroy()
        ## WEEKS WINDOW
ventana = Tk()
ventana.title("Comedere Club")
ventana.protocol("WM_DELETE_WINDOW", lambda: VistaApp.ventana_volver(ventana))
ventana.config(width=670, height=320)
ventana.resizable(width=FALSE, height=FALSE)
ventana.iconphoto(False, tkinter.PhotoImage(file="trebol.ico"))
### LABELS
        # TITLE
lbl_titulo = Label(
ventana,
text="🍀 Comedere Club / Semanas",
width=45,
font=("Segoe Print", 18),
bg="olive drab2",
)
lbl_titulo.place(x=0, y=5)
        ### ENTRIES
        # ENTRY FOR THE PDF FILE NAME
entry_nombre = Entry(ventana, width=21, bg="snow3")
entry_nombre.place(x=15, y=260)
entry_nombre.insert(0, "Menu semanal")
        ### BUTTONS
        # BACK BUTTON
btn_volver = Button(ventana, text="Volver", bd=2, font=("Gadugi", 10))
btn_volver.place(x=5, y=290)
btn_volver.config(command=lambda: VistaApp.ventana_volver(ventana))
        # BUTTON TO MODIFY THE SELECTED WEEK
btn_mod_semana = Button(
ventana,
text="Modificar la\nsemana marcada",
bd=4,
font=("Gadugi", 10),
padx=7,
)
btn_mod_semana.place(x=15, y=140)
btn_mod_semana.config(
command=lambda: VistaApp.ventana_mod_semana(
ventana, pasado.get(), presente.get(), futuro.get(), treeview
)
)
btn_pdf = Button(
ventana, text="Importar la semana\nactual a PDF", bd=4, font=("Gadugi", 10)
)
btn_pdf.place(x=15, y=200)
btn_pdf.config(command=lambda: crear(str(entry_nombre.get())))
### CHECK BOX
pasado = IntVar()
check_pasado = Checkbutton(
ventana, variable=pasado, onvalue=1, offvalue=0, text="Semana pasada"
)
check_pasado.config(
command=lambda: [
checks_semanas(check_presente, check_futuro, pasado.get()),
FuncionalidadSemanas.mostrar_semana(
pasado.get(), presente.get(), futuro.get(), treeview
),
]
)
check_pasado.place(x=15, y=70)
presente = IntVar()
check_presente = Checkbutton(
ventana, variable=presente, onvalue=1, offvalue=0, text="Semana actual"
)
check_presente.config(
command=lambda: [
checks_semanas(check_pasado, check_futuro, presente.get()),
FuncionalidadSemanas.mostrar_semana(
pasado.get(), presente.get(), futuro.get(), treeview
),
]
)
check_presente.place(x=15, y=90)
futuro = IntVar()
check_futuro = Checkbutton(
ventana, variable=futuro, onvalue=1, offvalue=0, text="Semana siguiente"
)
check_futuro.config(
command=lambda: [
checks_semanas(check_pasado, check_presente, futuro.get()),
FuncionalidadSemanas.mostrar_semana(
pasado.get(), presente.get(), futuro.get(), treeview
),
]
)
check_futuro.place(x=15, y=110)
def checks_semanas(check1, check2, var):
check1.config(state=DISABLED)
check2.config(state=DISABLED)
if var == 0:
check1.config(state=NORMAL)
check2.config(state=NORMAL)
        # TREEVIEW TO DISPLAY THE WEEKS
style = ttk.Style(ventana)
style.theme_use("clam")
style.configure("Treeview", foreground="white")
treeview = ttk.Treeview(ventana)
treeview["columns"] = ("Dia", "Receta", "Que falta")
treeview.column("#0", width=0, stretch=NO)
treeview.column(
"Dia",
width=80,
anchor=CENTER,
)
treeview.column(
"Receta",
width=223,
anchor=CENTER,
)
treeview.column(
"Que falta",
width=180,
anchor=CENTER,
)
treeview.heading("#0", text="", anchor=CENTER)
treeview.heading("Dia", text="Dia", anchor=CENTER)
treeview.heading("Receta", text="Receta", anchor=CENTER)
treeview.heading("Que falta", text="Que falta", anchor=CENTER)
treeview.place(x=170, y=70)
ventana.mainloop()
    ##############################################
    # MODIFY WEEK WINDOW
    ##############################################
def ventana_mod_semana(ventana_prin, pasado, presente, futuro, treeview):
if pasado == 0 and presente == 0 and futuro == 0:
messagebox.showinfo(
message="Ninguna semana seleccionada", title="Comedere Club"
)
            # `ventana` was undefined here (NameError); the weeks window
            # (ventana_prin) is still open, so simply return to it
            return
else:
ventana_prin.withdraw()
            ## MODIFY WEEK WINDOW
ventana = Tk()
ventana.title("Comedere Club")
ventana.protocol(
"WM_DELETE_WINDOW",
lambda: [ventana_prin.deiconify(), ventana.destroy()],
)
ventana.config(width=270, height=250)
ventana.resizable(width=FALSE, height=FALSE)
texto = ""
if pasado == 1:
texto = " Modificando la semana pasada ..."
elif presente == 1:
texto = " Modificando la semana actual ..."
else:
texto = " Modificando la semana siguiente ..."
            # LABEL SHOWING WHICH WEEK IS BEING MODIFIED
lbl_modificar = Label(
ventana,
text=(texto),
anchor=W,
width=43,
font=("Gadugi", 12),
bg="olive drab2",
)
lbl_modificar.place(x=0, y=10)
### LABELS
            # DAY LABEL
lbl_dia = Label(
ventana, text="Dia", width=9, font=("Gadugi", 8), bg="olive drab2"
)
lbl_dia.place(x=15, y=100)
            # RECIPE LABEL
lbl_receta = Label(
ventana, text="Receta", width=9, font=("Gadugi", 8), bg="olive drab2"
)
lbl_receta.place(x=15, y=150)
            # "WHAT'S MISSING" LABEL (the original comment said DIA by copy-paste)
lbl_falta = Label(
ventana,
text="Que falta?",
width=9,
font=("Gadugi", 8),
bg="olive drab2",
)
lbl_falta.place(x=15, y=200)
            ### ENTRIES
            # DAY ENTRY
entry_dia = Entry(ventana, width=17, bg="snow3", state="readonly")
entry_dia.place(x=35, y=125)
            # RECIPE ENTRY
entry_receta = Entry(ventana, width=20, bg="snow3")
entry_receta.place(x=15, y=175)
            # "WHAT'S MISSING" ENTRY
entry_falta = Entry(ventana, width=20, bg="snow3")
entry_falta.place(x=15, y=225)
            # BUTTON TO MODIFY A RECIPE
btn_modificar = Button(ventana, text="Modificar", bd=5, font=("Gadugi", 12))
btn_modificar.place(x=15, y=45)
btn_modificar.config(
command=lambda: FuncionalidadSemanas.modificar_semana(
pasado,
presente,
futuro,
entry_dia.get(),
str.lower(entry_receta.get()),
entry_falta.get(),
treeview,
)
)
            # PICK A RECIPE AT RANDOM
btn_random = Button(
ventana, text="Elegir receta\naleatoriamente", bd=5, font=("Gadugi", 9)
)
btn_random.place(x=147, y=155)
btn_random.config(
command=lambda: FuncionalidadSemanas.receta_aleatoria(entry_receta)
)
            # BACK BUTTON
btn_dia = Button(ventana, text="Volver", bd=2, font=("", 10))
btn_dia.place(x=220, y=220)
btn_dia.config(
command=lambda: [ventana_prin.deiconify(), ventana.destroy()]
)
            # POPUP TO AUTOCOMPLETE THE DAY
btn_dia = Button(ventana, text="▼", bd=5, font=("", 5))
btn_dia.place(x=15, y=125)
btn_dia.config(command=lambda: popup_mostrar())
            # SHOW THE POPUP MENU
def popup_mostrar():
try:
popup_dia.tk_popup(x=200, y=300)
finally:
popup_dia.grab_release()
            # POPUP MENU OPTIONS
popup_dia = Menu(ventana, tearoff=0)
popup_dia.add_command(
label="Lunes",
command=lambda: [
normal(entry_dia),
borrar_entry(entry_dia),
entry_dia.insert(0, "Lunes"),
readonly(entry_dia),
],
)
popup_dia.add_command(
label="Martes",
command=lambda: [
normal(entry_dia),
borrar_entry(entry_dia),
entry_dia.insert(0, "Martes"),
readonly(entry_dia),
],
)
popup_dia.add_command(
label="Miercoles",
command=lambda: [
normal(entry_dia),
borrar_entry(entry_dia),
entry_dia.insert(0, "Miercoles"),
readonly(entry_dia),
],
)
popup_dia.add_command(
label="Jueves",
command=lambda: [
normal(entry_dia),
borrar_entry(entry_dia),
entry_dia.insert(0, "Jueves"),
readonly(entry_dia),
],
)
popup_dia.add_command(
label="Viernes",
command=lambda: [
normal(entry_dia),
borrar_entry(entry_dia),
entry_dia.insert(0, "Viernes"),
readonly(entry_dia),
],
)
def borrar_entry(entry):
entry.delete(0, "end")
def readonly(entry):
entry.config(state="readonly")
def normal(entry):
entry.config(state="normal")
ventana.mainloop()
    ##################
    ## SETTINGS WINDOW
    ##################
def ventana_configuracion(ventana_prin):
ventana_prin.destroy()
ventana = Tk()
ventana.title("Comedere Club")
ventana.protocol("WM_DELETE_WINDOW", lambda: VistaApp.ventana_volver(ventana))
ventana.config(width=195, height=150)
ventana.resizable(width=FALSE, height=FALSE)
ventana.iconphoto(False, tkinter.PhotoImage(file="trebol.ico"))
lbl_titulo = Label(
ventana,
text="🍀 Configuración",
width=16,
font=("Segoe Print", 14),
bg="olive drab2",
)
lbl_titulo.place(x=0, y=10)
guardar_recetas = IntVar()
check_recetas = Checkbutton(
ventana,
variable=guardar_recetas,
onvalue=1,
offvalue=0,
text="Guardar recetas",
)
check_recetas.place(x=5, y=60)
btn_dia = Button(ventana, text="❓", font=("", 8))
btn_dia.place(x=150, y=60)
btn_dia.config(
command=lambda: messagebox.showinfo(
title="Comedere Club",
message="Cuando finalice la semana, las recetas que esten en la semana actual sin guardar, se agregaran automaticamente en el listado de recetas",
)
)
btn_guardar = Button(ventana, text="Guardar y salir", font=("", 10))
btn_guardar.place(x=45, y=120)
btn_guardar.config(
command=lambda: [
FuncionalidadConfiguracion.configuraciones(guardar_recetas.get()),
VistaApp.ventana_volver(ventana),
]
)
for x in Configuracion.select():
if x.configuracion == 1 and x.id == 2:
check_recetas.select()
ventana.mainloop()
|
IgnacioGuede/Comedere
|
vista.py
|
vista.py
|
py
| 20,056 |
python
|
es
|
code
| 0 |
github-code
|
6
|
27813014463
|
import numpy as np
import gym
import cv2
class StackedEnv(gym.Wrapper):
def __init__(self, env, width, height, n_img_stack, n_action_repeats):
super(StackedEnv, self).__init__(env)
self.width = width
self.height = height
self.n_img_stack = n_img_stack
self.n_action_repeats = n_action_repeats
self.stack = []
def reset(self):
img_rgb = super(StackedEnv, self).reset()
img_gray = self.preprocess(img_rgb)
self.stack = [img_gray] * self.n_img_stack
return np.rollaxis(np.stack(self.stack, axis=2), 2, 0)
def step(self, action):
total_reward = 0
done = False
img_rgb = None
info = None
for i in range(self.n_action_repeats):
img_rgb, reward, done, info = super(StackedEnv, self).step(action)
total_reward += reward
if done:
break
img_gray = self.preprocess(img_rgb)
self.stack.pop(0)
self.stack.append(img_gray)
assert len(self.stack) == self.n_img_stack
return np.rollaxis(np.stack(self.stack, axis=2), 2, 0), total_reward, done, info
def preprocess(self, rgb_img):
gray = np.dot(rgb_img[..., :], [0.299, 0.587, 0.114])
gray = gray / 128. - 1.
res = cv2.resize(gray, dsize=(self.height, self.width), interpolation=cv2.INTER_CUBIC)
return res
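# Usage sketch (illustrative; assumes a Gym env with RGB frames, e.g. CarRacing-v0):
#   env = StackedEnv(gym.make("CarRacing-v0"), width=96, height=96,
#                    n_img_stack=4, n_action_repeats=8)
#   state = env.reset()   # shape (4, 96, 96): a stack of grayscale frames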
|
yiliu77/deep_rl_proj
|
environments/stacked.py
|
stacked.py
|
py
| 1,405 |
python
|
en
|
code
| 3 |
github-code
|
6
|
75159668346
|
import little_helper
def paths(options):  # renamed from `list` to avoid shadowing the builtin
"""
>>> list(paths([[0],[1]]))
[[0, 1]]
>>> list(paths([[1, 2]]))
[[1], [2]]
>>> list(paths([[0],[1,2]]))
[[0, 1], [0, 2]]
"""
    if len(options) > 0:
        result = []
        for option in options[0]:
            for path in paths(options[1:]):
                result.append([option] + path)
        return result
else:
return [[]]
def mask_it(mask, bin_value):
for a,b in zip(mask, bin_value):
if a == '1':
yield ['1']
elif a == 'X':
yield ['0', '1']
else:
yield [b]
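# Example: list(mask_it('X1', '00')) yields [['0', '1'], ['1']] --
# an 'X' expands to both bits, a '1' forces 1, anything else keeps the original bit.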
def answer(input):
"""
>>> answer('''mask = 000000000000000000000000000000X1001X
... mem[42] = 100
... mask = 00000000000000000000000000000000X0XX
... mem[26] = 1''')
208
"""
lines = input.split('\n')
memory = {}
for line in lines:
if line.startswith('mask = '):
mask = line[7:]
else:
address, value = line.replace('mem[', '').replace('] = ', '|').split('|',1)
address = int(address)
value = int(value)
binary_address = bin(address).replace('0b','').rjust(36,'0')
new_bin_address_masked = mask_it(mask, binary_address)
for path in paths(list(new_bin_address_masked)):
new_bin_address = ''.join(path)
new_address = int(new_bin_address, 2)
memory[new_address] = value
return sum(memory.values())
if __name__ == '__main__':
little_helper.help(2020, __file__, answer)
|
broersma/advent-of-code-2020-python
|
14_2.py
|
14_2.py
|
py
| 1,639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
854412704
|
version = '0.2'
identifier = 'edu.utah.sci.vistrails.itk'
name = 'ITK'
import core.bundles.utils
import core.requirements
from core.modules.vistrails_module import Module, ModuleError
# Ugly, but Carlos doesn't know any better
if core.bundles.utils.guess_system() == 'linux-ubuntu':
import sys
sys.path.append('/usr/local/lib/VisTrailsITK')
try:
from core.bundles import py_import
itk = py_import('itk', {'linux-ubuntu': 'vistrails-itk'})
except ImportError:
raise core.requirements.MissingRequirement("ITK and WrapITK")
import core.modules
import core.modules.module_registry
# ITK Package imports
from PixelType import *
from FeatureExtractionFilters import *
from ITK import *
from Image import Image
from IntensityFilters import *
from SegmentationFilters import *
from SelectionFilters import *
from SmoothingFilters import *
from ThresholdFilters import *
from GradientFilters import *
from NeighborhoodFilters import *
from ImageReader import *
def initialize(*args, **keywords):
reg = core.modules.module_registry
basic = core.modules.basic_modules
########################################################################################
# Misc.
Index2D.register(reg,basic)
Index3D.register(reg,basic)
Size.register(reg,basic)
Region.register(reg,basic)
PixelType.register(reg,basic)
Filter.register(reg,basic)
Kernel.register(reg,basic)
Image.register(reg,basic)
########################################################################################
# Pixel Types
pixeltypes = [PixelTypeFloat,
PixelTypeUnsignedChar,
PixelTypeUnsignedShort,
PixelTypeRGB]
for cls in pixeltypes:
cls.register(reg,basic)
########################################################################################
# Feature Extraction Filters
featurefilters = [GradientMagnitudeRecursiveGaussianImageFilter,
DanielssonDistanceMapImageFilter,
SobelEdgeDetectionImageFilter]
for cls in featurefilters:
cls.register(reg,basic)
########################################################################################
# Intensity Filters
intensityfilters = [RescaleIntensityImageFilter,
SigmoidImageFilter,
ThresholdImageFilter,
ShiftScaleImageFilter,
NormalizeImageFilter]
for cls in intensityfilters:
cls.register(reg,basic)
########################################################################################
# Segmentation Filters
segmentationfilters = [IsolatedWatershedImageFilter,
ConnectedThresholdImageFilter,
ConfidenceConnectedImageFilter,
IsolatedConnectedImageFilter]
for cls in segmentationfilters:
cls.register(reg,basic)
########################################################################################
# Selection Filters
selectionfilters = [RegionOfInterestImageFilter,
CastImageFilter,
ExtractImageFilter]
for cls in selectionfilters:
cls.register(reg,basic)
########################################################################################
# Smoothing Filters
smoothingfilters = [CurvatureAnisotropicDiffusionFilter,
RecursiveGaussianImageFilter,
DiscreteGaussianImageFilter,
GradientAnisotropicDiffusionImageFilter,
MinMaxCurvatureFlowImageFilter,
BinomialBlurImageFilter,
BilateralImageFilter,
CurvatureFlowImageFilter]
for cls in smoothingfilters:
cls.register(reg,basic)
########################################################################################
# Threshold Filters
thresholdfilters = [BinaryThresholdImageFilter]
for cls in thresholdfilters:
cls.register(reg,basic)
########################################################################################
# Gradient Filters
gradientfilters = [GradientMagnitudeImageFilter]
for cls in gradientfilters:
cls.register(reg,basic)
########################################################################################
# Neighborhood Filters
neighborhoodfilters = [MeanImageFilter,
MedianImageFilter,
BinaryErodeImageFilter]
for cls in neighborhoodfilters:
cls.register(reg,basic)
########################################################################################
# Image Reader
imagereader = [ImageReader,
ImageToFile,
GDCMReader,
DICOMReader]
for cls in imagereader:
cls.register(reg,basic)
|
VisTrails/VisTrails
|
contrib/itk/__init__.py
|
__init__.py
|
py
| 4,935 |
python
|
en
|
code
| 100 |
github-code
|
6
|
16920362382
|
from approzium import AuthClient
from approzium.pymysql import connect
auth = AuthClient(
"authenticator:6001",
# This is insecure, see https://approzium.org/configuration for proper use.
disable_tls=True,
)
conn = connect(host="dbmysqlsha1", user="bob", db="db", authenticator=auth)
with conn.cursor() as cursor:
cursor.execute("SELECT 1")
result = cursor.fetchone()
print(result)
conn.close()
|
cyralinc/approzium
|
sdk/python/examples/pymysql_connect.py
|
pymysql_connect.py
|
py
| 420 |
python
|
en
|
code
| 57 |
github-code
|
6
|
19124516217
|
from components import decorators
from components import endpoints_webapp2
import webapp2
import api
import config
import notifications
import service
import swarming
README_MD = (
'https://chromium.googlesource.com/infra/infra/+/master/'
'appengine/cr-buildbucket/README.md')
class MainHandler(webapp2.RequestHandler): # pragma: no cover
"""Redirects to README.md."""
def get(self):
return self.redirect(README_MD)
class CronResetExpiredBuilds(webapp2.RequestHandler):
"""Resets expired builds."""
@decorators.require_cronjob
def get(self):
service.reset_expired_builds()
class CronUpdateBuckets(webapp2.RequestHandler): # pragma: no cover
"""Updates buckets from configs."""
@decorators.require_cronjob
def get(self):
config.cron_update_buckets()
class BuildHandler(webapp2.RequestHandler): # pragma: no cover
"""Redirects to API explorer to see the build."""
def get(self, build_id):
api_path = '/_ah/api/buildbucket/v1/builds/%s' % build_id
return self.redirect(api_path)
def get_frontend_routes(): # pragma: no cover
routes = [
webapp2.Route(r'/', MainHandler),
webapp2.Route(r'/b/<build_id:\d+>', BuildHandler),
endpoints_webapp2.discovery_service_route(),
]
routes += endpoints_webapp2.api_routes(api.BuildBucketApi)
routes += endpoints_webapp2.api_routes(swarming.SwarmbucketApi)
return routes
def get_backend_routes():
return [
webapp2.Route(
r'/internal/cron/buildbucket/reset_expired_builds',
CronResetExpiredBuilds),
webapp2.Route(
r'/internal/cron/buildbucket/update_buckets',
CronUpdateBuckets),
webapp2.Route(
r'/internal/task/buildbucket/notify/<build_id:\d+>',
notifications.TaskPublishNotification),
]
|
mithro/chromium-infra
|
appengine/cr-buildbucket/handlers.py
|
handlers.py
|
py
| 1,762 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28705152846
|
# Given an ordered sequence an of the numbers from 1 to N.
# One number was removed from a copy bn of the sequence and the rest were shuffled. Find the removed number.
import random
n = int(input('Enter the sequence length: '))
first_list = [i for i in range(1, n + 1)]
set_first_list = set(first_list)
print(f'Sequence an: {first_list}')
first_list.pop(int(input('Enter the index of the element to remove: ')))
random.shuffle(first_list)
print(f'Sequence bn: {first_list}')
set1 = set(first_list)
print(set1)
print(set_first_list.difference(set1))
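# Example run (illustrative): n=5 gives an=[1, 2, 3, 4, 5]; removing index 2
# leaves bn={1, 2, 4, 5} after shuffling, and the set difference prints {3}.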
|
MihailOgorodov/python_courses
|
seminar3/4.py
|
4.py
|
py
| 788 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
37877845832
|
import datetime
from PySide2.QtWidgets import QDialog
from ui_add_update import Ui_Dialog
class addDialog(QDialog):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.OkButton.clicked.connect(self.accept)
self.ui.CancelButton.clicked.connect(self.reject)
self.ui.dateEdit.setDate(datetime.date.today())
        self.ui.OkButton.clicked.connect(self.get_data)  # slot return value is discarded; callers should invoke get_data() after exec_()
self.ui.sc1000.valueChanged.connect(self.set_label)
self.ui.sc500.valueChanged.connect(self.set_label)
self.ui.sc200.valueChanged.connect(self.set_label)
self.ui.sc100.valueChanged.connect(self.set_label)
self.ui.sc50.valueChanged.connect(self.set_label)
def set_label(self):
self.ui.lsc1000.setNum(1000*self.ui.sc1000.value())
self.ui.lsc500.setNum(500*self.ui.sc500.value())
self.ui.lsc200.setNum(200*self.ui.sc200.value())
self.ui.lsc100.setNum(100*self.ui.sc100.value())
self.ui.lsc50.setNum(50*self.ui.sc50.value())
self.ui.label.setNum(1000*self.ui.sc1000.value()+500*self.ui.sc500.value()+200*self.ui.sc200.value()+100*self.ui.sc100.value()+50*self.ui.sc50.value())
def get_data(self):
return {
"c_date": self.ui.dateEdit.date().toPython(),
"c_1000": self.ui.sc1000.value(),
"c_500": self.ui.sc500.value(),
"c_200": self.ui.sc200.value(),
"c_100": self.ui.sc100.value(),
"c_50": self.ui.sc50.value()
}
|
randrust/cashflow_pyside2
|
dialogs/add_dialog.py
|
add_dialog.py
|
py
| 1,602 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30358219721
|
import unittest
from traits.api import HasTraits, Int, Str, Tuple
from traitsui.api import Item, View
from traits.testing.api import UnittestTools
from traitsui.tests._tools import (
BaseTestMixin,
create_ui,
requires_toolkit,
reraise_exceptions,
ToolkitName,
)
class TupleEditor(HasTraits):
"""Dialog containing a Tuple of two Int's."""
tup = Tuple(Int, Int, Str)
traits_view = View(
Item(label="Enter 4 and 6, then press OK"), Item("tup"), buttons=["OK"]
)
class TestTupleEditor(BaseTestMixin, unittest.TestCase, UnittestTools):
def setUp(self):
BaseTestMixin.setUp(self)
def tearDown(self):
BaseTestMixin.tearDown(self)
@requires_toolkit([ToolkitName.qt, ToolkitName.wx])
def test_value_update(self):
# Regression test for #179
model = TupleEditor()
with create_ui(model):
with self.assertTraitChanges(model, "tup", count=1):
model.tup = (3, 4, "nono")
@requires_toolkit([ToolkitName.qt])
def test_qt_tuple_editor(self):
# Behavior: when editing the text of a tuple editor,
# value get updated immediately.
from pyface import qt
val = TupleEditor()
with reraise_exceptions(), create_ui(val) as ui:
# the following is equivalent to clicking in the text control of
# the range editor, enter a number, and clicking ok without
# defocusing
# text element inside the spin control
lineedits = ui.control.findChildren(qt.QtGui.QLineEdit)
lineedits[0].setFocus()
lineedits[0].clear()
lineedits[0].insert("4")
lineedits[1].setFocus()
lineedits[1].clear()
lineedits[1].insert("6")
lineedits[2].setFocus()
lineedits[2].clear()
lineedits[2].insert("fun")
# if all went well, the tuple trait has been updated and its value
# is (4, 6, "fun")
self.assertEqual(val.tup, (4, 6, "fun"))
if __name__ == "__main__":
# Executing the file opens the dialog for manual testing
val = TupleEditor()
val.configure_traits()
print(val.tup)
|
enthought/traitsui
|
traitsui/tests/editors/test_tuple_editor.py
|
test_tuple_editor.py
|
py
| 2,229 |
python
|
en
|
code
| 290 |
github-code
|
6
|
31331693062
|
#! /usr/bin/python3
import os
import sys
import glob
import time
import shutil
import logging
import argparse
import subprocess
import pandas as pd
from pathlib import Path
from itertools import repeat
from multiprocessing import Pool
from nest.bbduk import QualCheck
from nest.alignment import Bwa
from nest.alignment import Bowtie
from nest.alignment import BBMap
from nest.alignment import Snap
from nest.samtools import Samtools
from nest.gatk import GenAnTK
from nest.gatk import Picard
from nest.gatk import FreeBayes
from nest.kestrel import KestrelVar
#from nest.annotater import Annotate
from nest.kestrel import kes_runner
from nest.summarize import Summary
from nest.prepinputs import Prepper
from nest.parsers.vcfReader import Reader
from nest.parsers.vcfmerge import Merge
from nest.parsers.vcfannotate import Annotate
from nest.parsers.vcfwriter import Writer
def main(arguments):
    # Unpack the per-sample argument tuple assembled by marsBatch
    (bbduk_path, aligner_path, smt_path, bft_path, gatk_path, sam_name,
     file_list, ref_path, adp_path, bed_path, out_dir, aligner, pic_path,
     voi_path, java_path, sra_path, purge, sra_list) = arguments
#Setup logging
#Get logger for main method
main_logger = logging.getLogger('NeST.{0}'.format(sam_name))
main_logger.debug('Starting analysis for {0}'.format(sam_name))
#Check if files are present
out_path = '{0}/{1}'.format(os.path.abspath(out_dir), sam_name)
if not os.path.exists(out_path):
os.mkdir(out_path)
fastq_path = '{0}/RawFastq'.format(out_path)
if not os.path.exists(fastq_path):
os.mkdir(fastq_path)
#Get FASTQs
prepper = Prepper(fastq_path, out_dir, sra_path)
fastq_path = prepper.sra(sam_name, sra_list, file_list)
##Note: Generalize this, right now it will only work with SRA. This is a fix for NEJM
rone_path = file_list[0]
rtwo_path = file_list[1]
    # FileNotFoundError is the built-in exception; the original FileNotFoundException
    # does not exist, and the sys.exit() calls after each raise were unreachable
    if not os.path.exists(rone_path):
        raise FileNotFoundError('Forward read not found; Exiting MARs')
    if not os.path.exists(rtwo_path):
        raise FileNotFoundError('Reverse read not found; Exiting MARs')
    if not os.path.exists(ref_path):
        raise FileNotFoundError('Reference fasta file not found; Exiting MARs')
    if not os.path.exists(adp_path):
        raise FileNotFoundError('Adapter sequence not found; Exiting MARs')
if not os.path.exists(out_path):
os.mkdir(out_path)
#Create completion folder
completion_path = '{0}/completion'.format(out_path)
if not os.path.exists(completion_path):
os.mkdir(completion_path)
#Call Bbduk
main_logger.debug('Running BBDuk')
if os.path.exists('{0}/bbduk.rt'.format(completion_path)):
brone = os.path.splitext(os.path.basename(rone_path))[0]
brtwo = os.path.splitext(os.path.basename(rtwo_path))[0]
rone_path = '{0}/{1}/{2}_cleaned.fq'.format(out_path, 'CleanedFastq', brone)
rtwo_path = '{0}/{1}/{2}_cleaned.fq'.format(out_path, 'CleanedFastq', brtwo)
main_logger.debug('Skipping BBDuk')
bret = 0
else:
bbduk = QualCheck(bbduk_path, adp_path, out_path, java_path)
rone_path, rtwo_path, bret = bbduk.bbduk(rone_path, rtwo_path)
if bret == 0:
Path('{0}/bbduk.rt'.format(completion_path)).touch()
if bret != 0:
raise RuntimeError('BBDuk failed to complete; Exiting MARs')
else:
main_logger.debug('BBDuk completed')
if aligner == 'bwa':
#Call BWA
main_logger.debug('Running BWA')
if os.path.exists('{0}/align.rt'.format(completion_path)):
sam_path = '{0}/alignments/output.sam'.format(out_path)
mret = 0
main_logger.debug('Skipping BWA')
else:
            bwa = Bwa(aligner_path, out_path, ref_path)
sam_path, mret = bwa.bwamem(rone_path, rtwo_path)
if mret == 0:
Path('{0}/align.rt'.format(completion_path)).touch()
if mret != 0:
raise RuntimeError('Bwa mem failed to complete; Exiting MARs')
else:
main_logger.debug('BWA completed')
elif aligner == 'bowtie2':
#Call Bowtie2
main_logger.debug('Running Bowtie2')
        if os.path.exists('{0}/align.rt'.format(completion_path)):  # was 'aling.rt': the typo meant this check never matched
sam_path = '{0}/alignments/output.sam'.format(out_path)
mret = 0
main_logger.debug('Skipping Bowtie2')
else:
            bowtie = Bowtie(aligner_path, out_path, ref_path)
sam_path, mret = bowtie.bowtie(rone_path, rtwo_path)
if mret == 0:
Path('{0}/align.rt'.format(completion_path)).touch()
if mret != 0:
raise RuntimeError('Bowtie2 failed to complete; Exiting MARs')
else:
main_logger.debug('Bowtie2 completed')
elif aligner == 'snap':
#Call Snap
main_logger.debug('Running Snap')
        snap = Snap(aligner_path, out_path, ref_path)
sam_path, mret = snap.snap(rone_path, rtwo_path)
if mret != 0:
raise RuntimeError('Snap failed to complete; Exiting MARs')
else:
main_logger.debug('Snap completed')
elif aligner == 'bbmap':
#Call Bbmap
main_logger.debug('Running BBMap')
        if os.path.exists('{0}/align.rt'.format(completion_path)):  # was 'aling.rt': the typo meant this check never matched
sam_path = '{0}/alignments/output.sam'.format(out_path)
mret = 0
else:
            bbmap = BBMap(aligner_path, out_path, ref_path)
sam_path, mret = bbmap.bbmap(rone_path, rtwo_path)
if mret == 0:
Path('{0}/align.rt'.format(completion_path)).touch()
if mret != 0:
            raise RuntimeError('BBMap failed to complete; Exiting MARs')
else:
main_logger.debug('BBMap completed')
#Fix mate information, sort files and add read groups
varengine = Samtools(smt_path, bft_path, out_path)
if os.path.exists('{0}/fixmate.rt'.format(completion_path)):
base = os.path.splitext(os.path.basename(sam_path))[0]
bam_path = '{0}/alignments/{1}_FM.bam'.format(out_path, base)
fret = 0
main_logger.debug('Skipping fixmate')
else:
bam_path, fret = varengine.fixmate(sam_path)
if fret == 0:
Path('{0}/fixmate.rt'.format(completion_path)).touch()
main_logger.debug('Running Samtools fixmate')
if fret != 0:
raise RuntimeError('Samtools fixmate failed to complete; Exiting MARs')
else:
main_logger.debug('Samtools fixmate completed')
if os.path.exists('{0}/sort.rt'.format(completion_path)):
base = os.path.splitext(os.path.basename(bam_path))[0]
bam_path = '{0}/alignments/{1}_SR.bam'.format(out_path, base)
sret = 0
main_logger.debug('Skipping sort')
else:
bam_path, sret = varengine.sort(bam_path)
if sret == 0:
Path('{0}/sort.rt'.format(completion_path)).touch()
main_logger.debug('Running Samtools sort')
if sret != 0:
raise RuntimeError('Samtools sort failed to complete; Exiting MARs')
else:
main_logger.debug('Samtools sort completed')
if os.path.exists('{0}/dedup.rt'.format(completion_path)):
base = os.path.splitext(os.path.basename(bam_path))[0]
bam_path = '{0}/alignments/{1}_DD.bam'.format(out_path, base)
dret = 0
main_logger.debug('Skipping Dedup')
else:
bam_path, dret = varengine.dedup(bam_path)
if dret == 0:
Path('{0}/dedup.rt'.format(completion_path)).touch()
main_logger.debug('Running Samtools dedup')
    if dret != 0:  # was sret: check the dedup return code, not the sort one
raise RuntimeError('Samtools dedup failed to complete; Exiting MARs')
else:
main_logger.debug('Samtools dedup completed')
rgadder = Picard(java_path, pic_path, out_path)
if os.path.exists('{0}/readgroup.rt'.format(completion_path)):
base = os.path.splitext(os.path.basename(bam_path))[0]
bam_path = '{0}/alignments/{1}_RG.bam'.format(out_path, base)
aret = 0
main_logger.debug('Skipping add read group')
else:
bam_path, aret = rgadder.picard(bam_path, sam_name)
main_logger.debug('Running Picard AddOrReplaceReadGroups')
if aret == 0:
Path('{0}/readgroup.rt'.format(completion_path)).touch()
if aret != 0:
raise RuntimeError('Picard AddOrReplaceReadGroups failed to complete; Exiting MARs')
else:
main_logger.debug('Picard AddOrReplaceReadGroups completed')
#Run samtools mpileup, bcftools index, call and stats to generate VCF files
if os.path.exists('{0}/pileup.rt'.format(completion_path)):
bcf_path = '{0}/{1}_variants.bcf'.format(out_path, sam_name)
pret = 0
main_logger.debug('Skipping Pileup')
else:
bcf_path, pret = varengine.pileup(ref_path, bam_path, sam_name)
main_logger.debug('Running Samtools mpileup')
if pret == 0:
Path('{0}/pileup.rt'.format(completion_path)).touch()
if pret != 0:
raise RuntimeError('Samtools mpileup failed to complete; Exiting MARs')
else:
main_logger.debug('Samtools mpileup completed')
if os.path.exists('{0}/bcfindex.rt'.format(completion_path)):
bret = 0
main_logger.debug('Skipping Bcfindex')
else:
bret = varengine.bcfindex(bcf_path)
main_logger.debug('Running Bcftools index')
        if bret == 0:
Path('{0}/bcfindex.rt'.format(completion_path)).touch()
if bret != 0:
raise RuntimeError('Bcftools index failed to complete; Exiting MARs')
else:
main_logger.debug('Bcftools index completed')
if os.path.exists('{0}/bcfcall.rt'.format(completion_path)):
vcf_path = '{0}/{1}_variants_samtools.vcf'.format(out_path, sam_name)
bret = 0
main_logger.debug('Skipping bcfcall')
else:
vcf_path, bret = varengine.bcftools(bcf_path, sam_name)
main_logger.debug('Running Bcftools call')
if bret == 0:
Path('{0}/bcfcall.rt'.format(completion_path)).touch()
if bret != 0:
raise RuntimeError('Bcftools call failed to complete; Exiting MARs')
else:
main_logger.debug('Bcftools call completed')
#Call GATK HaplotypeCaller to generate VCF files
varcaller = GenAnTK(gatk_path, out_path, java_path, pic_path)
main_logger.debug('Running GATK HaplotypeCaller')
if os.path.exists('{0}/gatk.rt'.format(completion_path)):
gvcf_path = '{0}/{1}_variants_gatk.vcf'.format(out_path, sam_name)
gret = 0
main_logger.debug('Skipping GATK')
else:
gvcf_path, gret = varcaller.hapCaller(bam_path, ref_path, sam_name)
if gret == 0:
Path('{0}/gatk.rt'.format(completion_path)).touch()
if gret != 0:
raise RuntimeError('GATK HaplotypeCaller failed to complete; Exiting MARs')
else:
main_logger.debug('GATK HaplotypeCaller stats completed')
#Call Freebayes to generate VCF files
varcaller = FreeBayes('freebayes', out_path)
main_logger.debug('Running Freebayes')
if os.path.exists('{0}/freebayes.rt'.format(completion_path)):
fvcf_path = '{0}/{1}_variants_freebayes.vcf'.format(out_path, sam_name)
fret = 0
main_logger.debug('Skipping Freebayes')
else:
fvcf_path, fret = varcaller.freeBayes(bam_path, ref_path, sam_name)
if fret == 0:
Path('{0}/freebayes.rt'.format(completion_path)).touch()
if fret != 0:
raise RuntimeError('Freebayes failed to complete; Exiting MARs')
else:
main_logger.debug('Freebayes stats completed')
    #Filter and annotate variant calls
main_logger.debug('Annotating variants')
annotate = Annotate()
gvcf_path = annotate.getAnnotation(bed_path, gvcf_path, ref_path, out_path, bam_path)
vcf_path = annotate.getAnnotation(bed_path, vcf_path, ref_path, out_path, bam_path)
fvcf_path = annotate.getAnnotation(bed_path, fvcf_path, ref_path, out_path, bam_path)
vcf_dict = {gvcf_path: 'GATK', vcf_path: 'Samtools', fvcf_path: 'Freebayes'}
merger = Merge(out_path, vcf_dict, ref_path)
merged_vcf = merger.splitter(list(vcf_dict.keys()))[0]
final_vcf= '{0}/{1}_variants_merged_annotated.vcf'.format(out_path, sam_name)
os.rename(merged_vcf, final_vcf)
#final_path = annotate.getAnnotation(bed_path, final_vcf, ref_path, out_path, bam_path)
    main_logger.debug('Filtering low quality variants and merging GATK and Samtools calls')
#merged_vcf = Vcf.Merge(gvcf_file, svcf_file, out_path).merge()
summary = Summary(ref_path, bed_path, voi_path, out_dir)
var_sum = summary.getVarStats(final_vcf)
main_logger.info('Total variants : {0}; Verified calls : {1}; Exonic : {2}; Intronic : {3}; Synonymous : {4}; Non Synonymous : {5}; Transition : {6}; Transversion : {7}'.format(
var_sum[0], var_sum[1], var_sum[2], var_sum[3], var_sum[4], var_sum[5], var_sum[6], var_sum[7]))
if purge:
shutil.rmtree('{0}/RawFastq'.format(out_path))
shutil.rmtree('{0}/CleanedFastq'.format(out_path))
alignments = glob.glob('{0}/alignments/*'.format(out_path))
for files in alignments:
if 'output_FM_SR_DD_RG.ba' in files:
continue
else:
os.remove(files)
vcffiles = glob.glob('{0}/*.bcf*'.format(out_path))
for files in vcffiles:
os.remove(files)
return(final_vcf, 0)
def marsBatch(bbduk_path, aligner_path, smt_path, bft_path, gatk_path,
inp_path, ref_path, adp_path, bed_path, out_dir, aligner,
pic_path, voi_path, java_path, sra_path, verbose, threads, purge):
#Creating logger for nest
logger = logging.getLogger('NeST')
logger.setLevel(logging.DEBUG)
#Create output paths for the run
if not os.path.exists(os.path.abspath(out_dir)):
os.mkdir(os.path.abspath(out_dir))
# Creating a file handler which logs even debug messages
fh = logging.FileHandler('{0}/nest.log'.format(os.path.abspath(out_dir)))
if verbose:
fh.setLevel(logging.DEBUG)
else:
fh.setLevel(logging.INFO)
# Creating a console handler to log info messages
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# Create formatter and add it to the handlers
formatter = logging.Formatter('{asctime} - {name} - {levelname} - {message}', style="{")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
#Create file and console handlers for MaRS
logger.info('Gathering input information from input path.')
prep = Prepper(inp_path, out_dir, sra_path).prepInputs()
samples, sra_list, files = list(), list(), list()
logger.info('Running MaRS on {0} experiments'.format(len(prep)))
#summary = Summary(ref_path, bed_path, voi_path, out_dir)
#samples = config.keys()
pools = Pool(threads)
for sample in prep:
samples.append(prep[sample].sample)
files.append(prep[sample].files)
sra_list.append(prep[sample].sra)
#rone_list = list()
#rtwo_list = list()
#name_list = list()
#for samples in config:
# name_list.append(config[samples].sample)
# rone_list.append(config[samples].files[0])
# rtwo_list.append(config[samples].files[1])
#sra_list = files
vcf_list = pools.map(main, zip(repeat(bbduk_path), repeat(aligner_path),
repeat(smt_path), repeat(bft_path), repeat(gatk_path),
samples, files, repeat(ref_path), repeat(adp_path),
repeat(bed_path), repeat(out_dir), repeat(aligner),
repeat(pic_path), repeat(voi_path),
repeat(java_path), repeat(sra_path), repeat(purge), sra_list))
logger.info('Summarizing variant calls from all {0} experiments'.format(len(prep)))
summary = Summary(ref_path, bed_path, voi_path, out_dir)
    #Summarize variants of interest
summary.getSummary()
return(0)
if __name__ == '__main__':
#Define deffault paths and aligner informations
def_path = "{0}/lib".format(os.path.abspath(os.path.dirname(os.path.realpath(__file__))))
ref_def_path = "{0}/ref".format(os.path.abspath(os.path.dirname(os.path.realpath(__file__))))
bbduk_def = 'bbduk.sh' #"{0}/bbmap/bbduk.sh".format(def_path)
bbmap_def = 'bbmap.sh' #"{0}/bbmap/bbmap.sh".format(def_path)
bwa_def = 'bwa' #"{0}/bwa/bwa".format(def_path)
bowtie_def = 'bowtie2' #"{0}/bowtie2/bowtie2".format(def_path)
    snap_def = 'snap-aligner' #"{0}/snap/snap-aligner".format(def_path)
smt_def = 'samtools' #"{0}/samtools/samtools".format(def_path)
bft_def = 'bcftools' #"{0}/bcftools/bcftools".format(def_path)
gatk_def = 'gatk' #"{0}/GenomeAnalysisTK.jar".format(def_path)
pic_def = 'picard' #"{0}/picard.jar".format(def_path)
sra_def = 'fastq-dump' #'{0}/sratoolkit/bin/fastq-dump'.format(def_path)
voi_def = None #'{0}/Reportable_SNPs.csv'.format(ref_def_path)
#if 'java version "1.8.' in str(subprocess.check_output(["java", "-version"], stderr=subprocess.STDOUT).decode('UTF-8').split('\n')[0]):
java_def = 'java'
#else:
# java_def = "{0}/jdk/bin/java".format(def_path)
aligner_def = {'bwa' : bwa_def, 'snap' : snap_def, 'bowtie2': bowtie_def, 'bbmap': bbmap_def}
#Get arguments
parser = argparse.ArgumentParser(prog='NeST')
parser.add_argument('-i', '--inp_path', type=str,
help='Path to input directory (Specify only for batch mode)')
parser.add_argument('-1', '--fwd', dest='rone_path', type=str,
help='Path to forward reads fastq', )
parser.add_argument('-2', '--rev', dest='rtwo_path', type=str,
help='Path to reverse reads fastq')
parser.add_argument('-r', '--ref', dest='ref_path', type=str,
help='Path to Reference fasta file', required=True)
parser.add_argument('-a', '--adapter', dest='adp_path', type=str,
help='Path to Adpater fasta file', required=True)
parser.add_argument('-b', '--bed', dest='bed_path', type=str,
help='Path to Bed file for MDR regions', required=True)
parser.add_argument('-o', '--outpath', dest='out_path', type=str,
help='Path where all outputs will be stored', required=True)
parser.add_argument('-n', '--sam_name', dest='sam_name', type=str,
help='Sample name', default=None)
parser.add_argument('-m', '--mapper', dest='aligner', type=str,
choices=['bowtie2', 'bwa', 'bbmap', 'snap'],
                        default='bwa', help='The aligner to be used by MARs')
parser.add_argument('--bbduk', dest='bbduk_path', type=str, default=bbduk_def,
help='Path to BBduk executable')
parser.add_argument('--aligner', dest='aligner_path', type=str, default=None,
help='Path to aligner executable')
parser.add_argument('--samtools', dest='smt_path', type=str, default=smt_def,
help='Path to Samtools executable')
parser.add_argument('--gatk', dest='gatk_path', type=str, default=gatk_def,
help='Path to GATK executable')
parser.add_argument('--bcftools', dest='bft_path', type=str, default=bft_def,
help='Path to Bcftools executable')
parser.add_argument('--picard', dest='pic_path', type=str, default=pic_def,
                        help='Path to Picard executable')
parser.add_argument('--varofint', dest='voi_path', type=str, default=voi_def,
help='Path to variant of interest')
parser.add_argument('--threads', dest='threads', type=int, default=5,
help='Number of threads')
parser.add_argument('--verbose', action='store_true',
help='Increase verbosity of log file')
parser.add_argument('--purge', action='store_true',
help='Remove intermiediate Fastq and alignment files')
args = parser.parse_args()
#Validate parsed arguments
if args.aligner_path is None:
args.aligner_path = aligner_def[args.aligner]
if not os.path.exists(args.out_path):
os.mkdir(args.out_path)
    #Check if the run command is for batch mode analysis or single sample
    #analysis. If inp_path is empty and rone_path is not, then this is a
    #single sample experiment.
status = marsBatch(args.bbduk_path, args.aligner_path, args.smt_path,
args.bft_path, args.gatk_path, args.inp_path, args.ref_path,
args.adp_path, args.bed_path, args.out_path, args.aligner,
args.pic_path, args.voi_path, java_def, sra_def, args.verbose,
args.threads, args.purge)
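# Example invocation (added, hedged -- file names are illustrative and the
# executables above are assumed to be on PATH):
#   python3 nest.py -1 sample_R1.fastq -2 sample_R2.fastq -r ref.fa \
#       -a adapters.fa -b mdr_regions.bed -o out/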
|
ohjeyy93/NFNeST
|
nest.py
|
nest.py
|
py
| 21,367 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3167046119
|
#!/usr/bin/env python3
import base64
from functions.aes import (
gen_random_bytes,
get_blocks,
pkcs7_unpad,
pkcs7_pad,
PKCS7Error,
AESCipher,
)
_STRINGS = [
base64.b64decode(s)
for s in [
"MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=",
"MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=",
"MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==",
"MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==",
"MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl",
"MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==",
"MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==",
"MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=",
"MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=",
"MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93",
]
]
_KEY = gen_random_bytes(16)
def _encrypt(pt: bytes) -> "tuple[bytes, bytes]":
iv = gen_random_bytes(16)
cbc = AESCipher(AESCipher.MODE_CBC, _KEY, iv=iv)
ct = cbc.encrypt(pkcs7_pad(pt))
return iv, ct
def _oracle(iv: bytes, ct: bytes) -> bool:
cbc = AESCipher(AESCipher.MODE_CBC, _KEY, iv=iv)
try:
pkcs7_unpad(cbc.decrypt(ct))
except PKCS7Error:
return False
return True
def _attack(iv: bytes, ct: bytes) -> bytes:
    # CBC padding-oracle attack: recover the plaintext one block at a time,
    # working right to left within each block.
    cipher_blocks = [iv] + get_blocks(ct)
    pt = b""
for i in reversed(range(1, len(cipher_blocks))):
ct_block_previous = cipher_blocks[i - 1]
ct_block_current = cipher_blocks[i]
intermediate_block = b""
        for j in reversed(range(16)):
            # Forge a fake previous block: j random bytes, one guessed byte,
            # then a suffix that forces the already-recovered tail to decrypt
            # to valid PKCS#7 padding of value (16 - j).
            ctb_prefix = gen_random_bytes(j)
            ctb_suffix = b""
            for k in range(len(intermediate_block)):
                ctb_suffix += bytes([(16 - j) ^ intermediate_block[k]])
            n = 0
            for m in range(256):
                ctb = ctb_prefix + bytes([m]) + ctb_suffix
                if _oracle(ctb, ct_block_current):
                    # Rarely, for the last byte this can match a longer
                    # padding by accident; re-running the attack fixes it.
                    n = m
                    break
intermediate_block = bytes([n ^ (16 - j)]) + intermediate_block
pt = bytes([ct_block_previous[j] ^ int(intermediate_block[0])]) + pt
return pkcs7_unpad(pt)
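# Note (hedged): in CBC, P_i = D_K(C_i) XOR C_{i-1}. Once the oracle reveals
# the intermediate byte I = D_K(C_i)[j], the plaintext byte is simply
# C_{i-1}[j] XOR I, which is exactly what _attack computes above.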
def challenge17() -> bool:
for msg in _STRINGS:
ret = _attack(*_encrypt(msg))
if ret not in _STRINGS:
return False
return True
if __name__ == "__main__":
assert challenge17(), "The result does not match the expected value"
print("Ok")
|
svkirillov/cryptopals-python3
|
cryptopals/set3/challenge17.py
|
challenge17.py
|
py
| 2,562 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29478342356
|
import sys
import networkx as nx
from assignNetwork import assignNetwork
conflicts = []
messageTypes = []
incomingMessages = []
outgoingMessages = []
stableStates = []
graph = {}
G = nx.MultiDiGraph()
assert len(sys.argv[1:]) <= 2, "Too many arguments"
file = sys.argv[1]
if(len(sys.argv[1:]) == 2):
    constraintFile = sys.argv[2]
with open(file) as f:
lines = f.readlines()
line_idx = 0
while(not "RevMurphi.MurphiModular.Types.Enums.SubEnums.GenMessageTypes" in lines[line_idx]):
line_idx += 1
line_idx += 2
# Get all messages
while(not ";" in lines[line_idx]):
msg = lines[line_idx].strip()
if msg[-1] == ',':
msg = msg[:-1]
messageTypes.append(msg)
line_idx += 1
print(messageTypes)
while(not "RevMurphi.MurphiModular.Types.Enums.SubEnums.GenArchEnums" in lines[line_idx]):
line_idx += 1
line_idx += 2
#get stable states
cacheStates = []
while not "directory" in lines[line_idx]:
state = lines[line_idx].strip().replace("cache", "")
if len(state) == 0:
line_idx += 1
continue
if state[-1] == ',':
state = state[:-1]
cacheStates.append(state)
line_idx += 1
line_idx += 1
while not ";" in lines[line_idx]:
state = lines[line_idx].strip().replace("directory", "")
if state[-1] == ',':
state = state[:-1]
if state in cacheStates:
stableStates.append("cache" + state)
line_idx += 1
#find and parse state machines
while(not "----RevMurphi.MurphiModular.StateMachines.GenMessageStateMachines" in lines[line_idx]):
# print(i, lines[i])
line_idx += 1
for i in range(line_idx, len(lines)):
#find switch cbe.State then find inmsg.mtype
if ("case cache" in lines[i]): #don't need to check directory or "case directory" in lines[i]
inState = lines[i].strip()[5:-1]
outState = ""
edge = ""
print("state: " + inState)
i += 1
incoming = False
prev_message = ""
msg_types = {}
for msgType in messageTypes:
msg_types[msgType] = "stall"
while(not "endswitch;" in lines[i]):
if("case" in lines[i]):
incoming_msg = lines[i].strip()
incoming_msg = incoming_msg[5:-1]
edge = incoming_msg
print("incoming message: " + incoming_msg)
if incoming_msg not in incomingMessages:
incomingMessages.append(incoming_msg)
msg_types[incoming_msg] = "nonstall"
incoming = True
prev_message = incoming_msg
if("msg := " in lines[i]):
outgoing_msg = lines[i]
outgoing_msg = outgoing_msg.split(',')[1]
print("outgoing message: " + outgoing_msg)
if outgoing_msg not in outgoingMessages:
outgoingMessages.append(outgoing_msg)
if("Send" in lines[i]):
incoming = False
if "cbe.State :=" in lines[i]:
outState = lines[i].strip()[13:-1]
key = G.add_edge(inState, outState, edge)
G.edges[inState, outState, edge]["message"] = edge
print(outState)
i += 1
print("finished messages for this state")
#check for conflicts in this state
print("all messages:")
print(msg_types)
keys = list(msg_types.keys())
conflictNum = 0
if len(keys) > 1:
for j in range(len(keys)-1):
m1 = keys[j]
m1_type = msg_types[m1]
for k in range(j+1, len(keys)):
m2 = keys[k]
m2_type = msg_types[m2]
if(m1_type != m2_type):
if(not (m1,m2) in conflicts and not (m2,m1) in conflicts):
print("appending {} {}".format(m1, m2))
conflicts.append((m1, m2))
conflictNum += 1
print("Number of new conflict in state: " + str(conflictNum))
for msg in messageTypes:
if (msg not in incomingMessages and msg not in outgoingMessages):
outgoingMessages.append(msg)
print("original conflicts: " + str(len(conflicts)))
assignNetwork(messageTypes, conflicts)
print("")
print("INCOMING {}".format(incomingMessages))
print("OUTGOING {}".format(outgoingMessages))
print("BOTH")
for msg in messageTypes:
if (msg in incomingMessages and msg in outgoingMessages):
print(msg)
print("\n")
newConflicts = []
outOnly = []
for msg in messageTypes:
if msg in outgoingMessages and msg not in incomingMessages:
outOnly.append(msg)
for (m1, m2) in conflicts:
conflicting = True
if m1 in outOnly or m2 in outOnly:
conflicting = False
if conflicting:
newConflicts.append((m1, m2))
print("omitting incoming/outgoing non-conflicts: {} conflicts left".format(str(len(newConflicts))))
print("newConflicts Length {}".format(len(newConflicts)))
falseConflict = {}
for con in newConflicts:
falseConflict[con] = True
#get gray from trace here
def enumNode(node):
    # Recursively collect the messages reachable from `node`; the recursion
    # bottoms out at stable states. (Hedged note: this assumes no cycles
    # among transient states other than self-loops, or it will not return.)
    posMsg = set()
    print(node)
if node in stableStates:
print("base case")
out_edges = G.out_edges(node)
for out_edge in out_edges:
outMsg = G.get_edge_data(out_edge[0], out_edge[1]).keys()
print(outMsg)
for msg in outMsg:
posMsg.add(msg)
print("returning {}".format(posMsg))
return posMsg
else:
out_edges = G.out_edges(node)
# print(node, out_edges)
for out_edge in out_edges:
curOutMsg = G.get_edge_data(out_edge[0], out_edge[1]).keys()
# for msg in curOutMsg:
# posMsg.append(msg)
if not out_edge[0] == out_edge[1]:
nxt_out_edges = G.out_edges(out_edge[1])
for oEdge in nxt_out_edges:
nxt_Msgs = G.get_edge_data(oEdge[0], oEdge[1]).keys()
for msg in nxt_Msgs:
posMsg.add(msg)
# for msg in curOutMsg:
# posMsg.add(msg)
print("NEXT state {}".format(out_edge[1]))
nextOutMsg = enumNode(out_edge[1])
for msg in nextOutMsg:
print("ADDING {}".format(msg))
posMsg.add(msg)
else:
print("ADDING {}".format(curOutMsg))
for msg in curOutMsg:
posMsg.add(msg)
# else:
# posMsg.append(next(iter(curOutMsg)))
# print(out_edge, outMsg)
# print()
print("returning {}".format(posMsg))
return posMsg
for n1 in G.nodes:
print("start node: {}".format(n1))
# if n1 in stableStates:
# continue
possibleMsgs = enumNode(n1)
print("POSSIBLE for {}: {}".format(n1, possibleMsgs))
currentMsgs = set()
out_edges = G.out_edges(n1)
# print(len(out_edges))
for out_edge in out_edges:
curOutMsg = G.get_edge_data(out_edge[0], out_edge[1]).keys()
for msg in curOutMsg:
currentMsgs.add(msg)
# print(possibleMsgs)
print("current msgs for {}: {}".format(n1, currentMsgs))
# if n1 not in stableStates:
nxtN = G.successors(n1)
for n in nxtN:
# if n == n1:
# continue
print("next node: {}".format(n))
nxtOut = G.out_edges(n)
for nxtOutEdge in nxtOut:
nxtOutMsg = G.get_edge_data(nxtOutEdge[0], nxtOutEdge[1]).keys()
print(nxtOutMsg)
for msg in nxtOutMsg:
if msg in currentMsgs:
if msg in possibleMsgs:
print("REMOVE {}".format(msg))
possibleMsgs.remove(msg)
# greyMsgs = []
# for msg in currentMsgs:
# if msg in possibleMsgs:
# possibleMsgs.remove(msg)
# print("{} can be received now".format(msg))
# for msg in incomingMessages:
# if msg not in possibleMsgs:
# greyMsgs.append(msg)
print("nonstall for state {}: {}".format(n1, currentMsgs))
print("stall for state {}: {}".format(n1, possibleMsgs))
for pm1 in currentMsgs:
for pm2 in possibleMsgs:
if (pm1, pm2) in newConflicts:
falseConflict[(pm1, pm2)] = False
print("True conflict {} {}".format(pm1, pm2))
elif (pm2, pm1) in newConflicts:
falseConflict[(pm2, pm1)] = False
print("True conflict {} {}".format(pm2, pm1))
# print("Grey messages: {}".format(greyMsgs))
print(falseConflict)
for k in falseConflict.keys():
    if falseConflict[k]:
        newConflicts.remove(k)
        # print("removed conflict {}".format(k))
    else:
        pass
        # print("true conflict {}".format(k))
netConstraint = []
if len(sys.argv[1:]) == 2:
print("=========Applying constraints...===========")
    with open(constraintFile) as f:
lines = f.readlines()
for line in lines:
if line[0] == '[':
sameNet = line[1:-1].replace(" ", "").split(",")
for m1 in sameNet:
for m2 in sameNet:
rmvConflict = (m1, m2)
if rmvConflict in newConflicts:
newConflicts.remove(rmvConflict)
netConstraint.append(sameNet)
else:
pair = line.strip().replace(" ","").split(",")
if (pair[0], pair[1]) in newConflicts:
rmvConflict = (pair[0], pair[1])
newConflicts.remove(rmvConflict)
elif ((pair[1], pair[0]) in newConflicts):
rmvConflict = (pair[1], pair[0])
newConflicts.remove(rmvConflict)
print("Final number of conflicts: {}".format(len(newConflicts)))
assignNetwork(incomingMessages, newConflicts, netConstraint)
print("Outgoing Network")
print(outgoingMessages)
|
ChingLingYeung/honoursProject
|
simple_conflict.py
|
simple_conflict.py
|
py
| 10,499 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27392319981
|
key = []
image = dict()
with open("Day20.txt", 'r') as INPUT:
data = INPUT.read().replace(".", "0").replace("#", "1").split("\n\n")
key = list(map(int, list(data[0])))
rest = data[1].split("\n")
for i in range(len(rest)):
a = list(map(int, list(rest[i])))
for j in range(len(a)):
image[(i, j)] = a[j]
default = 0
edges = []
def get(p, image):
global edges
if p in image:
return image[p]
edges.append(p)
return default
def swap_default():
    # The infinite background flips each step when key[0] == 1: a dark
    # neighbourhood maps through key[0], a fully lit one through key[511].
    global default
    if default == 0:
        default = key[0]
    elif default == 1:
        default = key[0b111111111]
kernel = [(-1, -1), (0, -1), (1, -1), (-1, 0), (0, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]
def get_k(kernel, p, image):
v = 0
for i in kernel:
v <<= 1
v += get((i[1] + p[0], i[0] + p[1]), image)
return v
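# Sketch (hedged): the 3x3 window is read MSB-first, so it becomes a 9-bit
# index 0..511 into `key`; e.g. an all-'#' window gives 0b111111111 == 511.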
def print_img(image):
row = None
for i, j in sorted(image.keys()):
if i != row:
print()
row = i
print("#" if image[(i, j)] else ".", end="", sep="")
print()
def get_count(image):
return sum(image[i] for i in image)
for index in range(50):
edges = []
new_image = dict()
for i in image:
n = get_k(kernel, i, image)
new_image[i] = key[n]
c_edges = edges.copy()
for i in c_edges:
n = get_k(kernel, i, image)
new_image[i] = key[n]
swap_default()
image = new_image
if index == 1:
print("part 1:", get_count(image))
print("part 2:", get_count(image))
|
stepheneldridge/Advent-of-Code-2021
|
Day20.py
|
Day20.py
|
py
| 1,594 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29792276111
|
#!/usr/bin/env python3
import pandas as pd
import os
import rospy
import rospkg
from robo_demo_msgs.srv import RunPlanningTest
rospack = rospkg.RosPack()
EE_CONTROL_PATH = rospack.get_path('end_effector_control')
PLANNING_DATA_PATH = os.path.join(EE_CONTROL_PATH, 'data', 'planning')
COMMON_DATA_PATH = os.path.join(PLANNING_DATA_PATH, 'planning_data.csv')
DEFAULT_TEST_DICT = {'change_goal': 10, 'add_obstacle': 10, 'add_obstacle_change_goal': 10}
class TestAutomator():
def __init__(self, test_dict=DEFAULT_TEST_DICT):
self.test_dict = test_dict
self.update_test_counts()
self.planning_test_srv = rospy.ServiceProxy('/test_interface_service', RunPlanningTest)
rospy.sleep(0.5)
rospy.loginfo("TestAutomator initialized, running tests")
self.run_tests()
def update_test_counts(self):
if os.path.exists(COMMON_DATA_PATH):
df = pd.read_csv(COMMON_DATA_PATH)
(t1, t2, t3) = ("change_goal", "add_obstacle", "add_obstacle_change_goal")
t1_count = len(df[df["Scenario"]==t1])
t2_count = len(df[df["Scenario"]==t2])
t3_count = len(df[df["Scenario"]==t3])
self.test_dict[t1] -= t1_count
self.test_dict[t2] -= t2_count
self.test_dict[t3] -= t3_count
rospy.loginfo(f"counts: {t1_count}, {t2_count}, {t3_count}")
def run_tests(self):
for (test_type, iterations) in self.test_dict.items():
for i in range(iterations):
rospy.loginfo(f"Running {test_type}, iteration: {i+1}/{iterations}")
response = self.planning_test_srv(test_type)
rospy.loginfo(f"Received response: {response}")
if __name__ == "__main__":
rospy.init_node("automate_testing_node")
test_dict = {'add_obstacle': 2}
ta = TestAutomator(test_dict)
# ta = TestAutomator()
|
dwya222/end_effector_control
|
scripts/automate_testing_v2.py
|
automate_testing_v2.py
|
py
| 1,922 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32599097431
|
from flask import Flask
from flask_restful import Api, Resource
app = Flask(__name__)
api = Api(app)
class Hellocall(Resource):
def get(self,name,number):
return({'Name':name,'Age':number})
api.add_resource(Hellocall,"/Helloworld/<string:name>/<int:number>")
if __name__ == "__main__":
app.run(debug=True)
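# Example (added, hedged): GET /Helloworld/alice/30 responds with
# {"Name": "alice", "Age": 30}.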
|
somasundaram1702/Flask-basics
|
main.py
|
main.py
|
py
| 330 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40413480181
|
import re
import requests
from bs4 import BeautifulSoup
from time import time as timer
__author__ = "Allen Roberts"
__credits__ = ["Allen Roberts"]
__version__ = "1.0.0"
__maintainer__ = "Allen Roberts"
def readfile():
with open('KY.txt') as file:
lines = file.readlines()
print(lines)
    return lines
def writetofile(emails):
    filename = "emails.txt"
    with open(filename, "w") as f:
        for email in emails:
            f.write(email + "\n")
def google_parse(search_string, start):
print("Test")
temp = []
url = 'http://www.google.com/search'
payload = {'q': search_string, 'start': start}
user_agent = {'User-agent': 'Mozilla/11.0'}
request_response = requests.get(url, params=payload, headers=user_agent)
soup = BeautifulSoup(request_response.text, 'html.parser')
aTags = soup.find_all('a')
print(aTags)
for a in aTags:
try:
            temp.append(re.search(r'url\?q=(.+?)&sa', a['href']).group(1))
except:
continue
return temp
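# Note (hedged): Google's result markup changes frequently; the 'url?q=...&sa'
# extraction above only matches the legacy HTML endpoint and may silently
# return nothing when the page layout shifts.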
def main(search, pages):
start = timer()
result = []
for page in range( 0, int(pages) ):
result.extend(google_parse( search, str(page*10) ) )
result = list( set( result ) )
result = removefalselinks(result)
print( *result, sep = '\n' )
print( '\nTotal URLs Scraped : %s ' % str( len( result ) ) )
print( 'Script Execution Time : %s ' % ( timer() - start, ) )
return result
def removefalselinks(pagelist):
    # Build a new list; removing from a list while iterating over it
    # silently skips elements.
    return [page for page in pagelist if 'http' in page]
def scrapepages( pagelist ):
emails = []
for page in pagelist:
print(page + ":")
foundemails = finddata(page)
if foundemails is None:
continue
for email in foundemails:
emails.append(email)
print(emails)
return emails
def finddata( pageurl ):
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/11.0'
}
try:
print("Making Request")
        req = requests.get(pageurl, headers=headers, timeout=5)
print("Grabbing HTML")
soup = BeautifulSoup(req.content, 'html.parser')
print("Prettying up data")
pagedata = soup.prettify()
regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
emails = re.findall(regex, pagedata)
print(emails)
return emails
    except Exception:
        print("Timeout or request error")
def removeduplicates(emails):
print(emails)
return list(set(emails))
def validatelist(emails):
newlist = removeduplicates(emails)
emailfinal = []
for email in newlist:
if '.png' in email:
print(email)
elif '.jpeg' in email:
print(email)
else:
emailfinal.append(email)
return emailfinal
if __name__ == '__main__':
businesses = readfile()
scrapedEmails = []
i = 0
while i < 3000:
i += 1
for business in businesses:
urls = main(business, 1)
emails = scrapepages(urls)
validatedlist = validatelist(emails)
for email in validatedlist:
scrapedEmails.append(email)
print(scrapedEmails)
writetofile(scrapedEmails)
print("Done")
|
AllenRoberts/EmailScraper
|
main.py
|
main.py
|
py
| 3,463 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8568584883
|
# Create class
import datetime
from datetime import date
class Person:
def __init__(self, name,year,month,day):
self.__name=name
self.__year=year
self.__month=month
self.__day=day
def getage(self):
now = datetime.datetime.now()
        c_year = now.year
c_month = now.month
c_day = now.day
current_date = date(c_year, c_month, c_day)
print(current_date)
dob = date(self.__year, self.__month, self.__day)
age = current_date - dob
print (age)
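        # Note: subtracting two dates yields a datetime.timedelta, so the
        # printed age is expressed in days.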
irene = Person("Irene",1988,8,19)
irene.getage()
|
iWanjugu/Personal-Development-II
|
Python/getAge.py
|
getAge.py
|
py
| 673 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6288317611
|
#! /usr/bin/env python
import math
import rospy
from sensor_msgs.msg import Imu
from tf.transformations import euler_from_quaternion
def imuCallback(imu):
    quat = [imu.orientation.x, imu.orientation.y, imu.orientation.z, imu.orientation.w]
roll, pitch, yaw = euler_from_quaternion(quat)
rospy.loginfo('{} {} {}'.format(roll*180.0/math.pi, pitch*180.0/math.pi, yaw*180.0/math.pi))
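    # Note (hedged): tf.transformations.euler_from_quaternion expects
    # quaternions in [x, y, z, w] order; sensor_msgs/Imu stores each
    # component separately, so the ordering above matters.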
def imuToYaw():
rospy.init_node('imu_to_yaw')
imu_sub = rospy.Subscriber('/android/imu', Imu, imuCallback)
rospy.spin()
if __name__ == '__main__':
imuToYaw()
|
vigneshrajap/UNSW-work
|
imu_to_yaw/src/imu_to_yaw_node.py
|
imu_to_yaw_node.py
|
py
| 558 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39513139874
|
from tkinter import *
from tkinter import ttk
class FileSaveFrame(Frame):
def __init__(self, master,filename,app):
ttk.Frame.__init__(self, master)# super class initialization
self.relief = GROOVE# tkinter relief attribute.
self.grid()# grid the frame
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.parent=app
        self.filename=filename # initialize file name
self.Widgets()
def Widgets(self):
"""
main window of the GUI
containt:-
- file_nameL
- corr_Button for plot clear
"""
self.file_name=StringVar(self)
update_filename = self.register(self.update_filename)
main_frame= ttk.Frame(self, borderwidth=0, relief=GROOVE)
main_frame.grid()
main_frame.grid_columnconfigure(0, weight=1)
main_frame.grid_rowconfigure(0, weight=1)
self.file_nameL=ttk.Label(main_frame,text='FileName/location')
self.file_nameL.grid(row=0, column=0, rowspan=1, columnspan=1,
sticky=W + E + N + S)
self.file_nameV=ttk.Entry(main_frame, textvariable=self.file_name,
width=62,
validate='focusout',
validatecommand=update_filename)
self.file_nameV.grid(row=0, column=1, rowspan=1, columnspan=5,
sticky=W + E + N + S)
self.corr_Button=ttk.Button(main_frame,text='Clc Plt', command=self.clear_plot)
self.corr_Button.grid(row=0, column=6, rowspan=1, columnspan=1,
sticky=W + E + N + S)
def update_filename(self):
"""
to update file name
"""
self.filename=self.file_name.get()
self.parent.updateFileName(self.filename)
def clear_plot(self):
"""
to clear plot
"""
self.parent.clear_plot()
def get_fileName(self):
"""
to get file name
"""
return self.filename
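# Usage sketch (added, hedged; `app` must provide updateFileName() and
# clear_plot(), as called by this frame):
#   root = Tk()
#   frame = FileSaveFrame(root, "data.csv", app)
#   root.mainloop()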
|
dnsmalla/Water-splitting-measurement-LCR-GUI
|
gui_frames/fileSaveFrameGUI.py
|
fileSaveFrameGUI.py
|
py
| 2,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28747708713
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.template import Context, loader
def index(request):
omi = "omprakash"
import urllib
import json
resp = urllib.urlopen('https://api.coursera.org/api/courses.v1?q=search&query=malware+underground').read()
content = json.loads(resp)
template = loader.get_template("myapp/index.html")
context = {
'omi': omi,
'content' : content,
}
return HttpResponse(template.render(context, request))
def search(request):
    form = request.GET.get('squery')
    omi = "omprakash"
    import urllib
    import json
    # NB: urllib.urlopen is the Python 2 API; under Python 3 this would be
    # urllib.request.urlopen.
    resp = urllib.urlopen('https://api.coursera.org/api/courses.v1?start=1&limit=3&q=search&query=' + form).read()
content = json.loads(resp)
template = loader.get_template("myapp/search.html")
context = {
'omi': omi,
'content' : content,
'form' : form,
}
return HttpResponse(template.render(context, request))
|
aumiom/Educational-Website-Template-with-Coursera-Search-API-Integration
|
myproject/myapp/views.py
|
views.py
|
py
| 1,090 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71893897149
|
from Board import Board
from User import User
from Server import Server
import sys
import os
import socket
import json
import threading as th
#-------------------------------------------------------------------------------------
'''
In class library phase you should implement your basic classes and write a command
line test application demonstrating all features of your library.
'''
#-------------------------------------------------------------------------------------
PORT=3333
filename="../gameBoards/deneme_in"
for i in range(len(sys.argv)):
if sys.argv[i]=="--port":
PORT=int(sys.argv[i+1])
break
# Creating server
server=Server()
# Server socket starts
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind socket to a specific address and port
server_socket.bind(('', PORT))
'''#-----------------------------------------------------
# Server start
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# bind socket to a specific address and port
os.remove("/tmp/mysocket")
server_socket.bind("/tmp/mysocket")
#-----------------------------------------------------
'''
# listen for incoming connections
server_socket.listen()
print(f"Server listening on port {PORT}")
def become_playing_thread(monopoly,user):
while True:
game_state = monopoly.turn(user)
if (game_state == None):
break
def become_observer_thread(monopoly,user):
while True:
message=user.client_socket.recv(1024).decode("Utf-8").strip()
game_state = monopoly.observe(user,message)
if (game_state == None):
break
def become_game_thread(monopoly):
current_user = monopoly.order[0]
while True:
current_user = monopoly.turn(current_user)
if (current_user == None):
break
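# Note (hedged): become_game_thread is a single-thread driver for a whole
# game, referenced only from commented-out code; the server below instead
# runs one become_playing_thread per connected player.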
#-----------------------------user thread function--------------------------------------------
def user_thread_func(client_socket, address):
# ask sign in or up:
while True:
wrong_count=0
client_socket.send("Do u want to login or sign up or exit:write either \"login\" or \"sign up\" or \"exit\"\n".encode("utf-8"))
received_msg = client_socket.recv(1024).decode('utf-8').strip()
if(received_msg=="login"):
user = User(client_socket, address, "login")
if(user.authh()==False):
client_socket.send("Wrong password or Username goodbye\n".encode("utf-8"))
return
break
elif(received_msg=="sign up"):
user = User(client_socket, address,"sign up")
elif(received_msg == "exit"):
client_socket.send("Goodbye and have a nice day\n".encode("utf-8"))
return
else:
wrong_count+=1
if(wrong_count<3):
client_socket.send(f"You wrote {received_msg} wrong input count={wrong_count}\n".encode("utf-8"))
else:
client_socket.send(f"You wrote {received_msg} wrong input count={wrong_count} goodbye\n".encode("utf-8"))
return
while True:
# sending game list to the user
this_thread_is_observer_thread=False
while True:
is_user_attached=False
game_list=server.list()
user.client_socket.send(("New or join or observe\nto exit write exit\nto join write Join(game_id)\nto observe write Observe(game_id)\nto create a game New(number of players)\n\tnumber of players must be between 2-4\n"+str(game_list)+"\n").encode("utf-8"))
received_msg = user.client_socket.recv(1024).decode('utf-8').strip()
if received_msg.startswith("New(") and received_msg.endswith(")") and received_msg[4:-1].isnumeric():
user_count = int(received_msg[4:-1])
monopoly=server.new(user_count)
if monopoly:
is_user_attached=server.open(monopoly, user)
# is_user_attached=True
elif received_msg.startswith("Join(") and received_msg.endswith(")") and received_msg[5:-1].isnumeric():
game_index=int(received_msg[5:-1])
if(game_index in game_list):
is_user_attached=server.open(game_list[game_index],user)
if(is_user_attached):
monopoly=game_list[game_index]
else:
user.client_socket.send(f"There is no game ({game_index})\n".encode("utf-8"))
elif received_msg.startswith("Observe(") and received_msg.endswith(")") and received_msg[8:-1].isnumeric():
game_index=int(received_msg[8:-1])
if(game_index in game_list):
server.observe(game_list[game_index],user)
monopoly = game_list[game_index]
this_thread_is_observer_thread=True
is_user_attached=True
else:
user.client_socket.send(f"There is no game ({game_index})\n".encode("utf-8"))
elif (received_msg == "exit"):
client_socket.send("Goodbye and have a nice day\n".encode("utf-8"))
return
else:
user.client_socket.send(f"Sent wrong commend({received_msg})\n".encode("utf-8"))
if(is_user_attached):
break
if(this_thread_is_observer_thread==True):
become_observer_thread(monopoly,user)
continue
# wait for user to respond with ready
user.client_socket.send("write \"ready\" when you are ready write \"exit\" if you want to leave\n".encode('utf-8'))
ready_msg = user.client_socket.recv(1024).decode('utf-8').strip()
while ready_msg!="ready" and ready_msg!="exit":
user.client_socket.send(f"Invalid message {ready_msg}. write \"ready\" when you are ready write \"exit\" if you want to leave\n".encode('utf-8'))
ready_msg = user.client_socket.recv(1024).decode('utf-8').strip()
if ready_msg == "exit":
server.close(monopoly,user)
continue
monopoly.ready(user)
# barrier waiting it will be opened by the last user in board
'''if monopoly.WaitingState==True:
user.mutex.acquire()
else:
become_game_thread(monopoly)'''
become_playing_thread(monopoly,user)
#---------------------------------------------------------------------------------------------
user_threads=[]
while True:
# accept incoming connection
client_socket, address = server_socket.accept()
print(f"Accepted connection from {address}")
user_thread=th.Thread(target=user_thread_func,args=(client_socket, address))
user_threads.append(user_thread)
user_thread.start()
for user_thread in user_threads:
user_thread.join()
'''current_user=monopoly.order[0]
while True:
current_user=monopoly.turn(current_user)
if(current_user==None):
break'''
|
e-hengirmen/ceng445-term-project
|
phase4/main.py
|
main.py
|
py
| 6,994 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17046267898
|
import struct
class InvalidArgumentException(Exception):
pass
class Color:
def __init__(self, r, g, b):
self.red = r
self.green = g
self.blue = b
self._validate()
def _validate(self):
for v in [self.red, self.green, self.blue]:
if not(0 <= v <= 255):
raise InvalidArgumentException("invalid color")
def element_iter(self):
yield self.red
yield self.green
yield self.blue
class Gif:
def __init__(self, data, width, height, colors):
self.image_data = data
self.logical_screen_width = width
self.logical_screen_height = height
self.global_color_resolution = len('{:b}'.format(len(colors) )) - 1
self.colors = colors
diff_len = pow(2, (self.global_color_resolution + 1)) - len(colors)
self.colors.extend([Color(0, 0, 0) for _ in range(diff_len)])
self._validate()
def _validate(self):
if not (0 < self.logical_screen_width < 65535):
raise InvalidArgumentException("invalid widht")
if not (0 < self.logical_screen_height < 65535):
raise InvalidArgumentException("invalid height")
if not (0 <= self.global_color_resolution < 7):
raise InvalidArgumentException("invalid global color resolution")
def _global_color_tables(self):
for color in self.colors:
yield from color.element_iter()
pallet_len = pow(2, (self.global_color_resolution))
for _ in range((pallet_len - len(self.colors)) * 3):
yield 0
def _get_header_data(self):
data = bytearray()
data.extend(b'GIF') # signature
data.extend(b'87a') # gif version
data.extend(struct.pack('<H', self.logical_screen_width))
data.extend(struct.pack('<H', self.logical_screen_height))
        b = 1 # Global Color Table Flag
        b = b << 3 | self.global_color_resolution # Color Resolution
        b = b << 1 | 0 # Sort Flag
        b = b << 3 | self.global_color_resolution # Size of Global Color Table
data.append(b)
data.append(0) # Background Color Index
data.append(0) # Pixel Aspect Ratio
data.extend([b for b in self._global_color_tables()])
return data
def _get_image_blocks(self):
data = bytearray()
data.append(0x2c) # Image Separator
data.extend([0x00, 0x00]) # Image Left Position
data.extend([0x00, 0x00]) # Image Top Position
data.extend(struct.pack('<H', self.logical_screen_width))
data.extend(struct.pack('<H', self.logical_screen_height))
b = 0 # Local Color Table Flag
b = b << 1 | 0 # Interlace Flag
b = b << 1 | 0 # Sort Flag
b = b << 2 | 0 # Reversed
b = b << 3 | 0 # Size of Local Color Table
data.append(b)
lzw = Lzw(len(self.colors))
data.append(lzw.minimum_code_size)
lzw_data = list(lzw.compress(self.image_data))
lzw_len = len(lzw_data)
while lzw_len > 0:
if lzw_len > 255:
data.append(255)
data.extend(lzw_data[:255])
lzw_data = lzw_data[255:]
lzw_len -= 255
else:
data.append(lzw_len)
data.extend(lzw_data)
lzw_len = 0
data.append(0x00) # Block Terminator
return data
def to_bytes(self):
data = self._get_header_data()
data.extend(self._get_image_blocks())
data.append(0x3b)
return data
class Bits:
def __init__(self):
self.bits = []
def append(self, value, bit_len):
for i in range(bit_len):
self.bits.insert(0, (value >> i & 1))
def to_bytes(self):
bits = self.bits
while len(bits) > 0:
b = bits[-8:]
for _ in range(len(b)):
bits.pop()
byte = 0
for i in range(len(b)):
byte = (byte << 1) | b[i]
yield byte
class Lzw:
def __init__(self, dic_len):
self.dic_len = dic_len
self.max_code = (self.dic_len if self.dic_len > 2 else 4)
self.minimum_code_size = len('{:b}'.format(self.max_code - 1))
def compress(self, data):
max_code = self.max_code
max_code_bits = self.minimum_code_size
def to_key(add, prev_key=[]):
tmp = list(prev_key)
tmp.append(add)
return tuple(tmp)
        # initialize the code dictionary
dic = {}
for i in range(max_code):
key = to_key(i)
dic[key] = i
        # clear code
dic[to_key(max_code)] = max_code
clear_code = max_code
        # end code
max_code += 1
dic[to_key(max_code)] = max_code
end_code = max_code
if max_code >= 1 << max_code_bits and max_code_bits < 12:
max_code_bits += 1
        # compress
key = []
result = Bits()
result.append(clear_code, max_code_bits)
for n, c in enumerate(data):
prev_key = key
key = to_key(c, prev_key)
if key in dic:
continue
result.append(dic[prev_key], max_code_bits)
if max_code < 4096:
max_code += 1
dic[key] = max_code
if max_code == 1 << max_code_bits and max_code_bits < 12:
max_code_bits += 1
key = to_key(c)
else:
if key in dic:
result.append(dic[key], max_code_bits)
result.append(end_code, max_code_bits)
return result.to_bytes()
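# Worked sketch (added, hedged) for the 3-colour __main__ example below: the
# palette is padded to 4 entries, so minimum_code_size is 2; codes 0..3 are
# colours, 4 is the clear code, 5 the end code, and the code width grows to
# 3 bits as soon as code 5 exists (5 >= 1 << 2).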
if __name__ == '__main__':
data = [1,1,1,1,1,2,2,2,2,2,
1,1,1,1,1,2,2,2,2,2,
1,1,1,1,1,2,2,2,2,2,
1,1,1,0,0,0,0,2,2,2,
1,1,1,0,0,0,0,2,2,2,
2,2,2,0,0,0,0,1,1,1,
2,2,2,0,0,0,0,1,1,1,
2,2,2,2,2,1,1,1,1,1,
2,2,2,2,2,1,1,1,1,1,
2,2,2,2,2,1,1,1,1,1]
colors = [Color(255, 255, 255), Color(255, 0, 0), Color(0, 0, 255)]
g = Gif(data, 10, 10, colors)
with open("temp.gif", 'wb') as f:
f.write(g.to_bytes())
|
meganehouser/kantencolors
|
project/gif.py
|
gif.py
|
py
| 6,258 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7323978481
|
from enemy import *
class CHero(CEnemy):
def clean(self,fair=True):
if fair:
self.skill = dice()+6
self.stamina = dice(2)+12
self.luck = dice()+6
else:
self.skill = 12
self.stamina = 24
self.luck = 12
self.maxskill = self.skill
self.maxstamina = self.stamina
self.maxluck = self.luck
self.damage = 2
self.alive = True
def __init__(self,fair=True):
self.clean(fair)
def heal(self,diff):
self.stamina = self.stamina + diff
self.stamina = min(self.stamina,self.maxstamina)
def testluck(self):
self.luck = self.luck-1
return(self.luck > dice(2))
def testskill(self):
return self.skill>=dice(2)
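# Usage sketch (added, hedged; CEnemy and dice() come from enemy.py):
#   hero = CHero()
#   if hero.testluck():
#       hero.heal(2)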
|
Vo1t/sol
|
hero.py
|
hero.py
|
py
| 657 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17720876787
|
import view as vw
import logger as log
import model as ml
import is_number as is_n
def run():
while True:
select_action = vw.select_action()
if is_n.is_number(select_action):
data_file = log.get_data()
if select_action == 1:
if not data_file:
last_id = 1
else:
last_id = int(ml.get_last_id(data_file)) + 1
create_new = vw.add_contact(last_id)
log.create_new(create_new)
                print('Contact added!')
print()
elif select_action == 2:
view_search = vw.search_contact()
result = ml.search_contact(data_file, view_search)
vw.show_contact(result)
elif select_action == 3:
view_search = vw.search_contact()
result = ml.search_contact(data_file, view_search)
if isinstance(result[0], dict):
indexes = []
for value in result:
indexes.append(int(data_file.index(value)))
if len(indexes) == 1:
                        query = vw.update_value(data_file[indexes[0]])
updated_file = ml.update_contact(query, data_file, indexes[0])
log.overwrite_file(updated_file)
                        print('Data updated successfully!')
else:
select_element = vw.update_element(indexes, data_file)
query = vw.update_value(data_file[select_element])
updated_file = ml.update_contact(query, data_file, select_element)
log.overwrite_file(updated_file)
                        print('Data updated successfully!')
else:
print(''.join(result[0]))
elif select_action == 4:
view_search = vw.search_contact()
result = ml.search_contact(data_file, view_search)
if isinstance(result[0], dict):
indexes = []
for value in result:
indexes.append(int(data_file.index(value)))
if len(indexes) == 1:
new_data = ml.delete_contact(indexes[0], data_file)
log.overwrite_file(new_data)
                        print('Contact deleted!')
else:
choose_element = vw.choose_element(indexes, data_file)
new_data = ml.delete_contact(choose_element, data_file)
log.overwrite_file(new_data)
                        print('Contact deleted!')
else:
print(''.join(result[0]))
elif select_action == 5:
exit()
else:
            print('You entered an invalid value! Try again!')
|
tim24ktk/homework_8
|
controller.py
|
controller.py
|
py
| 3,079 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15060558322
|
import csv
import json
species_file = open('./data/union_list.json','r')
species_list = json.loads(species_file.read())
country_codes = [
"BG",
"HR",
"CZ",
"DK",
"NL",
"UK",
"EE",
"FI",
"FR",
"DE",
"GR",
"HU",
"IE",
"IT",
"LV",
"LT",
"MT",
"PL",
"PT",
"RO",
"SK",
"SI",
"ES",
"SE"
]
result = []
delimiter = ";"
for iso in country_codes:
iso_input = open('./data/common_names_inputs/'+iso+'_common_names.csv','r')
common_names_dict = csv.DictReader(iso_input)
common_names = []
for item in common_names_dict:
common_names.append(item)
for row in species_list:
tmp_names = set()
for item in common_names:
if item['eu_name'].startswith(row['speciesName']):
tmp_names.add(item['common_name'])
row[iso+'_CommonName'] = delimiter.join(tmp_names)
result.append(row)
print(result)
with open('./data/species_list_a.json', 'w', encoding='utf-8') as species_list_section_A:
    json.dump(species_list, species_list_section_A, ensure_ascii=False)
#for item in country_codes:
# common_names_lang_file = open('./data/common_names_outputs/'+item+'_common_names.json', 'w', encoding='utf-8')
# json.dump(result[item], nuts_regions_file, ensure_ascii=False)
|
eea/ias-dataflow
|
scripts/parse_common_names.py
|
parse_common_names.py
|
py
| 1,330 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9004329912
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'siscoer.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^grappelli/', include('grappelli.urls')),
# Apps urls
url(r'^$', 'estoque.views.start',),
url(r'^estoque/', include('estoque.urls')),
url(r'^cadastro/$', 'estoque.views.cadastro', name='cadastro'),
url(r'^request_user_pass/$', 'estoque.views.request_user_pass', name='request_user_pass'),
    # Authentication
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}, name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {"next_page": "/"}, name='logout'),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
|
clebersa/2014-1-gps-siscoer
|
src/siscoer/siscoer/urls.py
|
urls.py
|
py
| 1,206 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16312672546
|
r1 = float(input('First segment: '))
r2 = float(input('Second segment: '))
r3 = float(input('Third segment: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('The segments can form a triangle, ', end='')
    if r1 == r2 == r3:  # or r1 == r2 and r2 == r3
        print('EQUILATERAL')
    elif r1 != r2 != r3 != r1:
        print('SCALENE')
    else:
        print('ISOSCELES')
else:
    print('The segments cannot form a triangle')
|
igorfreits/Studies-Python
|
Curso-em-video/Mundo-2/AULA12-Condições-Aninhadas/#042 - Analisando Triângulos v2.0.py
|
#042 - Analisando Triângulos v2.0.py
|
py
| 485 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
71429998588
|
from django.db.models import Q
from django.shortcuts import get_object_or_404
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.exceptions import NotFound, PermissionDenied, ParseError
from .models import Letterlist, Letter
from . import serializers
from users.models import User
# /me/
class ChattingList(APIView):
permission_classes = [IsAuthenticated]
@swagger_auto_schema(
operation_summary="채팅방 목록 조회",
responses={
200: openapi.Response(
description="Succfull Response",
schema=serializers.ChatroomSerialzier(many=True),
)
},
)
def get(self, request):
chatlist = Letterlist.objects.filter(user=request.user).order_by("-updated_at")
chatlist = [i for i in chatlist if request.user not in i.ignore_by.all()]
serializer = serializers.ChatroomSerialzier(
chatlist,
many=True,
context={"request": request},
)
return Response(serializer.data)
# /<int:pk>/ GET
class ChattingRoom(APIView):
permission_classes = [IsAuthenticated]
@swagger_auto_schema(
operation_summary="해당 채팅방의 쪽지 기록 조회",
responses={
200: openapi.Response(
description="Successful Response",
schema=serializers.MessageSerialzier(),
),
400: openapi.Response(description="Not Found Pk"),
403: openapi.Response(description="Permission Denied"),
},
)
def get(self, request, pk):
chat = Letter.objects.filter(room__pk=pk)
if chat:
if request.user not in chat[0].room.user.all():
raise PermissionDenied
chat = [i for i in chat if request.user not in i.delete_by.all()]
serializer = serializers.MessageSerialzier(
chat,
many=True,
context={"request": request},
)
return Response(serializer.data)
raise NotFound
@swagger_auto_schema(
operation_summary="쪽지방 차단",
responses={
204: openapi.Response(
description="Successful Response",
),
403: openapi.Response(description="Sender != request.user"),
404: openapi.Response(description="Not Found Pk"),
},
)
def delete(self, request, pk):
letter = get_object_or_404(Letterlist, pk=pk)
user = [i for i in letter.user.all()]
if request.user in user:
letter.ignore_by.add(request.user)
letter.save()
return Response("Ok", status=204)
else:
raise PermissionDenied
# /message/ POST -> send a message
class MessageSend(APIView):
permission_classes = [IsAuthenticated]
@swagger_auto_schema(
operation_summary="쪽지 전송",
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT,
required=["receiver", "text"],
properties={
"receiver": openapi.Schema(
type=openapi.TYPE_INTEGER,
description="보내는 유저의 pk ",
),
"text": openapi.Schema(
type=openapi.TYPE_STRING,
description="전송하는 메세지",
),
},
),
responses={
201: openapi.Response(
description="Successful Response",
),
400: openapi.Response(description="Data Error"),
404: openapi.Response(description="Not Found Pk"),
},
)
def post(self, request):
serializer = serializers.MessageSerialzier(data=request.data)
if serializer.is_valid():
receiver = request.data.get("receiver")
if not receiver:
raise ParseError("required receiver")
if receiver == str(request.user.pk):
raise ParseError("can't send to yourself")
message = serializer.save(sender=request.user, receiver=receiver)
return Response("Successful Response", status=201)
else:
return Response(serializer.errors, status=400)
class MessageDelete(APIView):
permission_classes = [IsAuthenticated]
@swagger_auto_schema(
operation_summary="쪽지 삭제",
responses={
204: openapi.Response(
description="Successful Response",
),
400: openapi.Response(description="Not Found Pk"),
403: openapi.Response(description="Sender != request.user"),
},
)
def delete(self, request, pk):
letter = get_object_or_404(Letter, pk=pk)
user = [i for i in letter.room.user.all()]
if request.user in user:
letter.delete_by.add(request.user)
letter.save()
# if letter.sender == request.user:
# # letter.delete()
return Response("Ok", status=204)
else:
raise PermissionDenied
|
izunaaaaa/CurB_Backend
|
letterlist/views.py
|
views.py
|
py
| 5,352 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17347679642
|
from django.urls import path, include
from .views import LoginView, RegisterView, \
LogoutView, DepartView, JobView, SetPwd, DepartEditView, JobEditView, \
JobRelateUserView, DepartRelateUserView, AppointWorkflowAdmin, IndexView, AppointCommon, AddDepartView, \
DepartRelateUserEditView, AddJobView, JobRelateUserEditView
from django.views import View
urlpatterns = [
    path('index/<str:job_id>', IndexView.as_view()),  # personal home page
    path('login/', LoginView.as_view()),  # initial user login
    path('login/set_pwd/', SetPwd.as_view()),  # registered user changes password after first login
    path('logout/', LogoutView.as_view()),  # log out
    path('register/', RegisterView.as_view()),  # admin registers new users
    path('appoint_workflow_admin/', AppointWorkflowAdmin.as_view()),  # admin appoints one or more users as workflow admins (GET with query string ?job_id=1&job_id=2 fetches users, POST with a job_id list sets them)
    path('appoint_common/', AppointCommon.as_view()),  # admin demotes one or more workflow admins (GET with query string ?job_id=1&job_id=2 fetches users, POST with a job_id list sets them) to ordinary users
    path('depart/', DepartView.as_view()),  # admin: view department info (paginated, URL query string)
    path('add_depart/', AddDepartView.as_view()),  # admin: add a department
    path('depart/edit/<int:depart_id>', DepartEditView.as_view()),  # admin: edit or delete a department
    path('depart/view/depart<int:depart_id>/relate_user/', DepartRelateUserView.as_view()),  # admin: view department members
    path('depart/edit/depart<int:depart_id>/relate_user/', DepartRelateUserEditView.as_view()),  # admin: add, update, or delete department members
    path('job/', JobView.as_view()),  # admin: view job titles (paginated, URL query string)
    path('add_job/', AddJobView.as_view()),  # admin: add a job title
    path('job/edit/<int:jobtitle_id>', JobEditView.as_view()),  # admin: edit or delete a job title
    path('job/view/job<int:jobtitle_id>/relate_user/', JobRelateUserView.as_view()),  # admin: view job title members
    path('job/edit/job<int:jobtitle_id>/relate_user/', JobRelateUserEditView.as_view()),  # admin: add, update, or delete job title members
]
|
cuifeihe/django_WorkFlowProject
|
app_account/urls.py
|
urls.py
|
py
| 2,353 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
39839377690
|
#Library
import datetime
import time
from tkinter import *
import tkinter.ttk as ttk
from urllib.request import urlretrieve
import serial
import os
import RPi.GPIO as GPIO
#End Library
#Firmwares
ultra1 = serial.Serial("/dev/ttyUSB0",baudrate=9600, timeout=1)
gsm = serial.Serial("/dev/ttyAMA0",baudrate=9600, timeout=1)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(31,GPIO.OUT) #Relay 1
GPIO.setup(33,GPIO.OUT) #Relay 2
GPIO.setup(35,GPIO.OUT) #Relay 3
GPIO.setup(37,GPIO.OUT) #Relay 4
GPIO.setup(12,GPIO.OUT) #Sensor1 Enable
#End Firmwares
root=Tk()
root.geometry("%dx%d+%d+%d"%(800,480,100,50)) #x,y,horizontal,vertical
root.title('SAJAB')
root.configure(background='lightblue')
f1=open("relay1_source.txt","r")
f2=open("relay2_source.txt","r")
f3=open("relay3_source.txt","r")
f4=open("relay4_source.txt","r")
v1 = IntVar()
v1.set(int(f1.read())) # initializing the choice, i.e. Python
v2 = IntVar()
v2.set(int(f2.read())) # initializing the choice, i.e. Python
v3 = IntVar()
v3.set(int(f3.read())) # initializing the choice, i.e. Python
v4 = IntVar()
v4.set(int(f4.read())) # initializing the choice, i.e. Python
GPIO.output(31,v1.get())
GPIO.output(33,v2.get())
GPIO.output(35,v3.get())
GPIO.output(37,v4.get())
f1.close()
f2.close()
f3.close()
f4.close()
#Variables
station_name = "Golestan Uni"
pass_main='1120'
time_xloc= 20
time_yloc= 5
date_xloc=10
date_yloc=30
table_x=20
table_y=150
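#NOTE (added, hedged): `global` at module level is a no-op; the default_*
#values below must still be assigned somewhere (e.g. parsed from
#default_conf.txt) before setting() or setasdefault() runs, otherwise
#Python raises a NameError.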
global default_sampling_rate
global default_bias_value
global default_coefficent
global default_max_level
global default_hysteresis_level
global default_mobile_phone1
global default_mobile_phone2
global default_mobile_phone3
step = 20
stepx = 40
global sampling_rate
sampling_rate=2
#End Variables
#Functions
def relay1():
relay1_source = open("relay1_source.txt","w+")
GPIO.setup(31,GPIO.OUT) #Relay 1
if v1.get()==0:
print("is off")
GPIO.output(31,0)
relay1_source.write("0")
else:
print("in on")
GPIO.output(31,1)
relay1_source.write("1")
relay1_source.close()
def relay2():
relay2_source = open("relay2_source.txt","w+")
GPIO.setup(33,GPIO.OUT) #Relay 1
if v2.get()==0:
print("is off")
GPIO.output(33,0)
relay2_source.write("0")
else:
print("in on")
GPIO.output(33,1)
relay2_source.write("1")
relay2_source.close()
def relay3():
relay3_source = open("relay3_source.txt","w+")
GPIO.setup(35,GPIO.OUT) #Relay 1
if v3.get()==0:
print("is off")
GPIO.output(35,0)
relay3_source.write("0")
else:
print("in on")
GPIO.output(35,1)
relay3_source.write("1")
relay3_source.close()
def relay4():
relay4_source = open("relay4_source.txt","w+")
GPIO.setup(37,GPIO.OUT) #Relay 1
if v4.get()==0:
print("is off")
GPIO.output(37,0)
relay4_source.write("0")
else:
print("in on")
GPIO.output(37,1)
relay4_source.write("1")
relay4_source.close()
#Send Data
def send_data():
gsm.write("AT\r\n".encode('ascii'))
rcv = gsm.read(10)
print (rcv)
gsm.write("AT+CSQ\r\n".encode('ascii'))
time.sleep(1)
gsm.write("AT+CGATT?\r\n".encode('ascii'))
time.sleep(1)
gsm.write("AT+SAPBR=3,1,\"CONTYPE\",\"GPRS\"\r\n".encode('ascii'))
time.sleep(1)
gsm.write("AT+SAPBR=3,1,\"APN\",\"mtnirancell\"\r\n".encode('ascii'))
time.sleep(4)
gsm.write("AT+SAPBR=1,1\r\n".encode('ascii'))
time.sleep(2)
gsm.write("AT+HTTPINIT\r\n".encode('ascii'))
time.sleep(2)
data_link = "AT+HTTPPARA=\"URL\",\"http://sajab.sazabgolestan.com/server.php?action=save&station_index=3&ha=%d&hb=3&imei=9359374362\"\r\n" %(sensor1_read(sen1))
gsm.write(data_link.encode('ascii'))
time.sleep(1)
gsm.write("AT+HTTPACTION=0\r\n".encode('ascii'))
time.sleep(10)
#End Send Data
#Sensor Read
def sensor1_read(sen):
def count():
GPIO.output(12,0)
global u1
u1 = ultra1.read(12)
u1 = str(u1)
loc = u1.find('R')
u1 = u1[loc+1:loc+6]
GPIO.output(12,1)
sen.config(text=str(u1))
sen.after(sampling_rate*500,count)
count()
return int(u1)
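#Added note (hedged): each call to sensor1_read() registers a fresh tkinter
#after-loop via count(); calling it again (e.g. from send_data) stacks
#another polling loop on the same label.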
#End Sensor read
#Samle Rate read
#def sr_read(sr_label):
# def count5():
# sr_label.config(text=str(sampling_rate))
# sr_label.after(sampling_rate*500,count5)
# count5()
#End Samle Rate read
def pass_check():
global passcheck
passcheck = Toplevel()
    passcheck.geometry("%dx%d+%d+%d"%(200,140,100,50)) #x,y,horizontal,vertical
passcheck.title('Setting')
passcheck.configure(background='lightblue')
Label(passcheck,text="Enter Password:",fg=top_bg_color,bg=color,width=0).place(x=40,y=20)
global pass_in
pass_in=Entry(passcheck,width=18)
pass_in.place(x=25,y=50)
Button(passcheck,text="OK",command=pass_check2).place(x=80,y=80)
def pass_check2():
if pass_in.get()==pass_main:
passcheck.destroy()
setting()
else:
Label(passcheck,text="Password is wrong!",fg=top_bg_color,bg=color,width=0).place(x=40,y=110)
def setting():
global setting_frame
setting_frame = Toplevel()
    setting_frame.geometry("%dx%d+%d+%d"%(700,420,100,50)) #x,y,horizontal,vertical
setting_frame.title('Setting')
setting_frame.configure(background='lightblue')
Label(setting_frame,text="Station name:",fg=top_bg_color,bg=color,width=0).grid(row=0,column=0,ipadx=30,pady=8)
Label(setting_frame,text=station_name,fg=top_bg_color,bg=color,width=0).grid(row=0,column=1,ipadx=30,pady=8)
Label(setting_frame,text="Sampling rate:",fg=top_bg_color,bg=color,width=0).grid(row=1,column=0,ipadx=30,pady=8)
global samp_rate
samp_rate=Entry(setting_frame,width=8)
samp_rate.place(x=140,y=45)
samp_rate.insert(10,str(sampling_rate))
Label(setting_frame,text="Sec.",fg=top_bg_color,bg=color,width=0).place(x=220,y=46)
Label(setting_frame,text="Calibration:",fg=top_bg_color,bg=color,width=0).grid(row=2,column=0,ipadx=30,pady=8)
Label(setting_frame,text="Bias Value:",fg=top_bg_color,bg=color,width=0).grid(row=3,column=0,ipadx=30,pady=8)
global bs_value
bs_value=Entry(setting_frame,width=8)
bs_value.place(x=140,y=118)
bs_value.insert(10,str(default_bias_value))
Label(setting_frame,text="Coefficent:",fg=top_bg_color,bg=color,width=0).grid(row=4,column=0,ipadx=30,pady=8)
global coef
coef=Entry(setting_frame,width=8)
coef.place(x=140,y=154)
coef.insert(10,str(default_coefficent))
Label(setting_frame,text="Alert:",fg=top_bg_color,bg=color,width=0).grid(row=0,column=3,ipadx=30,pady=8)
Label(setting_frame,text="Max level:",fg=top_bg_color,bg=color,width=0).grid(row=1,column=3,ipadx=30,pady=8)
global mx_level
mx_level=Entry(setting_frame,width=8)
mx_level.place(x=465,y=45)
mx_level.insert(10,str(default_max_level))
Label(setting_frame,text="m.m.",fg=top_bg_color,bg=color,width=0).place(x=545,y=46)
Label(setting_frame,text="Hysteresis level:",fg=top_bg_color,bg=color,width=0).grid(row=2,column=3,ipadx=30,pady=8)
global hys_level
hys_level=Entry(setting_frame,width=8)
hys_level.place(x=465,y=81)
hys_level.insert(10,str(default_hysteresis_level))
Label(setting_frame,text="m.m.",fg=top_bg_color,bg=color,width=0).place(x=545,y=82)
Label(setting_frame,text="Mobile Phone 1:",fg=top_bg_color,bg=color,width=0).grid(row=3,column=3,ipadx=30,pady=8)
global mob_phone1
mob_phone1=Entry(setting_frame,width=15)
mob_phone1.place(x=465,y=117)
mob_phone1.insert(10,str(default_mobile_phone1))
Label(setting_frame,text="Mobile Phone 2:",fg=top_bg_color,bg=color,width=0).grid(row=4,column=3,ipadx=30,pady=8)
global mob_phone2
mob_phone2=Entry(setting_frame,width=15)
mob_phone2.place(x=465,y=153)
mob_phone2.insert(10,str(default_mobile_phone2))
Label(setting_frame,text="Mobile Phone 3:",fg=top_bg_color,bg=color,width=0).grid(row=5,column=3,ipadx=30,pady=8)
global mob_phone3
mob_phone3=Entry(setting_frame,width=15)
mob_phone3.place(x=465,y=189)
mob_phone3.insert(10,str(default_mobile_phone3))
Button(setting_frame,text="OK",command=ok).place(x=30,y=220)
Button(setting_frame,text="Set as default",command=setasdefault).place(x=120,y=220)
Button(setting_frame,text="Default values",command=defaultvals).place(x=300,y=220)
Button(setting_frame,text="Cancel",command=cncl).place(x=500,y=220)
def ok():
global sampling_rate
sampling_rate = int(samp_rate.get())
print(int(samp_rate.get()))
print(sampling_rate)
bias_value = int(bs_value.get())
coefficent = int(coef.get())
max_level = int(mx_level.get())
hysteresis_value = int(hys_level.get())
mobile_phone1 = mob_phone1.get()
mobile_phone2 = mob_phone2.get()
mobile_phone3 = mob_phone3.get()
conf_str = "http://sajab.sazabgolestan.com/server.php?action=station&imei=9359374362&station_index=3&status=1&sampling_rate=%d&bias_value=%d&coefficent=%d&max_level=%d&hysteresis_value=%d&mobile_phone1=%s&mobile_phone2=%s&mobile_phone3=%s" %(sampling_rate,bias_value,coefficent,max_level,hysteresis_value,mobile_phone1,mobile_phone2,mobile_phone3)
conf_file = open("conf.txt","w+")
conf_file.write(conf_str)
conf_file.close()
setting_frame.destroy()
def cncl():
setting_frame.destroy()
def defaultvals():
samp_rate.delete(0,END)
samp_rate.insert(10,str(default_sampling_rate))
bs_value.delete(0,END)
bs_value.insert(10,str(default_bias_value))
coef.delete(0,END)
coef.insert(10,str(default_coefficent))
mx_level.delete(0,END)
mx_level.insert(10,str(default_max_level))
hys_level.delete(0,END)
hys_level.insert(10,str(default_hysteresis_level))
mob_phone1.delete(0,END)
mob_phone1.insert(10,str(default_mobile_phone1))
mob_phone2.delete(0,END)
mob_phone2.insert(10,str(default_mobile_phone2))
mob_phone3.delete(0,END)
mob_phone3.insert(10,str(default_mobile_phone3))
def setasdefault():
    default_conf_str = "http://sajab.sazabgolestan.com/server.php?action=station&imei=9359374362&station_index=3&status=1&sampling_rate=%d&bias_value=%d&coefficent=%d&max_level=%d&hysteresis_value=%d&mobile_phone1=%s&mobile_phone2=%s&mobile_phone3=%s" %(default_sampling_rate,default_bias_value,default_coefficent,default_max_level,default_hysteresis_level,default_mobile_phone1,default_mobile_phone2,default_mobile_phone3)
default_conf_file = open("default_conf.txt","w+")
default_conf_file.write(default_conf_str)
default_conf_file.close()
#Functions for splitting the different components of date and time
def nowYear():
now = datetime.datetime.now()
year = now.year
return str(year)
def nowMonth():
now = datetime.datetime.now()
month = now.month
return str(month)
def nowDay():
now = datetime.datetime.now()
day = now.day
return str(day)
def nowHour():
now = datetime.datetime.now()
hour = now.hour
return str(hour)
def nowMinute():
now = datetime.datetime.now()
minute = now.minute
return str(minute)
def nowSecond():
now = datetime.datetime.now()
second = now.second
return str(second)
def year_label(label):
def count1():
label.config(text=nowYear())
label.after(1000, count1)
count1()
def month_label(label):
def count2():
label.config(text=nowMonth())
label.after(1000, count2)
count2()
def day_label(label):
def count3():
label.config(text=nowDay())
label.after(1000, count3)
count3()
def hour_label(label):
def count4():
label.config(text=nowHour())
label.after(1000, count4)
count4()
def minute_label(label):
def count5():
label.config(text=nowMinute())
label.after(1000, count5)
count5()
def second_label(label):
def count6():
label.config(text=nowSecond())
label.after(1000, count6)
count6()
def about():
filewin = Toplevel(root)
tx ="""
Development by: Sina Meshkini
+98 911 380 6028
[email protected]
@SinaMeshkini
"""
message = Message(filewin, text=tx, relief = RIDGE , width = 400)
message.pack(fill="both", expand="yes")
#End Functions
#Desigen Param
color = 'lightblue'
top_fg_color = 'lightblue'
top_bg_color = '#111131'
#End Desigen Param
#Header
w = Canvas(root,width= 800,height= 100)
w.pack()
w.create_rectangle(0,0,800,100,fill=top_bg_color)
Label(root,text='SAJAB Management System',fg=top_fg_color,bg=top_bg_color,font="tahoma 24 bold",pady=10).place(x=150,y=5)
#Time
hourLabel = Label(root,text=nowHour(),fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
hourLabel.place(x=time_xloc,y=time_yloc)
hour_label(hourLabel)
colon = Label(root, text = ":",fg=top_fg_color,bg=top_bg_color,font=("Ravie",14))
colon.place(x=time_xloc+step,y=time_yloc-5)
minuteLabel = Label(root, text = nowMinute(),fg=top_fg_color,bg=top_bg_color,font=("Ravie",10))
minuteLabel.place(x=time_xloc+2*step,y=time_yloc)
minute_label(minuteLabel)
colon = Label(root, text = ":",fg=top_fg_color,bg=top_bg_color,font=("Ravie",14))
colon.place(x=time_xloc+3*step,y=time_yloc-5)
secondLabel = Label(root, text = nowSecond(),fg=top_fg_color,bg=top_bg_color,font=("Ravie",10))
secondLabel.place(x=time_xloc+4*step,y=time_yloc)
second_label(secondLabel)
#End Time
#Date
yearLabel = Label(root,text=nowYear(),fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
yearLabel.place(x=date_xloc,y=date_yloc)
year_label(yearLabel)
colon = Label(root, text = "/",fg=top_fg_color,bg=top_bg_color)
colon.place(x=date_xloc+36,y=date_yloc)
monthLabel = Label(root,text=nowMonth(),fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
monthLabel.place(x=date_xloc+45,y=date_yloc)
month_label(monthLabel)
colon = Label(root, text = "/",fg=top_fg_color,bg=top_bg_color)
colon.place(x=date_xloc+60,y=date_yloc)
dayLabel = Label(root,text=nowDay(),fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
dayLabel.place(x=date_xloc+68,y=date_yloc)
day_label(dayLabel)
#End Date
#Temp
temp_label = Label(root,text="Temp: ",fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
temp_label.place(x=date_xloc,y=date_yloc+20)
#End Temp
#End Header
#Body
sensors = ['Sensor 1:','Sensor 2:','Sensor 3:','Sensor 4:']
relays = ['Relay 1','Relay 2','Relay 3','Relay 4']
for r, sensor_text in enumerate(sensors):
    Label(root, text=sensor_text, fg=top_bg_color, bg=color, width=0).place(x=table_x, y=table_y + r * stepx)
for r, relay_text in enumerate(relays):
    Label(root, text=relay_text, fg=top_bg_color, bg=color, width=0).place(x=table_x + 300, y=table_y + r * stepx)
#Sensors Display
global sen1
sen1 = Label(root,fg=top_bg_color,bg=color)
sen1.place(x= table_x+130,y=table_y)
sensor1_read(sen1)
#End Sensors Display
#Relay control
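# Note: value=1 selects OFF and value=0 selects ON, which suggests the relays
# are driven active-low (an assumption; confirm against the relay1..relay4 handlers).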
Radiobutton(root,text="OFF",variable=v1,command=relay1,value=1).place(x= table_x+400,y=table_y)
Radiobutton(root,text="ON",variable=v1,command=relay1,value=0).place(x= table_x+500,y=table_y)
Radiobutton(root,text="OFF",variable=v2,command=relay2,value=1).place(x= table_x+400,y=table_y+stepx)
Radiobutton(root,text="ON",variable=v2,command=relay2,value=0).place(x= table_x+500,y=table_y+stepx)
Radiobutton(root,text="OFF",variable=v3,command=relay3,value=1).place(x= table_x+400,y=table_y+2*stepx)
Radiobutton(root,text="ON",variable=v3,command=relay3,value=0).place(x= table_x+500,y=table_y+2*stepx)
Radiobutton(root,text="OFF",variable=v4,command=relay4,value=1).place(x= table_x+400,y=table_y+3*stepx)
Radiobutton(root,text="ON",variable=v4,command=relay4,value=0).place(x= table_x+500,y=table_y+3*stepx)
#End Relay control
Label(root,text="Sampling rate:",fg=top_bg_color,bg=color,width=0).place(x=table_x+50,y=350)
global sr_label
sr_label = Label(root,fg=top_bg_color,bg=color)
sr_label.place(x=table_x+150,y=350)
#sr_read(sr_label)
Label(root,text="Sec.",fg=top_bg_color,bg=color,width=0).place(x=table_x+230,y=351)
Button(root,text="Setting",command=pass_check).place(x=700,y=410)
Button(root,text="Send Data",command=send_data).place(x=200,y=410)
#End Body
root.mainloop()
| repo_name: sinameshkini/python_samples | sub_path: sajab4.py | file_name: sajab4.py | file_ext: py | file_size_in_byte: 16,045 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 38045683002 | text: |
from plugin_format import PluginFormat
class PlugIn(object):
def __init__(self, name, version, plugin_format, file_path):
self.name = name
self.version = version
self.plugin_format = plugin_format
self.file_path = file_path
def __str__(self):
name = f"name={self.name}"
version = f"version={self.version}"
plugin_format = f"plugin_format={self.plugin_format}"
file_path = f"file_path={self.file_path}"
return f"Plugin[{name}, {version}, {plugin_format}, {file_path}]"
@staticmethod
def from_path(read_file):
        def first_existing_key(keys, file):
            # Return the value for the first key present in `file`, or None
            # when none of the keys exist.
            if len(keys) == 0:
                return None
            elif keys[0] in file:
                return file[keys[0]]
            else:
                return first_existing_key(keys[1:], file)
def wrapper(path_to_plugin):
plist_file = read_file(path_to_plugin)
if plist_file is None:
return None
plugin_format = PluginFormat.from_path(path_to_plugin)
name = first_existing_key(["CFBundleName", "CFBundleExecutable"], plist_file)
version = first_existing_key(["CFBundleShortVersionString", "CFBundleVersion"], plist_file)
return PlugIn(name=name, version=version, plugin_format=plugin_format, file_path=path_to_plugin)
return wrapper
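# Usage sketch — from_path expects a reader that returns the plugin's parsed
# Info.plist as a dict (or None on failure). One plausible reader, assuming
# macOS bundle plugins (read_info_plist and the example path are hypothetical):
#
#     import plistlib
#
#     def read_info_plist(bundle_path):
#         try:
#             with open(f"{bundle_path}/Contents/Info.plist", "rb") as f:
#                 return plistlib.load(f)
#         except (OSError, plistlib.InvalidFileException):
#             return None
#
#     load_plugin = PlugIn.from_path(read_info_plist)
#     plugin = load_plugin("/Library/Audio/Plug-Ins/Components/Example.component")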
| repo_name: adrianswiatek/py-plugins | sub_path: plugin.py | file_name: plugin.py | file_ext: py | file_size_in_byte: 1,420 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 21480310570 | text: |
import bpy
import re
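# Maps an F-Curve data path to a channel-group name, e.g. (hypothetical names):
#   'pose.bones["spine_01"].rotation_quaternion' -> 'spine_01'
#   '["happy_pose"]'                             -> 'Poses'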
def get_group_name_from_data_path(data_path):
    m = re.match(r'^pose\.bones\["([^"]+)"\]', data_path)
    if m:
        return m[1]
    # For pose blender. Should probably not be hardcoded
    m = re.match(r'^\["([^"]+)"\]$', data_path)
    if m and m[1].endswith("_pose"):
        return "Poses"
    return None
class GRET_OT_channels_auto_group(bpy.types.Operator):
"""Group animation channels by their bone name"""
bl_idname = 'gret.channels_auto_group'
bl_label = "Auto-Group Channels"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.space_data and context.space_data.type in {'DOPESHEET_EDITOR', 'GRAPH_EDITOR'}
def execute(self, context):
obj = context.active_object
action = obj.animation_data.action if (obj and obj.animation_data) else None
if not action:
return {'CANCELLED'}
fcurves = []
# Create the necessary groups first THEN assign them to prevent the following error
# https://github.com/blender/blender/blob/v3.4.1/source/blender/makesrna/intern/rna_fcurve.c#L527
for fc in action.fcurves:
group_name = get_group_name_from_data_path(fc.data_path)
if group_name and (not fc.group or fc.group.name != group_name):
fcurves.append((fc, group_name))
if group_name not in action.groups:
action.groups.new(name=group_name)
for fc, group_name in fcurves:
old_group, fc.group = fc.group, action.groups.get(group_name)
if fc.group:
fc.group.show_expanded = True
fc.group.show_expanded_graph = True
if old_group and not old_group.channels:
action.groups.remove(old_group)
return {'FINISHED'}
def draw_menu(self, context):
self.layout.operator(GRET_OT_channels_auto_group.bl_idname)
def register(settings, prefs):
if not prefs.animation__enable_channels_auto_group:
return False
# Would be nice to have this menu item next to the other group operators
bpy.utils.register_class(GRET_OT_channels_auto_group)
bpy.types.GRAPH_MT_channel.append(draw_menu)
bpy.types.DOPESHEET_MT_channel.append(draw_menu)
def unregister():
bpy.types.GRAPH_MT_channel.remove(draw_menu)
bpy.types.DOPESHEET_MT_channel.remove(draw_menu)
bpy.utils.unregister_class(GRET_OT_channels_auto_group)
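# Usage note (an assumption about the surrounding addon, inferred from the code
# above): the loader calls register(settings, prefs) per tool module, and a
# False return tells it to skip this operator when its preference is disabled.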
| repo_name: greisane/gret | sub_path: anim/channels_auto_group.py | file_name: channels_auto_group.py | file_ext: py | file_size_in_byte: 2,546 | program_lang: python | lang: en | doc_type: code | stars: 298 | dataset: github-code | pt: 6 |