seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
36768435919
|
def main():
for i in range(int(input())):
s = input()
if len(s) > 10:
print(f"{s[0]}{len(s)-2}{s[-1]}")
else:
print(s)
if __name__ == '__main__':
main()
|
arbkm22/Codeforces-Problemset-Solution
|
Python/WayTooLongWords.py
|
WayTooLongWords.py
|
py
| 211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16708430730
|
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
# nodes=['A','B','C','D','E','F','1']
dataset = pd.read_csv("./topology.csv", header=None)
length = dataset.shape[0]
G = nx.Graph()
edges = []
for i in range(length):
edges.append((dataset.values[i, 0], dataset.values[i, 1]))
r=G.add_edges_from(edges)
# shortest_way=nx.shortest_path(G,"F","D")
# print(shortest_way)
#
# nx.draw(G, with_labels=True,node_color='r', node_size=50,)
# plt.show()
options = {"node_color": "red", "node_size": 300, "linewidths": 0, "width": 0.1, "with_labels": True}
pos = nx.spring_layout(G, seed=1969)  # Seed for reproducible layout
nx.draw(G, pos, **options)
plt.show()
|
GAVIN-YAN/FinTechauthon2022
|
topology/topology.py
|
topology.py
|
py
| 712 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7859737668
|
liste_mois = [
('janvier', 31),
('fevrier', 28),
('mars', 31),
('avril', 30),
('mai', 31),
('juin', 30),
('juillet', 31),
('aout', 31),
('septembre', 30),
('octobre', 31),
('novembre', 30),
('decembre', 31)
]
calendrier = []
for nom_mois, nb_jours in liste_mois:
for numero_jour in range(1, nb_jours + 1):
calendrier.append({
'j': numero_jour,
'm': nom_mois
})
print(calendrier)
|
mercator-ocean/python-notes
|
exercices/makina/syntaxe-bases/calendrier_1.py
|
calendrier_1.py
|
py
| 473 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
26529327586
|
# Organic Cabbage (Baekjoon 1012)
from collections import deque
def bfs(x,y):
queue = deque()
queue.append((x,y))
graph[x][y] = 0
while queue:
x, y = queue.popleft()
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if 0<=nx<n and 0<=ny<m and graph[nx][ny] == 1:
queue.append((nx,ny))
graph[nx][ny] = 0
return
graph = []
dx = [-1,1, 0, 0]
dy = [0,0,-1,1]
t = int(input())
for _ in range(t):
cnt = 0
m, n, k = map(int, input().split())
graph = [[0 for _ in range(m)] for _ in range(n)]
for _ in range(k):
x, y = map(int, input().split())
graph[y][x] = 1
for i in range(n):
for j in range(m):
if graph[i][j] == 1:
bfs(i,j)
cnt += 1
print(cnt)
|
Jaeheon-So/baekjoon-algorithm
|
DFS, BFS/1012.py
|
1012.py
|
py
| 822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31536399436
|
from project.hardware.heavy_hardware import HeavyHardware
from project.hardware.power_hardware import PowerHardware
from project.software.express_software import ExpressSoftware
from project.software.light_software import LightSoftware
class System:
_hardware = []
_software = []
@staticmethod
def register_power_hardware(name: str, capacity: int, memory: int):
power_hardware = PowerHardware(name, capacity, memory)
System._hardware.append(power_hardware)
@staticmethod
def register_heavy_hardware(name: str, capacity: int, memory: int):
heavy_hardware = HeavyHardware(name, capacity, memory)
System._hardware.append(heavy_hardware)
@staticmethod
def register_express_software(hardware_name: str, name: str, capacity_consumption: int, memory_consumption: int):
hardware = [h for h in System._hardware if h.name == hardware_name]
if not hardware:
return "Hardware does not exist"
hardware = hardware[0]
software = ExpressSoftware(name, capacity_consumption, memory_consumption)
hardware.install(software)
System._software.append(software)
@staticmethod
def register_light_software(hardware_name: str, name: str, capacity_consumption: int, memory_consumption: int):
hardware = [h for h in System._hardware if h.name == hardware_name]
if not hardware:
return "Hardware does not exist"
hardware = hardware[0]
software = LightSoftware(name, capacity_consumption, memory_consumption)
hardware.install(software)
System._software.append(software)
@staticmethod
def release_software_component(hardware_name: str, software_name: str):
hardware = [h for h in System._hardware if h.name == hardware_name]
software = [s for s in System._software if s.name == software_name]
if not hardware or not software:
return "Some of the components do not exist"
hardware = hardware[0]
software = software[0]
hardware.uninstall(software)
System._software.remove(software)
@staticmethod
def analyze():
hardware_components_amount = len(System._hardware)
software_components_amount = len(System._software)
total_memory_consumption = sum([s.memory_consumption for s in System._software])
total_memory = sum([h.memory for h in System._hardware])
total_capacity_consumption = sum([s.capacity_consumption for s in System._software])
total_capacity = sum([h.capacity for h in System._hardware])
result = "System Analysis\n"
result += f"Hardware Components: {hardware_components_amount}\n"
result += f"Software Components: {software_components_amount}\n"
result += f"Total Operational Memory: {total_memory_consumption} / {total_memory}\n"
result += f"Total Capacity Taken: {total_capacity_consumption} / {total_capacity}"
return result
@staticmethod
def system_split():
result = ''
for hardware in System._hardware:
name = hardware.name
            express_software_amount = len([s for s in hardware.software_components if type(s).__name__ == "ExpressSoftware"])
            light_software_amount = len([s for s in hardware.software_components if type(s).__name__ == "LightSoftware"])
total_memory_consumption = sum([s.memory_consumption for s in hardware.software_components])
total_memory = hardware.memory
total_capacity_consumption = sum([s.capacity_consumption for s in hardware.software_components])
total_capacity = hardware.capacity
            hardware_type = type(hardware).__name__
software_components_names = [s.name for s in hardware.software_components]
if not software_components_names:
software_components_names = "None"
else:
software_components_names = ", ".join(software_components_names)
result += f"Hardware Component - {name}"
result += f"Express Software Components: {express_software_amount}"
result += f"Light Software Components: {light_software_amount}"
result += f"Memory Usage: {total_memory_consumption} / {total_memory}"
result += f"Capacity Usage: {total_capacity_consumption} / {total_capacity}"
result += f"Type: {hardware_type}"
result += f"Software Components: {software_components_names}"
return result.strip()
|
iliyan-pigeon/Soft-uni-Courses
|
pythonProjectOOP/project/system.py
|
system.py
|
py
| 4,537 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41457482585
|
from sqlite3 import IntegrityError, Connection
import schema
from flask import g
from flask import request
from flask import Blueprint
from flask.views import MethodView
from werkzeug.exceptions import abort
from flaskr import settings
from flaskr.utils.auth import login_required
from flaskr.utils.db import get_db, get_all_posts, get_post, get_all_comments
bp_index = Blueprint("index", __name__, url_prefix='/api')
bp = Blueprint("post", __name__, url_prefix="/api/post")
bp_comment = Blueprint("comment", __name__, url_prefix='/api/comment')
class Index(MethodView):
def get(self):
"""
Get posts.
:return json 200: List[Post]
"""
return dict(posts=get_all_posts())
class Post(MethodView):
def get(self):
"""
Get posts.
:return json 200: List[Post]
"""
return dict(posts=get_all_posts())
@staticmethod
def post_schema(from_dict: dict) -> dict:
try:
data = schema.Schema({
"title": schema.And(str, lambda x: settings.POST_TITLE_MIN_WORDS_LENGTH <= len(x) <= settings.POST_TITLE_MAX_WORDS_LENGTH),
"body": schema.And(str, lambda x: settings.POST_BODY_MIN_WORDS_LENGTH <= len(x) <= settings.POST_BODY_MAX_WORDS_LENGTH)
}).validate(from_dict)
except schema.SchemaError:
raise abort(400, "Wrong args.")
else:
return data
@login_required
def post(self):
"""
Create a post
:param form title: post title
:param form body: post content text
:return json 201: Post
"""
data = self.post_schema(dict(request.form))
db: Connection = get_db()
try:
with db:
cur = db.cursor()
cur.execute(
"INSERT INTO post (title, body, author_id) VALUES (?, ?, ?)",
(data['title'], data['body'], g.user["id"]),
)
except IntegrityError:
raise abort(400, "Duplicated data")
post = get_post(post_id=cur.lastrowid)
return dict(post), 201
class PostId(MethodView):
def get(self, post_id: int):
"""
Get post by specific id
:param int post_id: id of post
:return json 200: Post
"""
post = get_post(post_id=post_id)
if post is None:
abort(404, f"Post id {post_id} doesn't exist.")
return post
def comment_schema(form_dict: dict) -> dict:
try:
data = schema.Schema({
"body": schema.And(
str,
lambda x: settings.COMMENT_BODY_MIN_WORDS_LENGTH <= len(x) <= settings.COMMENT_BODY_MAX_WORDS_LENGTH
)
}).validate(form_dict)
except schema.SchemaError:
raise abort(400, "Wrong args.")
else:
return data
class PostIdComment(MethodView):
def get(self, post_id: int):
"""
Get all comments of a post
:param: int post_id:
:return json 200: {"comments": List[Comment]}
"""
return dict(comments=get_all_comments(post_id=post_id))
@login_required
def post(self, post_id: int):
"""
Add a comment to post
:param int post_id:
:param form body: comment content text
:return json 201: Post
"""
data = comment_schema(dict(request.form))
db = get_db()
with db:
cur = db.cursor()
cur.execute("INSERT INTO comment (author_id, body, parent_id, post_id) VALUES (?, ?, ?, ?)",
(g.user['id'], data['body'], None, post_id))
return get_post(post_id), 201
class CommentId(MethodView):
def get(self, comment_id: int):
"""
Get a comment.
:param int comment_id:
:return json 200: Comment
"""
row = get_db().execute("SELECT id, post_id, created, body FROM comment WHERE id=?", (comment_id, )).fetchone()
if not row:
raise abort(400, f'Comment {comment_id} not found.')
return dict(row)
class CommentIdComment(MethodView):
@login_required
def post(self, comment_id: int):
"""
Add a comment of a comment.
Login required
:param int comment_id: id of comment
:param form body: comment content text
:raise 401 Login required:
:return json 201: Post
"""
data = comment_schema(dict(request.form))
db = get_db()
comment = db.execute(
"SELECT post_id FROM comment WHERE id=?",
(comment_id, )
).fetchone()
if not comment:
raise abort(400, f"Comment not found.")
with db:
cur = db.cursor()
cur.execute("INSERT INTO comment (author_id, body, parent_id, post_id) VALUES (?, ?, ?, ?)",
(g.user['id'], data['body'], comment_id, comment['post_id']))
return get_post(comment['post_id']), 201
bp_index.add_url_rule('/', view_func=Index.as_view('Index'))
bp.add_url_rule('/<int:post_id>/comment', view_func=PostIdComment.as_view('PostIdComment'))
bp.add_url_rule('/<int:post_id>', view_func=PostId.as_view('PostId'))
bp.add_url_rule('/', view_func=Post.as_view('Post'))
bp_comment.add_url_rule('/<int:comment_id>/comment',
view_func=CommentIdComment.as_view('PostIdCommentIdComment'))
bp_comment.add_url_rule('/<int:comment_id>', view_func=CommentId.as_view('PostIdCommentId'))
|
MioYvo/unlimited-level-messages
|
backend/flaskr/views/post.py
|
post.py
|
py
| 5,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18100605374
|
"""
https://leetcode.com/problems/maximum-ice-cream-bars/
1833. Maximum Ice Cream Bars
It is a sweltering summer day, and a boy wants to buy some ice cream bars.
At the store, there are n ice cream bars. You are given an array costs of length n, where costs[i] is the price of the ith ice cream bar in coins. The boy initially has coins coins to spend, and he wants to buy as many ice cream bars as possible.
Return the maximum number of ice cream bars the boy can buy with coins coins.
Note: The boy can buy the ice cream bars in any order.
"""
from typing import List
class Solution:
def maxIceCream(self, costs: List[int], coins: int) -> int:
answer = 0
freq = [0] * (max(costs) + 1)
# Fill in the list of frequency
# (each index is cost of icecream)
for cost in costs:
freq[cost] += 1
for cost, amount in enumerate(freq):
# If frequency is 0, skip it
if freq[cost] == 0:
continue
# If cost * amount is less than coins,
# simply decrease the coins by cost * amount
if amount * cost <= coins:
coins -= amount * cost
answer += amount
continue
# At this point we cannot buy amount * cost
# So coins // cost should be the amount of icecream we can buy
answer += coins // cost
            # And don't forget to exit the loop
# (we can't buy icecreams anymore)
break
return answer
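# A quick, hedged sanity check (the costs/coins values below are assumed example
# inputs, not part of the original file): buying greedily, 1 + 1 + 2 + 3 = 7 coins
# buys 4 bars, so the expected answer is 4.
if __name__ == '__main__':
    print(Solution().maxIceCream([1, 3, 2, 4, 1], 7))  # expected: 4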
|
hirotake111/leetcode_diary
|
leetcode/1833/solution.py
|
solution.py
|
py
| 1,538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7869115009
|
import math
# Input
A, B, H, M = map(float, input().split())
# Compute the coordinates
PI = 3.14159265358979
AngleH = 30.0 * H + 0.5 * M
AngleM = 6.0 * M
Hx = A * math.cos(AngleH * PI / 180.0)
Hy = A * math.sin(AngleH * PI / 180.0)
Mx = B * math.cos(AngleM * PI / 180.0)
My = B * math.sin(AngleM * PI / 180.0)
# Print the answer
d = (((Hx - Mx) ** 2 + (Hy - My) ** 2) ** 0.5)
print("%.12f" % d)
|
E869120/math-algorithm-book
|
editorial/chap4-1/prob4-1-4.py
|
prob4-1-4.py
|
py
| 408 |
python
|
en
|
code
| 897 |
github-code
|
6
|
5026987486
|
# -*- coding:utf-8 -*-
my_name = "分数"
import pygame
pygame.init()
my_font = pygame.font.SysFont("simSun", 66)
name_surface = my_font.render(u'分数', True, (0, 0, 0), (255, 255, 255))
pygame.image.save(name_surface, "name.png")
enemy_hit_dict = dict()
score = 0
ENEMY_SCORE = 100
# enemy_hit_dict = pygame.sprite.groupcollide(enemy_group, hero.bullets, True, True)
# score += len(enemy_hit_dict) * ENEMY_SCORE;  # compute the score
# enemy_hit_group.add(enemy_hit_dict)
screen = pygame.display.set_mode((480, 700))
bg = pygame.image.load("./images/background.png")
# 2. Use blit to draw the background at position (0, 0) on the screen
screen.blit(bg, (0, 0))
screen.blit(name_surface, (20, 20))
# 3. Update the screen
pygame.display.update()
|
xinlongOB/python_docment
|
飞机大战/字体.py
|
字体.py
|
py
| 737 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17961314485
|
from urllib import request, parse
import json
from getAccessToken import getToken
def newNotification(filename, touser, key1, key2, key3, key4, first, remark):
ACCESS_TOKEN = getToken()
url = 'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token=ACCESS_TOKEN'
url = url.replace('ACCESS_TOKEN', ACCESS_TOKEN)
headers = {
'Content-Type': 'application/json'
}
data = open(filename, 'rb').read(-1).decode()
print(type(data))
print(touser)
data = data.format(touser, key1, key2, key3, key4, first, remark).encode()
req = request.Request(url, data=data, headers=headers)
res = request.urlopen(req)
data = res.read().decode()
return data
|
pkugeeklab/wx-server
|
notification/newNotification.py
|
newNotification.py
|
py
| 708 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14816787756
|
"""web URL Configuration"""
from django.conf.urls import include,url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^pdx/(?P<pk>\w{0,50})/$', views.pdx, name="pdx"),
url(r'^search/', views.search, name="search"),
#url(r'^search/', include('haystack.urls')),
url(r'^resources/', views.resources, name="resources"),
]
|
jmason-ebi/pdx
|
web/urls.py
|
urls.py
|
py
| 442 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9460034812
|
"""
Run the class on the tweet and HIV-genome datasets
"""
import Hyperbolic
import numpy as np
import Levenshtein as Lev
import pandas as pd
from grad_descent import MSE
import draw
"""
TWEETS
"""
positive = np.array(pd.read_csv(
r'twitter_data/positive.csv', sep=';', usecols=[3], names=['text']))
negative = np.array(pd.read_csv(
r'twitter_data/negative.csv', sep=';', usecols=[3], names=['text']))
# positive_90 = np.array(
# positive[positive['text'].apply(lambda text: len(text) == 90)])
# negative_90 = np.array(
# negative[negative['text'].apply(lambda text: len(text) == 90)])
size = 50
dataset = np.concatenate((positive[np.random.choice(
len(positive), size)], negative[np.random.choice(len(negative), size)]))
perm = np.random.permutation(2*size)
ran = np.array(range(2*size))
map = {perm[i]: ran[i] for i in range(2*size)}
dataset = dataset[perm]
distance = np.zeros((2*size, 2*size), dtype=float)
for i in range(2*size):
for j in range(2*size):
distance[i, j] = Lev.distance(dataset[i, 0], dataset[j, 0])
distance = distance / 10.
H = Hyperbolic.Hyperbolic(graph=distance, dimension=2, maxiter=1000, batch=1.)
print("MSE %f" % MSE(H.point_coordinates, distance))
draw.draw(H.point_coordinates, distance,
draw_edges=False, map=map, annotate=False)
"""
HIV genome
"""
# lst = np.array([]).reshape(0, 0)
# for i in range(100):
# f = open(rf'data/sequence ({i}).txt', 'r')
# string = f.read().replace("\n", "")
# tmp = list(string.encode())
# if i == 0:
# lst = np.array(tmp).reshape(1, -1)
# else:
# lst = np.vstack((lst, np.array(tmp).reshape(1, -1)))
# # create an empty int array for the distances
# distance = np.zeros((100, 100), dtype=float)
# # fill it with the correct values
# for i in range(100):
# for j in range(100):
# distance[i, j] = (lst[i] != lst[j]).sum()
# distance = distance / 3.
# H = Hyperbolic.Hyperbolic(graph=distance, dimension=2, maxiter=100, batch=0.1)
# draw.draw(H.point_coordinates, distance, True, annotate=False)
|
DanilShishkin/Hyperbolic
|
actual/main.py
|
main.py
|
py
| 2,184 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22251514739
|
import time
def isprime(n):
mas = []
d = 1
while d * d < n:
if n % d == 0:
mas.append(d)
mas.append(n // d)
d += 1
if d * d == n:
mas.append(d)
if len(mas) == 2:
return mas[1]
def g(n):
counter = 0
mas = []
d = 2
while d * d < n:
if n % d == 0:
mas.append(d)
mas.append(n // d)
d += 1
if d * d == n:
mas.append(d)
if len(mas) > 3:
for j in range(len(mas)):
if isprime(mas[j]) is not None:
counter += 1
if counter > 3:
return n
start = time.time()
k = 0
for i in range(2, 20000 + 1):
if g(i) is not None:
k += 1
print(k, g(i))
print(time.time() - start)
|
MakinFantasy/xo
|
25/hw/2_correct.py
|
2_correct.py
|
py
| 775 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14070127148
|
import math
import numpy as np
from scipy.special import expit
class LogReg():
def __init__(self, lambda_1=0.0, lambda_2=1.0, gd_type='full',
tolerance=1e-4, max_iter=1000, w0=None, alpha=1e-3):
"""
lambda_1: L1 regularization param
lambda_2: L2 regularization param
gd_type: 'full' or 'stochastic'
tolerance: for stopping gradient descent
max_iter: maximum number of steps in gradient descent
w0: np.array of shape (d) - init weights
alpha: learning rate
"""
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.gd_type = gd_type
self.tolerance = tolerance
self.max_iter = max_iter
self.w0 = w0
self.alpha = alpha
self.w = None
self.loss_history = None
def fit(self, X, y):
"""
X: np.array of shape (l, d)
y: np.array of shape (l)
---
output: self
"""
self.loss_history = []
count = 0
self.w = np.ones((len(X[0]),))
while np.linalg.norm(self.w - self.w0) > self.tolerance or count < self.max_iter:
count += 1
self.w = self.w0
            if self.gd_type == 'stochastic':
                # use a single random sample; keep array shapes so the broadcasting in
                # calc_gradient (y[:, np.newaxis] * X) still works
                i = np.random.randint(0, len(y))
                grad = self.calc_gradient(X[i:i+1, :], y[i:i+1])
else:
grad = self.calc_gradient(X, y)
self.w = self.w0 - self.alpha * grad
loss = self.calc_loss(X, y)
self.loss_history.append(loss)
self.w0 = self.w
return self
def predict_proba(self, X):
"""
X: np.array of shape (l, d)
---
output: np.array of shape (l, 2) where
first column has probabilities of -1
second column has probabilities of +1
"""
if self.w is None:
raise Exception('Not trained yet')
pred = expit(np.dot(X, self.w))
return pred
def calc_gradient(self, X, y):
"""
X: np.array of shape (l, d) (l can be equal to 1 if stochastic)
y: np.array of shape (l)
---
output: np.array of shape (d)
"""
tm1 = expit(-y * np.dot(X, self.w))
tm2 = y[:, np.newaxis] * X
tm3 = tm1[:, np.newaxis] * tm2
tm4 = -np.sum(tm3, axis=0)
grad = tm4 / X.shape[0] + self.lambda_2 * self.w
return grad
def calc_loss(self, X, y):
"""
X: np.array of shape (l, d)
y: np.array of shape (l)
---
output: float
"""
n = X.shape[0]
tm1 = np.logaddexp(0, -y * np.dot(X, self.w))
reg = self.lambda_2 * np.sum(self.w ** 2) / 2
loss = (1 / n) * np.sum(tm1, axis=0) + reg
return loss
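# Hedged usage sketch (not part of the original file; the data and hyperparameters
# below are assumed): labels must be in {-1, +1}, which is what the -y * <x, w>
# margin in calc_gradient/calc_loss expects, and w0 must be supplied since fit() reads it.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(200, 3))
    y_demo = np.sign(X_demo @ np.array([1.0, -2.0, 0.5]))
    y_demo[y_demo == 0] = 1.0
    clf = LogReg(w0=np.zeros(3), alpha=0.1, max_iter=100, tolerance=1e-5)
    clf.fit(X_demo, y_demo)
    print('final loss:', clf.loss_history[-1])
    print('train accuracy:', np.mean((clf.predict_proba(X_demo) > 0.5) == (y_demo > 0)))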
|
idStep/hse_ml
|
logreg.py
|
logreg.py
|
py
| 2,793 |
python
|
en
|
code
| 0 |
github-code
|
6
|
651508757
|
from . learningTasks import RandomForest
import os
import luigi
import numpy as np
import logging
# import the proper nifty version
try:
import nifty.graph.rag as nrag
except ImportError:
try:
import nifty_with_cplex.graph.rag as nrag
except ImportError:
import nifty_with_gurobi.graph.rag as nrag
from .dataTasks import StackedRegionAdjacencyGraph, InputData
from .tools import config_logger, run_decorator
from .featureTasks import RegionNodeFeatures
from .customTargets import HDF5DataTarget, FolderTarget
from .pipelineParameter import PipelineParameter
# init the workflow logger
workflow_logger = logging.getLogger(__name__)
config_logger(workflow_logger)
class DefectNodeGroundtruth(luigi.Task):
pathToSeg = luigi.Parameter()
pathToDefectGt = luigi.Parameter()
def requires(self):
return{
"rag": StackedRegionAdjacencyGraph(self.pathToSeg),
"defect_gt": InputData(self.pathToDefectGt, dtype='uint8')
}
@run_decorator
def run(self):
inp = self.input()
rag = inp['rag'].read()
defect_gt = inp['defect_gt']
defect_gt.open()
node_labels = nrag.gridRagAccumulateLabels(rag, defect_gt.get())
assert (np.unique(node_labels) == np.array([0, 1])).all(), str(np.unique(node_labels))
self.output().write(node_labels)
def output(self):
seg_file = os.path.split(self.pathToSeg)[1][:-3]
save_path = "DefectNodeGroundtruth_%s.h5" % seg_file
return HDF5DataTarget(save_path)
class LearnDefectRandomForest(luigi.Task):
pathsToSeg = luigi.ListParameter()
pathsToDefectGt = luigi.ListParameter()
def requires(self):
        assert len(self.pathsToSeg) == len(self.pathsToDefectGt)
n_inputs = len(self.pathsToSeg)
inputs = PipelineParameter().inputs
if n_inputs == 1:
raw_path = inputs['data'][0]
return {
'gt': DefectNodeGroundtruth(self.pathsToSeg[0], self.pathsToDefectGt[0]),
'feats': RegionNodeFeatures(self.pathsToSeg[0], raw_path)
}
else:
inp_paths = inputs['data']
            assert len(inp_paths) % n_inputs == 0
inp_per_seg = len(inp_paths) // n_inputs
return {
'gt': [DefectNodeGroundtruth(self.pathsToSeg[i], self.pathsToDefectGt[i]) for i in range(n_inputs)],
                'feats': [RegionNodeFeatures(self.pathsToSeg[i], inp_paths[inp_per_seg * i]) for i in range(n_inputs)]
}
@run_decorator
def run(self):
        if len(self.pathsToSeg) > 1:
self._learn_defect_rf_multi_input()
else:
self._learn_defect_rf_single_input()
def _learn_defect_rf_multi_input(self):
inp = self.input()
gts = inp['gt']
feats = inp['feats']
assert len(gts) == len(feats)
features = []
labels = []
for i, gt in enumerate(gts):
this_gt = gt.read()
this_feats = feats[i].read([0, 0], feats[i].shape)
assert len(this_gt) == len(this_feats), "%i, %i" % (len(this_gt), len(this_feats))
features.append(this_feats)
labels.append(this_gt)
features = np.concatenate(features, axis=0)
labels = np.concatenate(labels, axis=0)
rf = RandomForest(
features, labels,
n_trees=PipelineParameter().nTrees,
n_threads=PipelineParameter().nThreads
)
rf.write(str(self.output().path), 'rf')
def _learn_defect_rf_single_input(self):
inp = self.input()
gt = inp['gt'].read()
feats = inp['feats']
feats = feats.readSubarray([0, 0], feats.shape)
assert len(gt) == len(feats), "%i, %i" % (len(gt), len(feats))
rf = RandomForest(
feats, gt,
n_trees=PipelineParameter().nTrees,
n_threads=PipelineParameter().nThreads
)
rf.write(str(self.output().path), 'rf')
def output(self):
save_path = 'LearnDefectRandomForest_%s' % (
'multi_input' if len(self.pathsToSeg) > 1 else 'single_input',
)
return FolderTarget(save_path)
|
constantinpape/mc_luigi
|
mc_luigi/defectRandomForests.py
|
defectRandomForests.py
|
py
| 4,204 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31360177466
|
#Long Short-Term Memory
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
dataset_train = pd.read_csv('files/Salestrain.csv')
plt.plot(dataset_train, color='blue', label='Vendas')
plt.title('Vendas')
plt.xlabel('Tempo')
plt.ylabel('Vendas')
plt.legend()
plt.show()
sc = MinMaxScaler(feature_range=(0, 1))
trainning_set_scaled = sc.fit_transform(dataset_train)
X_train = []
y_train = []
for i in range(90, len(trainning_set_scaled)):
data = trainning_set_scaled[i-90:i, 0]
X_train.append(data)
y_train.append(trainning_set_scaled[i,0])
X_train = np.array(X_train).reshape(-1, 90, 1)
y_train = np.array(y_train)
model = Sequential()
model.add(LSTM(units=100, return_sequences=True, input_shape=(X_train.shape[1],1)))
model.add(Dropout(0.2))
model.add(LSTM(units=100, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=100, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=100))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, epochs=300, batch_size=1)
dataset_test = pd.read_csv('files/Salestest.csv')
train_values = dataset_train['data'].values
test_values = dataset_test['data'].values
total_values = np.concatenate((train_values, test_values), axis=0)
time_index = range(len(total_values))
plt.plot(time_index[:len(train_values)], train_values, color='blue', label='Vendas - Treinamento')
plt.plot(time_index[len(train_values):], test_values, color='red', label='Vendas - Teste')
plt.title('Vendas')
plt.xlabel('Tempo')
plt.ylabel('Vendas')
plt.legend()
plt.show()
dataset_test_anomalies = dataset_test.copy()
dataset_test_anomalies.loc[:9, 'data'] = 90
dataset_test_anomalies.loc[10:34, 'data'] = np.random.uniform(100,200,size=(25,))
dataset_test_anomalies.loc[35:, 'data'] = 90
plt.plot(dataset_test, color='blue', label='Vendas')
plt.plot(dataset_test_anomalies, color='red', label='Vendas com anomalias')
plt.title('Vendas')
plt.xlabel('Tempo')
plt.ylabel('Vendas')
plt.legend()
plt.show()
dataset_total = pd.concat((dataset_train['data'], dataset_test['data']), axis=0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 90:]
inputs = pd.DataFrame(inputs, columns=['data'])
inputs = sc.transform(inputs)
dataset_total_anomalies = pd.concat((dataset_train['data'], dataset_test_anomalies['data']), axis=0)
inputs_anomalies = dataset_total_anomalies[len(dataset_total_anomalies) - len(dataset_test_anomalies) - 90:]
inputs_anomalies = pd.DataFrame(inputs_anomalies, columns=['data'])
inputs_anomalies = sc.transform(inputs_anomalies)
X_test = []
X_test_anomalies = []
for i in range(90, len(inputs)):
X_test.append(inputs[i-90:i,0])
X_test_anomalies.append(inputs_anomalies[i-90:i,0])
X_test, X_test_anomalies = np.array(X_test), np.array(X_test_anomalies)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
X_test_anomalies = np.reshape(X_test_anomalies, (X_test_anomalies.shape[0], X_test_anomalies.shape[1], 1))
prediced_sales = model.predict(X_test)
prediced_sales = sc.inverse_transform(prediced_sales)
prediced_sales_anomalies = model.predict(X_test_anomalies)
prediced_sales_anomalies = sc.inverse_transform(prediced_sales_anomalies)
mean_squared_error_test = mean_squared_error(test_values, prediced_sales)
mean_squared_error_anomalies = mean_squared_error(test_values, prediced_sales_anomalies)
print(f'MSE normal data: ', mean_squared_error_test)
print(f'MSE data with anomalies: ', mean_squared_error_anomalies)
plt.plot(test_values, color='blue', label='Valores reais')
plt.plot(prediced_sales_anomalies, color='red', label='Previsões com anomalias')
plt.plot(prediced_sales, color='green', label='Previsões')
plt.title('Previsões com anomalias, sem anomalias e valores reais')
plt.xlabel('Tempo')
plt.ylabel('Vendas')
plt.legend()
plt.show()
|
francinehahn/ai-and-machine-learning
|
detectingAnomalies/LSTM.py
|
LSTM.py
|
py
| 4,027 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39220567289
|
import pandas as pd
from sklearn.preprocessing import StandardScaler
file_name2 = 'data2.csv'
df = pd.read_csv(file_name2)
df['race_date'] = pd.to_datetime(df['race_date']).dt.date
# Drop rows with insufficient information
df = df.dropna(subset=['past_time_sec1', 'past_time_sec2', 'past_time_sec3',
'past_time_sec4', 'past_time_sec5']).reset_index(drop=True)
# Assign a race ID
def set_race_id(params):
param_list = params.split('_')
race_date, place, race_num = param_list[0], param_list[1], param_list[2],
return f'{race_date}{place}{race_num}'
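# For illustration (hypothetical values): set_race_id('2020-01-05_06_11') -> '2020-01-050611'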
df['tmp'] = df['race_date'].astype(
str) + '_' + df['place'].astype(str) + '_' + df['race_num'].astype(str)
df['race_id'] = df['tmp'].map(set_race_id)
df = df.drop(columns=['tmp'])
# Drop columns not used for prediction (change as needed)
df = df.drop(columns=['horse', 'jockey', 'race_num', 'stable',
'race_name', 'rank', 'pop', 'gap', 'tansho', 'win1', 'time_sec'])
# Define dummy columns (targets for one-hot encoding)
dummy_columns = ['sex', 'place', 'course_type', 'course_lr', 'weather', 'ground', 'past_course_type1', 'past_course_lr1', 'past_weather1', 'past_ground1', 'past_gap1', 'past_course_type2', 'past_course_lr2', 'past_weather2', 'past_ground2', 'past_gap2',
'past_course_type3', 'past_course_lr3', 'past_weather3', 'past_ground3', 'past_gap3', 'past_course_type4', 'past_course_lr4', 'past_weather4', 'past_ground4', 'past_gap4', 'past_course_type5', 'past_course_lr5', 'past_weather5', 'past_ground5', 'past_gap5']
# One-hot encode
df_dummy = df[dummy_columns]
df_dummy = pd.get_dummies(df_dummy, dummy_na=True)
df_main = df.drop(columns=dummy_columns)
# Set aside columns needed later, before standardization
df_main['kitaichi'] = df_main['win3'] * df_main['fukusho']
train_kitaichi = df_main.pop('kitaichi')
train_labels = df_main.pop('win3')
train_date = df_main.pop('race_date')
train_raceids = df_main.pop('race_id')
df_main = df_main.drop(columns=['fukusho'])
df_main = df_main.astype(float)
standard_file = 'standard.csv'
df_main.to_csv(standard_file, index=False)
# Standardize
ss = StandardScaler()
df_main = pd.DataFrame(ss.fit_transform(
df_main), columns=df_main.columns, index=df_main.index)
# Merge back with the dummy columns
df = pd.concat([df_main, df_dummy], axis=1)
df['kitaichi'] = train_kitaichi.values
df['win3'] = train_labels.values
df['race_date'] = train_date.values
df['race_id'] = train_raceids.values
file_name3 = 'data3.csv'
df.to_csv(file_name3, index=False)
|
keisukee/horse_racing
|
normalize_3.py
|
normalize_3.py
|
py
| 2,492 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71484383548
|
A, K = map(int, input().split())
ans = A
for i in range(len(str(A))):
for p in range(10):
for q in range(10):
s = str(A)[:i] + str(p) + str(q) * (len(str(A))-i-1)
if (len(set(str(int(s)))) <= K):
ans = min(ans, abs(A-int(s)))
print(ans)
|
knuu/competitive-programming
|
atcoder/corp/codefes2014qa_d.py
|
codefes2014qa_d.py
|
py
| 290 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39922767694
|
# THIS FILE IS SAFE TO EDIT. It will not be overwritten when rerunning go-raml.
from flask import jsonify, request
import sqlite3
def books_postHandler():
connection = sqlite3.connect("BookStore")
cursor=connection.cursor()
inputs = request.get_json()
cursor.execute("CREATE TABLE IF NOT EXISTS BookStore (BookId INTEGER PRIMARY KEY AUTOINCREMENT,title TEXT,subTitle TEXT,authors TEXT)")
cursor.execute("INSERT INTO BookStore VALUES(NULL,?,?,?)", (inputs["title"],inputs["subTitle"],inputs["authors"]))
connection.commit()
connection.close()
return jsonify("successfully")
|
BolaNasr/BookStore-API
|
server/handlers/books_postHandler.py
|
books_postHandler.py
|
py
| 608 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21509830674
|
import pickle
import numpy as np
referrence_vec = {} ## list of numpy average vectors to compare with
synonym_dict={}
names=["safety","products and services","technical skills","soft skills","orientation","onboarding"]
synonym_dict["safety"] = ["safety", "wellbeing","welfare","security"]
synonym_dict["products and services"] = ["products","services","resource","utility","system","amenity"]
synonym_dict["technical skills"] = ["technical","technological","applied"]
synonym_dict["soft skills"] = ["soft","communication","interpersonal"]
synonym_dict["orientation"]= ["orientation","accommodation","familiarization","acclimatization","introduction","initiation"]
synonym_dict["onboarding"]= ["induction","integration","onboarding"]
EMBEDDING_DIM=100
with open('embeddings_index.p', 'rb') as fp:
embeddings_index = pickle.load(fp)
for name in names:
synonyms = synonym_dict[name]
vec_list = [embeddings_index[x] for x in synonyms]
referrence_vec[name] = vec_list
with open('referrence_vec.p', 'wb') as fp:
pickle.dump(referrence_vec, fp, protocol=pickle.HIGHEST_PROTOCOL)
|
yashbhtngr/employee-training-chatbot
|
comparision_vectors.py
|
comparision_vectors.py
|
py
| 1,110 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32592778621
|
import geoip2.database
"""
Requirements:
geoip2
use this page to download the db:
https://dev.maxmind.com/geoip/geolite2-free-geolocation-data?lang=en
"""
# This creates a Reader object. You should use the same object
# across multiple requests as creation of it is expensive.
with geoip2.database.Reader('/path/to/GeoLite2-City.mmdb') as reader:
# Replace "city" with the method corresponding to the database
# that you are using, e.g., "country".
response = reader.city('203.0.113.0')
    # Pass the fields as separate arguments so sep="\n" applies and non-string
    # values (latitude/longitude are floats) do not break string concatenation.
    print(
        response.country.iso_code,
        response.country.name,
        response.country.names['zh-CN'],
        response.subdivisions.most_specific.name,
        response.subdivisions.most_specific.iso_code,
        response.city.name,
        response.postal.code,
        response.location.latitude,
        response.location.longitude,
        response.traits.network, sep="\n"
    )
""
|
steriospydev/tutools
|
Functions/get_ip_location.py
|
get_ip_location.py
|
py
| 927 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42602524063
|
#!/usr/bin/env python
import numpy as np
from subprocess import call
from ase.io import read
def argparse():
import argparse
parser = argparse.ArgumentParser(description = """
    This code will give you the (Total/Partial) Radial Distribution Function.
Return npy file.
""")
# Positional arguments
parser.add_argument('chem1', type=str, help='chem1,2, are chemical symbols consisting bonds.')
parser.add_argument('chem2', type=str, help='e.g. Ge Te | "a": any symbols, "x": do all partial.')
parser.add_argument('alist_file', type=str, help='ASE readable atoms list file name.')
# Optional arguments
parser.add_argument('-n', '--image_slice', type=str, default=':', help='Image slice following python convention. default=":" (e.g.) -n :1000:10')
parser.add_argument('-r', '--rcut', type = float, default=8.5, help='Maximum radius for RDF. Default: 8.5')
parser.add_argument('-b', '--nBin', type=int, default=500, help='Number of bins. Default: 500')
parser.add_argument('-g', '--gsmear', type=float, default=0., help='Width(simga, STD) of Gaussian smearing in Angstrom unit. Zero means no smearing. [default: 0]')
parser.add_argument('-e', '--rectify_cut', type=float, default=None, help='All of drastic kink higher than this will be omitted. [Default: no rectify]')
parser.add_argument('-m', '--multiply', type=float, default=1., help='Multiply this value to RDF (re-scale). [default: 1.]')
parser.add_argument('-s', '--dont_save', dest='save_bool', action='store_false', help='If provided, npy will not be saved. Default: Save array')
parser.add_argument('-o', '--dont_load', dest='load_bool', action='store_false', help='If provided, npy will not be loaded. Default: Load if possible')
parser.add_argument('-t', '--dont_share_y', action='store_true', help='Subplots will not share y-axes if provided.')
parser.add_argument('-j', '--x_lower', type=float, default=0, help='Lower bound for RDF x-axis [Default: 0]')
parser.add_argument('-u', '--rdf_upper', type=float, default=None, help='Upper bound for RDF plot [Default: automatic]')
parser.add_argument('-l', '--rdf_lower', type=float, default=0, help='Lower bound for RDF plot [Default: 0]')
parser.add_argument('-p', '--s_upper', type=float, default=None, help='Upper bound for S(Q) plot [Default: automatic]')
parser.add_argument('-q', '--s_lower', type=float, default=0, help='Lower bound for S(Q) plot [Default: 0]')
parser.add_argument('-x', '--xtick_list', type=float, nargs='+', default=None, help='Specify x ticks of RDF. [Default: automatic]')
parser.add_argument('-y', '--ytick_list', type=float, nargs='+', default=None, help='Specify y ticks of RDF. [Default: automatic]')
parser.add_argument('-v', '--s_xtick_list', type=float, nargs='+', default=None, help='Specify x ticks of S(Q). [Default: automatic]')
parser.add_argument('-w', '--s_ytick_list', type=float, nargs='+', default=None, help='Specify y ticks of S(Q). [Default: automatic]')
return parser.parse_args()
def get_RDF(
alist,
rcut,
nBin=500,
symbol_tuple=None,
log=False,
):
from asap3.analysis.rdf import RadialDistributionFunction as RDF
RDFobj = RDF(
atoms=alist[0],
rMax=rcut,
nBins=nBin,
)
for i in range(1,len(alist)):
RDFobj.atoms = alist[i]
RDFobj.update()
if log and i % 1000 == 999:
print('\t Updating '+str(i+1)+" th image's RDF")
## Total RDF
if symbol_tuple == ('a', 'a'):
rdf = RDFobj.get_rdf()
## Partial RDF
else:
# Get normalize constant
(unique, counts) = np.unique(alist[0].get_chemical_symbols(), return_counts=True)
        norm_const = counts[list(unique).index(symbol_tuple[1])] / np.sum(counts, dtype=float)
#
from chemical_symbol_number_inverter import invert_chem_sym_num
spec_inds = invert_chem_sym_num(symbol_tuple)
#
rdf = RDFobj.get_rdf(elements=tuple(spec_inds)) / norm_const
x = np.arange(nBin) / float(nBin) * rcut
## Return curve
return np.transpose(np.concatenate(([x], [rdf])))
def get_s_factor(
r,
RDF,
rho,
):
"""
    S(k) = 1 + 4 \pi \rho dr \sum_{r=0}^{inf} r^2 {g(r)-1} sin(kr) / (kr)
    where \rho : number density
          g(r) : RDF
"""
dr = r[1] - r[0]
k = np.fft.fftfreq(len(r)) / dr
kr_matrix = k.reshape(-1,1) *r.reshape(-1,1).T
S = 1. +4*np.pi *rho *dr *np.sum(
np.reshape(r**2 *(RDF-1), (1,-1)) *np.sinc(kr_matrix/np.pi),
axis=1,
)
# print(np.reshape(r**2 *(RDF-1), (1,-1)) *np.sinc(kr_matrix/np.pi))
# print(np.sum(np.reshape(r**2 *(RDF-1), (1,-1)) *np.sinc(kr_matrix/np.pi), axis=1))
realpart = k >= 0.
return k[realpart], S[realpart]
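# Hedged sanity check of get_s_factor (kept commented out so nothing extra runs when
# the script is executed; the r grid and rho value below are assumed): for an ideal
# gas g(r) = 1 everywhere, the integrand vanishes and S(k) should stay at 1 for all k.
# r_demo = np.arange(500) / 500. * 8.5
# k_demo, S_demo = get_s_factor(r_demo, np.ones_like(r_demo), rho=0.03)
# assert np.allclose(S_demo, 1.)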
def get_curve(
alist,
image_slice,
alist_file,
chem1,
chem2,
nBin = 500,
rcut = 8.5,
load_bool = True,
save_bool = True,
rectify_cut = None,
gsmear_std = 0.,
):
# Slice process
from ss_util import str_slice_to_list
slice_list = str_slice_to_list(image_slice)
# out file
out_fname = 'rdf-saved/{}_slice-{}-{}-{}_sym-{}-{}_nBin-{}_rcut-{}_.npy'.format(
alist_file, *slice_list, chem1, chem2, nBin, rcut)
out_fname2 = 'rdf-saved/{}_slice-{}-{}-{}_sym-{}-{}_nBin-{}_rcut-{}_.npy'.format(
alist_file, *slice_list, chem2, chem1, nBin, rcut)
## Main
dr = rcut /nBin
try:
assert load_bool == True
curve = np.load(out_fname)
except:
try:
assert load_bool == True
curve = np.load(out_fname2)
except:
do_calc = True
if load_bool:
print('Failed to load saved npy file. Calculation will be carried out')
print(' Failed to load npy file "{}"'.format(out_fname))
print(' or equivalent data "{}"'.format(out_fname2))
else:
print('File "{}" has been loaded.'.format(out_fname2))
do_calc = False
if do_calc:
curve = get_RDF(alist, rcut, nBin, (chem1, chem2), log=True)
if save_bool:
from ss_util import pick_folder_from_path as pffp
folder = pffp(out_fname)
call('mkdir -p {}'.format(folder), shell=True)
np.save(out_fname, curve)
print('=================================================================================================='.center(120))
print('RDF saved! ----------> {}'.format(out_fname).center(120))
print('=================================================================================================='.center(120))
else:
print('File "{}" has been loaded.'.format(out_fname))
# @ Rectify curve
if rectify_cut:
from ss_util import rectify_curve
curve = rectify_curve(curve, rectify_cut)
if not gsmear_std == 0:
print(' Gaussian smearing...')
# from gaussian_smear import gsmear
# agr= gsmear(angd,agr,gsmear_std)
from scipy.ndimage.filters import gaussian_filter1d
curve[:,1] = gaussian_filter1d(curve[:,1], gsmear_std /dr)
# Debug option
print('Integration of RDF.={}'.format(np.trapz(curve[:,1], curve[:,0])))
return curve
if __name__ == '__main__':
## Intro
import datetime
now = datetime.datetime.now()
time = now.strftime('%Y-%m-%d %H:%M:%S')
print('')
print('>>>>> Code by Young Jae Choi @ POSTECH <<<<<'.center(120))
print(('Code runtime : '+time).center(120))
print('')
print('=================================================================================================='.center(120))
    print('This code will give you the (Total/Partial) Radial Distribution Function'.center(120))
print('=================================================================================================='.center(120))
print('')
args = argparse()
## Read input params
# params
chem1 = args.chem1
chem2 = args.chem2
rcut = args.rcut
nBin = args.nBin
gsmear_std = args.gsmear
rectify_cut = args.rectify_cut
#
den_list = []
## Read inputs
alist = read(args.alist_file, args.image_slice)
if not isinstance(alist, list):
alist = [alist]
den_list = []
for atoms in alist:
den_list.append(len(atoms) / atoms.get_volume())
num_den = np.mean(den_list)
# In case symbol is 'x'
chem_list = np.unique(alist[0].get_chemical_symbols()).tolist()
if chem1 == 'x':
chem1_list = chem_list[:]
else:
chem1_list = [chem1]
if chem2 == 'x':
chem2_list = chem_list[:]
else:
chem2_list = [chem2]
# Make symbol_sets
symbol_sets = []
if len(chem1_list) == 1 or len(chem2_list) == 1:
for s1 in chem1_list:
for s2 in chem2_list:
symbol_sets.append([s1, s2])
else:
for i in range(len(chem_list)):
for j in range(i,len(chem_list)):
symbol_sets.append([chem_list[i], chem_list[j]])
# Main
curve_list = []
for symb_set in symbol_sets:
cv = get_curve(
alist,
args.image_slice,
args.alist_file,
symb_set[0],
symb_set[1],
nBin,
rcut,
args.load_bool,
args.save_bool,
rectify_cut,
gsmear_std,
)
        cv[:,1] *= args.multiply
curve_list.append(cv)
# @ Get structure factor
k_list = []
S_list = []
for curve in curve_list:
k, S = get_s_factor(curve[:,0], curve[:,1], num_den)
k_list.append(k)
S_list.append(S)
# @ Plot
title = '{} slice-{} symb-{},{} nBin-{} rcut-{}'.format(
args.alist_file, args.image_slice, chem1, chem2, nBin, rcut)
import matplotlib.pyplot as plt
font = {'family':'sans-serif', 'sans-serif':'Arial'}
plt.rc('font', **font)
if args.dont_share_y:
fig, axs = plt.subplots(len(curve_list), sharex=True)
else:
fig, axs = plt.subplots(len(curve_list), sharex=True, sharey=True)
if not isinstance(axs, np.ndarray):
axs = [axs]
# Plot RDF
symbol_set_plot = []
for i in range(len(symbol_sets)):
symbol_set_plot.append([])
for j in range(len(symbol_sets[i])):
sym = symbol_sets[i][j]
if sym == 'X':
symbol_set_plot[i].append('V')
else:
symbol_set_plot[i].append(sym)
if args.rdf_upper is not None:
rdf_upper = args.rdf_upper
else:
rdf_upper = np.max(np.array(curve_list)[:,:,1]) * 1.10
for i in range(len(curve_list)):
#
axs[i].plot(curve_list[i][:,0], curve_list[i][:,1], c='k', lw=2)
#
if (symbol_set_plot[i][0], symbol_set_plot[i][1]) == ('a', 'a'):
axs[i].set_ylabel(r'$g_{\rm tot} \it (r)$', fontsize='x-large')
else:
axs[i].set_ylabel(r'$g\rm _{{{}}} \it (r)$'.format(symbol_set_plot[i][0]+symbol_set_plot[i][1]), fontsize='x-large')
#
if args.xtick_list is not None:
axs[i].set_xticks(args.xtick_list)
else:
intvl = int(rcut // 10 + 1)
axs[i].set_xticks(range(0, int(rcut)+1, intvl))
#
if args.ytick_list is not None:
axs[i].set_yticks(args.ytick_list)
else:
intvl = int(rdf_upper // 4 + 1)
axs[i].set_yticks(range(0, int(rdf_upper)+1, intvl))
#
axs[i].tick_params(axis="both",direction="in", labelsize='x-large', labelbottom=False)
axs[i].set_xlim(args.x_lower, rcut)
axs[i].set_ylim(args.rdf_lower, rdf_upper)
axs[i].axhline(1., linestyle='dashed', linewidth=1, c='k')
axs[i].grid(alpha=0.5)
axs[-1].tick_params(axis="both",direction="in", labelsize='x-large', labelbottom=True)
axs[-1].set_xlabel(r'Distance $\rm (\AA)$', fontsize='x-large')
axs[0].set_title(title, pad=10)
bottom = (1.-len(axs)*0.1) /2.
plt.subplots_adjust(left=0.25, bottom=bottom, right=0.75, top=1-bottom, wspace=0.20, hspace=0.20)
# plt.subplots_adjust(left=0.30, bottom=0.40, right=0.70, top=1-bottom, wspace=0.20, hspace=0.20)
# Plot S(Q)
if args.s_upper is not None:
s_upper = args.s_upper
else:
s_upper = np.max(np.array(S_list)) * 1.10
if args.dont_share_y:
fig, axs = plt.subplots(len(curve_list), sharex=True)
else:
fig, axs = plt.subplots(len(curve_list), sharex=True, sharey=True)
if not isinstance(axs, np.ndarray):
axs = [axs]
for i in range(len(curve_list)):
#
axs[i].plot(k_list[i], S_list[i], c='k', lw=2)
#
        if (symbol_set_plot[i][0], symbol_set_plot[i][1]) == ('a', 'a'):
axs[i].set_ylabel(r'$S_{\rm tot} (Q)$', fontsize='x-large')
else:
axs[i].set_ylabel(r'$S\rm _{{{}}} (Q)$'.format(symbol_set_plot[i][0]+symbol_set_plot[i][1]), fontsize='x-large')
#
if args.s_xtick_list is not None:
axs[i].set_xticks(args.s_xtick_list)
else:
intvl = int(np.max(k_list[i]) // 10 + 1)
axs[i].set_xticks(range(0, int(np.max(k_list[i]))+1, intvl))
#
if args.s_ytick_list is not None:
axs[i].set_yticks(args.s_ytick_list)
else:
intvl = int(s_upper // 4 + 1)
axs[i].set_yticks(range(0, int(s_upper)+1, intvl))
#
axs[i].tick_params(axis="both",direction="in", labelsize='x-large', labelbottom=False)
axs[i].set_xlim(0., np.max(k_list[i]))
axs[i].set_ylim(args.s_lower, s_upper)
axs[i].axhline(1., linestyle='dashed', linewidth=1, c='k')
axs[i].grid(alpha=0.5)
axs[-1].tick_params(axis="both",direction="in", labelsize='x-large', labelbottom=True)
axs[-1].set_xlabel(r'$\rm Q\ (\AA^{-1})$', fontsize='x-large')
axs[0].set_title(title, pad=10)
bottom = (1.-len(axs)*0.1) /2.
plt.subplots_adjust(left=0.25, bottom=bottom, right=0.75, top=1-bottom, wspace=0.20, hspace=0.20)
# plt.subplots_adjust(left=0.30, bottom=0.40, right=0.70, top=1-bottom, wspace=0.20, hspace=0.20)
plt.show()
|
hitergelei/tools
|
ase_rdf.py
|
ase_rdf.py
|
py
| 14,474 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10506264622
|
"""
a very simple MNIST classifier
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def main(_):
#import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
#create nodes for the input images and target output classes
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
#define the weights w and biases b for model
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
#implement regression model
#multiply the vectorized input images x by the weight matrix W, add the bias b
y = tf.matmul(x, W) + b
#specify a loss function
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
#train the model
#minimize cross entropy using gradient descent with a learning rate of 0.5
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
#initialize variable with specified values
tf.global_variables_initializer().run()
#run the training procedure
for _ in range(1000):
batch = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})
#evaluate the model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='d:/Workspace/tensorflow/MNIST_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
RuanYB/tensorflow
|
mnist.py
|
mnist.py
|
py
| 1,897 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26889693373
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
def viz2(img, regions, rooms, all_ctrs):
all_ctrs_img = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
cv2.drawContours(all_ctrs_img, all_ctrs, -1, (0, 255, 0), 3)
filtered_ctrs_img = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
for region in regions + rooms:
cv2.drawContours(filtered_ctrs_img, [region], -1, (0, 255, 0), 3)
f, ax = plt.subplots(1, 2, figsize=(20, 14))
ax[0].imshow(all_ctrs_img)
ax[0].axis('off')
ax[0].set_title('All contours simplified')
ax[1].imshow(filtered_ctrs_img)
ax[1].axis('off')
ax[1].set_title('All contours filtered and simplified')
f.savefig('fig2.jpg')
return
|
jmou/quarks-knit
|
viz2.py
|
viz2.py
|
py
| 736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34118032788
|
# Going to be extremely similar to the staff groups file
from __future__ import print_function
import os.path
import json
from typing import get_type_hints
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
# importing module
import oracledb # needed for connection to PowerSchool server (oracle database)
import os # needed for environment variable reading
from datetime import *
# setup db connection
un = 'PSNavigator' #PSNavigator is read only, PS is read/write
pw = os.environ.get('POWERSCHOOL_DB_PASSWORD') #the password for the database account
cs = os.environ.get('POWERSCHOOL_PROD_DB') #the IP address, port, and database name to connect to
print("Username: " + str(un) + " |Password: " + str(pw) + " |Server: " + str(cs)) #debug so we can see where oracle is trying to connect to/with
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group', 'https://www.googleapis.com/auth/admin.directory.group.member', 'https://www.googleapis.com/auth/apps.licensing']
emailSuffix = os.environ.get('EMAIL_SUFFIX')
studentSuffix = os.environ.get('STUDENT_SUFFIX')
allStudentGroup = os.environ.get('ALL_STUDENT_GROUP')
studentOU = os.environ.get('STUDENT_OU')
gradYearPrefix = os.environ.get('GRAD_YEAR_PREFIX')
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
service = build('admin', 'directory_v1', credentials=creds)
# function to take a group by email, and return all the members of the group as well as their role. Makes a dict with these pairings, then adds that dict as well as the group email to the overall memberLists dict
def getGroupMembers(groupEmail):
try:
studentMemberToken = '' # blank primer token for multi-page query results
tempDict = {} # create a temp dict that will hold the members and their roles
print(f'Getting members of {groupEmail}') # debug
while studentMemberToken is not None: # while we still have results to process
studentMemberResults = service.members().list(groupKey=groupEmail, pageToken=studentMemberToken, includeDerivedMembership='True').execute() # get the members of the group by email
studentMemberToken = studentMemberResults.get('nextPageToken')
studentMembers = studentMemberResults.get('members', []) # separate the actual members array from the rest of the result
for member in studentMembers: # go through each member and store their email and role in variables
studentEmail = member.get('email')
studentMemberType = member.get('role')
# print(f'{staffMemberEmail} is a {staffMemberType}')
tempDict.update({studentEmail : studentMemberType}) # add the email : role entry to the dict
memberLists.update({groupEmail : tempDict}) # update the overall master member dict with with this group's email and member sub-dict
except Exception as er:
if ("notFound" in str(er)):
print(f'ERROR: Group {groupEmail} not found')
print(f'ERROR: Group {groupEmail} not found',file=log)
else:
print(f'ERROR: {er}')
print(f'ERROR: {er}',file=log)
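# Illustrative (assumed, hypothetical addresses) shape of memberLists after a few calls:
# memberLists = {
#     'all-students@example.org': {'jane.doe@example.org': 'MEMBER', 'it-admin@example.org': 'OWNER'},
#     'whs-students@example.org': {'john.roe@example.org': 'MANAGER'},
# }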
# go through all student members in the OU, look at their school access lists, and see if they are in the groups they belong in
def processGroups(orgUnit):
userToken = ''
queryString = "orgUnitPath='" + orgUnit + "'" # have to have the orgUnit enclosed by its own set of quotes in order to work
print(queryString)
while userToken is not None: # do a while loop while we still have the next page token to get more results with
userResults = service.users().list(customer='my_customer', orderBy='email', projection='full', pageToken=userToken, query=queryString).execute()
userToken = userResults.get('nextPageToken')
users = userResults.get('users', [])
for user in users:
# print(user) # debug
try:
ou = user.get('orgUnitPath')
if ('test' not in ou.lower()) and ('fbla' not in ou.lower()) and ('pre students' not in ou.lower()): # ignore any accounts that are in an OU that contains the word test, fbla, pre students
email = user.get('primaryEmail') # .get allows us to retrieve the value of one of the sub results
homeschool = str(user.get('customSchemas').get('Synchronization_Data').get('Homeschool_ID')) # get their homeschool ID
gradYear = str(user.get('customSchemas').get('Synchronization_Data').get('Graduation_Year')) # get their homeschool ID
print(f'{email} should be a part of {allStudentGroup}, {schoolAbbreviations.get(homeschool) + studentSuffix + emailSuffix} and {gradYearPrefix + gradYear + emailSuffix}')
print(f'{email} should be a part of {allStudentGroup}, {schoolAbbreviations.get(homeschool) + studentSuffix + emailSuffix} and {gradYearPrefix + gradYear + emailSuffix}', file=log)
addBodyDict = {'email' : email, 'role' : 'MEMBER'} # define a dict for the member email and role type, which is this case is just their email and the normal member role
# Check to see if they are a member of the all student group, if not we need to add them
if not memberLists.get(allStudentGroup).get(email):
print(f'ACTION: {email} is currently not a member of {allStudentGroup}, will be added')
print(f'ACTION: {email} is currently not a member of {allStudentGroup}, will be added', file=log)
service.members().insert(groupKey=allStudentGroup, body=addBodyDict).execute() # do the addition to the group
# else: # debug
# print(f'INFO: {email} is already a part of {allStudentGroup}, no action needed')
# print(f'INFO: {email} is already a part of {allStudentGroup}, no action needed', file=log)
# go through each school code : abbreviation pair to check membership for each building group
for schoolEntry in schoolAbbreviations.keys():
try:
schoolGroupEmail = schoolAbbreviations.get(schoolEntry) + studentSuffix + emailSuffix
if schoolEntry == homeschool: # if the school id number we are currently is their school, they should be a part of that school's groups
if not memberLists.get(schoolGroupEmail).get(email): # if they are not a member of the group
print(f'ACTION: {email} is currently not a member of {schoolGroupEmail}, will be added')
print(f'ACTION: {email} is currently not a member of {schoolGroupEmail}, will be added', file=log)
service.members().insert(groupKey=schoolGroupEmail, body=addBodyDict).execute() # do the addition to the group
# else: # debug
# print(f'INFO: {email} is already a part of {schoolGroupEmail}, no action needed')
# print(f'INFO: {email} is already a part of {schoolGroupEmail}, no action needed', file=log)
else: # if the current school entry is not their school, we need to make sure they are NOT part of that schools groups and remove them if they are
if memberLists.get(schoolGroupEmail).get(email): # if they are a member of the group
if memberLists.get(schoolGroupEmail).get(email) == 'MEMBER': # check and see if they are just a member, if so remove them, otherwise we do not want to touch the managers and owners
print(f'ACTION: {email} should not be a member of {schoolGroupEmail}, will be removed')
print(f'ACTION: {email} should not be a member of {schoolGroupEmail}, will be removed', file=log)
service.members().delete(groupKey=schoolGroupEmail, memberKey=email).execute() # do the removal from the group
else: # if they are an elevated member just give a warning
print(f'WARNING: {email} is an elevated role in {schoolGroupEmail} and will NOT be removed')
print(f'WARNING: {email} is an elevated role in {schoolGroupEmail} and will NOT be removed', file=log)
except Exception as er:
print(f'ERROR: in building {schoolEntry} on user {email}: {er}')
print(f'ERROR: in building {schoolEntry} on user {email}: {er}', file=log)
# go through each grad year group to check membership
for year in gradYears:
try:
gradYearEmail = gradYearPrefix + str(year) + emailSuffix
if str(year) == gradYear: # if the year we are currently on is their grad year, they should be a part of the group
if not memberLists.get(gradYearEmail).get(email):
print(f'ACTION: {email} is currently not a member of {gradYearEmail}, will be added')
print(f'ACTION: {email} is currently not a member of {gradYearEmail}, will be added', file=log)
service.members().insert(groupKey=gradYearEmail, body=addBodyDict).execute() # do the addition to the group
# else: # debug
# print(f'INFO: {email} is already a part of {gradYearEmail}, no action needed')
# print(f'INFO: {email} is already a part of {gradYearEmail}, no action needed', file=log)
else: # if the year is not their grad year, we need to make sure they are NOT a part of that group
if memberLists.get(gradYearEmail).get(email):
if memberLists.get(gradYearEmail).get(email) == 'MEMBER': # check and see if they are just a member, if so remove them, otherwise we do not want to touch the managers and owners
print(f'ACTION: {email} should not be a member of {gradYearEmail}, will be removed')
print(f'ACTION: {email} should not be a member of {gradYearEmail}, will be removed', file=log)
service.members().delete(groupKey=gradYearEmail, memberKey=email).execute() # do the removal from the group
else: # if they are an elevated member just give a warning
print(f'WARNING: {email} is an elevated role in {gradYearEmail} and will NOT be removed')
print(f'WARNING: {email} is an elevated role in {gradYearEmail} and will NOT be removed', file=log)
except Exception as er:
                                    print(f'ERROR: in grad year entry {year} on user {email}: {er}')
                                    print(f'ERROR: in grad year entry {year} on user {email}: {er}', file=log)
except Exception as er:
print(f'ERROR: on {user} - {er}')
print(f'ERROR: on {user} - {er}',file=log)
# main program
with oracledb.connect(user=un, password=pw, dsn=cs) as con: # create the connecton to the database
with con.cursor() as cur: # start an entry cursor
with open('StudentGroupsLog.txt', 'w') as log:
startTime = datetime.now()
startTime = startTime.strftime('%H:%M:%S')
currentYear = int(datetime.now().strftime('%Y')) # get current year for calculations of grad year classes
print(f'Execution started at {startTime}')
print(f'Execution started at {startTime}', file=log)
# Start by getting a list of schools id's and abbreviations for the "real" schools which are not excluded from state reporting
cur.execute('SELECT abbreviation, school_number FROM schools WHERE State_ExcludeFromReporting = 0')
schools = cur.fetchall()
schoolAbbreviations = {} # define a dict to store the school codes and abbreviations linked
for school in schools:
# store results in variables mostly just for readability
schoolAbbrev = school[0].lower() # convert to lower case since email groups are all lower
schoolNum = str(school[1])
# print(f'School {schoolAbbrev} - Code {schoolNum}')
schoolAbbreviations.update({schoolNum : schoolAbbrev})
print(f'Schools numbers and their abbreviations: {schoolAbbreviations}')
print(f'Schools numbers and their abbreviations: {schoolAbbreviations}', file=log)
            memberLists = {} # make a master dict for group memberships, that will have sub-dicts of each member and their role as its values
            gradYears = [] # make an array that will hold the graduation years in range (one year back through the next 15 years) for reference
            for i in range(17):
                gradYears.append(currentYear + (i-1)) # start one year before the current year and go through the next 15 years
print(f'The graduation years in range: {gradYears}') # debug
print(f'The graduation years in range: {gradYears}', file=log) # debug
# find the members of each group once at the start so we do not have to constantly query via the api whether a user is a member, we can just do a list comparison
for entry in schoolAbbreviations.values():
# go through each school abbreviation and find their student group
studentGroup = entry + studentSuffix + emailSuffix
getGroupMembers(studentGroup)
for year in gradYears:
classGroup = gradYearPrefix + str(year) + emailSuffix
getGroupMembers(classGroup)
getGroupMembers(allStudentGroup) # get membership for the district wide student group added to dict
print(memberLists) # debug, now should have a dict containing each group email as the keys, and the value is a dict of its own containing the emails and roles of each member of the group
# print(memberLists, file=log) # debug, now should have a dict containing each group email as the keys, and the value is a dict of its own containing the emails and roles of each member of the group
processGroups(studentOU) # process the student groups for the main student OU, this will also include any sub-OUs
endTime = datetime.now()
endTime = endTime.strftime('%H:%M:%S')
print(f'Execution ended at {endTime}')
print(f'Execution ended at {endTime}', file=log)
|
Philip-Greyson/D118-Google-Groups-Licensing
|
doStudentGroups.pyw
|
doStudentGroups.pyw
|
pyw
| 16,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22858483582
|
import sys
CASE_FMT = "{} {}"
def pos(x, y):
if x == -1 and y == -1:
return 0
if x == -1 and y == 0:
return 1
if x == -1 and y == 1:
return 2
if x == 0 and y == -1:
return 3
if x == 0 and y == 0:
return 4
if x == 0 and y == 1:
return 5
if x == 1 and y == -1:
return 6
if x == 1 and y == 0:
return 7
if x == 1 and y == 1:
return 8
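# Added note: pos() above maps a pair (x, y) with x, y in {-1, 0, 1} onto an index 0..8.
# A minimal equivalent sketch (pos_compact is a hypothetical helper, not used elsewhere):
def pos_compact(x, y):
    # same mapping as the explicit table in pos(): rows indexed by x, columns by y
    return (x + 1) * 3 + (y + 1)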
def main():
t = int(input())
for i in range(1, t + 1):
a = int(input())
positions = [0 for _ in range(9)]
wp = [(3 * x - 1, 2) for x in range(1, 1000 // 9 + 1)]
wanted = wp.pop()
while True:
if sum(positions) == 9:
positions = [0 for _ in range(9)]
wanted = wp.pop()
print(CASE_FMT.format(wanted[0], wanted[1]))
sys.stdout.flush()
k, m = list(map(int, input().split()))
if k == -1 and m == -1:
exit(1)
if k == 0 and m == 0:
break
positions[pos(wanted[0] - k, wanted[1] - m)] = 1
if __name__ == "__main__":
main()
|
tivvit/codejam-2018
|
Q1/go-gopher/main-final.py
|
main-final.py
|
py
| 1,158 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39473494610
|
#!/usr/bin/python3
# coding=utf-8
import sys,os
import string
import random
import time
import subprocess
from binascii import unhexlify
random.seed(time.time())  # seed with the current time value, not the function object
charset = string.ascii_letters  # ascii_letters already contains both lower- and upper-case letters
patchbytes=16
print("You are allowed a maximum patch of {} bytes".format(patchbytes))
print("You need to input a patch following format")
print("starting offset[space]ending offset[space]value to be put in hex")
print("Different groups of patches need to be comma separated")
print("Example: 20 21 4141, 65 69 4141424243")
patchstring=input()
patchstring=patchstring.split(',')
numpatch=0
for i in patchstring:
i=i.split()
start=int(i[0])
end=int(i[1])
patch=unhexlify(i[2])
if end<start:
print("Invalid input")
sys.exit(0)
numpatch+=(end-start+1)
if numpatch>patchbytes:
print("Exceeded patch size")
sys.exit(0)
f=open('patch','rb')
data=f.read()
f.close()
for i in patchstring:
i=i.split()
start=int(i[0])-1
end=int(i[1])
try:
patch=unhexlify(i[2])
except:
print("Unsupported Input")
sys.exit(1)
temps=data[:start]
tempe=data[end:]
data=temps+patch+tempe
filename="/tmp/"
for i in range(10):filename+=charset[random.randint(0,len(charset)-1)]
f=open(filename,'wb')
f.write(data)
f.close()
os.system("chmod +x {}".format(filename))
try:
process=subprocess.Popen(filename,stdout=subprocess.PIPE)
response,error = process.communicate()
print (response.decode('ISO-8859-1'))
except:
print ("Unable to run patched file")
os.remove(filename)
|
Himanshukr000/CTF-DOCKERS
|
SeasidesCTF/rev/patch/wrapper.py
|
wrapper.py
|
py
| 1,601 |
python
|
en
|
code
| 25 |
github-code
|
6
|
5675740119
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import os, re, collections, getpass, functools, click, six, logging, json, threading
import dash
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State, ALL
import os, collections, getpass, functools
_name_dev_dd={
'A-UL': '207',
'B-UL': '202',
'A-TZ': '220',
'B-TZ': '221',
'A-LV': '222',
'B-LV': '223',
'A-SA': '224',
'B-SA': '225'}
_dev_name_dd={vv:kk for kk,vv in _name_dev_dd.items()}
def _get_devices(path='/device/dfe'):
return [{}]
# _root=nalct.NalctConfiguration.NalctConfiguration(quiet=True, _context=_context)()._asdict()
# _devices=next(iter(_root.get('entity', DotMap()).get('container', DotMap()).match(('name','device'))), DotMap())
# _dev_ent=_devices.get('container', DotMap()).get('entity', [])
# _dfe=next(iter(list(filter(lambda x: x.get('name') in ['dfe'], _dev_ent))), DotMap())
# # _dfe=next(iter(_devices.get('container', DotMap()).get('entity', []).match(('name','dfe'))), DotMap())
# _devs=list(map(lambda x: x.get('name'), _dfe.get('container', DotMap()).get('entity', []) ))
# return [{'label': _dev_name_dd.get(_dev, _dev), 'value':os.path.join(path, _dev)} for _dev in _devs]
_command_funcs=[]
_devices=_get_devices()
logging.getLogger(__name__).debug('Devices: {}'.format(_devices))
# _commands=sorted(list(set( [_cmd for _device in [_dev.get('value') for _dev in _devices] for _cmds in nalct.NalctCommanding.NalctCommanding(path=_device, _context=_context).commands for _cmd in _cmds ] )))
_commands=[]
# _dev_cmds={_device:list(nalct.NalctCommanding.NalctCommanding(path=_device, _context=_context).commands) for _device in [_dev.get('value') for _dev in _devices] }
_dev_cmds={}
# _commands=[]
# [_commands.extend(_vv) for _vv in _dev_cmds.values()]
_commands=sorted(list(set( _commands )))
_get_click_context = lambda command: click.Context(command).__enter__()
# _commands=collections.OrderedDict([(_cmd.command.name, _cmd) for _cmd in map(_get_click_context, _commands)])
# _cmd_exe_dd = collections.OrderedDict([(_cmd_name, _command_funcs[ii]) for ii,_cmd_name in enumerate(_commands.keys())])
_cmd_exe_dd = {}
_all_services={} #cc.__name__ : cc for cc in nalct.DockerService.DockerService.yield_progeny()}
_all_service_names=sorted(set( filter(lambda x: isinstance(x, six.string_types), [getattr(vv,'component', None) for vv in _all_services.values()]) ))
input_groups = html.Div(
[
dbc.Container(
children=[
html.Label([
"Devices to Command",
dcc.Dropdown(
id='nalct-device-select-dropdown',
options=_devices,
multi=True,
value=[_dev.get('value') for _dev in _devices])
]),
dbc.InputGroup(
[
# dbc.InputGroupAddon("Nalct Command", addon_type="prepend"),
dbc.InputGroupText("Nalct Command"),
dbc.Select(options=[{'label': _command_name, 'value': _command_name} for _command_name in _commands],id='nalct-command-input'),
],),
dbc.Container(id='nalct-command-parameters')
],
id='nalct-command-builder'
),
],
)
layout = dbc.Container(
children=[
dbc.Alert("PyNAPL -- NetAcquire Commanding", color="success"),
input_groups,
dbc.Container(
children=[dbc.InputGroup([dbc.Button("Execute", id='nalct-launch', outline=True, color="info", className="mr-1", disabled=True),]),],
id='nalct-command-cfg-exec-container'
),
dbc.Container(
children=[],
id='nalct-command-exec-container'
),
],
className="p-5",
)
def make_bool_option(_command, _param):
return dbc.FormGroup([
dbc.Checklist(
options=[ {"label": _param.name, "value": _param.name} ],
value=[],
id={'type': 'nalct-command-line-flags', 'index': '{}-{}'.format(_command, _param.name)},
switch=True,
),
dbc.FormText(_param.help, color="secondary",)]
)
def make_command_option(_command, _param):
_ctx=_context
if re.match(r'^.*enclave.*$', _param.name):
return dbc.FormGroup([
dbc.Label(_param.name, html_for='nalct-command-line-options-{}-{}'.format(_command, _param.name)),
dbc.Select(
options=[{'label': _enclave, 'value': _enclave} for _enclave in sorted(_ctx.active.keys())] if re.match(r'^kill$', _command) else [{'label': _enclave, 'value': _enclave} for _enclave in sorted(_ctx.enclaves.keys())],
id={'type': 'nalct-command-line-options', 'index': '{}-{}'.format(_command, _param.name)}),
dbc.FormText(_param.help, color="secondary",)]
)
elif re.match(r'^.*service.*$', _param.name):
return dbc.FormGroup([
dbc.Label(_param.name, html_for='nalct-command-line-options-{}-{}'.format(_command, _param.name)),
dbc.Select(
options=[{'label': _service, 'value': _service} for _service in _all_service_names],
id={'type': 'nalct-command-line-options', 'index': '{}-{}'.format(_command, _param.name)}),
dbc.FormText(_param.help, color="secondary",)]
)
else:
return dbc.FormGroup([
dbc.Label(_param.name, html_for='nalct-command-line-options-{}-{}'.format(_command, _param.name)),
dbc.Input(placeholder="{} value...".format(_param.name), type="text", id={'type': 'nalct-command-line-options', 'index': '{}-{}'.format(_command, _param.name)}),
dbc.FormText(_param.help, color="secondary",)]
)
def build_command_form(command):
if not(command): return
_ctx=None #_commands.get(command)
_params=[] #_ctx.command.params
_flags=[] #list(filter(lambda x: x.is_flag, _params))
_options=[] #list(filter(lambda x: not(x.is_flag), _params))
_content=[]
if _options:
options = dbc.FormGroup([
dbc.FormGroup([make_command_option(command, _param) for _param in _options],
id='nalct-command-line-options-{}'.format(command))])
_content.append(options)
if _flags:
flags = dbc.FormGroup([
dbc.Label("Flags", html_for='nalct-command-line-flags-{}'.format(command)),
dbc.FormGroup([make_bool_option(command, _param) for _param in _flags],
id='nalct-command-line-flags-{}'.format(command))])
_content.append(flags)
if _content:
_content= [dbc.Label("Nalct {} Options".format(command.capitalize()), 'nalct-command-options-{}'.format(command))] + _content
return dbc.FormGroup(_content)
@app.callback([Output("nalct-command-parameters", "children"), Output('nalct-launch', 'disabled')], [Input("nalct-command-input", "value")])
def update_command_form(nalct_command):
logging.getLogger('.'.join([__name__, 'update_command_form'])).info('Command: "{}"'.format(nalct_command))
if nalct_command:
return [build_command_form(nalct_command)], False
return [], True
def _invoke(command, devices):
pass
@app.callback(
Output('nalct-command-exec-container', 'children'),
[Input('nalct-launch', 'n_clicks')],
[State("nalct-command-input", "value"),
State("nalct-device-select-dropdown", "value")],
)
def launch_nalct_command(launch_button_clicks, command, devices):
logging.getLogger('.'.join([__name__, 'launch_nalct_command'])).info('Command: "{}", Devices: {}'.format(command, devices))
if command and devices:
_opts=SmartDict(command=command, devices=devices)
try:
threading.Thread(target=_invoke, args=(command, devices), name='.'.join([__name__, 'launch_nalct_command','invoke'])).start()
return [dbc.Toast(
[
html.P("Launching `nalct.Command({})`".format(', '.join('{}={}'.format(kk,vv) for kk,vv in _opts.items())), className="mb-0"),
html.A("Status", href="/apps/app2", className="mb-0"),
dbc.Spinner(spinner_style={"width": "3rem", "height": "3rem"})],
id="nalct-launch-notifier",
header="Launching Nalct Command",
icon="primary",
duration=5000,
),]
except Exception as e:
return [dbc.Toast(
[html.P("Failed to Launch `nalct.{}({})`: {}".format(command, _opts, e), className="mb-0")],
id="nalct-launch-notifier",
header="Uh-oh!",
icon="danger",
duration=5000,
)]
return [None]
################################################################################
# vim:set sr et ts=4 sw=4 ft=python fenc=utf-8: // See Vim, :help 'modeline'
|
meghanstell/SNOWIE
|
megalodon/web/apps/launch.py
|
launch.py
|
py
| 9,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40787804441
|
'''
Created on Feb 18, 2018
@author: fdunaway
'''
class thermalCalculations:
# Formula to calculate thermal rise rate:
#f(x)=1.11676445E-14 * x^4 - 1.313037E-10 * x^3 + 4.08270207E-07 * x^2 + 0.00141231184 * x + 0.9994399220259089
# takes the number of seconds and returns the temperature rise of the heater.
    @staticmethod
    def deltaTemp(nSeconds):
return ((1.11676445E-14 * nSeconds ** 4)
- (1.313037E-10 * nSeconds ** 3)
+ (4.08270207E-7 * nSeconds ** 2)
+ (0.00141231184 * nSeconds)
+ 0.9994399220259089)
#dTimeSeconds = 1000
#dTempC = deltaTemp(dTimeSeconds)
#print('Expected temperature rise for {:1.2f} seconds is {:1.2f} C'.format(dTimeSeconds, dTempC))
# returns the number of seconds for the delta T
    @staticmethod
    def secondsToTemp(dTemp):
        # initial guess of seconds for dT
sec = 20
dT = 0
while dT <= dTemp:
dT = thermalCalculations.deltaTemp(sec)
# print('dT: ', dT, ' target: ', dTemp)
if dT >= dTemp:
return sec
if dT <= dTemp:
sec = sec + 10
else:
sec = sec - 10
return sec
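# Added usage sketch (hypothetical values, not part of the original module): estimate how
# long the heater needs for a 5 C rise, then confirm the predicted rise for that duration.
if __name__ == '__main__':
    target_rise_c = 5.0
    seconds_needed = thermalCalculations.secondsToTemp(target_rise_c)
    predicted_rise = thermalCalculations.deltaTemp(seconds_needed)
    print('~{} s for a {:1.1f} C rise ({:1.2f} C predicted)'.format(seconds_needed, target_rise_c, predicted_rise))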
|
mrncmoose/smart_controller
|
pi-code/ThermalPrediction/PredictDeltaTemp.py
|
PredictDeltaTemp.py
|
py
| 1,200 |
python
|
en
|
code
| 3 |
github-code
|
6
|
16970226908
|
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open("README.md") as f:
long_description = f.read()
version = {}
with open(path.join(here, "emv", "__init__.py")) as fp:
exec(fp.read(), version)
setup(
name="emv",
version=version["__version__"],
description="EMV Smartcard Protocol Library",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
author="Russ Garrett",
author_email="[email protected]",
url="https://github.com/russss/python-emv",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
keywords="smartcard emv payment",
python_requires=">=3.4",
packages=["emv", "emv.protocol", "emv.command"],
install_requires=[
"pyscard==2.0.0",
"pycountry==20.7.3",
"terminaltables==3.1.0",
"click==7.1.2",
],
entry_points={"console_scripts": {"emvtool=emv.command.client:run"}},
)
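# Added note (assumption, not part of the original file): with a standard setuptools
# workflow, running `pip install .` in this directory should install the package and
# expose the `emvtool` console script declared in entry_points above.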
|
russss/python-emv
|
setup.py
|
setup.py
|
py
| 1,114 |
python
|
en
|
code
| 100 |
github-code
|
6
|
31014617376
|
import numpy as np
import os
import matplotlib.pyplot as plt
import sys
import NeuralTrainerCustoms as ntc
import AdaMod as am
import keras
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import Input, Layer, Dense, Activation, Embedding, LSTM, Bidirectional, Lambda, concatenate
from keras.layers.wrappers import TimeDistributed
import keras.losses
# import tensorflow as tf
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_accuracy
from keras.utils.generic_utils import get_custom_objects
losses = ntc.Losses()
class NeuralModel:
embedding_size = 300
hidden_size = 100
def __init__(self, maxlen, num_tags, word_index, embeddings, save_weights=False):
self.maxlen = maxlen
self.vocab_size = len(word_index)+1
self.num_tags = num_tags
self.word_index = word_index
self.embeddings = embeddings
self.model = None
self.tags = ['']*num_tags
self.arg_classes = ['']*num_tags
self.transition_matrix = None
self.save_weights = save_weights
self.read_tag_mapping()
self.set_transition_matrix()
num_measures = 1 + 3*(num_tags - 2)
def read_tag_mapping(self):
f = open('tag_mapping.txt', 'r', encoding='utf-8')
lines = f.readlines()
tags = {}
for mapping in lines:
if(mapping == ''):
continue
map = mapping.split('\t')
tags[int(map[1][0])-1] = map[0]
for i in range(0, self.num_tags):
self.tags[i] = tags[i]
if tags[i] == '(O)':
self.arg_classes[i] = '|'
elif tags[i] == '(P)':
self.arg_classes[i] = 'premise'
elif tags[i] == '(C)':
self.arg_classes[i] = 'claim'
elif tags[i] == '(I)':
self.arg_classes[i] = 'inside'
def set_transition_matrix(self):
transition_matrix = np.array([[1]*self.num_tags]*self.num_tags)
# matrix is initialized to 1
# this function sets some entries to -1
for i in range(0, self.num_tags):
if self.tags[i] == '(O)':
for j in range(0, self.num_tags):
if self.tags[j] == '(P)': # impossible transition to (O)
transition_matrix[i][j] = -1
elif self.tags[j] == '(C)': # impossible transition to (O)
transition_matrix[i][j] = -1
elif self.tags[i] == '(P)':
for j in range(0, self.num_tags):
if self.tags[j] == '(P)': # impossible transition to (P)
transition_matrix[i][j] = -1
elif self.tags[j] == '(C)': # impossible transition to (P)
transition_matrix[i][j] = -1
elif self.tags[i] == '(C)':
for j in range(0, self.num_tags):
if self.tags[j] == '(P)': # impossible transition to (C)
transition_matrix[i][j] = -1
elif self.tags[j] == '(C)': # impossible transition to (C)
transition_matrix[i][j] = -1
elif self.tags[i] == '(I)':
for j in range(0, self.num_tags):
if self.tags[j] == '(O)': # impossible transition to (I)
transition_matrix[i][j] = -1
print(transition_matrix) #debug
self.transition_matrix = transition_matrix
# def switch_loss_wrapper(self, crf_layer):
# # current_epoch = self.monitor.current_epoch
# def switch_loss(y_true, y_pred):
# if not K.is_tensor(y_pred):
# y_pred = K.constant(y_pred)
# y_true = K.cast(y_true, y_pred.dtype)
# pure_mae = K.mean(K.abs(y_pred - y_true), axis=-1)
# y_true_aux = K.squeeze(y_true, axis=-1)
# zero = K.constant(0)
# simple_loss = K.switch(K.equal(y_true_aux, zero), K.zeros_like(pure_mae), pure_mae)
# # print('ypred shape', K.int_shape(y_pred))
# I_prob = K.squeeze(crf_layer[:,:,:1], axis=-1)
# ypred_size = K.int_shape(y_pred)[1]
# tiled = K.tile(y_pred, [1, 2, 1]) #repeat array like [1, 2, 3] -> [1, 2, 3, 1, 2, 3]
# rolled_y_pred = tiled[:,ypred_size-1:-1] #crop repeated array (from len-1) -> [3, 1, 2] <- (to -1)
# dist_dif = K.abs((rolled_y_pred - y_pred) - K.ones_like(y_pred))
# dist_err_mae = K.switch(K.greater(I_prob, K.constant(0.5)), K.mean(K.abs(y_pred - y_true + dist_dif), axis=-1), K.mean(K.abs(y_pred - y_true), axis=-1))
# dist_err_loss = K.switch(K.equal(y_true_aux, zero), K.zeros_like(dist_err_mae), dist_err_mae)
# simple_loss = keras.losses.mean_squared_error(y_true, y_pred)
# dist_err_loss = keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
# return K.switch(K.less(current_epoch, K.constant(100)), dist_err_loss, simple_loss)
# return switch_loss
def createEmbeddings(self, word_index, embeddings):
embeddings_index = {}
path = 'Embeddings/' + embeddings + '.txt'
f = open(path, "r", encoding='utf8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((self.vocab_size, self.embedding_size))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
def create_biLSTM(self, input):
embeddingMatrix = self.createEmbeddings(self.word_index, self.embeddings)
emb = Embedding(self.vocab_size, self.embedding_size, weights=[embeddingMatrix], input_length=self.maxlen,
trainable=False, mask_zero=True, name='embedding')(input)
biLSTM_tensor = TimeDistributed(Dense(self.hidden_size, activation='relu'), name='time_distributed_1')(emb)
biLSTM_tensor = Bidirectional(LSTM(self.hidden_size, return_sequences=True, activation='pentanh', recurrent_activation='pentanh'), name='biLSTM_1')(biLSTM_tensor)
biLSTM_tensor = Bidirectional(LSTM(self.hidden_size, return_sequences=True, activation='pentanh', recurrent_activation='pentanh'), name='biLSTM_2')(biLSTM_tensor)
return biLSTM_tensor
def create_CRF(self, biLSTM_tensor, learn, test):
crf_tensor = TimeDistributed(Dense(20, activation='relu'), name='time_distributed_2')(biLSTM_tensor)
chain_matrix = keras.initializers.Constant(self.transition_matrix)
if learn == 'marginal': #loaded model or std CRF-dist model
crf = CRF(self.num_tags, sparse_target=False, learn_mode=learn, test_mode=test,
chain_initializer=chain_matrix, name='crf_layer')
# crf = CRF(self.num_tags, sparse_target=False, learn_mode=learn, test_mode=test, name='crf_layer')
else: #baseline model
crf = CRF(self.num_tags, sparse_target=False, learn_mode=learn, test_mode=test, name='crf_layer')
crf_tensor = crf(crf_tensor)
return crf_tensor
def create_dist_layer(self, biLSTM_tensor, crf_tensor):
dist_tensor = TimeDistributed(Dense(1, activation='relu'), name='distance_layer')(biLSTM_tensor)
soft_argmax = ntc.SoftArgMax()
soft_argmax.create_soft_argmax_layer()
# zero_switch = ntc.SoftArgMax()
# zero_switch.create_zero_switch_layer()
concat = concatenate([crf_tensor, dist_tensor], axis=-1, name='concatenate')
### LAYER OPTIONS:
##### soft_argmax.layer
##### zero_switch.layer
output = TimeDistributed(soft_argmax.layer, name='softargmax')(concat)
return (output, soft_argmax)
def create_model(self, fold_name=''):
input = Input(shape=(self.maxlen,), name='input')
biLSTM_tensor = self.create_biLSTM(input)
crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
temp_model = Model(input=input, output=crf_tensor)
if self.save_weights:
print('MODEL LOADED FROM FILE')
base_crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
baseline_model = Model(input=input, output=base_crf_tensor)
print(baseline_model.summary()) #debug
baseline_model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_accuracy])
# baseline_model.run_eagerly = True #debug
baseline_model.load_weights('./tmp/'+fold_name+'/baseline_checkpoint.h5', by_name=True)
base_layers = baseline_model.layers
model_layers = temp_model.layers
for i in range(0, len(base_layers)):
print(model_layers[i].name, base_layers[i].name)
assert model_layers[i].name == base_layers[i].name
layer_name = base_layers[i].name
temp_model.get_layer(layer_name).set_weights(base_layers[i].get_weights())
temp_model.get_layer(layer_name).trainable = False
(dist_tensor, soft_argmax) = self.create_dist_layer(temp_model.get_layer('biLSTM_2').output, temp_model.output)
self.model = Model(input=input, output=[temp_model.output,dist_tensor])
print(self.model.summary()) #debug
#loss_weights=[1.0, 0.10],
####LOSSES:
######'mean_absolute_error'
######'loss_func'
######'consecutive_dist_loss'
####OPTIMIZERS:
######'adam'
######am.AdaMod() ??
# get_custom_objects().update({'consecutive_dist_loss': losses.consecutive_dist_loss_wrapper(crf_tensor)})
# get_custom_objects().update({'switch_loss': losses.switch_loss_wrapper(crf_tensor)})
#
# keras.losses.consecutive_dist_loss = losses.consecutive_dist_loss_wrapper(crf_tensor)
self.model.compile(optimizer='adam', loss=[crf_loss,losses.loss_func], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
# self.model.compile(optimizer='adam', loss=[crf_loss,losses.consecutive_dist_loss_wrapper(temp_model.output)], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
# self.model.compile(optimizer='adam', loss=[crf_loss,keras.losses.mean_squared_error], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
# self.model.run_eagerly = True #debug
def create_baseline_model(self):
input = Input(shape=(self.maxlen,))
biLSTM_tensor = self.create_biLSTM(input)
crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
self.model = Model(input=input, output=crf_tensor)
print(self.model.summary()) #debug
self.model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_accuracy])
# self.model.run_eagerly = True #debug
def recompile_model_new_loss(self, loss, fold_name=''):
input = Input(shape=(self.maxlen,), name='input')
biLSTM_tensor = self.create_biLSTM(input)
crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
(dist_tensor, soft_argmax) = self.create_dist_layer(biLSTM_tensor, crf_tensor)
temp_model = Model(input=input, output=crf_tensor)
if self.save_weights:
print('MODEL LOADED FROM FILE')
base_crf_tensor = self.create_CRF(biLSTM_tensor, 'marginal', 'marginal')
baseline_model = Model(input=input, output=base_crf_tensor)
baseline_model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_accuracy])
# baseline_model.run_eagerly = True #debug
baseline_model.load_weights('./tmp/'+fold_name+'/baseline_checkpoint.h5', by_name=True)
base_layers = baseline_model.layers
model_layers = temp_model.layers
for i in range(0, len(base_layers)):
print(model_layers[i].name, base_layers[i].name)
assert model_layers[i].name == base_layers[i].name
layer_name = base_layers[i].name
temp_model.get_layer(layer_name).set_weights(base_layers[i].get_weights())
temp_model.get_layer(layer_name).trainable = False
(dist_tensor, soft_argmax) = self.create_dist_layer(temp_model.get_layer('biLSTM_2').output, temp_model.output)
new_model = Model(input=input, output=[temp_model.output,dist_tensor])
new_layers = new_model.layers
model_layers = self.model.layers
total_layers = len(model_layers)
for i in range(total_layers-3, total_layers):
assert model_layers[i].name == new_layers[i].name
layer_name = model_layers[i].name
new_model.get_layer(layer_name).set_weights(model_layers[i].get_weights())
print(new_model.summary()) #debug
self.model = new_model
if loss == 'consecutive_dist_loss':
self.model.compile(optimizer='adam', loss=[crf_loss,losses.consecutive_dist_loss_wrapper(temp_model.output)], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
# new_model.compile(optimizer='adam', loss=[crf_loss,keras.losses.mean_squared_logarithmic_error], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
else:
self.model.compile(optimizer='adam', loss=[crf_loss,losses.loss_func], loss_weights=[1.0, 0.10], metrics={'crf_layer':[crf_accuracy], 'softargmax':'mae'})
|
fspring/NeuralArgMining
|
NeuralModel.py
|
NeuralModel.py
|
py
| 13,844 |
python
|
en
|
code
| 0 |
github-code
|
6
|
811362006
|
# Rotate List - https://leetcode.com/problems/rotate-list/
'''Given a linked list, rotate the list to the right by k places, where k is non-negative.
Example 1:
Input: 1->2->3->4->5->NULL, k = 2
Output: 4->5->1->2->3->NULL
Explanation:
rotate 1 steps to the right: 5->1->2->3->4->NULL
rotate 2 steps to the right: 4->5->1->2->3->NULL'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head:
return None
if not head.next:
return head
length = 1
oldTail = head
# close the linked list into the ring
while oldTail.next:
length += 1
oldTail = oldTail.next
oldTail.next = head
# find new tail : (n - k % n - 1)th node
# and new head : (n - k % n)th node
newTail = head
for i in range(length - k % length - 1):
newTail = newTail.next
newHead = newTail.next
# break the ring
newTail.next = None
return newHead
# Another approach
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
def reverseList(node, length):
current = node
prev = None
for i in range(length):
nextNode = current.next
current.next = prev
prev = current
current = nextNode
return prev, current
if not head or k == 0:
return head
length = 0
current = head
while current:
length += 1
current = current.next
k = k % length
if k == 0:
return head
reverseWholeList, current = reverseList(head, length)
firstKReversedNode, current = reverseList(reverseWholeList, k)
nodeAfterFirstKNodes, current = reverseList(current, length - k)
result = firstKReversedNode
while firstKReversedNode and firstKReversedNode.next:
firstKReversedNode = firstKReversedNode.next
firstKReversedNode.next = nodeAfterFirstKNodes
return result
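# Added usage sketch (hypothetical helper, not part of the original solutions): build the
# example list 1->2->3->4->5, rotate it by k=2 and print the expected order 4->5->1->2->3.
def _build_list(values):
    head = ListNode(values[0])
    node = head
    for v in values[1:]:
        node.next = ListNode(v)
        node = node.next
    return head

if __name__ == "__main__":
    rotated = Solution().rotateRight(_build_list([1, 2, 3, 4, 5]), 2)
    out = []
    while rotated:
        out.append(rotated.val)
        rotated = rotated.next
    print(out)  # expected: [4, 5, 1, 2, 3]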
|
Saima-Chaity/Leetcode
|
LinkedList/rotateList.py
|
rotateList.py
|
py
| 2,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11753378103
|
import urllib3
import requests
from bs4 import BeautifulSoup
from csv import writer
import csv
import pandas as pd
url = 'https://www.mubawab.tn/fr/cc/immobilier-a-louer-all:o:i:sc:houses-for-rent:p:' + str(1)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
lists = soup.find_all('li', class_='listingBox w100')
for listing in lists:
    estate_local = listing.find('h3', class_='listingH3').text.split()[-1].strip()
    estate_type = "Maison"
    estate_surface = getattr(listing.find('h4', class_='listingH4 floatR'),'text', None)
    estate_piece = listing.find('h4', class_='listingH4 floatR').text.split()[0].strip()
    estate_price = getattr(listing.find("span", class_= "priceTag hardShadow float-right floatL yellowBg"),'text', None)
    if estate_price is None:
        estate_price = getattr(listing.find("span", class_= "priceTag hardShadow float-right floatL"),'text', None)
info = [estate_local, estate_type, estate_surface, estate_piece, str(estate_price)]
print(info)
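# Added sketch (assumption, not part of the original script): the csv 'writer' imported above
# could persist the scraped rows, e.g. by accumulating each 'info' list and writing them out.
# The filename and header below are illustrative only:
#
#   with open('listings.csv', 'w', newline='', encoding='utf-8') as f:
#       out = writer(f)
#       out.writerow(['locality', 'type', 'surface', 'rooms', 'price'])
#       out.writerows(all_rows)  # all_rows: 'info' lists collected in the loop above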
|
sofienne-chouiekh/Scraping_data_estate_location
|
test.py
|
test.py
|
py
| 1,091 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44999838
|
import networkx as nx
import numpy as np
# from GPy.kern import Kern
from functools import reduce
from itertools import product
import copy
# class Ours(Kern):
# def __init__(self, input_dim, G, scale=1., variance = 1,
# active_dims=None, name='ours', K_matrix = None, kern = "linear",
# pava = True):
# Kern.__init__(self, input_dim, active_dims, name)
# self.scale = scale
# self.N_objs = input_dim
# self.G = G
# self.K_matrix = K_matrix
# self.kern = kern
# self.variance = variance
# self.pava = pava
# def kernel_f(self, sigma, sigma_prime):
# if self.kern == "exp":
# return np.exp(-self.scale*np.linalg.norm(self.phi_(sigma) - self.phi_(sigma_prime)))
# elif self.kern == "linear":
# return np.dot(self.phi_(sigma), self.phi_(sigma_prime))
# elif self.kern == "linear*exp":
# phi_sigma = self.phi_(sigma)
# phi_sigma_prime = self.phi_(sigma_prime)
# l = np.dot(phi_sigma, phi_sigma_prime)
# e = np.exp(-self.scale*np.linalg.norm(phi_sigma - phi_sigma_prime))
# return l*e
# def _index(self, X, X2):
# if X2 is None: i1 = i2 = X.astype('int').flat
# else: i1, i2 = X.astype('int').flat, X2.astype('int').flat
# return self.K_matrix[i1,:][:,i2]
# def K(self, X, X2=None): return self.variance * self._index(X, X2)
# def Kdiag(self, X): return self.variance * self._index(X,None).diagonal()
# def update_gradients_full(self, dL_dK, X, X2=None): pass
# def update_gradients_diag(self, dL_dKdiag, X): raise NotImplementedError
# def gradients_X(self, dL_dK, X, X2=None): raise NotImplementedError
# def gradients_X_diag(self, dL_dKdiag, X): raise NotImplementedError
# def calc_v(self, groups):
# v = np.zeros(len(groups))
# B_i, B_i_ = set(), set()
# k = 0
# while len(B_i) < self.N_objs:
# B_i = B_i.union(groups[len(groups) - 1 - k])
# # B_i = B_i.union(groups[k])
# v[k] = - (self.F(B_i) - self.F(B_i_)) / (len(B_i)-len(B_i_))
# B_i_ = B_i.copy()
# k += 1
# return v
# def F(self, A_): return nx.cut_size(self.G, A_, None, 'weight')
# def phi_(self, A):
# assert type(A[0]) == set
# A_is = A.copy()
# if not self.pava:
# v = self.calc_v(A_is)
# else:
# v = []
# k = len(A_is)
# while len(v) < len(A_is):
# B_i = reduce(lambda a,b: a.union(b), A_is[k-1:])
# B_i_ = reduce(lambda a,b: a.union(b), A_is[k:]) if k < len(A_is) else set([])
# v_ = - (self.F(B_i) - self.F(B_i_)) / (len(B_i)-len(B_i_))
# if len(v) != 0 and v_ < v[0]:
# A_is[k-1:k+1] = [A_is[k-1].union(A_is[k])]
# v.pop(0)
# continue
# v.insert(0,v_)
# k -= 1
# w = np.zeros(self.N_objs)
# # Reordering
# for i in range(len(A_is)): w[list(A_is[i])] = v[i]
# # Not Reordering
# # for a,i in zip(A_is,range(len(v))):
# # w[list(a)] = v[i]
# return - w
# def F(A_, G): return nx.cut_size(G, A_, None, 'weight')
def F(A_, G):
return nx.cut_size(G, A_, None)
def phi_(A, N_objs, G):
assert type(A[0]) == set
A_is = A.copy()
v = []
k = len(A_is)
while len(v) < len(A_is):
B_i = reduce(lambda a, b: a.union(b), A_is[k - 1 :])
B_i_ = reduce(lambda a, b: a.union(b), A_is[k:]) if k < len(A_is) else set([])
v_ = -(F(B_i, G) - F(B_i_, G)) / (len(B_i) - len(B_i_))
if len(v) != 0 and v_ < v[0]:
A_is[k - 1 : k + 1] = [A_is[k - 1].union(A_is[k])]
v.pop(0)
continue
v.insert(0, v_)
k -= 1
w = np.zeros(N_objs)
for i in range(len(A_is)):
w[list(A_is[i])] = v[i]
return -w
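# Added usage sketch (toy example, not part of the original module): phi_ expects an ordered
# partition of the node set given as a list of sets, e.g. on a 3-node path graph:
_G_toy = nx.path_graph(3)
_phi_toy = phi_([{0}, {1}, {2}], 3, _G_toy)  # one value per node, induced by the graph cut function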
def get_phi(X_ours, G, N_objs, train_ind, test_ind):
X_phi = np.zeros((len(X_ours), N_objs))
for i, x in enumerate(X_ours):
f = phi_(x, N_objs, G)
X_phi[i] = f / np.linalg.norm(f)
return X_phi[train_ind.ravel()], X_phi[test_ind.ravel()]
def phi_interleaving(A_inter, G, N_objs, heuristic=False, samples=2000):
absents = list(set(list(range(N_objs))) - set(list(A_inter)))
inter = [set()]
for o in A_inter:
inter.append(set(([o])))
inter.append(set())
possible_positions = list(range(0, len(inter), 2))
X_inter_phi = np.zeros(N_objs)
if not heuristic or samples >= len(possible_positions) ** len(absents):
coherent_set = product(possible_positions, repeat=len(absents))
div = len(possible_positions) ** len(absents)
else:
rng = np.random.RandomState(N_objs)
coherent_set = rng.choice(possible_positions, (samples, len(absents)))
div = samples
for i, abs_pos in enumerate(coherent_set):
cur = copy.deepcopy(inter)
for pos, o in zip(abs_pos, absents):
cur[pos].add(o)
while set() in cur:
cur.remove(set())
f = phi_(cur, N_objs, G)
X_inter_phi += f # / np.linalg.norm(f)
# weighting more the certain partitions
# w = 1 / N_objs
# w1 = 2 * w
# w2 = round((1-w1*len(A_inter)),2) / len(absents)
# for i in range(N_objs):
# X_inter_phi[i] = X_inter_phi[i] * (w1 if i in A_inter else w2)
return X_inter_phi / div
# %%
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
from sushi_dataset import sushi_dataset as load_dataset
my_dpi = 96
N_objs = 10
X, X_ours, y, y_train, y_test, train_ind, test_ind, G =\
load_dataset(0, "full", N=2500)
plt.figure(figsize=(500/my_dpi/1.6, 500/my_dpi/1.6), dpi=my_dpi)
# plt.figure()
# plt.subplot(1,2,1)
# A_1 = [set([x]) for x in X[2][:5]]+ [set([x for x in X[2][5:]])]
# A_2 = [set([x]) for x in X[2][:4:-1]]+[set([x for x in X[2][:5]])]
A_1 = X_ours[2]
A_2 = X_ours[2][::-1]
print(A_1)
print(A_2)
w_1 = phi_(A_1, N_objs, G)
w_2 = phi_(A_2, N_objs, G)
# plt.scatter(range(1,1+N_objs), w_1, label = r"$\phi(A_1)$")
# plt.scatter(range(1,1+N_objs), w_2, label = r"$\phi(A_2)$")
# plt.xticks(range(1,1+N_objs))
# plt.yticks(np.arange(-5,7,2))
plt.vlines(0,-0.5,10.5, "black", "--", alpha = 0.7)
plt.scatter(w_1, range(1,1+N_objs), label = r"$\phi(A)$", color = "gold")
plt.scatter(w_2, range(1,1+N_objs), label = r"$\phi(A')$", color = "red")
# plt.yticks(range(1,1+N_objs), df_sushi.iloc[:,0].tolist())
# plt.yticks(range(1,1+N_objs), [str(set([x])) for x in range(1,1+N_objs)])
plt.yticks(range(1,1+N_objs), range(1,1+N_objs))
# plt.xticks(np.arange(-5,7,2))
# plt.ylabel("W")
plt.xlabel(r"$\phi_d$", fontsize = 18)
plt.ylabel(r"$d$ ", fontsize = 18).set_rotation(0)
# plt.xlim(-6,8)
plt.ylim(0.5,10.5)
plt.legend(fontsize = 12, borderpad=0.01, borderaxespad=0, labelspacing=0.4,handletextpad=-0.3, scatterpoints=1, loc = "upper right")
plt.tight_layout()
plt.savefig("cached_results/interpretation21.pdf", bbox_inches="tight")
plt.show()
# plt.subplot(1,2,2)
plt.figure(figsize=(500/my_dpi/1.6, 500/my_dpi/1.6), dpi=my_dpi)
plt.vlines(0,-0.5,9.5, "black", "--", alpha = 0.7)
plt.scatter(w_1*w_2, range(1,1+N_objs), color = "orange")#, label = r"$\phi(A_1)_i\cdot\phi(A_2)_i \forall i=1\ldots n$")
plt.yticks([])
plt.xlabel(r"$\phi(A)_d\phi(A')_d$", fontsize = 18)
# plt.xlim(-6,12)
plt.ylim(-0.5,9.5)
plt.tight_layout()
plt.savefig("cached_results/interpretation22.pdf",bbox_inches="tight")
# plt.legend()
plt.show()
# # %%
# str(w_1.round(2).tolist())[1:-1].replace(",", "\\")
# str(w_2.round(2).tolist())[1:-1].replace(",", "\\")
# str((w_1 * w_2).round(2).tolist())[1:-1].replace(",", "\\")
# # %%
from copy import deepcopy
A_is = deepcopy(X_ours[2])
v = []
k = len(A_is)
while len(v) < len(A_is):
B_i = reduce(lambda a,b: a.union(b), A_is[k-1:])
B_i_ = reduce(lambda a,b: a.union(b), A_is[k:]) if k < len(A_is) else set([])
v_ = - (F(B_i, G) - F(B_i_, G)) / (len(B_i)-len(B_i_))
if len(v) != 0 and v_ < v[0]:
A_is[k-1:k+1] = [A_is[k-1].union(A_is[k])]
v.pop(0)
continue
v.insert(0,v_)
k -= 1
my_dpi = 96
plt.figure(figsize=(450/my_dpi/1.6, 250/my_dpi/1.6), dpi=my_dpi)
for i in range(len(A_is)):
A_is[i] = set([x+1 for x in A_is[i]])
plt.hlines(0,-0.5,9.5, "black", "--", alpha = 0.7)
plt.scatter(range(len(v)) , -np.array(v), 10)
a = str(A_is).replace("[","").replace("]","").split("{")[1:]
a = [("\n" if i % 2 == 0 else "")+"{"+x.replace(",","") for i,x in enumerate(a) ]
plt.xticks(range(len(v)), a, fontsize = 7)
plt.yticks(np.arange(-4,6,2), fontsize = 7)
plt.yticks(fontsize = 7)
plt.xlim(-0.5,6.5)
plt.ylabel("values", fontsize = 8)
plt.tight_layout()
plt.savefig("cached_results/interpretation1.pdf")
plt.show()
|
MichelangeloConserva/CutFunctionKernel
|
interleaving/our_kernel.py
|
our_kernel.py
|
py
| 8,950 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26552265149
|
import os
import sys
import subprocess
import concurrent.futures
import tomllib
addon_base_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
files_to_ignore_lower = [
x.lower() for x in ["initSettings.sqf", "initKeybinds.sqf", "XEH_PREP.sqf"]
]
sqfvm_exe = os.path.join(addon_base_path, "sqfvm.exe")
virtual_paths = [
# would need to add more even more to /include to use it
"P:/a3|/a3", # "{}|/a3".format(os.path.join(addon_base_path, "include", "a3")),
"P:/a3|/A3",
"P:/x/cba|/x/cba",
"{}|/z/ace".format(addon_base_path),
]
def get_files_to_process(basePath):
arma_files = []
for root, _dirs, files in os.walk(os.path.join(addon_base_path, "addons")):
for file in files:
if file.endswith(".sqf") or file == "config.cpp":
if file.lower() in files_to_ignore_lower:
continue
skipPreprocessing = False
for addonTomlPath in [os.path.join(root, "addon.toml"), os.path.join(os.path.dirname(root), "addon.toml")]:
if os.path.isfile(addonTomlPath):
with open(addonTomlPath, "rb") as f:
tomlFile = tomllib.load(f)
try:
skipPreprocessing = not tomlFile.get('rapify')['enabled']
except:
pass
if file == "config.cpp" and skipPreprocessing:
continue # ignore configs with __has_include
filePath = os.path.join(root, file)
arma_files.append(filePath)
return arma_files
def process_file(filePath, skipA3Warnings=True):
with open(filePath, "r", encoding="utf-8", errors="ignore") as file:
content = file.read()
if content.startswith("//pragma SKIP_COMPILE"):
return False
cmd = [sqfvm_exe, "--input", filePath, "--parse-only", "--automated"]
for v in virtual_paths:
cmd.append("-v")
cmd.append(v)
# cmd.append("-V")
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
try:
ret = proc.wait(12) # max wait - seconds
except Exception as _e:
print("sqfvm timed out: {}".format(filePath))
return True
# print("{} = {}".format(filePath, ret))
fileHasError = False
keepReadingLines = True
while keepReadingLines:
line = proc.stdout.readline()
if not line:
keepReadingLines = False
else:
line = line.rstrip()
if line.startswith("[ERR]"):
fileHasError = True
if not (
skipA3Warnings
and line.startswith("[WRN]")
and ("a3/" in line)
and (("Unexpected IFDEF" in line) or ("defined twice" in line))
):
print(" {}".format(line))
return fileHasError
def main():
if not os.path.isfile(sqfvm_exe):
print("Error: sqfvm.exe not found in base folder [{}]".format(sqfvm_exe))
return 1
error_count = 0
arma_files = get_files_to_process(addon_base_path)
print("Checking {} files".format(len(arma_files)))
with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:
for fileError in executor.map(process_file, arma_files):
if fileError:
error_count += 1
print("Errors: {}".format(error_count))
return error_count
if __name__ == "__main__":
sys.exit(main())
|
acemod/ACE3
|
tools/sqfvmChecker.py
|
sqfvmChecker.py
|
py
| 3,556 |
python
|
en
|
code
| 966 |
github-code
|
6
|
33754030058
|
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import Port, Stop, Direction, Button, Color
from pybricks.tools import wait, StopWatch, DataLog
from pybricks.robotics import DriveBase
from pybricks.media.ev3dev import SoundFile, ImageFile
# This program requires LEGO EV3 MicroPython v2.0 or higher.
# Click "Open user guide" on the EV3 extension tab for more information.
import time as t
import math
# Create your objects here.
ev3 = EV3Brick()
ev3.speaker.set_volume(10)
sensoreSx = ColorSensor(Port.S1)
sensoreDx = ColorSensor(Port.S2)
sensoreVM = ColorSensor(Port.S3)
sensoreDist = UltrasonicSensor(Port.S4)
Watch = StopWatch()
motoreSx = Motor(Port.B)
motoreDx = Motor(Port.C)
motoreSx.control.limits(800, 800, 100)
motoreDx.control.limits(800, 800, 100)
"""
motoreBig = Motor(Port.A)
motoreSmall = Motor(Port.D)
"""
# Color thresholds
# Black is not used for now
rgbNero = [[0, 0, 0], [0, 20, 20]]
# .............
rgbVerde = [[4, 9, 9], [9, 18, 18]]
rgbBianco = [[25, 25, 25], [100, 100, 100]]
rgbSLine = [40, 40, 40]
# Speeds
velF = 50
velB = -40
velBV = -70
velFV = 70
velSLF = 50
velSLB = -50
velSLFmin = 40
velStuck = 70
velOstF = 70
velOstB = 32
camb = 30
biasSx = 5
biasDx = 5
# Angles
ang10CM = 350
ang1CM = ang10CM / 10
ang180 = 950
ang90 = math.floor((ang180 / 2))
# Powers and minimum speeds
velMin = 30
pManov = 500
# Misc
thrVerde = 15
thrDoppioVerde = 10
cmRobot = 15
mCmRobot = math.floor((cmRobot / 2))
mCmRobot = mCmRobot * ang1CM
angTurn = 15
tVerde = 1300
cW = 0.1
valWNero = 0
valWNCheck = 0
valWVerde = 0
valWVCheck = 0
tempoW = 1000
stepVerde = 5
stepNero = 5
bSx = 0
bDx = 0
tStall = 3000
stallThresh = 220
# wait
def wait():
pass
# Color assignment function
# 0 = black, 1 = white, 2 = green
def assCol(rgbS, uscS = 0):
if ((rgbS[0] >= rgbVerde[0][0]) and (rgbS[1] >= rgbVerde[0][1]) and (rgbS[2] >= rgbVerde[0][2])) and ((rgbS[0] <= rgbVerde[1][0]) and (rgbS[1] <= rgbVerde[1][1]) and (rgbS[2] <= rgbVerde[1][2])):
uscS = 2
elif ((rgbS[0] > rgbBianco[0][0]) and (rgbS[1] > rgbBianco[0][1]) and (rgbS[2] > rgbBianco[0][2])):
uscS = 1
else:
uscS = 0
return uscS
# Color assignment for single-line following
def assColSL(rgbS, uscS = 0):
if ((rgbS[0] > rgbSLine[0]) or (rgbS[1] > rgbSLine[1]) or (rgbS[2] > rgbSLine[2])):
uscS = 1
else:
uscS = 0
return uscS
# Movement via dc control (s = left, d = right (left and right motor powers))
def mDc(s, d):
motoreSx.dc(s)
motoreDx.dc(d)
# Movement via angle control (added)
def mAng(power ,angs, angd, then, waitEnd):
motoreSx.run_angle(power, angs, then, False)
motoreDx.run_angle(power, angd, then, waitEnd)
# Double-green movement
def doppioVerde():
mAng(pManov, ang10CM, ang10CM, Stop.HOLD, True)
mAng(pManov, ang180, -ang180, Stop.HOLD, True)
mAng(pManov, ang1CM * 5, ang1CM * 5, Stop.HOLD, True)
# 90-degree turn functions
def ang90Sx():
mDc(velBV, velFV)
t.sleep(tVerde)
def ang90Dx():
mDc(velFV, velBV)
t.sleep(tVerde)
# Motor movement for a given time
def mTime(ssx, sdx, tsx, tdx, then, wait):
motoreSx.run_time(ssx, tsx, then, False)
motoreDx.run_time(sdx, tdx, then, wait)
# Edge movement function
tSx = 0
ttSx = 0
tttSx = 0
tDx = 0
ttDx = 0
tttDx = 0
# Angular movements
def mSx():
mDc(velB, velF + biasSx)
def mDx():
mDc(velF, velB - biasDx)
def mStop():
motoreSx.hold()
motoreDx.hold()
# Green verification function, returns True if green is confirmed
def verGreen(sensore):
val = 0
val = sensore
mDc(velMin, velMin)
intT = 0
intTT = 0
while assCol(val.rgb()) == 2:
intT += 1
if assCol(sensoreSx.rgb()) == 2 and assCol(sensoreDx.rgb()) == 2:
intTT += 1
if intTT >= thrDoppioVerde:
doppioVerde()
elif intT >= thrVerde:
print(str(intT) + " ||| " + str(thrVerde))
return True
else:
print(str(intT) + " ||| " + str(thrVerde))
intT = 0
return False
# Double-green verification
def verDoppioGreen():
mStop()
intT = 0
for i in range(2):
mAng(pManov, stepVerde, stepVerde, Stop.HOLD, False)
rgbSx = sensoreSx.rgb()
rgbDx = sensoreDx.rgb()
colSx = assCol(rgbSx)
colDx = assCol(rgbDx)
if colDx != 2 and colSx != 2:
intT += 1
if intT == 0:
return True
else:
return False
mAng(pManov, stepVerde * -3, stepVerde * -3, Stop.HOLD, True)
def SLineSx():
mStop()
temp = 1
colTemp = 0
watVerde = Watch.time() + tVerde
while temp:
colTemp = assColSL(sensoreSx.rgb())
if colTemp == 1:
mDc(velSLF, velSLF - camb)
else:
mDc(velSLB, velSLF)
if Watch.time() >= watVerde:
temp = 0
while assCol(sensoreDx.rgb()) != 0:
mDc(-50, 50)
def SLineDx():
mStop()
temp = 1
colTemp = 0
watVerde = Watch.time() + tVerde
while temp:
colTemp = assColSL(sensoreDx.rgb())
if colTemp == 1:
mDc(velSLF - camb, velSLF)
else:
mDc(velSLF, velSLB)
if Watch.time() >= watVerde:
temp = 0
while assCol(sensoreSx.rgb()) != 0:
mDc(50, -50)
wPrima = 0
angPSx = 0
angPDx = 0
angDSx = 0
angDDx = 0
def stuckFix():
global wPrima
global angPSx
global angPDx
print("ANGOLI: " + str(motoreSx.angle()) + " | " + str(angPSx))
if Watch.time() > wPrima + tStall:
wPrima = Watch.time()
if ((motoreSx.angle() - angPSx) < stallThresh and (motoreDx.angle() - angPDx) < stallThresh):
ev3.speaker.beep(100, 100)
angPSx = motoreSx.angle()
angPDx = motoreDx.angle()
            # Activate the anti-stall routine
mDc(velStuck, velStuck)
t.sleep(0.5)
while assCol(sensoreSx.rgb()) != 1 and assCol(sensoreDx.rgb()) != 1:
mDc(velStuck, velStuck)
else:
angPSx = motoreSx.angle()
angPDx = motoreDx.angle()
def DEBUG():
while True:
dist = sensoreDist.distance()
isTouch = False
rgbSx = sensoreSx.rgb()
rgbDx = sensoreDx.rgb()
colSx = assCol(rgbSx)
colDx = assCol(rgbDx)
isG = 0
if dist <= 55:
isTouch = True
if colSx == 2 or colDx == 2:
isG = 1
debug = "isGreen : " + str(isG) + "\n" + "Sx : " + str(rgbSx) + "\n" + "Dx : " + str(rgbDx) + "\n" + str(colSx) + " ! " + str(colDx) + "\n\n\n\n"
debug = debug + "\nvalWNero : " + str(valWNero) + "\nDist : " + str(dist) + " | isTouch : " + str(isTouch)
print(debug)
def ostacolo():
tempOst = 1
while tempOst:
mDc(velMin, velMin)
if sensoreDist.distance() < 110:
tempOst = 0
mAng(pManov, ang1CM * -5, ang1CM * -5, Stop.HOLD, True)
mAng(pManov, ang180 / 3, -ang180 / 3, Stop.HOLD, True)
mDc(velOstB, velOstF)
tempOst = 1
while tempOst:
if assCol(sensoreDx.rgb()) == 0:
tempOst = 0
mAng(pManov, ang1CM * 2, ang1CM * 2, Stop.HOLD, True)
while assCol(sensoreSx.rgb()) != 0:
mDc(50, -50)
while True:
bDx = bDx
bSx = bSx
dist = sensoreDist.distance()
isTouch = False
rgbSx = sensoreSx.rgb()
rgbDx = sensoreDx.rgb()
colSx = assCol(rgbSx)
colDx = assCol(rgbDx)
sMSx = motoreSx.speed()
sMDx = motoreDx.speed()
isG = 0
if dist <= 55:
isTouch = True
valWNCheck = 0
valWVCheck = 0
if (valWNero + tempoW) <= Watch.time():
valWNCheck = 1
debug = "isGreen : " + str(isG) + "\n" + "Sx : " + str(rgbSx) + "\n" + "Dx : " + str(rgbDx) + "\n" + str(colSx) + " ! " + str(colDx) + "\n\n\n\n"
debug = debug + "\nvalWNCheck : " + str(valWNCheck) + " | valWNero : " + str(valWNero) + " | valWNeroS : " + str((valWNero + tempoW)) + "\nDist : " + str(dist)
debug = debug + "\nmSpeed : " + str(sMSx) + " | " + str(sMDx)
print(debug)
    # Start of the actions
    # Green on the left
if (colSx == 2 and valWNCheck):
if verGreen(sensoreSx):
ev3.speaker.beep(200, 50)
SLineSx()
ev3.speaker.beep(100, 50)
    # Green on the right
elif (colDx == 2 and valWNCheck):
if verGreen(sensoreDx):
ev3.speaker.beep(200, 50)
SLineDx()
ev3.speaker.beep(100, 50)
    # Double black
elif (colSx == 0 and colDx == 0):
valWNero = Watch.time()
    # Double-green handling removed
    # Black on the left
elif (colSx == 0 and colDx != 0):
bSx = bSx + 0
bDx = 0
mSx()
    # Black on the right
elif (colDx == 0 and colSx != 0 ):
bDx = bDx + 0
bSx = 0
mDx()
    # Fallback case
else:
mDc(velF, velF)
if (sensoreDist.distance() < 130):
ostacolo()
stuckFix()
|
KiroWasHere/Robocup-2023
|
RoboSusa[DEPRECATED]/4BERobocup [DEPRECATED]/main.py
|
main.py
|
py
| 9,063 |
python
|
en
|
code
| 2 |
github-code
|
6
|
3709323219
|
import pandas as pd
import numpy as np
from cloudservice import get_documenttask, download_doc
from cloudservice import get_doctag, create_doctag, delete_doctag
from cloudservice import create_doctagrel, delete_doctagrel
from cloudservice import change_step
from cloudservice import get_docs_byid, fill_docinfo
from cloudservice import get_all_projs, get_file_projs
import time, os, shutil
import config
import core
import utils
from datetime import datetime
from wordapi import transdoc
from pptapi import transppt
def analysis_log(info, info_obj):
print(info, info_obj)
def on_loop(project_id):
docresponse = get_documenttask(projid=project_id)
docdata = pd.DataFrame(docresponse)
if len(docdata) == 0:
return
docdata = docdata[docdata['step'] == 1]
docdata = docdata.tail(config.n_for_project_in_loop)
docdata = (docdata
# .sort_values('name')
.dropna(subset=['fileUrl', 'step'])
.reset_index()
)
# basepath = os.path.join(config.root_dir, str(project_id))
basepath = r'E:\file-local-analysis'
for indx, dt in docdata.iterrows():
info_log_obj = {'id': dt['fileId'], 'name': dt['name']}
print()
analysis_log('开始', info_log_obj)
# if not dt['fileUrl'].startswith('http'):
# analysis_log('无文件', info_log_obj)
# continue
try:
# curpath = os.path.join(basepath, dt['name'])
curpath = dt['fileUrl']
# transformed = core.transform(curpath, basepath, extname)
ext_tuple = os.path.splitext(dt['name'])
extname = ext_tuple[1]
            # backfill block (kept commented out): re-run only specific extensions
# if extname != '.dwg' and extname != '.rar':
# continue
# analysis_log('开始', info_log_obj)
            # end of backfill block
if extname == '.doc':
transdoc.doc2docx(curpath, basepath, remove=False)
curpath = os.path.join(basepath, dt['name'])
if extname == '.ppt':
transppt.ppt2pptx(curpath, basepath, remove=False)
curpath = os.path.join(basepath, dt['name'])
            # dwg/rar: copy locally; not used by the online analysis
if extname == '.dwg':
shutil.copy(curpath, basepath)
curpath = os.path.join(basepath, dt['name'])
if extname == '.rar' or extname == '.zip':
shutil.copy(curpath, basepath)
curpath = os.path.join(basepath, dt['name'])
            # skip very large files
if os.path.getsize(dt['fileUrl']) > 100 * 1000 * 1000:
analysis_log('文件过大', info_log_obj)
dt['step'] = 2
change_step(dt['id'], dt.to_dict(), projid=project_id)
continue
except Exception as e:
analysis_log('下载和转换文件', info_log_obj)
continue
        # analyze the document into fields
try:
kwords, kwfreq, pharr, nwarr, sumarr, *img_none = core.analysis(
curpath, extname, imgdir=None, do_drawings=True)
kwords_arr = kwords.split(',')
real_kwords = []
for kw in kwords_arr:
if is_real_kw(kw):
real_kwords.append(kw)
if len(real_kwords) > 5:
low_kw = real_kwords[5:]
else:
low_kw = []
except Exception as e:
analysis_log('分析成字段', info_log_obj)
print(e)
# avoid always fail
dt['step'] = 2
change_step(dt['id'], dt.to_dict(), projid=project_id)
# avoid always fail
continue
        # write the extracted fields into the document table
file_table_write_success = False
try:
doc_record = get_docs_byid(dt['fileId'], projid=project_id)
# choose summary
real_summary = []
for su in sumarr:
if is_real_summary(su):
real_summary.append(su)
summarylimit = 3
if len(real_summary) > summarylimit:
real_summary = sorted(real_summary, key=lambda x: len(x), reverse=True)[:summarylimit]
nwlimit = 900
nwarr = utils.remove_blank(nwarr)
if len(nwarr) > nwlimit:
nwarr = nwarr[:nwlimit]
updated = {
# "keyWord": kwords,
"keyWord": ','.join(low_kw),
"abstract": ','.join(real_summary),
"newWords": nwarr,
"wordFrequency": kwfreq,
"phrases": pharr
}
doc_record.update(updated)
# print(doc_record)
fill_docinfo(doc_record['id'], doc_record, projid=project_id)
file_table_write_success = True
except Exception as e:
analysis_log('文件表填入', info_log_obj)
continue
        # create new tags and associate them with the document
try:
if not real_kwords:
analysis_log('无内容', info_log_obj)
else:
alltags = get_doctag(projid=project_id)
if len(real_kwords) >= config.web_keywords_num:
curtags = real_kwords[:config.web_keywords_num]
else:
curtags = real_kwords
dtrels = []
for curtag in curtags:
existq = False
for t in alltags:
if str(t['name']).upper() == str(curtag).upper():
dtrels.append((dt['fileId'], t['id']))
existq = True
break
if not existq:
tagid = create_doctag(curtag, projid=project_id)
dtrels.append((dt['fileId'], tagid))
                    # write the file-tag associations
create_doctagrel(dtrels, projid=project_id)
except:
analysis_log('标签', info_log_obj)
continue
        # mark the task step as completed
if file_table_write_success:
dt['step'] = 2
change_step(dt['id'], dt.to_dict(), projid=project_id)
        # delete the locally downloaded file
pass
analysis_log('完成', info_log_obj)
# delete_doctagrel(13, projid=project_id)
print('end proj')
def is_real_kw(kw: str) -> bool:
if len(kw) < 2:
return False
undercount = 0
for c in kw:
if c == '_':
undercount += 1
if undercount / len(kw) > 0.499:
return False
return True
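# Added examples (illustrative): is_real_kw('a') -> False (too short),
# is_real_kw('a_b_') -> False (half of the characters are underscores),
# is_real_kw('word') -> True.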
def is_real_summary(su) -> bool:
if len(su) < 6:
return False
return True
def find_needed_project_ids():
pids = np.loadtxt(r'.\ftp-pids.csv', dtype=int)
return pids
def exitq() -> bool:
with open('stop.cms') as sf:
sign = sf.readline()
sign = int(sign)
if sign > 0:
return True
return False
if __name__ == '__main__':
# projects = find_needed_project_ids()
# loop_id = 0
# while True:
# if exitq():
# print('exit')
# print(datetime.now())
# break
# loop_id += 1
# print('loop: ' + str(loop_id))
# for pid in projects:
# time.sleep(0.1)
# on_loop(project_id=pid)
# print('loop: ' + str(loop_id) + ' / proj: ' + str(pid))
# time.sleep(2)
projects = find_needed_project_ids() # with exclude
have_file_projects = get_file_projs()
loop_id = 0
while True:
if exitq():
print('exit')
print(datetime.now())
break
loop_id += 1
print('loop: ' + str(loop_id))
for pid in projects:
if pid not in have_file_projects:
continue
time.sleep(0.1)
print('loop: ' + str(loop_id) + ' / proj: ' + str(pid))
on_loop(project_id=pid)
time.sleep(2)
| pengyang486868/PY-read-Document | analysislocal.py | analysislocal.py | py | 8,022 | python | en | code | 0 | github-code | 6 |
30357818911 |
from pyface.qt import QtCore, QtGui, is_qt4
from pyface.image_resource import ImageResource
from pyface.timer.api import do_later
from pyface.ui_traits import Image
from traits.api import (
Any,
Bool,
Button,
Dict,
Event,
List,
HasTraits,
Instance,
Int,
Property,
Str,
cached_property,
observe,
)
from traitsui.api import (
EnumEditor,
InstanceEditor,
Group,
Item,
Label,
ObjectColumn,
TableColumn,
TableFilter,
UI,
View,
default_handler,
spring,
)
from traitsui.editors.table_editor import (
BaseTableEditor,
ReversedList,
customize_filter,
)
from traitsui.ui_traits import SequenceTypes
from .editor import Editor
from .table_model import TableModel, SortFilterTableModel
if is_qt4:
def set_qheader_section_resize_mode(header):
return header.setResizeMode
else:
def set_qheader_section_resize_mode(header):
return header.setSectionResizeMode
class TableEditor(Editor, BaseTableEditor):
"""Editor that presents data in a table. Optionally, tables can have
a set of filters that reduce the set of data displayed, according to
their criteria.
"""
# -------------------------------------------------------------------------
# Trait definitions:
# -------------------------------------------------------------------------
#: The table view control associated with the editor:
table_view = Any()
def _table_view_default(self):
return TableView(editor=self)
#: A wrapper around the source model which provides filtering and sorting:
model = Instance(SortFilterTableModel)
def _model_default(self):
return SortFilterTableModel(editor=self)
#: The table model associated with the editor:
source_model = Instance(TableModel)
def _source_model_default(self):
return TableModel(editor=self)
#: The set of columns currently defined on the editor:
columns = List(TableColumn)
#: The currently selected row(s), column(s), or cell(s).
selected = Any()
#: The current selected row
selected_row = Property(Any, observe="selected")
selected_indices = Property(Any, observe="selected")
#: Current filter object (should be a TableFilter or callable or None):
filter = Any()
#: The indices of the table items currently passing the table filter:
filtered_indices = List(Int)
#: Current filter summary message
filter_summary = Str("All items")
#: Update the filtered contents.
update_filter = Event()
#: The event fired when a cell is clicked on:
click = Event()
#: The event fired when a cell is double-clicked on:
dclick = Event()
#: The Traits UI associated with the table editor toolbar:
toolbar_ui = Instance(UI)
#: The index of the row that was last right clicked on its vertical header
header_row = Int()
#: Whether to auto-size the columns or not.
auto_size = Bool(False)
#: Dictionary mapping image names to QIcons
images = Dict()
#: Dictionary mapping ImageResource objects to QIcons
image_resources = Dict()
#: An image being converted:
image = Image
def init(self, parent):
"""Finishes initializing the editor by creating the underlying toolkit
widget."""
factory = self.factory
self.filter = factory.filter
columns = factory.columns[:]
if (len(columns) == 0) and (len(self.value) > 0):
columns = [
ObjectColumn(name=name)
for name in self.value[0].editable_traits()
]
self.columns = columns
if factory.table_view_factory is not None:
self.table_view = factory.table_view_factory(editor=self)
if factory.source_model_factory is not None:
self.source_model = factory.source_model_factory(editor=self)
if factory.model_factory is not None:
self.model = factory.model_factory(editor=self)
# Create the table view and model
self.model.setDynamicSortFilter(True)
self.model.setSourceModel(self.source_model)
self.table_view.setModel(self.model)
# When sorting is enabled, the first column is initially displayed with
# the triangle indicating it is the sort index, even though no sorting
# has actually been done. Sort here for UI/model consistency.
if self.factory.sortable and not self.factory.reorderable:
self.model.sort(0, QtCore.Qt.SortOrder.AscendingOrder)
# Connect to the mode specific selection handler and select the first
# row/column/cell. Do this before creating the edit_view to make sure
# that it has a valid item to use when constructing its view.
smodel = self.table_view.selectionModel()
mode_slot = getattr(self, "_on_%s_selection" % factory.selection_mode)
smodel.selectionChanged.connect(mode_slot)
self.table_view.setCurrentIndex(self.model.index(0, 0))
# Create the toolbar if necessary
if factory.show_toolbar and len(factory.filters) > 0:
main_view = QtGui.QWidget()
layout = QtGui.QVBoxLayout(main_view)
layout.setContentsMargins(0, 0, 0, 0)
self.toolbar_ui = self.edit_traits(
parent=parent,
kind="subpanel",
view=View(
Group(
Item("filter{View}", editor=factory._filter_editor),
Item("filter_summary{Results}", style="readonly"),
spring,
orientation="horizontal",
),
resizable=True,
),
)
self.toolbar_ui.parent = self.ui
layout.addWidget(self.toolbar_ui.control)
layout.addWidget(self.table_view)
else:
main_view = self.table_view
# Create auxiliary editor and encompassing splitter if necessary
mode = factory.selection_mode
if (factory.edit_view == " ") or mode not in {"row", "rows"}:
self.control = main_view
else:
if factory.orientation == "horizontal":
self.control = QtGui.QSplitter(QtCore.Qt.Orientation.Horizontal)
else:
self.control = QtGui.QSplitter(QtCore.Qt.Orientation.Vertical)
self.control.setSizePolicy(
QtGui.QSizePolicy.Policy.Expanding, QtGui.QSizePolicy.Policy.Expanding
)
self.control.addWidget(main_view)
self.control.setStretchFactor(0, 2)
# Create the row editor below the table view
editor = InstanceEditor(view=factory.edit_view, kind="subpanel")
self._ui = self.edit_traits(
parent=self.control,
kind="subpanel",
view=View(
Item(
"selected_row",
style="custom",
editor=editor,
show_label=False,
resizable=True,
width=factory.edit_view_width,
height=factory.edit_view_height,
),
resizable=True,
handler=factory.edit_view_handler,
),
)
self._ui.parent = self.ui
self.control.addWidget(self._ui.control)
self.control.setStretchFactor(1, 1)
# Connect to the click and double click handlers
self.table_view.clicked.connect(self._on_click)
self.table_view.doubleClicked.connect(self._on_dclick)
# Make sure we listen for 'items' changes as well as complete list
# replacements
self.context_object.on_trait_change(
self.update_editor, self.extended_name + "_items", dispatch="ui"
)
# Listen for changes to traits on the objects in the list
self.context_object.on_trait_change(
self.refresh_editor, self.extended_name + ".-", dispatch="ui"
)
# Listen for changes on column definitions
self.on_trait_change(self._update_columns, "columns", dispatch="ui")
self.on_trait_change(
self._update_columns, "columns_items", dispatch="ui"
)
# Set up the required externally synchronized traits
is_list = mode in ("rows", "columns", "cells")
self.sync_value(factory.click, "click", "to")
self.sync_value(factory.dclick, "dclick", "to")
self.sync_value(factory.columns_name, "columns", is_list=True)
self.sync_value(factory.selected, "selected", is_list=is_list)
self.sync_value(
factory.selected_indices, "selected_indices", is_list=is_list
)
self.sync_value(factory.filter_name, "filter", "from")
self.sync_value(factory.filtered_indices, "filtered_indices", "to")
self.sync_value(factory.update_filter_name, "update_filter", "from")
self.auto_size = self.factory.auto_size
# Initialize the ItemDelegates for each column
self._update_columns()
def dispose(self):
"""Disposes of the contents of an editor."""
self.model.beginResetModel()
self.model.endResetModel()
# Make sure that the auxiliary UIs are properly disposed
if self.toolbar_ui is not None:
self.toolbar_ui.dispose()
if self._ui is not None:
self._ui.dispose()
# Remove listener for 'items' changes on object trait
self.context_object.on_trait_change(
self.update_editor, self.extended_name + "_items", remove=True
)
# Remove listener for changes to traits on the objects in the list
self.context_object.on_trait_change(
self.refresh_editor, self.extended_name + ".-", remove=True
)
# Remove listeners for column definition changes
self.on_trait_change(self._update_columns, "columns", remove=True)
self.on_trait_change(
self._update_columns, "columns_items", remove=True
)
super().dispose()
def update_editor(self):
"""Updates the editor when the object trait changes externally to the
editor."""
if self._no_notify:
return
self.table_view.setUpdatesEnabled(False)
try:
filtering = (
len(self.factory.filters) > 0 or self.filter is not None
)
if filtering:
self._update_filtering()
# invalidate the model, but do not reset it. Resetting the model
# may cause problems if the selection sync'ed traits are being used
# externally to manage the selections
self.model.invalidate()
self.table_view.resizeColumnsToContents()
if self.auto_size:
self.table_view.resizeRowsToContents()
finally:
self.table_view.setUpdatesEnabled(True)
def restore_prefs(self, prefs):
"""Restores any saved user preference information associated with the
editor.
"""
header = self.table_view.horizontalHeader()
if header is not None and "column_state" in prefs:
header.restoreState(prefs["column_state"])
def save_prefs(self):
"""Returns any user preference information associated with the editor."""
prefs = {}
header = self.table_view.horizontalHeader()
if header is not None:
prefs["column_state"] = header.saveState().data()
return prefs
def refresh_editor(self):
"""Requests that the underlying table widget to redraw itself."""
self.table_view.viewport().update()
def create_new_row(self):
"""Creates a new row object using the provided factory."""
factory = self.factory
kw = factory.row_factory_kw.copy()
if "__table_editor__" in kw:
kw["__table_editor__"] = self
return self.ui.evaluate(
factory.row_factory, *factory.row_factory_args, **kw
)
def items(self):
"""Returns the raw list of model objects."""
items = self.value
if not isinstance(items, SequenceTypes):
items = [items]
if self.factory and self.factory.reverse:
items = ReversedList(items)
return items
def callx(self, func, *args, **kw):
"""Call a function without notifying the underlying table view or
model."""
old = self._no_notify
self._no_notify = True
try:
func(*args, **kw)
finally:
self._no_notify = old
def setx(self, **keywords):
"""Set one or more attributes without notifying the underlying table
view or model."""
old = self._no_notify
self._no_notify = True
try:
for name, value in keywords.items():
setattr(self, name, value)
finally:
self._no_notify = old
def set_selection(self, objects=[], notify=True):
"""Sets the current selection to a set of specified objects."""
if not isinstance(objects, list):
objects = [objects]
mode = self.factory.selection_mode
indexes = []
flags = QtGui.QItemSelectionModel.SelectionFlag.ClearAndSelect
# In the case of row or column selection, we need a dummy value for the
# other dimension that has not been filtered.
source_index = self.model.mapToSource(self.model.index(0, 0))
source_row, source_column = source_index.row(), source_index.column()
# Selection mode is 'row' or 'rows'
if mode.startswith("row"):
flags |= QtGui.QItemSelectionModel.SelectionFlag.Rows
items = self.items()
for obj in objects:
try:
row = items.index(obj)
except ValueError:
continue
indexes.append(self.source_model.index(row, source_column))
# Selection mode is 'column' or 'columns'
elif mode.startswith("column"):
flags |= QtGui.QItemSelectionModel.SelectionFlag.Columns
for name in objects:
column = self._column_index_from_name(name)
if column != -1:
indexes.append(self.source_model.index(source_row, column))
# Selection mode is 'cell' or 'cells'
else:
items = self.items()
for obj, name in objects:
try:
row = items.index(obj)
except ValueError:
continue
column = self._column_index_from_name(name)
if column != -1:
indexes.append(self.source_model.index(row, column))
# Perform the selection so that only one signal is emitted
selection = QtGui.QItemSelection()
smodel = self.table_view.selectionModel()
if smodel is None:
# guard against selection during tear-down
return
for index in indexes:
index = self.model.mapFromSource(index)
if index.isValid():
smodel.setCurrentIndex(
index, QtGui.QItemSelectionModel.SelectionFlag.NoUpdate
)
selection.select(index, index)
smodel.blockSignals(not notify)
try:
if len(selection.indexes()):
smodel.clear()
smodel.select(selection, flags)
else:
smodel.clear()
finally:
smodel.blockSignals(False)
self.refresh_editor()
# -------------------------------------------------------------------------
# Private methods:
# -------------------------------------------------------------------------
def _column_index_from_name(self, name):
"""Returns the index of the column with the given name or -1 if no
column exists with that name."""
for i, column in enumerate(self.columns):
if name == column.name:
return i
return -1
def _customize_filters(self, filter):
"""Allows the user to customize the current set of table filters."""
filter_editor = TableFilterEditor(editor=self)
ui = filter_editor.edit_traits(parent=self.control)
if ui.result:
self.factory.filters = filter_editor.templates
self.filter = filter_editor.selected_filter
else:
self.setx(filter=filter)
def _update_filtering(self):
"""Update the filter summary and the filtered indices."""
items = self.items()
num_items = len(items)
f = self.filter
if f is None:
self._filtered_cache = None
self.filtered_indices = list(range(num_items))
self.filter_summary = "All %i items" % num_items
else:
if not callable(f):
f = f.filter
self._filtered_cache = fc = [f(item) for item in items]
self.filtered_indices = fi = [i for i, ok in enumerate(fc) if ok]
self.filter_summary = "%i of %i items" % (len(fi), num_items)
def _add_image(self, image_resource):
"""Adds a new image to the image map."""
image = image_resource.create_icon()
self.image_resources[image_resource] = image
self.images[image_resource.name] = image
return image
def _get_image(self, image):
"""Converts a user specified image to a QIcon."""
if isinstance(image, str):
self.image = image
image = self.image
if isinstance(image, ImageResource):
result = self.image_resources.get(image)
if result is not None:
return result
return self._add_image(image)
return self.images.get(image)
def _create_empty_menu(self):
"""Create a QMenu to display in empty space below the rows.
Returns a QMenu or None if no menu to display.
"""
if not self.factory.editable or self.factory.row_factory is None:
return None
empty_menu = QtGui.QMenu(self.table_view)
action = empty_menu.addAction("Add new item")
action.triggered.connect(self._on_context_append)
return empty_menu
def _create_header_menu(self):
"""Create a QMenu to display in the vertical header.
Returns a QMenu or None if no menu to display.
"""
header_menu = QtGui.QMenu(self.table_view)
if self.factory.editable:
if self.factory.row_factory is not None:
action = header_menu.addAction("Insert new item")
action.triggered.connect(self._on_context_insert)
if self.factory.deletable:
action = header_menu.addAction("Delete item")
action.triggered.connect(self._on_context_remove)
if self.factory.reorderable:
show_up = (self.header_row > 0)
show_down = (self.header_row < self.model.rowCount() - 1)
if not header_menu.isEmpty() and (show_up or show_down):
header_menu.addSeparator()
if show_up:
header_menu_up = header_menu.addAction("Move item up")
header_menu_up.triggered.connect(self._on_context_move_up)
if show_down:
header_menu_down = header_menu.addAction("Move item down")
header_menu_down.triggered.connect(self._on_context_move_down)
if header_menu.isEmpty():
return None
else:
return header_menu
# -- Trait Property getters/setters ---------------------------------------
@cached_property
def _get_selected_row(self):
"""Gets the selected row, or the first row if multiple rows are
selected."""
mode = self.factory.selection_mode
if mode.startswith("column"):
return None
elif mode == "row":
return self.selected
try:
if mode == "rows":
return self.selected[0]
elif mode == "cell":
return self.selected[0]
elif mode == "cells":
return self.selected[0][0]
except IndexError:
return None
@cached_property
def _get_selected_indices(self):
"""Gets the row,column indices which match the selected trait"""
selection_items = self.table_view.selectionModel().selection()
indices = self.model.mapSelectionToSource(selection_items).indexes()
if self.factory.selection_mode.startswith("row"):
indices = sorted(set(index.row() for index in indices))
elif self.factory.selection_mode.startswith("column"):
indices = sorted(set(index.column() for index in indices))
else:
indices = [(index.row(), index.column()) for index in indices]
if self.factory.selection_mode in {"rows", "columns", "cells"}:
return indices
elif len(indices) > 0:
return indices[0]
else:
return -1
def _set_selected_indices(self, indices):
if not isinstance(indices, list):
indices = [indices]
selected = []
if self.factory.selection_mode.startswith("row"):
for row in indices:
selected.append(self.value[row])
elif self.factory.selection_mode.startswith("column"):
for col in indices:
selected.append(self.columns[col].name)
else:
for row, col in indices:
selected.append((self.value[row], self.columns[col].name))
self.selected = selected
self.set_selection(self.selected, False)
# -- Trait Change Handlers ------------------------------------------------
def _filter_changed(self, old_filter, new_filter):
"""Handles the current filter being changed."""
if not self._no_notify:
if new_filter is customize_filter:
do_later(self._customize_filters, old_filter)
else:
self._update_filtering()
self.model.invalidate()
self.set_selection(self.selected)
def _update_columns(self):
"""Handle the column list being changed."""
self.table_view.setItemDelegate(TableDelegate(self.table_view))
for i, column in enumerate(self.columns):
if column.renderer:
self.table_view.setItemDelegateForColumn(i, column.renderer)
self.model.invalidate()
self.table_view.resizeColumnsToContents()
if self.auto_size:
self.table_view.resizeRowsToContents()
def _selected_changed(self, new):
"""Handle the selected row/column/cell being changed externally."""
if not self._no_notify:
self.set_selection(self.selected, notify=False)
def _update_filter_changed(self):
"""The filter has changed internally."""
self._filter_changed(self.filter, self.filter)
# -- Event Handlers -------------------------------------------------------
def _on_row_selection(self, added, removed):
"""Handle the row selection being changed."""
items = self.items()
indexes = self.table_view.selectionModel().selectedRows()
if len(indexes):
index = self.model.mapToSource(indexes[0])
selected = items[index.row()]
else:
selected = None
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_rows_selection(self, added, removed):
"""Handle the rows selection being changed."""
items = self.items()
indexes = self.table_view.selectionModel().selectedRows()
selected = [
items[self.model.mapToSource(index).row()] for index in indexes
]
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_column_selection(self, added, removed):
"""Handle the column selection being changed."""
indexes = self.table_view.selectionModel().selectedColumns()
if len(indexes):
index = self.model.mapToSource(indexes[0])
selected = self.columns[index.column()].name
else:
selected = ""
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_columns_selection(self, added, removed):
"""Handle the columns selection being changed."""
indexes = self.table_view.selectionModel().selectedColumns()
selected = [
self.columns[self.model.mapToSource(index).column()].name
for index in indexes
]
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_cell_selection(self, added, removed):
"""Handle the cell selection being changed."""
items = self.items()
indexes = self.table_view.selectionModel().selectedIndexes()
if len(indexes):
index = self.model.mapToSource(indexes[0])
obj = items[index.row()]
column_name = self.columns[index.column()].name
else:
obj = None
column_name = ""
selected = (obj, column_name)
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_cells_selection(self, added, removed):
"""Handle the cells selection being changed."""
items = self.items()
indexes = self.table_view.selectionModel().selectedIndexes()
selected = []
for index in indexes:
index = self.model.mapToSource(index)
obj = items[index.row()]
column_name = self.columns[index.column()].name
selected.append((obj, column_name))
self.setx(selected=selected)
self.ui.evaluate(self.factory.on_select, self.selected)
def _on_click(self, index):
"""Handle a cell being clicked."""
index = self.model.mapToSource(index)
column = self.columns[index.column()]
obj = self.items()[index.row()]
# Fire the same event on the editor after mapping it to a model object
# and column name:
self.click = (obj, column)
# Invoke the column's click handler:
column.on_click(obj)
def _on_dclick(self, index):
"""Handle a cell being double clicked."""
index = self.model.mapToSource(index)
column = self.columns[index.column()]
obj = self.items()[index.row()]
# Fire the same event on the editor after mapping it to a model object
# and column name:
self.dclick = (obj, column)
# Invoke the column's double-click handler:
column.on_dclick(obj)
def _on_context_insert(self):
"""Handle 'insert item' being selected from the header context menu."""
self.model.insertRow(self.header_row)
def _on_context_append(self):
"""Handle 'add item' being selected from the empty space context
menu."""
self.model.insertRow(self.model.rowCount())
def _on_context_remove(self):
"""Handle 'remove item' being selected from the header context menu."""
self.model.removeRow(self.header_row)
def _on_context_move_up(self):
"""Handle 'move up' being selected from the header context menu."""
self.model.moveRow(self.header_row, self.header_row - 1)
def _on_context_move_down(self):
"""Handle 'move down' being selected from the header context menu."""
self.model.moveRow(self.header_row, self.header_row + 1)
# Define the SimpleEditor class.
SimpleEditor = TableEditor
# Define the ReadonlyEditor class.
ReadonlyEditor = TableEditor
# -------------------------------------------------------------------------
# Qt widgets that have been configured to behave as expected by Traits UI:
# -------------------------------------------------------------------------
class TableDelegate(QtGui.QStyledItemDelegate):
"""A QStyledItemDelegate which fetches Traits UI editors."""
def createEditor(self, parent, option, index):
"""Reimplemented to return the editor for a given index."""
model = index.model()
index = model.mapToSource(index)
table_editor = model._editor
column = table_editor.columns[index.column()]
obj = table_editor.items()[index.row()]
factory = column.get_editor(obj)
style = column.get_style(obj)
if factory is None:
return None
target, name = column.target_name(obj)
handler = default_handler()
if table_editor.ui.context is None:
ui = UI(handler=handler)
else:
context = table_editor.ui.context.copy()
context["table_editor_object"] = context["object"]
context["object"] = target
ui = UI(handler=handler, context=context)
# Create and initialize the editor
factory_method = getattr(factory, style + "_editor")
editor = factory_method(ui, target, name, "", parent)
editor.prepare(parent)
control = editor.control
control.setParent(parent)
# Required for QMouseEvents to propagate to the widget
control.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)
# The table view's background will shine through unless the editor
# paints its own background
control.setAutoFillBackground(True)
# Make sure that editors are disposed of correctly
# will be disposed in closeEditor of the TableView
control._editor = editor
return control
def updateEditorGeometry(self, editor, option, index):
"""Update the editor's geometry."""
editor.setGeometry(option.rect)
def paint(self, painter, option, index):
self.initStyleOption(option, index)
if (option.state & QtGui.QStyle.StateFlag.State_Selected) and (
option.state & QtGui.QStyle.StateFlag.State_Active
):
factory = self.parent()._editor.factory
if factory.selection_bg_color is not None:
option.palette.setColor(
QtGui.QPalette.ColorRole.Highlight, factory.selection_bg_color_
)
if factory.selection_color is not None:
option.palette.setColor(
QtGui.QPalette.ColorRole.HighlightedText, factory.selection_color_
)
QtGui.QApplication.style().drawControl(
QtGui.QStyle.ControlElement.CE_ItemViewItem, option, painter, None
)
class TableView(QtGui.QTableView):
"""A QTableView configured to behave as expected by TraitsUI."""
_SELECTION_MAP = {
"row": (
QtGui.QAbstractItemView.SelectionBehavior.SelectRows,
QtGui.QAbstractItemView.SelectionMode.SingleSelection,
),
"rows": (
QtGui.QAbstractItemView.SelectionBehavior.SelectRows,
QtGui.QAbstractItemView.SelectionMode.ExtendedSelection,
),
"column": (
QtGui.QAbstractItemView.SelectionBehavior.SelectColumns,
QtGui.QAbstractItemView.SelectionMode.SingleSelection,
),
"columns": (
QtGui.QAbstractItemView.SelectionBehavior.SelectColumns,
QtGui.QAbstractItemView.SelectionMode.ExtendedSelection,
),
"cell": (
QtGui.QAbstractItemView.SelectionBehavior.SelectItems,
QtGui.QAbstractItemView.SelectionMode.SingleSelection,
),
"cells": (
QtGui.QAbstractItemView.SelectionBehavior.SelectItems,
QtGui.QAbstractItemView.SelectionMode.ExtendedSelection,
),
}
def __init__(self, editor):
"""Initialise the object."""
QtGui.QTableView.__init__(self)
self._initial_size = False
self._editor = editor
factory = editor.factory
# Configure the grid lines.
self.setShowGrid(factory.show_lines)
# Configure the selection behaviour.
self.setCornerButtonEnabled(False)
behav, mode = self._SELECTION_MAP[factory.selection_mode]
self.setSelectionBehavior(behav)
self.setSelectionMode(mode)
# Configure the editing behavior.
triggers = (
QtGui.QAbstractItemView.EditTrigger.DoubleClicked
| QtGui.QAbstractItemView.EditTrigger.SelectedClicked
)
if factory.edit_on_first_click and not factory.reorderable:
triggers |= QtGui.QAbstractItemView.EditTrigger.CurrentChanged
self.setEditTriggers(triggers)
# Configure the reordering and sorting behavior.
self.setDragEnabled(True)
self.viewport().setAcceptDrops(True)
self.setDropIndicatorShown(True)
if factory.reorderable:
self.setDragDropMode(QtGui.QAbstractItemView.DragDropMode.InternalMove)
if factory.sortable:
self.setSortingEnabled(True)
if factory._qt_stylesheet is not None:
self.setStyleSheet(factory._qt_stylesheet)
self.resizeColumnsToContents()
def setModel(self, model):
super().setModel(model)
self._update_header_sizing()
def contextMenuEvent(self, event):
"""Reimplemented to create context menus for cells and empty space."""
        # Determine the logical indices of the cell where the click occurred
hheader, vheader = self.horizontalHeader(), self.verticalHeader()
position = event.globalPos()
row = vheader.logicalIndexAt(vheader.mapFromGlobal(position))
column = hheader.logicalIndexAt(hheader.mapFromGlobal(position))
# Map the logical row index to a real index for the source model
model = self.model()
row = model.mapToSource(model.index(row, 0)).row()
# Show a context menu for empty space at bottom of table...
editor = self._editor
if row == -1:
empty_menu = editor._create_empty_menu()
if empty_menu is not None:
event.accept()
empty_menu.exec_(position)
# ...or show a context menu for a cell.
elif column != -1:
obj = editor.items()[row]
column = editor.columns[column]
menu_manager = column.get_menu(obj)
if menu_manager is None:
menu_manager = editor.factory.menu
if menu_manager is not None:
event.accept()
selected = editor.selected
if not isinstance(selected, SequenceTypes):
selected = [selected]
if obj not in selected:
selected = [obj]
editor.set_menu_context(selected, obj, column)
menu = menu_manager.create_menu(self, controller=editor)
menu.exec_(position)
def eventFilter(self, obj, event):
"""Reimplemented to create context menu for the vertical header."""
vheader = self.verticalHeader()
if obj is vheader and event.type() == QtCore.QEvent.Type.ContextMenu:
position = event.globalPos()
editor = self._editor
row = vheader.logicalIndexAt(event.pos().y())
if row == -1:
empty_menu = editor._create_empty_menu()
if empty_menu is not None:
event.accept()
empty_menu.exec_(position)
else:
editor.header_row = row
header_menu = editor._create_header_menu()
if header_menu is not None:
event.accept()
header_menu.exec_(position)
return True
else:
return QtGui.QTableView.eventFilter(self, obj, event)
def resizeEvent(self, event):
"""Reimplemented to size the table columns when the size of the table
changes. Because the layout algorithm requires that the available
space be known, we have to wait until the UI that contains this table
gives it its initial size."""
QtGui.QTableView.resizeEvent(self, event)
if self._editor.auto_size:
self.resizeColumnsToContents()
self.resizeRowsToContents()
else:
parent = self.parent()
if (
not self._initial_size
and parent
and (self.isVisible() or isinstance(parent, QtGui.QMainWindow))
):
self._initial_size = True
if self._editor.auto_size:
self.resizeColumnsToContents()
self.resizeRowsToContents()
def sizeHint(self):
"""Reimplemented to define a better size hint for the width of the
TableEditor."""
size_hint = QtGui.QTableView.sizeHint(self)
# This method is sometimes called by Qt after the editor has been
# disposed but before this control has been deleted:
if self._editor.factory is None:
return size_hint
width = self.style().pixelMetric(
QtGui.QStyle.PixelMetric.PM_ScrollBarExtent, QtGui.QStyleOptionHeader(), self
)
for column in range(len(self._editor.columns)):
width += self.sizeHintForColumn(column)
size_hint.setWidth(width)
return size_hint
def sizeHintForColumn(self, column_index):
"""Reimplemented to support absolute width specification via
TableColumns and to improve the metric for autosizing columns."""
editor = self._editor
column = editor.columns[column_index]
requested_width = column.get_width()
# Autosize based on column contents and label width. Qt's default
        # implementation of this function handles the content; we handle the label.
if requested_width < 1:
base_width = QtGui.QTableView.sizeHintForColumn(self, column_index)
# Determine what font to use in the calculation
font = column.get_text_font(None)
if font is None:
font = self.font()
font.setBold(True)
else:
font = QtGui.QFont(font)
# Determine the width of the column label
text = column.get_label()
# QFontMetrics.width() is deprecated and Qt docs suggest using
            # horizontalAdvance() instead, but it is only available since Qt 5.11
if QtCore.__version_info__ >= (5, 11):
width = QtGui.QFontMetrics(font).horizontalAdvance(text)
else:
width = QtGui.QFontMetrics(font).width(text)
# Add margin to the calculated width as appropriate
style = self.style()
option = QtGui.QStyleOptionHeader()
width += (
style.pixelMetric(
QtGui.QStyle.PixelMetric.PM_HeaderGripMargin, option, self
)
* 2
)
if editor.factory.sortable and not editor.factory.reorderable:
# Add size of sort indicator
width += style.pixelMetric(
QtGui.QStyle.PixelMetric.PM_HeaderMarkSize, option, self
)
# Add distance between sort indicator and text
width += style.pixelMetric(
QtGui.QStyle.PixelMetric.PM_HeaderMargin, option, self
)
return max(base_width, width)
# Or else set width absolutely
else:
return requested_width
def resizeColumnsToContents(self):
"""Support proportional column width specifications."""
# TODO: The proportional size specification approach found in the
# TableColumns is not entirely compatible with the ability to
# specify the resize_mode. Namely, there are combinations of
# specifications that are redundant, and others which are
# contradictory. Rework this method so that the various values
# for **width** have a well-defined, sensible meaning for each
# of the possible values of resize_mode.
editor = self._editor
available_space = self.viewport().width()
hheader = self.horizontalHeader()
# Compute sizes for columns with absolute or no size requests
proportional = []
for column_index in range(len(editor.columns)):
column = editor.columns[column_index]
requested_width = column.get_width()
if (
column.resize_mode in ("interactive", "stretch")
and 0 < requested_width <= 1.0
):
proportional.append((column_index, requested_width))
elif (
column.resize_mode == "interactive"
and requested_width < 0
and self._initial_size
):
# Keep previous size if initial sizing has been done
available_space -= hheader.sectionSize(column_index)
else:
base_width = hheader.sectionSizeHint(column_index)
width = max(base_width, self.sizeHintForColumn(column_index))
hheader.resizeSection(column_index, width)
available_space -= width
# Now use the remaining space for columns with proportional width
# requests
for column_index, percent in proportional:
base_width = hheader.sectionSizeHint(column_index)
width = max(base_width, int(percent * available_space))
hheader.resizeSection(column_index, width)
def closeEditor(self, control, hint):
# dispose traits editor associated with control if any
editor = getattr(control, "_editor", None)
if editor is not None:
editor.dispose()
delattr(control, "_editor")
return super().closeEditor(control, hint)
def _update_header_sizing(self):
"""Header section sizing can be done only after a valid model is set.
Otherwise results in segfault with Qt5.
"""
editor = self._editor
factory = editor.factory
# Configure the row headings.
vheader = self.verticalHeader()
set_resize_mode = set_qheader_section_resize_mode(vheader)
insertable = factory.row_factory is not None
if (
factory.editable and (insertable or factory.deletable)
) or factory.reorderable:
vheader.installEventFilter(self)
set_resize_mode(QtGui.QHeaderView.ResizeMode.ResizeToContents)
elif not factory.show_row_labels:
vheader.hide()
if factory.row_height > 0:
vheader.setDefaultSectionSize(factory.row_height)
self.setAlternatingRowColors(factory.alternate_bg_color)
self.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollMode.ScrollPerPixel)
# Configure the column headings.
# We detect if there are any stretchy sections at all; if not, then
# we make the last non-fixed-size column stretchy.
hheader = self.horizontalHeader()
set_resize_mode = set_qheader_section_resize_mode(hheader)
resize_mode_map = dict(
interactive=QtGui.QHeaderView.ResizeMode.Interactive,
fixed=QtGui.QHeaderView.ResizeMode.Fixed,
stretch=QtGui.QHeaderView.ResizeMode.Stretch,
resize_to_contents=QtGui.QHeaderView.ResizeMode.ResizeToContents,
)
stretchable_columns = []
for i, column in enumerate(editor.columns):
set_resize_mode(i, resize_mode_map[column.resize_mode])
if column.resize_mode in ("stretch", "interactive"):
stretchable_columns.append(i)
if not stretchable_columns:
# Use the behavior from before the "resize_mode" trait was added
# to TableColumn
hheader.setStretchLastSection(True)
else:
# hheader.setSectionResizeMode(
# stretchable_columns[-1], QtGui.QHeaderView.ResizeMode.Stretch)
hheader.setStretchLastSection(False)
if factory.show_column_labels:
hheader.setHighlightSections(False)
else:
hheader.hide()
# -------------------------------------------------------------------------
# Editor for configuring the filters available to a TableEditor:
# -------------------------------------------------------------------------
class TableFilterEditor(HasTraits):
"""An editor that manages table filters."""
# -------------------------------------------------------------------------
# Trait definitions:
# -------------------------------------------------------------------------
#: TableEditor this editor is associated with
editor = Instance(TableEditor)
#: The list of filters
filters = List(TableFilter)
#: The list of available templates from which filters can be created
templates = Property(List(TableFilter), observe="filters")
#: The currently selected filter template
selected_template = Instance(TableFilter)
#: The currently selected filter
selected_filter = Instance(TableFilter, allow_none=True)
#: The view to use for the current filter
selected_filter_view = Property(observe="selected_filter")
#: Buttons for add/removing filters
add_button = Button("New")
remove_button = Button("Delete")
# The default view for this editor
view = View(
Group(
Group(
Group(
Item("add_button", enabled_when="selected_template"),
Item(
"remove_button",
enabled_when="len(templates) > 1 and "
"selected_filter is not None",
),
orientation="horizontal",
show_labels=False,
),
Label("Base filter for new filters:"),
Item("selected_template", editor=EnumEditor(name="templates")),
Item(
"selected_filter",
style="custom",
editor=EnumEditor(name="filters", mode="list"),
),
show_labels=False,
),
Item(
"selected_filter",
width=0.75,
style="custom",
editor=InstanceEditor(view_name="selected_filter_view"),
),
id="TableFilterEditorSplit",
show_labels=False,
layout="split",
orientation="horizontal",
),
id="traitsui.qt.table_editor.TableFilterEditor",
buttons=["OK", "Cancel"],
kind="livemodal",
resizable=True,
width=800,
height=400,
title="Customize filters",
)
# -------------------------------------------------------------------------
# Private methods:
# -------------------------------------------------------------------------
# -- Trait Property getter/setters ----------------------------------------
@cached_property
def _get_selected_filter_view(self):
view = None
if self.selected_filter:
model = self.editor.model
index = model.mapToSource(model.index(0, 0))
if index.isValid():
obj = self.editor.items()[index.row()]
else:
obj = None
view = self.selected_filter.edit_view(obj)
return view
@cached_property
def _get_templates(self):
templates = [f for f in self.editor.factory.filters if f.template]
templates.extend(self.filters)
return templates
# -- Trait Change Handlers ------------------------------------------------
def _editor_changed(self):
self.filters = [
f.clone_traits()
for f in self.editor.factory.filters
if not f.template
]
self.selected_template = self.templates[0]
@observe('add_button')
def _create_and_select_new_filter(self, event):
"""Create a new filter based on the selected template and select it."""
new_filter = self.selected_template.clone_traits()
new_filter.template = False
new_filter.name = new_filter._name = "New filter"
self.filters.append(new_filter)
self.selected_filter = new_filter
@observe("remove_button")
def _delete_selected_filter(self, event):
"""Delete the currently selected filter."""
if self.selected_template == self.selected_filter:
self.selected_template = self.templates[0]
index = self.filters.index(self.selected_filter)
del self.filters[index]
if index < len(self.filters):
self.selected_filter = self.filters[index]
else:
self.selected_filter = None
@observe("selected_filter:name")
def _update_filter_list(self, event):
"""A hack to make the EnumEditor watching the list of filters refresh
their text when the name of the selected filter changes.
"""
filters = self.filters
self.filters = []
self.filters = filters
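# Hedged usage sketch (not part of this module): a minimal factory-level table editor
# built with the public TraitsUI API; the Person/Roster classes and trait names below
# are illustrative assumptions. The factory is imported under an alias to avoid
# shadowing the Qt TableEditor class defined above.
from traits.api import HasTraits, Int, List, Str
from traitsui.api import Item, ObjectColumn, View
from traitsui.api import TableEditor as TableEditorFactory

class Person(HasTraits):
    name = Str()
    age = Int()

person_table = TableEditorFactory(
    columns=[ObjectColumn(name="name"), ObjectColumn(name="age")],
    selection_mode="row",   # one of the modes handled by _SELECTION_MAP above
    sortable=True,
    editable=True,
)

class Roster(HasTraits):
    people = List(Person)
    traits_view = View(Item("people", editor=person_table, show_label=False), resizable=True)

# Roster(people=[Person(name="Ada", age=36)]).configure_traits()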
| enthought/traitsui | traitsui/qt/table_editor.py | table_editor.py | py | 49,857 | python | en | code | 290 | github-code | 6 |
28322769353 |
from django.urls import path, include
from . import views
app_name = 'api'
employment = [
path('', views.EmploymentListEmployee.as_view(), name='list'),
]
employee = [
path('<int:pk>/employment/', include((employment, 'employment'))),
]
urlpatterns = [
path('employee/', include((employee, 'employee'))),
]
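# Hedged usage note: with app_name = 'api' above and the nested includes, the list view
# can be reversed by its fully qualified name, assuming the project URLconf mounts this
# module under the 'api' namespace, e.g. path('api/', include('api.urls')):
#
#   from django.urls import reverse
#   reverse('api:employee:employment:list', kwargs={'pk': 1})
#   # -> '/api/employee/1/employment/'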
| crowmurk/mallenom | mallenom/api/urls.py | urls.py | py | 325 | python | en | code | 0 | github-code | 6 |
73883744829 |
import os
_, filename = os.path.split('/a/b/c/t.txt')
print(filename)
metro_areas = [
('Tokyo', 'JP', 36.933, (35.689722, 139.691667)),
('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),
('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),
('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),
('Sao Paulo', 'BR', 19.649, (-23.547778, -46.635833)),
]
print('{:15} | {:^9} | {:^9}'.format('', 'lat.', 'long.'))
fmt = '{:15} | {:9.4f} | {:9.4f}'
for name, cc, pop, (latitude, longitude) in metro_areas:
    if longitude <= 0:
        print(fmt.format(name, latitude, longitude))
from collections import namedtuple
City = namedtuple('City', 'name country population coordinates')
tokyo = City('Tokyo', 'JP', 36.933, (35.689722, 139.691667))
print(tokyo)
for key, value in tokyo._asdict().items():
print(key + ":", value)
| yubo-yue/yubo-python | fluentpython/ch02.py | ch02.py | py | 857 | python | en | code | 0 | github-code | 6 |
31141179192 |
import transformers
import torch
def shape(structure):
try:
return structure.shape
except AttributeError:
return (f"list[{len(structure)}]", *shape(structure[0]))
short_prompt = """To be or not to"""
long_prompt = """It was the best of times, it was the worst"""
if __name__ == "__main__":
print("Started")
model_uri = "gpt2"
tokenizer = transformers.AutoTokenizer.from_pretrained(model_uri)
tokenizer.pad_token = tokenizer.eos_token
model = transformers.AutoModelForCausalLM.from_pretrained(model_uri)
model.to("cuda:0")
inputs = tokenizer([short_prompt, long_prompt], padding=True, return_tensors="pt").to("cuda:0")
generated = []
for _ in range(10):
print(inputs.keys())
print(shape(inputs["input_ids"]))
print(shape(inputs["attention_mask"]))
print(inputs["attention_mask"].sum(axis=1, dtype=torch.int64))
with torch.no_grad():
outputs = model(**inputs)
print(shape(outputs.past_key_values))
print()
next_tokens = outputs.logits[:, -1, :].max(axis=-1).indices
generated.append(tokenizer.decode(next_tokens))
insertion_points = inputs["attention_mask"].sum(axis=1, dtype=torch.int64)
new_column = torch.tensor(tokenizer.pad_token_id).repeat(2).to("cuda:0")
new_inputs = torch.cat((inputs["input_ids"], new_column[:, None]), dim=1)
new_inputs.scatter_(1, insertion_points[:, None], next_tokens[:, None])
mask = inputs["attention_mask"]
new_mask_column = torch.zeros((len(inputs["input_ids"]), 1)).to("cuda:0")
new_mask = torch.cat((mask, new_mask_column), dim=1)
new_mask.scatter_(1, insertion_points[:, None], torch.ones(2, 1).to("cuda:0"))
# inputs["input_ids"] = new_inputs
# inputs["attention_mask"] = new_mask
# inputs.past_key_values = outputs.past_key_values
inputs = {
"input_ids": new_inputs,
"attention_mask": new_mask,
# "past_key_values": outputs.past_key_values
}
        print(tokenizer.batch_decode(new_inputs, skip_special_tokens=True))
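# Hedged comparison sketch: the library's built-in greedy decoding that the manual
# scatter-based loop above re-implements; reuses the same model/tokenizer/prompts.
# For decoder-only models like GPT-2, generate() normally expects left padding, so the
# padding side is switched here (an assumption about intent, not part of the original).
# tokenizer.padding_side = "left"
# batch = tokenizer([short_prompt, long_prompt], padding=True, return_tensors="pt").to("cuda:0")
# with torch.no_grad():
#     generated_ids = model.generate(**batch, max_new_tokens=10, do_sample=False)
# print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))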
| jjjmillist/ttc-workbench | scripts/23-05-18@13:50:44.py | 23-05-18@13:50:44.py | py | 2,176 | python | en | code | 0 | github-code | 6 |
728222177 |
import string
# Initializing Variables
num_sentences = 0
num_words = 0
the_num_sentences = 0
frequency_the = 0
# Task 0
with open('war_and_peace.txt', 'r') as f: # opening and reading file
for line in f:
line = line.rstrip() # removing the space on right side
num_sentences += line.count('.') + line.count('!') + line.count('?') # counting the sentences
with open('war_and_peace.txt', 'r') as f:
for line in f:
words = line.split(None) # splitting into words and storing in list
num_words += len(words) # finding length of list
print("Number of sentences:", num_sentences)
print("Number of words:", num_words)
# Task 1
text = open('war_and_peace.txt', "r") # Open the file in read mode
d = dict() # Create an empty dictionary
for line in text: # Loop through each line of the file
line = line.strip() # Remove the leading spaces and newline character
    line = line.lower()  # convert the characters in line to lowercase to avoid case mismatch
line = line.translate(line.maketrans("", "", string.punctuation)) # Remove the punctuation marks from the line
words = line.split(" ") # Split the line into words
for word in words: # Iterate over each word in line
if word in d: # Check if the word is already in dictionary
d[word] = d[word] + 1 # Increment count of word by 1
else:
d[word] = 1
sorted_by_value = sorted(d.items(), key=lambda kv: kv[1], reverse=True) # sorting dictionary as higher value first
with open('result.csv', 'w', newline='') as f: # creating csv file
for w in sorted_by_value:
        f.write(w[0] + "," + str(d[w[0]]) + "," + str(d[w[0]] / num_words) + '\n')  # write word, count, and frequency to the csv file
# Task 2
with open('war_and_peace.txt', 'r') as f:
for line in f:
line = line.rstrip()
the_num_sentences += (line.count(". the") + line.count("! the") + line.count("? the") +
line.count('. The') + line.count('! The') + line.count('? The'))
print("Number of sentences starts with 'the' :", the_num_sentences)
frequency_the = the_num_sentences / num_sentences
print("Frequency of THE sentences:", frequency_the)
# Task 3
import re
from itertools import islice
from collections import Counter
s = open("war_and_peace.txt") #opening file
g = s.read()
words = re.findall(r"\w+", g)  # extracting the words with a regex
letter = (Counter(zip(words, islice(words, 1, None))))  # counting the most frequent two-word combinations
print(letter.most_common()[0])
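# Hedged mini-example of the bigram idiom above (Counter over zip(words, islice(words, 1, None)));
# uses a made-up sentence rather than the novel, purely to show the shape of the result.
sample = "the quick brown fox jumps over the lazy dog the quick fox".split()
print(Counter(zip(sample, islice(sample, 1, None))).most_common(1))  # [(('the', 'quick'), 2)]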
| ruchitakatkar04/CNS-project-1 | project1.py | project1.py | py | 2,634 | python | en | code | 0 | github-code | 6 |
31022754626 |
import sys
import numpy as np
def compute_class_precision_recall(L,K):
_,L = np.unique(np.array(L),return_inverse=True)
_,K = np.unique(np.array(K),return_inverse=True)
if(len(L) != len(K)):
sys.stderr.write("Labels and clusters are not of the same length.")
sys.exit(1)
num_elements = len(L)
num_labels = L.max() + 1
num_clusters = K.max() + 1
X_L = np.tile(L, (num_labels,1) ).T
X_K = np.tile(K, (num_clusters,1) ).T
L_j = np.equal( np.tile(np.arange(num_labels),(num_elements,1)) , X_L ).astype(float)
K_j = np.equal( np.tile(np.arange(num_clusters),(num_elements,1)) , X_K ).astype(float)
P_ij = np.dot(L_j.T,K_j)
S_i = P_ij.sum(axis=1)
T_i = P_ij.sum(axis=0)
R_i = ( P_ij * P_ij ).sum(axis=1) / ( S_i * S_i )
P_i = ( P_ij.T * P_ij.T ).sum(axis=1) / ( T_i * T_i )
return [(P_i , R_i) , (S_i , T_i)]
def calc_b3(L , K , class_norm=False, beta=1.0):
precision_recall , class_sizes = compute_class_precision_recall(L,K)
if(class_norm == True):
precision = precision_recall[0].sum() / class_sizes[1].size
recall = precision_recall[1].sum() / class_sizes[0].size
else:
precision = ( precision_recall[0] * class_sizes[1] ).sum() / class_sizes[1].sum()
recall = ( precision_recall[1] * class_sizes[0] ).sum() / class_sizes[0].sum()
f_measure = (1 + beta**2) * (precision * recall) /( (beta**2) * precision + recall )
return [f_measure,precision,recall]
# L = np.array([1,3,3,3,3,4,2,2,0,3,3])
# K = np.array([1,2,3,4,5,5,5,6,2,1,1])
# # Standard BCUBED
# [fmeasure, precision, recall] = calc_b3(L,K)
#
# print( calc_b3(L,K) )
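# Hedged usage sketch: a tiny sanity check — when clusters match labels exactly,
# BCubed precision, recall and F-measure should all be 1.0.
# labels = ['a', 'a', 'b', 'b']
# clusters = [0, 0, 1, 1]
# print(calc_b3(labels, clusters))  # [1.0, 1.0, 1.0]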
| fspring/NeuralArgMining | b_cubed_measures.py | b_cubed_measures.py | py | 1,672 | python | en | code | 0 | github-code | 6 |
36734211163 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Pramod Bharadwaj Chandrashekar, Li Liu
@email: [email protected], [email protected]
"""
import numpy as np
from sklearn.cluster import KMeans
import scipy.stats as stats
def get_cdf_pval(data):
""" Function for guassian mixture of dat and computing pvalues """
cdf_pvals = []
for i in range(0, len(data)):
mn_samp = np.mean(data[i, :])
sd_samp = np.std(data[i, :])
kcl = KMeans(n_clusters=2, random_state=0).fit(np.reshape(data[i], [-1, 1]))
cluster_1_id = np.where(kcl.labels_ == 0)[0]
c1_mn, c1_sd = np.mean(data[i, cluster_1_id]), np.std(data[i, cluster_1_id])
cdf_pval_1 = np.reshape(1.0 - stats.norm.cdf(data[i, :], c1_mn, c1_sd), [-1, 1])
cluster_2_id = np.where(kcl.labels_ == 1)[0]
c2_mn, c2_sd = np.mean(data[i, cluster_2_id]), np.std(data[i, cluster_2_id])
cdf_pval_2 = np.reshape(1.0 - stats.norm.cdf(data[i, :], c2_mn, c2_sd), [-1, 1])
cdf_pval_3 = np.reshape(1.0 - stats.norm.cdf(data[i, :], mn_samp, sd_samp), [-1, 1])
cdf_pvals.append(np.concatenate((cdf_pval_1, cdf_pval_2, cdf_pval_3), axis=1))
return cdf_pvals
def get_important_bins(pval_data):
""" Fetch important bins based on pvalues"""
imp_bins = []
# Bonferroni Corrected pvals check
if len(np.where(pval_data*200 < 0.05)[0]) > 0:
imp_bins = np.where(pval_data*200 < 0.05)[0]
# Normal pval check
elif len(np.where(pval_data < 0.05)[0]):
imp_bins = np.where(pval_data < 0.05)[0]
    # Otherwise fall back to the 20 bins with the smallest p-values
else:
sorted_bins = np.argsort(pval_data)
imp_bins = sorted_bins[0:20]
#imp_bins = np.argpartition(pval_data, 10)
return imp_bins
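# Hedged usage sketch: both helpers work row-wise on a (n_regions, n_bins) matrix; the
# 200-bin shape below is an assumption suggested by the "*200" Bonferroni factor above.
# data = np.random.rand(5, 200)
# pvals = get_cdf_pval(data)                 # list of 5 arrays, one (200, 3) array per region
# bins = get_important_bins(pvals[0][:, 2])  # bins selected from the single-Gaussian p-values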
| liliulab/DeepCORE | DeepCORE_attention_util.py | DeepCORE_attention_util.py | py | 1,744 | python | en | code | 0 | github-code | 6 |
74281642429 |
'''
Given a binary search tree (BST), find the lowest common ancestor (LCA) of two given nodes in the BST.
According to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself).”
Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 8
Output: 6
Explanation: The LCA of nodes 2 and 8 is 6.
Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 4
Output: 2
Explanation: The LCA of nodes 2 and 4 is 2, since a node can be a descendant of itself according to the LCA definition.
'''
# Python program to find LCA of n1 and n2 using one
# traversal of Binary tree
# A binary tree node
class Node:
# Constructor to create a new tree node
def __init__(self, key):
self.key = key
self.left = None
self.right = None
# This function returns pointer to LCA of two given
# values n1 and n2
# This function assumes that n1 and n2 are present in
# Binary Tree
def findLCA(root, n1, n2):
# Base Case
if root is None: return root
    # If either n1 or n2 matches the root's key, report the presence by
    # returning root (note that if one key is the ancestor of the other,
    # the ancestor key becomes the LCA)
if root.key == n1 or root.key == n2:
return root
# Look for keys in left and right subtrees
left_lca = findLCA(root.left, n1, n2)
right_lca = findLCA(root.right, n1, n2)
    # If both of the above calls return non-None, then one key is present
    # in one subtree and the other key in the other subtree,
    # so this node is the LCA
if left_lca and right_lca:
return root
# Otherwise check if left subtree or right subtree is LCA
return left_lca if left_lca is not None else right_lca
# Driver program to test the above function
# Build a small binary tree for testing (a plain binary tree, not the BST from the examples above)
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(7)
print("LCA(4,5) = ", findLCA(root, 4, 5).key)
print("LCA(4,6) = ", findLCA(root, 4, 6).key)
print("LCA(3,4) = ", findLCA(root, 3, 4).key)
print("LCA(2,4) = ", findLCA(root, 2, 4).key)
| casanas10/practice_python | Recursion/lca_of_bst.py | lca_of_bst.py | py | 2,292 | python | en | code | 0 | github-code | 6 |
18804693997 |
import copy
from typing import Dict, Optional, TypeVar
from pymilvus.exceptions import CollectionNotExistException, ExceptionsMessage
from pymilvus.settings import Config
Index = TypeVar("Index")
Collection = TypeVar("Collection")
class Index:
def __init__(
self,
collection: Collection,
field_name: str,
index_params: Dict,
**kwargs,
) -> Index:
"""Creates index on a specified field according to the index parameters.
Args:
collection(Collection): The collection in which the index is created
field_name(str): The name of the field to create an index for.
index_params(dict): Indexing parameters.
kwargs:
* *index_name* (``str``) --
The name of index which will be created. If no index name is specified,
default index name will be used.
Raises:
MilvusException: If anything goes wrong.
Examples:
>>> from pymilvus import *
>>> from pymilvus.schema import *
>>> from pymilvus.types import DataType
>>> connections.connect()
<pymilvus.client.stub.Milvus object at 0x7fac15e53470>
>>> field1 = FieldSchema("int64", DataType.INT64, is_primary=True)
>>> field2 = FieldSchema("fvec", DataType.FLOAT_VECTOR, is_primary=False, dim=128)
>>> schema = CollectionSchema(fields=[field1, field2])
>>> collection = Collection(name='test_collection', schema=schema)
>>> # insert some data
>>> index_params = {
... "index_type": "IVF_FLAT",
... "metric_type": "L2",
... "params": {"nlist": 128}}
>>> index = Index(collection, "fvec", index_params)
>>> index.params
{'index_type': 'IVF_FLAT', 'metric_type': 'L2', 'params': {'nlist': 128}}
>>> index.collection_name
test_collection
>>> index.field_name
fvec
>>> index.drop()
"""
from .collection import Collection
if not isinstance(collection, Collection):
raise CollectionNotExistException(message=ExceptionsMessage.CollectionType)
self._collection = collection
self._field_name = field_name
self._index_params = index_params
index_name = kwargs.get("index_name", Config.IndexName)
self._index_name = index_name
self._kwargs = kwargs
if self._kwargs.pop("construct_only", False):
return
conn = self._get_connection()
conn.create_index(self._collection.name, self._field_name, self._index_params, **kwargs)
indexes = conn.list_indexes(self._collection.name)
for index in indexes:
if index.field_name == self._field_name:
self._index_name = index.index_name
break
def _get_connection(self):
return self._collection._get_connection()
@property
def params(self) -> dict:
"""dict: The index parameters"""
return copy.deepcopy(self._index_params)
@property
def collection_name(self) -> str:
"""str: The corresponding collection name"""
return self._collection.name
@property
def field_name(self) -> str:
"""str: The corresponding field name."""
return self._field_name
@property
def index_name(self) -> str:
"""str: The corresponding index name."""
return self._index_name
def __eq__(self, other: Index) -> bool:
"""The order of the fields of index must be consistent."""
return self.to_dict() == other.to_dict()
def to_dict(self):
"""Put collection name, field name and index params into dict."""
return {
"collection": self._collection._name,
"field": self._field_name,
"index_name": self._index_name,
"index_param": self.params,
}
def drop(self, timeout: Optional[float] = None, **kwargs):
"""Drop an index and its corresponding index files.
Args:
timeout(float, optional): An optional duration of time in seconds to allow
for the RPC. When timeout is set to None, client waits until server response
or error occur
kwargs:
* *index_name* (``str``) --
The name of index. If no index is specified, the default index name is used.
"""
copy_kwargs = copy.deepcopy(kwargs)
index_name = copy_kwargs.pop("index_name", Config.IndexName)
conn = self._get_connection()
conn.drop_index(
collection_name=self._collection.name,
field_name=self.field_name,
index_name=index_name,
timeout=timeout,
**copy_kwargs,
)
| milvus-io/pymilvus | pymilvus/orm/index.py | index.py | py | 4,921 | python | en | code | 744 | github-code | 6 |
43585654615 |
"""fitnesspro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import re_path, include
from django.conf import settings
from django.conf.urls.static import static
from fitnessproapp import views
urlpatterns = [
re_path('admin/', admin.site.urls),
re_path(r'^$', views.login, name='login'),
re_path(r'^Forgot_password$',views.Forgot_password, name='Forgot_password'),
re_path(r'^index/$', views.index, name='index'),
re_path(r'^User_profile/$',views.User_profile,name='User_profile'),
re_path(r'^User_edit_profile/$',views.User_edit_profile,name='User_edit_profile'),
re_path(r'^about/$', views.about, name='about'),
re_path(r'^classes/$', views.classes, name='classes'),
re_path(r'^train/$', views.train, name='train'),
re_path(r'^selecttrainer/$', views.selecttrainer, name='selecttrainer'),
re_path(r'^shedule/$', views.shedule, name='shedule'),
re_path(r'^contact/$', views.contact, name='contact'),
re_path(r'^signup/$', views.signup, name='signup'),
re_path(r'^userpaymentpage/$', views.userpaymentpage, name='userpaymentpage'),
re_path(r'^online_training/$', views.online_training, name='online_training'),
re_path(r'^offline_training/$', views.offline_training, name='offline_training'),
re_path(r'^onlin/$', views.onlin, name='onlin'),
re_path(r'^onedit/(?P<i_id>[0-9]+)/$', views.onedit, name='onedit'),
re_path(r'^onlineedit/(?P<oned_id>[0-9]+)/$', views.onlineedit, name='onlineedit'),
re_path(r'^offlin/$', views.offlin, name='offlin'),
re_path(r'^Usert_profile/$',views.Usert_profile,name='Usert_profile'),
re_path(r'^Usert_edit_profile/$',views.Usert_edit_profile,name='Usert_edit_profile'),
re_path(r'^offedit/(?P<i_id>[0-9]+)/$', views.offedit, name='offedit'),
re_path(r'^offlineedit/(?P<offd_id>[0-9]+)/$', views.offlineedit, name='offlineedit'),
re_path(r'^staffd/$', views.staffd, name='staffd'),
re_path(r'^maint/$', views.maint, name='maint'),
re_path(r'^admhome/$', views.admhome, name='admhome'),
re_path(r'^admreg/$', views.admreg, name='admreg'),
re_path(r'^admregedit/(?P<i_id>[0-9]+)/$', views.admregedit, name='admregedit'),
re_path(r'^admregistration/(?P<reg_id>[0-9]+)/$', views.admregistration, name='admregistration'),
re_path(r'^admintimetable/$', views.admintimetable, name='admintimetable'),
re_path(r'^admin_view_timetable/$', views.admin_view_timetable, name='admin_view_timetable'),
re_path(r'^admin_edit_timetable/(?P<i_id>[0-9]+)/$', views.admin_edit_timetable, name='admin_edit_timetable'),
re_path(r'^admin_editpage/(?P<timet_id>[0-9]+)/$', views.admin_editpage, name='admin_editpage'),
re_path(r'^delete_batch/(?P<p_id>[0-9]+)/$', views.delete_batch, name='delete_batch'),
re_path(r'^admin_userpayment/$', views.admin_userpayment, name='admin_userpayment'),
re_path(r'^admin_payment/$', views.admin_payment, name='admin_payment'),
re_path(r'^admin_pay_page/$', views.admin_pay_page, name='admin_pay_page'),
re_path(r'^Trainee_logout/$', views.Trainee_logout, name='Trainee_logout'),
re_path(r'^Trainer_logout/$', views.Trainer_logout, name='Trainer_logout'),
re_path(r'^SuperAdmin_logout/$', views.SuperAdmin_logout, name='SuperAdmin_logout'),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
sanjaymurali1910/fitnessclub
|
fitnesspro/urls.py
|
urls.py
|
py
| 4,115 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11946226959
|
import sys, os, urllib.request, urllib.error, urllib.parse, logging, pwd
import subprocess, site, cgi, datetime, threading, copy, json
import uuid, time, re
from html import escape # ***MUST COME before `from lxml import html`!***
from collections import defaultdict, OrderedDict
from lxml import html
from lxml.html import builder
from http.cookies import SimpleCookie
logging.basicConfig(
level=logging.DEBUG if __debug__ else logging.INFO,
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
LOCK = threading.Lock()
try: # command-line testing won't have module available
import uwsgi
#logging.debug('uwsgi: %s', dir(uwsgi))
except ImportError:
uwsgi = type('uwsgi', (), {'opt': {}}) # object with empty opt attribute
uwsgi.lock = LOCK.acquire
uwsgi.unlock = LOCK.release
#logging.debug('uwsgi.opt: %s', repr(uwsgi.opt))
#logging.debug('sys.argv: %s', sys.argv) # only shows [uwsgi]
# 2017-12-28 set `chdir` option in pyturn.uwsgi so now PWD should be correct
#logging.debug('current working directory: %s', os.path.abspath('.')) # was '/'
# so we can see that sys.argv is useless for uwsgi operation
THISDIR = os.path.dirname(uwsgi.opt.get('wsgi-file', b'').decode())
if THISDIR and os.getcwd() != THISDIR:
logging.warning('having to chdir from %s to %s', os.getcwd(), THISDIR)
os.chdir(THISDIR)
else:
logging.warning('THISDIR: %s, os.getcwd(): %s', THISDIR, os.getcwd())
APPDIR = (uwsgi.opt.get('check_static', b'').decode() or
os.path.join(THISDIR, 'html'))
MIMETYPES = {'png': 'image/png', 'ico': 'image/x-icon', 'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',}
DATA = {
'groups': {}, # active groups
'finished': {}, # inactive groups (for "Report" page)
}
HTTPSESSIONS = {} # data like username, linked with session keys, goes here
EXPECTED_ERRORS = (
NotImplementedError,
ValueError,
KeyError,
IndexError,
SystemError,
)
PARSED = html.parse(os.path.join(APPDIR, 'index.html')).getroot()
PAGE = html.tostring(PARSED.getroottree())
DEBUG = ['all'] # populate from querystring
# create translation table of illegal characters for groupnames
# ":" is used in this program for internal purposes, so disallow that
# "/" cannot be allowed because we create a filename from groupname
# otherwise, mostly being permissive
ILLEGAL = str.maketrans(dict.fromkeys('''([{:/'"}])'''))
def debug(category, *args):
'''
log debug code only for given category
reduces log size and allows for granular approach to debugging
'''
if not __debug__:
return
elif category in DEBUG:
logging.debug(*args)
def findpath(env):
'''
locate directory where files are stored, and requested file
side effect: splits off querystring and stores its debug values in DEBUG
NOTE: DEBUG is a global and as such will be affected by any client adding
`debug=` args to his querystring. so the net result in debugging will be
the union of what all the clients request.
'''
start = APPDIR
parsed = urllib.parse.urlparse(
urllib.parse.unquote(env.get('REQUEST_URI', '')))
if parsed.query:
query = urllib.parse.parse_qs(parsed.query or '')
DEBUG[:] = list(set(DEBUG) | set(query.get('debug', [])))
debug('all', 'findpath: start: %s' % start)
path = urllib.parse.unquote(env.get('HTTP_PATH', ''))
#debug('all', 'path, attempt 1: %s', path)
path = path or parsed.path
#debug('all', 'path, attempt 2: %s', path)
path = (path or '/').lstrip('/')
debug('all', 'findpath: should not be None at this point: "%s"', path)
return start, path
def loadpage(path, data=None):
'''
input template and populate the HTML with data array
eventually client-side JavaScript will perform many of these functions.
'''
data = data or DATA
parsed = html.fromstring(PAGE)
postdict = data.get('postdict', {})
debug('load', 'loadpage: postdict: %s', postdict)
set_values(parsed, postdict,
['username', 'groupname', 'httpsession_key', 'joined'])
if 'groups' in data:
groups = populate_grouplist(parsed, data)
else:
groups = []
debug('load', 'loadpage: groups: %s', groups)
# only show load indicator if no path specified;
# get rid of meta refresh if path has already been chosen
if path == '':
debug('load', 'showing load indicator')
hide_except('loading', parsed)
return html.tostring(parsed).decode()
else:
for tag in parsed.xpath('//meta[@http-equiv="refresh"]'):
tag.getparent().remove(tag)
if 'text' in postdict:
message = builder.PRE(postdict['text'])
parsed.xpath('//div[@id="error-text"]')[0].append(message)
debug('load', 'showing error page')
hide_except('error', parsed)
elif postdict.get('joined'):
debug('join', 'found "joined": %s', data['postdict'])
group = sanitize(postdict['groupname'])
if not group in groups:
if not group in data['finished']:
debug('all', 'nonexistent group, showing joinform again')
hide_except('joinform', parsed)
else:
create_report(parsed, group, data)
debug('all', 'showing report page')
hide_except('report', parsed)
else:
groupdata = data['groups'][group]
speaker = select_speaker(group, data)
userdata = groupdata['participants'][postdict['username']]
remaining = groupdata['talksession']['remaining']
set_text(parsed, ['talksession-speaker'],
['Current speaker is %s' % speaker if speaker else
'Waiting for next speaker'])
set_text(parsed, ['talksession-time'], [formatseconds(remaining)])
debug('talk', 'userdata[request]: %.6f', userdata['request'])
buttonvalue = 'Cancel request' if userdata['request'] else 'My Turn'
debug('talk', 'setting buttonvalue to %s', buttonvalue)
set_button(parsed, ['myturn-button'], [buttonvalue])
debug('talk', 'showing talk page')
hide_except('talksession', parsed)
elif (postdict.get('submit') == 'Join' and postdict.get('username') and
postdict.get('group', '') == ''):
# some browsers won't return `group` in postdict at all if
# selected element is empty (as it is by default in this case)
debug('join', 'showing groupform after joinform')
hide_except('groupform', parsed)
else:
debug('load', 'showing joinform by default')
hide_except('joinform', parsed)
return html.tostring(parsed).decode()
def create_report(parsed=None, group=None, data=None, **formatting):
'''
show participants with the amount of time each spoke
>>> parsed = html.fromstring("""
... <div id="report-body" class="body">
... <div id="report-wrapper" class="pagewrapper top">
... <div id="report-box" class="box">
... <table>
... <tr><th>Name</th><th>Elapsed Time</th></tr>
... <tr><td>(none)</td><td>00:00:00</td></tr>
... </table>
... </div><!-- box -->
... </div><!-- pagewrapper -->
... </div><!-- body -->""")
>>> data = json.loads("""{"finished": {"test": {"groupname": "test",
... "participants": {"jc": {"spoke": 48.5}, "Ed": {"spoke": 3.25}}}}}""")
>>> formatting = {'pretty_print': True, 'with_tail': False}
>>> print(create_report(parsed, 'test', data, **formatting).decode('utf8'))
<div id="report-body" class="body">
<div id="report-wrapper" class="pagewrapper top">
<div id="report-box" class="box">
<table>
<tr>
<th>Name</th>
<th>Elapsed Time</th>
</tr>
<tr>
<td>jc</td>
<td>00:00:48</td>
</tr>
<tr>
<td>Ed</td>
<td>00:00:03</td>
</tr>
</table>
</div>
<!-- box -->
</div>
<!-- pagewrapper -->
</div>
<BLANKLINE>
'''
parsed = parsed if parsed is not None else copy.deepcopy(PARSED)
data = data or DATA
body_div = parsed.xpath('//*[@id="report-body"]')[0]
rows = body_div.xpath('.//table/tr')
debug('report', 'create_report: rows: %s', rows)
template = rows[1]
table = template.getparent()
table.remove(template)
try:
participants = data['finished'][group]['participants']
except KeyError as nosuchgroup:
logging.warning('No such group %s', nosuchgroup)
participants = {}
speakers = sorted(participants, key=lambda u: -participants[u]['spoke'])
columns = template.xpath('./td')
debug('report', 'create_report: speakers: %s', speakers)
for speaker in speakers:
debug('report', 'adding speaker "%s" to report', speaker)
columns[0].text = speaker
columns[1].text = formatseconds(participants[speaker]['spoke'])
debug('report', 'template now: %s', html.tostring(template))
table.append(html.fromstring(html.tostring(template)))
debug('report', 'table now: %s', html.tostring(table))
return html.tostring(body_div, **formatting)
def set_text(parsed, idlist, values):
'''
pre-set page text
'''
debug('all', 'setting values of %s from %s', idlist, values)
for index in range(len(idlist)):
elementid = idlist[index]
value = values[index]
element = parsed.xpath('//*[@id="%s"]' % elementid)[0]
debug('all', 'before: %s', html.tostring(element))
element.text = value
debug('all', 'after: %s', html.tostring(element))
def set_button(parsed, idlist, values):
'''
modify button values
>>> content = html.fromstring('<div><input id="test" value="Test"></div>')
>>> set_button(content, ['test'], ['new value'])
>>> content.xpath('//*[@id="test"]')[0].get('value')
'new value'
'''
for index in range(len(idlist)):
elementid = idlist[index]
value = values[index]
element = parsed.xpath('//*[@id="%s"]' % elementid)[0]
debug('buttons', 'before: %s', html.tostring(element))
element.set('value', value)
debug('buttons', 'after: %s', html.tostring(element))
def set_values(parsed, postdict, fieldlist):
'''
pre-set form input values from postdict
'''
debug('hidden', 'setting values of %s from %s', fieldlist, postdict)
for fieldname in fieldlist:
value = postdict.get(fieldname, '')
if not value:
debug('hidden', 'skipping %s, no value found', fieldname)
continue
elements = parsed.xpath('//input[@name="%s"]' % fieldname)
for element in elements:
debug('hidden', 'before: %s', html.tostring(element))
element.set('value', value)
debug('hidden', 'after: %s', html.tostring(element))
def populate_grouplist(parsed=None, data=None, formatted='list', **options):
'''
fill in 'select' element with options for each available group
if `formatted` is 'list', just return list of groups, oldest first
>>> options = {'pretty_print': True, 'with_tail': False}
>>> data = {'groups': {'test': {'timestamp': 0}, 'again': {'timestamp': 1}}}
>>> print(populate_grouplist(None, data, 'element', **options))
<select id="group-select" name="group" data-contents=":test:again">
<option value="">(Create new group)</option>
<option value="test">test</option>
<option value="again" selected>again</option></select>
<BLANKLINE>
>>> data['groups']['test']['timestamp'] = 2
>>> populate_grouplist(None, data)
['again', 'test']
'''
# sorting a dict gives you a list of keys
data = data or DATA
session_key = data.get('httpsession_key', None)
session = HTTPSESSIONS.get(session_key, {})
added_group = session.get('added_group', None)
parsed = parsed if parsed is not None else html.fromstring(PAGE)
groups = sorted(data['groups'],
key=lambda g: data['groups'][g]['timestamp'])
contents = ':'.join([''] + groups)
grouplist = parsed.xpath('//select[@name="group"]')[0]
debug('grouplist', 'populate_grouplist: %s', grouplist)
for group in groups:
newgroup = builder.OPTION(group, value=group)
grouplist.append(newgroup)
# make newest group the "selected" one
# except for someone who just created a group, mark *that* one selected
for group in grouplist.getchildren():
try:
del group.attrib['selected']
except KeyError:
pass
try:
grouplist[grouplist.index(added_group)].set('selected', 'selected')
except (KeyError, ValueError, IndexError, TypeError):
grouplist[-1].set('selected', 'selected')
grouplist.set("data-contents", contents)
if formatted == 'list':
return groups
else:
return html.tostring(grouplist, **options).decode()
def hide_except(keep, tree):
'''
set "display: none" for all sections of the page we don't want to see
'''
for page in tree.xpath('//div[@class="body"]'):
if not page.get('id').startswith(keep):
page.set('style', 'display: none')
elif 'style' in page.attrib:
del page.attrib['style']
def data_merge(data, cookie):
'''
anything missing in data['postdict'] gets set from cookie if found
'''
if cookie:
if not data['postdict'].get('username'):
logging.debug('data_merge: setting username from cookie')
data['postdict']['username'] = cookie['username'].value
else:
logging.debug('data_merge: found username already in postdict')
if not data['postdict'].get('http_sessionkey'):
logging.debug('data_merge: setting session key from cookie')
data['postdict']['http_sessionkey'] = cookie['sessionid'].value
else:
logging.debug('data_merge: session key already in postdict')
else:
logging.debug('data_merge: cookie: %r, postdict: %s',
cookie, data.get('postdict'))
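# Illustrative example (values are made up, not from the original file): with a cookie
# that carries 'username' and 'sessionid', data_merge fills an empty postdict from it:
#   cookie = SimpleCookie()
#   cookie['username'], cookie['sessionid'] = 'alice', 'deadbeef'
#   data = {'postdict': {}}
#   data_merge(data, cookie)   # data['postdict']['username'] is now 'alice'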
def server(env=None, start_response=None):
'''
primary server process, sends page with current groups list
'''
status_code, mimetype, page = '500 Server error', 'text/html', '(Unknown)'
start, path = findpath(env)
cookie, data = handle_post(env)
logging.debug('server: cookie: %s', cookie)
data_merge(data, cookie) # set any missing data from cookie
debug('all', 'server: data: %s', data)
if path in ('groups',):
page = populate_grouplist(None, data, formatted='element')
status_code = '200 OK'
elif path.startswith('report/'):
group = path.split('/')[1]
page = create_report(group=group).decode('utf8')
status_code = '200 OK'
elif path.startswith('groups/'):
group = path.split('/')[1]
try:
page = json.dumps(data['groups'][group])
except KeyError as groupname:
debug('all', 'group %s does not exist in %s', groupname, data)
page = '{}'
status_code = '200 OK'
elif path in ('', 'noscript', 'app'):
page = loadpage(path, data)
status_code = '200 OK'
elif path == 'status':
page = escape(json.dumps(data))
status_code = '200 OK'
else:
try:
page, mimetype = render(os.path.join(start, path))
status_code = '200 OK'
except (IOError, OSError) as filenotfound:
status_code = '404 File not found'
page = '<h1>No such page: %s</h1>' % str(filenotfound)
headers = [('Content-type', mimetype)]
if cookie is not None:
logging.debug('setting cookie headers %r', cookie.output())
headers.extend(cookie_headers(cookie))
start_response(status_code, headers)
debug('all', 'page: %s', page[:128])
return [page.encode('utf8')]
def cookie_headers(cookie):
'''
make list of tuples for cookie values
>>> cookie = SimpleCookie()
>>> cookie['test'] = 'this'
>>> cookie['test']['path'] = '/'
>>> cookie_headers(cookie)
[('Set-Cookie', 'test=this; Path=/')]
'''
cookies = cookie.output().split('\r\n')
return [tuple(re.compile(': ').split(c, 1)) for c in cookies]
def handle_post(env):
'''
process the form submission and return data structures
note what dict(parse_qsl(formdata)) does:
>>> from urllib.parse import parse_qsl
>>> parse_qsl('a=b&b=c&a=d&a=e')
[('a', 'b'), ('b', 'c'), ('a', 'd'), ('a', 'e')]
>>> OrderedDict(_)
OrderedDict([('a', 'e'), ('b', 'c')])
>>>
so only use it where you know that no key will have more than
one value.
parse_qs will instead return a dict of lists.
'''
uwsgi.lock() # lock access to DATA global
worker = getattr(uwsgi, 'worker_id', lambda *args: None)()
DATA['handler'] = (worker, env.get('uwsgi.core'))
timestamp = datetime.datetime.utcnow().timestamp()
cookie = SimpleCookie(env['HTTP_COOKIE']) if 'HTTP_COOKIE' in env else None
try:
if env.get('REQUEST_METHOD') != 'POST':
DATA['postdict'] = {}
return cookie, copy.deepcopy(DATA)
form = cgi.FieldStorage(fp=env['wsgi.input'], environ=env)
DATA['postdict'] = postdict = {k: form.getfirst(k) for k in form.keys()}
debug('all', 'handle_post: %s, postdict: %s', form, postdict)
# [groupname, total, turn] and submit=Submit if group creation
# [username, group] and submit=Join if joining a group
postdict['timestamp'] = timestamp
if not postdict.get('httpsession_key'):
postdict['httpsession_key'] = uuid.uuid4().hex
debug('sessions', 'set httpsession_key = %s',
postdict['httpsession_key'])
try:
buttonvalue = postdict['submit']
except KeyError:
raise ValueError('No "submit" button found')
cookie = update_httpsession(postdict)
if buttonvalue == 'Join':
# username being added to group
# don't allow if name already in group
groups = DATA['groups']
debug('join', 'processing Join: %s', postdict)
username = postdict.get('username', '')
group = sanitize(postdict.get('group', ''))
if not username:
raise ValueError('Name field cannot be empty')
elif group in groups:
postdict['groupname'] = group
if username in groups[group]['participants']:
raise ValueError('"%s" is already a member of %s' % (
username, group))
groups[group]['participants'][username] = defaultdict(
float, # for `speaking` and `spoke` times
{'timestamp': timestamp, 'requests': []}
)
postdict['joined'] = '%s:%s' % (username, group)
if 'talksession' not in groups[group]:
groups[group]['talksession'] = {
'start': timestamp,
'speaker': None,
'tick': 0,
}
counter = threading.Thread(
target=countdown,
name=group,
args=(group,))
counter.daemon = True # leave no zombies on exit
counter.start()
# else group not in groups, no problem, return to add group form
return cookie, copy.deepcopy(DATA)
elif buttonvalue == 'Submit':
# groupname, total (time), turn (time) being added to groups
# don't allow if groupname already being used
groups = DATA['groups']
group = postdict['groupname'] = sanitize(postdict['groupname'])
if not group in groups:
groups[group] = postdict
groups[group]['participants'] = {}
return cookie, copy.deepcopy(DATA)
else:
raise ValueError((
'Group {group[groupname]} already exists with total time '
'{group[total]} minutes and turn time '
'{group[turn]} seconds').format(group=groups[group]))
elif buttonvalue == 'OK':
# affirming receipt of error message or Help screen
return cookie, copy.deepcopy(DATA)
elif buttonvalue == 'Help':
raise UserWarning('Help requested')
elif buttonvalue == 'My Turn':
# attempting to speak in ongoing session
# this can be reached either by normal HTML form submission
# or by XHR from JavaScript on client side
debug('button', 'My Turn button pressed, env: %s', env)
groups = DATA['groups']
group = sanitize(postdict['groupname'])
username = postdict['username']
try:
userdata = groups[group]['participants'][username]
if not userdata['request']:
debug('button', "userdata: setting %s's request to %.6f",
username, timestamp)
userdata['request'] = timestamp
userdata['requests'].append([timestamp, None])
else:
                    logging.warning('ignoring newer request %.6f, '
                                    'keeping %.6f', timestamp,
                                    userdata['request'])
except KeyError:
raise SystemError('Group %s is no longer active' % group)
return cookie, copy.deepcopy(DATA)
elif buttonvalue == 'Cancel request':
debug('button', 'My Turn button released')
groups = DATA['groups']
group = sanitize(postdict['groupname'])
username = postdict['username']
try:
userdata = groups[group]['participants'][username]
if userdata['request']:
userdata['request'] = None
userdata['requests'][-1][1] = timestamp
else:
logging.error('no speaking request found for %s', username)
except KeyError:
raise SystemError('Group %s is no longer active' % group)
return cookie, copy.deepcopy(DATA)
elif buttonvalue == 'Check status':
return cookie, copy.deepcopy(DATA)
else:
raise ValueError('Unknown form submitted')
except UserWarning as request:
if str(request) == 'Help requested':
debug('all', 'displaying help screen')
DATA['postdict']['text'] = read(os.path.join(THISDIR, 'README.md'))
return cookie, copy.deepcopy(DATA)
except EXPECTED_ERRORS as failed:
debug('all', 'displaying error: "%r"', failed)
DATA['postdict']['text'] = repr(failed)
return cookie, copy.deepcopy(DATA)
finally:
uwsgi.unlock()
def most_eligible_speaker(group, data=None):
'''
participant who first requested to speak who has spoken least
>>> data = {
... 'groups': {
... 'test': {
... 'participants': {
... 'alice': {'spoke': 3, 'request': '2017-10-01T14:21:37.024529'},
... 'bob': {'spoke': 2, 'request': '2017-10-01T14:21:37.024531'},
... 'chuck': {'spoke': 3, 'request': '2017-10-01T14:21:37.024530'}}}}}
>>> most_eligible_speaker('test', data)
'bob'
>>> data = {
... 'groups': {
... 'test': {
... 'participants': {
... 'alice': {'spoke': 2, 'request': '2017-10-01T14:21:37.024531'},
... 'bob': {'spoke': 2, 'request': '2017-10-01T14:21:37.024531'},
... 'chuck': {'spoke': 2, 'request': '2017-10-01T14:21:37.024530'}}}}}
>>> most_eligible_speaker('test', data)
'chuck'
'''
data = data or DATA
groupdata = data['groups'][group]
people = groupdata['participants']
waiting = filter(lambda p: people[p]['request'], people)
speaker_pool = sorted(waiting, key=lambda p:
(people[p]['spoke'], people[p]['request']))
return (speaker_pool or [None])[0]
def select_speaker(group, data=None):
'''
let current speaker finish his turn before considering most eligible
SIDE EFFECTS:
when `turn` time is up or speaker voluntarily relinquishes turn:
sets speaker's `speaking` count to zero in data dict
sets speaker to new speaker
NOTE: not using uwsgi.lock for this, shouldn't be necessary. no
possible race conditions are known at time of coding (jc).
'''
data = data or DATA
groupdata = data['groups'][group]
talksession = groupdata['talksession']
turntime = float(groupdata['turn'])
if talksession['speaker']:
speaker = groupdata['participants'][talksession['speaker']]
if speaker['speaking'] >= turntime or not speaker['request']:
speaker['speaking'] = 0
talksession['speaker'] = most_eligible_speaker(group, data)
else:
talksession['speaker'] = most_eligible_speaker(group, data)
return talksession['speaker']
def sanitize(name):
'''
    we can't rule out someone entering, say, '../../../.hidden/evil' as a groupname.
    in addition to removing ILLEGAL characters, also strip leading '.' and '-':
    the first would hide the file from a normal directory listing, and the second
    makes removal awkward because it looks like an option to rm, so one would need
    `rm -- -evilfile`.
>>> sanitize('../../../.-hidden/::evil')
'hiddenevil'
>>> sanitize(None)
'''
return name.translate(ILLEGAL).lstrip('-.') if name is not None else None
def countdown(group, data=None):
'''
expire the talksession after `minutes`
currently only using uwsgi.lock() when moving group to `finished`.
may need to reevaluate that (jc).
>>> now = datetime.datetime.utcnow().timestamp()
>>> data = {'finished': {}, 'groups': {
... 'test': {
... 'total': '.001',
... 'talksession': {'start': now, 'speaker': None},
... 'participants': {'nobody': {'requests': [[0.1, 0.2]]}},
... }}}
>>> countdown('test', data)
'''
data = data or DATA
groups = data['groups']
sleeptime = .25 # seconds
try:
minutes = float(groups[group]['total'])
groups[group]['talksession']['remaining'] = minutes * 60
ending = (datetime.datetime.fromtimestamp(
groups[group]['talksession']['start']) +
datetime.timedelta(minutes=minutes)).timestamp()
debug('countdown', 'countdown ending: %.6f', ending)
while True:
time.sleep(sleeptime)
now = datetime.datetime.utcnow().timestamp()
debug('countdown', 'countdown now: %.6f', now)
if now > ending:
debug('countdown', 'countdown ended at %.6f', now)
break
speaker = select_speaker(group, data)
debug('countdown', 'countdown: speaker: %s', speaker)
if speaker:
speakerdata = groups[group]['participants'][speaker]
speakerdata['speaking'] += sleeptime
speakerdata['spoke'] += sleeptime
groups[group]['talksession']['remaining'] -= sleeptime
groups[group]['talksession']['tick'] += 1
# should we uwsgi.lock() here in case group is currently being updated?
# if so, need uwsgi.unlock() in `finally` clause
data['finished'][group] = data['groups'].pop(group)
# now save the report of clicks, not same as report of time spoken
reportdir = os.path.join('statistics', group)
reportname = os.path.join(reportdir, '%.6f.json' % now)
try:
participants = data['finished'][group]['participants']
except KeyError:
logging.error("No such key 'participants' in %s",
data['finished'][group])
return
os.makedirs(reportdir, exist_ok=True)
report = open(reportname, 'w')
report.write(json.dumps([{speaker: participants[speaker]['requests']}
for speaker in participants],
indent=4))
report.close()
except KeyError as error:
logging.error('countdown: was group "%s" removed? KeyError: %s',
group, error, exc_info=True)
logging.info('data: %s', data)
def update_httpsession(postdict):
'''
simple implementation of user (http) sessions
this is for keeping state between client and server, this is *not*
the same as discussion (talk) sessions!
another thread should go through and remove expired httpsessions
'''
# FIXME: this session mechanism can only be somewhat secure with https
# FIXME: a thread needs to remove old httpsessions to save memory
timestamp = postdict['timestamp']
cookie = None
if 'httpsession_key' in postdict and postdict['httpsession_key']:
session_key = postdict['httpsession_key']
# only bother storing session once a username has been entered
if postdict.get('username', None):
username = postdict['username']
newgroup = sanitize(postdict.get('group', None))
if session_key in HTTPSESSIONS:
if HTTPSESSIONS[session_key]['username'] != username:
logging.warning(
'changing session username from "%s" to "%s"',
HTTPSESSIONS[session_key]['username'],
username)
if newgroup:
HTTPSESSIONS[session_key]['added_group'] = newgroup
HTTPSESSIONS[session_key]['updated'] = timestamp
else:
HTTPSESSIONS[session_key] = {
'timestamp': timestamp,
'updated': timestamp,
'added_group': None,
'username': username}
cookie = SimpleCookie()
cookie['sessionid'] = session_key
cookie['sessionid']['path'] = '/'
logging.debug('cookie: %s', cookie)
cookie['username'] = username
cookie['username']['path'] = '/'
logging.debug('cookie: %s', cookie)
else:
debug('sessions',
'no username yet associated with session %s', session_key)
else:
logging.warning('no httpsession_key in POST')
return cookie
def render(pagename, standalone=True):
'''
Return content with Content-type header
'''
debug('render', 'render(%s, %s) called', pagename, standalone)
if pagename.endswith('.html'):
debug('render', 'rendering static HTML content')
return (read(pagename), 'text/html')
elif not pagename.endswith(('.png', '.ico', '.jpg', '.jpeg')):
# assume plain text
logging.warning('app is serving %s instead of nginx', pagename)
return (read(pagename), 'text/plain')
elif standalone:
logging.warning('app is serving %s instead of nginx', pagename)
return (read(pagename),
MIMETYPES.get(os.path.splitext(pagename)[1], 'text/plain'))
else:
logging.error('not standalone, and no match for filetype')
raise OSError('File not found: %s' % pagename)
def read(filename):
'''
Return contents of a file
'''
debug('read', 'read: returning contents of %s', filename)
with open(filename) as infile:
data = infile.read()
debug('read', 'data: %s', data[:128])
return data
def formatseconds(seconds):
'''
return rounded-up seconds count as HH:MM:SS
https://stackoverflow.com/a/31946730/493161
>>> formatseconds(666.50001)
'00:11:07'
'''
return '{:0>8}'.format(str(datetime.timedelta(seconds=round(seconds))))
if __name__ == '__main__':
print(server(os.environ, lambda *args: None))
|
jcomeauictx/pyturn
|
myturn.py
|
myturn.py
|
py
| 32,136 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27834268107
|
#! /usr/bin/python3
import numpy as np
from matplotlib import pyplot as plt
# Simple Euler forward
# Input variables
Q = 10.
b = [20.]
S = 1E-2
D = 2E-2
h_b = 4
intermittency = 1
# Constants
phi = 3.97
g = 9.805
rho_s = 2700.
rho = 1000.
tau_star_crit = 0.0495
# Derived variables
a1 = 1. / h_b
a2 = S**0.7 / ( 2.9 * (rho_s - rho)/rho * g**0.3 * D**0.9 )
kh = D**.1 / (2.9 * g**.3 * S**.3)
# Starting values
t = [0.]
dt = 10000
nt = 120
# Equilibrium width?
beq = 0.17 / ( g**.5 * ((rho_s - rho)/rho)**(5/3.) * 1.2**(5/3.)
* tau_star_crit**(5/3.) ) * Q * S**(7/6.) / D**1.5
# Depth?
h = kh * (Q/b[-1])**0.6
# Tau*
tau_star_bed = h * S / ( ((rho_s - rho)/rho) * D)
tau_star_bank = tau_star_bed / 1.2
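# Added note (a restatement of the loop below, not part of the original script):
# each step widens the channel by db = (1/h_b) * (tau*_bank - tau*_crit)^(3/2) * dt * intermittency
# whenever the bank shear stress tau*_bank = a2 * (Q/b)^(3/5) / 1.2 exceeds tau_star_crit;
# otherwise the loop stops and the width is taken to be the equilibrium value beq.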
# Compute through time
for i in range(nt):
bi = b[-1]
tau_star_bank = a2 * (Q/bi)**(3/5.) / 1.2
if tau_star_bank > tau_star_crit:
bi += a1 * ( tau_star_bank - tau_star_crit )**(3/2.) \
* dt * intermittency
else:
b = beq
break
b.append(bi)
t.append(t[-1] + dt)
t = np.array(t)
b = np.array(b)
plt.figure()
plt.hlines(beq, t[0] / (24.*60.*60.), t[-1] / (24.*60.*60.),
'.5', label='Equilibrium width', linewidth=2)
plt.plot(t / (24.*60.*60.), b, 'k-', label='Transient width',
linewidth=2)
plt.xlabel('Flood duration [days]')
plt.ylabel('Channel width [m]')
plt.legend()
plt.tight_layout()
|
MNiMORPH/OTTAR
|
examples/standalone-widening-intuitive/transport-limited-width.py
|
transport-limited-width.py
|
py
| 1,398 |
python
|
en
|
code
| 5 |
github-code
|
6
|
16832434416
|
# Figure canvas widget
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib  # charting module
import matplotlib.pyplot as plt  # plotting module
class PlotCanvas(FigureCanvas):
    def __init__(self, parent=None, width=0, height=0, dpi=100):
        # avoid garbled Chinese characters in rendered text
        matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font
        matplotlib.rcParams['axes.unicode_minus'] = False  # render minus signs on axis ticks correctly
        # create the figure
        fig = plt.figure(figsize=(width, height), dpi=dpi)
        # initialise the figure canvas
        FigureCanvas.__init__(self, fig)
        self.setParent(parent)  # set the parent widget
    # line chart
    def broken_line(self, number, train_list):
        '''
        linewidth: width of the line
        marker: shape of the data-point markers
        markerfacecolor: fill colour of the markers
        markersize: size of the markers
        number: ticket counts, one series per train
        train_list: train numbers used as legend labels
        '''
        # enumerate yields (index, value) pairs for each series
        day_x = ['第二天', '第三天','第四天','第五天', '第六天']  # x-axis points (day 2 .. day 6)
        for index, n in enumerate(number):
            plt.plot(day_x, n, linewidth=1, marker='o',
                     markerfacecolor='blue', markersize=8, label=train_list[index])  # one line per train; marker = data point, label = legend entry
        plt.grid(linestyle=":")
        plt.legend(bbox_to_anchor=(-0.03,1))  # enable the legend and set where it is drawn
        plt.title('卧铺车票数量走势图')  # chart title ("sleeper ticket count trend")
        # bbox_to_anchor(num1, num2): the first value moves the legend horizontally (larger = further right), the second moves it vertically (larger = higher).
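# Hypothetical usage sketch (the widget, figure size and numbers below are assumptions,
# not project code): embed the canvas in a Qt window and plot sleeper-ticket counts for
# two trains over the five days covered by day_x:
#   canvas = PlotCanvas(parent=some_qt_widget, width=5, height=4)
#   canvas.broken_line([[10, 8, 6, 4, 2], [3, 5, 7, 9, 12]], ['K1095', 'Z168'])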
|
yunmi02/MyProject
|
11/源程序/ticket _analysis/chart.py
|
chart.py
|
py
| 1,900 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
33247661764
|
import sys
from pathlib import Path
import cv2
import imutils
import numpy as np
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT))
code_dir_path = ROOT.joinpath("Code")
data_dir_path = ROOT.joinpath("Data")
class BlurTool:
def __init__(self):
cfg_path = code_dir_path.joinpath("yolov7.cfg")
weight_path = code_dir_path.joinpath("yolov7.weights")
self.net = cv2.dnn.readNetFromDarknet(str(cfg_path), str(weight_path))
self.layers = [(self.net.getLayerNames()[i - 1]) for i in self.net.getUnconnectedOutLayers()]
def _check_file(self, file_name):
self.file_path = data_dir_path.joinpath(file_name)
assert self.file_path.exists(), "File does not exist!"
suffix = self.file_path.suffix
if suffix in [".png", ".jpg"]:
self.is_video = False
elif suffix in [".mp4"]:
self.is_video = True
else:
raise ValueError(f"No Support for the Format: {suffix}")
def process(self, file_name="LP.png"):
self._check_file(file_name)
if not self.is_video:
self.process_image()
else:
self.process_video()
def process_image(self):
frame = cv2.imread(str(self.file_path))
vehicles = self.detect_vehicle(frame)
# for vehicle in vehicles:
# xmin, xmax, ymin, ymax = vehicle
# self.detect_license_plate(frame[ymin: ymax, xmin: xmax])
xmin, xmax, ymin, ymax = vehicles[1]
self.detect_license_plate(frame[ymin: ymax, xmin: xmax])
# print(vehicles)
def process_video(self):
cap = cv2.VideoCapture()
def detect_vehicle(self, frame):
frame_height, frame_width = frame.shape[:2]
        vehicle_classes = [2, 3, 5, 7]  # COCO class ids: car, motorcycle, bus, truck
vehicle_boxes, vehicle_scores, vehicles = [], [], []
vehicle_conf_thr, vehicle_nms_thr = 0.5, 0.2
# ?: size
# preprocess -> set input -> forward
blob = cv2.dnn.blobFromImage(frame, scalefactor=1 / 255, size=(320, 320), mean=[0, 0, 0], swapRB=True)
self.net.setInput(blob)
outputs = self.net.forward(self.layers)
for output in outputs:
for detection in output:
# [x, y, w, h, conf, score1, score2, ..., score80]
scores = detection[5:]
class_id = np.argmax(scores)
conf = scores[class_id]
if (class_id in vehicle_classes) and (conf > vehicle_conf_thr):
x, y = detection[0] * frame_width, detection[1] * frame_height
w, h = detection[2] * frame_width, detection[3] * frame_height
xmin, xmax = x - w / 2, x + w / 2
ymin, ymax = y - h / 2, y + h / 2
vehicle_boxes.append((xmin, xmax, ymin, ymax))
vehicle_scores.append(float(conf))
# postprocess: nms -> size filter
vehicle_indices = cv2.dnn.NMSBoxes(vehicle_boxes, vehicle_scores, vehicle_conf_thr, vehicle_nms_thr)
for index in vehicle_indices:
xmin, xmax, ymin, ymax = map(int, vehicle_boxes[index])
if (xmax - xmin) * (ymax - ymin) >= frame_width * frame_height * 0.03:
# [(xmin, xmax, ymin, ymax), ...]
vehicles.append((xmin, xmax, ymin, ymax))
return vehicles
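    # Worked example of the box conversion above (numbers chosen for illustration only):
    # for a 1000x800 frame and detection[:4] == (0.5, 0.5, 0.2, 0.25), the box centre is
    # (500, 400) with size (200, 200), giving (xmin, xmax, ymin, ymax) = (400, 600, 300, 500).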
def detect_license_plate(self, frame):
license_plate = None
# convert to grey scale -> reduce noise -> detect edges
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_reduce_noise = cv2.bilateralFilter(frame_gray, d=13, sigmaColor=15, sigmaSpace=15)
frame_edge = cv2.Canny(frame_reduce_noise, threshold1=30, threshold2=200)
contours = imutils.grab_contours(cv2.findContours(frame_edge.copy(), mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE))
# check which one has a rectangle shape (4 sides) and closed figure
for cnt in sorted(contours, key=cv2.contourArea, reverse=True)[:5]:
peri = cv2.arcLength(curve=cnt, closed=True)
approx = cv2.approxPolyDP(curve=cnt, epsilon=0.1 * peri, closed=True)
if len(approx) == 4:
x, y, w, h = cv2.boundingRect(approx)
license_plate = (x, y, x + w, y + h)
break
return license_plate
# # open -> threshold -> edge detection
# frame_open = cv2.morphologyEx(frame_reduce_noise, op=cv2.MORPH_OPEN, kernel=np.ones((23, 23), np.uint8))
# frame_add_weight = cv2.addWeighted(src1=frame_reduce_noise, alpha=1, src2=frame_open, beta=-1, gamma=0)
# _, frame_thresh = cv2.threshold(frame_add_weight, thresh=0, maxval=255, type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# frame_edge = cv2.Canny(frame_thresh, threshold1=100, threshold2=200)
# frame_edge = cv2.morphologyEx(frame_edge, op=cv2.MORPH_CLOSE, kernel=np.ones((10, 10), np.uint8))
# frame_edge = cv2.morphologyEx(frame_edge, cv2.MORPH_OPEN, kernel=np.ones((10, 10), np.uint8))
# contours = imutils.grab_contours(cv2.findContours(frame_edge.copy() , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE))
# for cnt in sorted(contours, key=cv2.contourArea, reverse=True)[:5]:
# (x, y), (w, h), angle = cv2.minAreaRect(cnt)
# cv2.imshow("test", frame_edge)
# cv2.waitKey()
# exit()
# cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
# cv2.imshow("test", frame)
# cv2.waitKey()
# cv2.drawContours(frame, [a], -1, (0, 255, 0), 3)
# cv2.imshow("test", frame)
# cv2.waitKey()
blur_tool = BlurTool()
blur_tool.process(file_name="LP.png") # LP.png frame.jpg
"""
# face detection
prototxt_path = "Code/prototxt.txt"
model_path = "Code/res10_300x300_ssd_iter_140000_fp16.caffemodel"
model = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
xmin, xmax, ymin, ymax = map(int, vehicle_boxes[index])
vehicles.append()
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 255), 5)
cv2.imshow("test", frame)
cv2.waitKey()
"""
|
Beau-Yang/CapstoneProject
|
Code/blur_tool_version.py
|
blur_tool_version.py
|
py
| 6,194 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25499068282
|
# -*- coding: utf-8 -*-
__author__ = 'Liang Zhao'
__email__ = '[email protected]'
__version__ = '1.0.0'
import hashlib
import unicodedata
def get_hash_key(x):
return hashlib.sha1(str(x)).hexdigest()
def normalize_str(s):
if isinstance(s, str):
s = s.decode("utf8")
s = unicodedata.normalize("NFKD", s)
return s.encode('ASCII', 'ignore').lower()
CO_CAPITAL_OSI_WAY = {
'Ciudad de México': ['Mexico', 'Ciudad de México', 'Ciudad de México'],
'Brasília' : ['Brazil', 'Brasília', 'Brasília'],
'Buenos Aires' : ['Argentina', '-', 'Buenos Aires'],
'Caracas' : ['Venezuela', 'Caracas', 'Caracas'],
'Bogotá' : ['Colombia', 'Bogotá', 'Bogotá'],
'Quito' : ['Ecuador', 'Pichincha', 'Quito'],
'San Salvador' : ['El Salvador', 'San Salvador', 'San Salvador'],
'Asunción' : ['Paraguay', 'Asunción', 'Asunción'],
'Montevideo' : ['Uruguay', 'Montevideo', 'Montevideo']
}
CO_CAPITAL_LOOKUP = dict( (normalize_str(k), v) for k, v in CO_CAPITAL_OSI_WAY.iteritems())
def osi_capital_city_province_corrector(loc):
"""Makes the state info correction as per OSI for
capital city of the country
:param loc: location tuple (Country, State, City)
:returns: ( Country, State, City )
"""
ci = normalize_str(loc[2])
if ci in CO_CAPITAL_LOOKUP and \
normalize_str(loc[0]) == normalize_str(CO_CAPITAL_LOOKUP[ci][0]):
return CO_CAPITAL_LOOKUP[ci]
else:
return loc
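# Worked examples (inputs invented for illustration; outputs follow from the table above):
#   osi_capital_city_province_corrector(('Ecuador', 'Guayas', 'Quito'))
#   -> ['Ecuador', 'Pichincha', 'Quito']   # capital city, state corrected per OSI
#   osi_capital_city_province_corrector(('Peru', 'Lima', 'Lima'))
#   -> ('Peru', 'Lima', 'Lima')            # not in the table, returned unchanged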
def formatting_warning(msg0):
surrogate, possibility, eIdKeyword = msg0
warning = {}
warning['derivedFrom'] = {}
warning['derivedFrom']['source'] = "Raw Data-Sift Twitter Stream"
warning['derivedFrom']['end'] = surrogate['derivedFrom']['end']
warning['derivedFrom']['start'] = surrogate['derivedFrom']['start']
warning['comments'] = "Dynamic Query Expansion model(classifier=classification.randomforest-CU v0.0.1)"
warning['derivedFrom']['derivedIds'] = (surrogate['embersId'],eIdKeyword) # from tweets
warning['model'] = 'Dynamic Query Expansion model(classifier=classification.randomforest-CU v0.0.1)'
warning['confidence'] = possibility
warning['confidenceIsProbability'] = True
warning['eventType'] = surrogate['eventType']
warning['location'] = surrogate['location']
warning['coordinates'] = surrogate['coordinates']
warning['population'] = surrogate['population']
warning['eventDate'] = surrogate['eventDate']
warning['date'] = surrogate['date']
warning['embersId'] = get_hash_key(warning)
return warning
def formatting_surrogate_dict(warning0): #surrogate
warning = {}
warning['derivedFrom'] = {}
warning['derivedFrom']['source'] = "Local Modularity Spatial Scan"
warning['derivedFrom']['end'] = warning0[3][1] # last tweet timestamp
warning['derivedFrom']['start'] = warning0[3][0] # first tweet tinestamp
warning['comments'] = "Local Modularity Spatial Scan(classifier=classification.randomforest-CU v0.0.1)"
warning['derivedFrom']['derivedIds'] = warning0[6] # raw data: embersID, processed data: parent ID
warning['model'] = 'Local Modularity Spatial Scan(classifier=classification.randomforest-CU v0.0.1)'
warning['confidence'] = warning0[5]
warning['confidenceIsProbability'] = True
warning['eventType'] = warning0[1]
warning['location'] = osi_capital_city_province_corrector(warning0[0])
warning['coordinates'] = warning0[4]
warning['population'] = warning0[2]
warning['eventDate'] = warning0[3][2]
warning['date'] = warning0[3][1]
warning['embersId'] = get_hash_key(warning)
return warning
def formatting_surrogate(warnings0,dates):
surrogates = []
for dat in warnings0:
location = dat[0][0][0][0]
coordinates = tuple(dat[0][0][0][1])
eventType = dat[0][0][1][0]
population = dat[0][0][1][1]
confidence = dat[0][1]
embersId = dat[1]
surrogates.append(formatting_surrogate_dict((location,eventType,population,
dates,coordinates,confidence,embersId)))
return surrogates
def formatting_keywords(country,date,keyspace,embersIds): #eIdKeyword
temp,keyelements = keyspace
del temp
msg = {}
msg['country'] = country
msg['keywords'] = keyelements.keys()
msg['date'] = date[:10]
msg['derivedFrom'] = embersIds
msg['embersId'] = get_hash_key(msg)
return msg
def formalizing(surrogates0,threshold,dates,eIdKeyword):
warnings = []
for dat in surrogates0:
confidence = dat['confidence']
if(confidence>threshold):
possibility = float('%.3f'%Possibility_computing(confidence,threshold))
if(possibility >= 0.2):
warnings.append(formatting_warning((dat,possibility,eIdKeyword)))
return warnings
def Possibility_computing(count,threshold):
if(threshold > 0):
return (count-threshold)*0.8/float(1.0 - threshold)
else:
return count*0.8
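# Worked example of the scaling above (numbers chosen for illustration): with
# threshold = 0.5, a confidence of 0.75 maps to (0.75 - 0.5) * 0.8 / (1.0 - 0.5) = 0.4,
# so probabilities stay within (0, 0.8]; with threshold <= 0 the confidence is simply
# scaled by 0.8.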
def main():
pass
if __name__ == '__main__':
main()
|
klyc0k/EDSFilter
|
kmethods/warning_format.py
|
warning_format.py
|
py
| 5,127 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73966289468
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
import json
# China NMPA (药监总局) site: http://scxk.nmpa.gov.cn:81/xk/
if __name__ == "__main__":
url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1'
headers = {
'User-Agent': user_agent
}
data = {
"on": "true",""
"page": "1",
"pageSize": "15",""
"productName":"",
"conditionType": "1",
"applyname":"",
"applysn":"",
}
json_ids = requests.post(url=url,headers=headers,data=data).json()
print(json_ids)
    all_data_list = []  # stores the detail data of every company
    id_list = []  # stores the company IDs
for dic in json_ids['list']:
id_list.append(dic['ID'])
    print(id_list)  # the IDs fetched in batch
    # fetch the detail-page info for each company
    print()
post_url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById'
for id in id_list:
data = {
'id':id
}
        detail_json = requests.post(url=post_url,headers=headers,data=data).json()
print(detail_json,"--------")
all_data_list.append(detail_json)
    # persist the collected data to disk
fp = open('../alldata.json', 'w', encoding='utf-8')
json.dump(all_data_list,fp=fp,ensure_ascii=False)
print("over")
|
xjuun/Note
|
Python/爬虫/code/request基础/06.requests之药监总局相关数据爬取.py
|
06.requests之药监总局相关数据爬取.py
|
py
| 1,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33043283636
|
import base64
import os
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.core.files.base import ContentFile
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.http import require_http_methods
from .forms import DinoImageForm, DinosaurForm
from .models import DinoImage, Dinosaur, Favorite
def register(request):
if request.method == "POST":
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get("username")
messages.success(request, f"Account created for {username}!")
return redirect("login")
else:
form = UserCreationForm()
return render(request, "registration/register.html", {"form": form})
def user_login(request):
if request.method == "POST":
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
login(request, user)
messages.success(request, f"You are now logged in as {user.username}")
return redirect("home")
else:
form = AuthenticationForm()
return render(request, "registration/login.html", {"form": form})
def user_logout(request):
logout(request)
messages.success(request, "You have been logged out")
return redirect("home")
@login_required
def home(request):
dinos = Dinosaur.objects.all()
return render(request, "home.html", {"dinos": dinos})
@login_required
# def dinosaur_detail(request, pk):
# dino = get_object_or_404(Dinosaur, pk=pk)
# images = DinoImage.objects.filter(dinosaur=dino)
# image_urls = []
# for image in images:
# image_urls.append(
# f"data:{image.content_type};base64,{base64.b64encode(image.image.read()).decode()}"
# )
# is_favorited = Favorite.objects.filter(user=request.user, dinosaur=dino).exists()
# return render(
# request,
# "dinosaur_detail.html",
# {"dino": dino, "image_urls": image_urls, "is_favorited": is_favorited},
# )
def dinosaur_detail(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
images = DinoImage.objects.filter(dinosaur=dino)
image_urls = []
for image in images:
image_path = os.path.join(settings.MEDIA_ROOT, str(image.image))
with open(image_path, "rb") as f:
image_data = f.read()
image_base64 = base64.b64encode(image_data).decode()
image_url = f"data:image/jpeg;base64,{image_base64}"
image_urls.append(image_url)
return render(
request,
"dinosaur_detail.html",
{"dino": dino, "images": images, "image_urls": image_urls},
)
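# Illustrative note (template usage is an assumption, not shown in this file): each entry
# of image_urls is a self-contained data URI, so a template can render it directly, e.g.
#   <img src="data:image/jpeg;base64,/9j/4AAQ..." alt="dinosaur image">
# with no separate media URL needed.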
@login_required
def add_dinosaur(request):
if request.method == "POST":
form = DinosaurForm(request.POST, request.FILES)
if form.is_valid():
dino = form.save()
messages.success(request, f"{dino.name} has been added to the database!")
return redirect("dinosaur_detail", pk=dino.pk)
else:
form = DinosaurForm()
return render(request, "add_dinosaur.html", {"form": form})
@login_required
def search_results(request):
query = request.GET.get("q")
print(
query
) # Add this line to check that the search query is being retrieved correctly
if query:
dinosaurs = Dinosaur.objects.filter(name__icontains=query)
else:
dinosaurs = Dinosaur.objects.all()
return render(request, "search_results.html", {"dinosaurs": dinosaurs})
@login_required
def edit_dinosaur(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
if request.method == "POST":
form = DinosaurForm(request.POST, request.FILES, instance=dino)
if form.is_valid():
form.save()
return redirect("dinosaur_detail", pk=dino.pk)
else:
form = DinosaurForm(instance=dino)
return render(request, "dinosaur_edit.html", {"form": form, "dino": dino})
@login_required
def update_dinosaur(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
if request.method == "POST":
form = DinosaurForm(request.POST, request.FILES, instance=dino)
if form.is_valid():
dino = form.save()
messages.success(request, f"{dino.name} has been updated!")
return redirect("dinosaur_detail", pk=dino.pk)
else:
form = DinosaurForm(instance=dino)
return render(request, "add_dinosaur.html", {"form": form})
@login_required
def delete_dinosaur(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
if request.method == "POST":
dino.delete()
messages.success(request, f"{dino.name} has been deleted from the database!")
return redirect("home")
return render(request, "delete_dinosaur.html", {"dino": dino})
@login_required
@require_http_methods(["POST"])
def add_image(request, pk):
dinosaur = get_object_or_404(Dinosaur, pk=pk)
if request.method == "POST":
form = DinoImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.dinosaur = dinosaur
image.save()
return redirect("dinosaur_detail", pk=dinosaur.pk)
else:
form = DinoImageForm()
return render(request, "add_image.html", {"form": form, "dinosaur": dinosaur})
@login_required
@require_http_methods(["POST"])
def delete_image(request, pk):
dino_image = get_object_or_404(DinoImage, pk=pk)
dinosaur_pk = dino_image.dinosaur.pk
dino_image.delete()
return redirect("dinosaur_detail", pk=dinosaur_pk)
@login_required
@require_http_methods(["POST"])
def toggle_favorite(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
fav, created = Favorite.objects.get_or_create(user=request.user, dinosaur=dino)
if not created:
fav.delete()
return JsonResponse({"success": True, "is_favorited": not created})
@login_required
def add_favorite(request, pk):
dino = get_object_or_404(Dinosaur, pk=pk)
fav, created = Favorite.objects.get_or_create(user=request.user, dinosaur=dino)
if created:
messages.success(request, "Added to favorites.")
else:
messages.error(request, "This dinosaur is already in your favorites.")
return redirect("dinosaur_detail", pk=dino.pk)
@login_required
def list_favorites(request):
favorites = Favorite.objects.filter(user=request.user)
dinos = [fav.dinosaur for fav in favorites]
return render(request, "list_favorites.html", {"dinos": dinos})
|
Vleyked/django-template
|
dinosaur_app/dinosaurs/views.py
|
views.py
|
py
| 6,837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35987644586
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argopandas as argo
wmos = [4902596, 4902597]
var_names = ['PRES', 'TEMP', 'PSAL', 'DOXY', 'CHLA', 'BBP700']
for wmo in wmos:
ix = argo.float(wmo).synthetic_prof
up = ix.subset_direction('asc')
down = ix.subset_direction('desc')
up['CYCLE'] = [int(f[-6:-3]) for f in up.file]
down['CYCLE'] = [int(f[-7:-4]) for f in down.file]
cycles = set(up.CYCLE.unique()).intersection(down.CYCLE.unique())
for cycle in cycles:
fig, ax = plt.subplots()
up_sub = up.loc[up.CYCLE == cycle]
down_sub = down.loc[down.CYCLE == cycle]
up_data = up_sub.levels
down_data = down_sub.levels
sns.lineplot(data=up_data, x='DOXY', y='PRES', sort=False, estimator=None, ax=ax)
sns.lineplot(data=down_data, x='DOXY', y='PRES', sort=False, estimator=None, ax=ax)
ax.set_ylim((200,0))
|
cgrdn/argo-sci
|
src/pac-provor/initial_plot.py
|
initial_plot.py
|
py
| 951 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3901695878
|
from parse import parse
import pygame
from pygame.locals import *
from cube import Cube
from const import *
from pygame.math import Vector3
from utils import *
from drawable_chunk import DrawableChunk
from hero import Hero
from level import *
class Level(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.cubes = []
self.drawables = []
self.events = []
self.image_tileset = pygame.image.load("res/tileset.png").convert_alpha()
self.size = Vector3()
def add_drawable(self, drawable):
self.drawables.append(drawable)
def read(self, filename):
tileset_width = self.image_tileset.get_width() // TILE_SIZE
with open(filename) as f:
lines = [line.rstrip() for line in f]
for line in lines:
r = parse("{:d}:{:d}:{:d} {:d}:{:d}:{:d}", line)
c0 = to_2d_coords(
r[3],
tileset_width,
)
c1 = to_2d_coords(
r[4],
tileset_width,
)
c2 = to_2d_coords(
r[5],
tileset_width,
)
coords = [c0, c1, c2]
cube = Cube(coords)
cube.position = Vector3(
r[0] * Cube.SIZE, r[1] * Cube.SIZE, r[2] * Cube.SIZE
)
cube.indexes = Vector3(r[0], r[1], r[2])
cube.zindex = sum(cube.indexes)
self.cubes.append(cube)
self.update_size()
def update_size(self):
for cube in self.cubes:
if self.size.x < cube.indexes.x + 1:
self.size.x = cube.indexes.x + 1
if self.size.y < cube.indexes.y + 1:
self.size.y = cube.indexes.y + 1
if self.size.z < cube.indexes.z + 1:
self.size.z = cube.indexes.z + 1
def get_drawable(self, x, y, z):
for i in range(len(self.drawables)):
drawable = self.drawables[i]
if drawable.position.x // 16 == x and drawable.position.y // 16 == y and drawable.position.z // 16 == z:
return drawable
def get_cube(self, x, y, z):
for i in range(len(self.cubes)):
cube = self.cubes[i]
if cube.indexes.x == x and cube.indexes.y == y and cube.indexes.z == z:
return cube
def get_cube_index(self, x, y, z):
for i in range(len(self.cubes)):
cube = self.cubes[i]
if cube.indexes.x == x and cube.indexes.y == y and cube.indexes.z == z:
return i
def draw(self, camera, surface_display):
drawables_with_chunks = []
# Work In Progress: split drawables into chunks when needed
for drawable in self.drawables:
if isinstance(drawable, Hero):
                # assume that only the hero needs chunked display
# draw in a temporary surface
surface_tmp = pygame.Surface(
(drawable.drawable_width, drawable.drawable_height), pygame.SRCALPHA
)
drawable.draw(0, 0, surface_tmp)
                # assume 2 chunks
nb_chunk = 2 # drawable_height // Cube.SIZE
for number in range(nb_chunk):
drawable_chunk = DrawableChunk(
drawable.position.x,
drawable.position.y,
drawable.position.z + 16, # Shift
)
                    # TODO: fix drawing when the hero jumps
drawable_chunk.zindex = (
sum(
list(
map(
(lambda x: x / Cube.SIZE),
drawable_chunk.position,
)
)
)
+ number
- 1
)
drawable_chunk.number = nb_chunk - number - 1
drawable_chunk.surface = surface_tmp
drawable_chunk.size = Vector2(
drawable.drawable_width, drawable.drawable_height
)
drawables_with_chunks.append(drawable_chunk)
else:
drawables_with_chunks.append(drawable)
sorted_drawables = sorted(
self.cubes + drawables_with_chunks, key=lambda drawable: drawable.zindex
)
for drawable in sorted_drawables:
drawable_iso = cartesian_to_isometric(
(drawable.position.x, drawable.position.y)
)
x = camera.x + drawable_iso.x - Cube.SIZE
y = camera.y + drawable_iso.y - drawable.position.z
if isinstance(drawable, Cube):
drawable.draw(x, y, surface_display, self.image_tileset)
else:
drawable.draw(x, y, surface_display)
def clear(self):
self.drawables.clear()
self.cubes.clear()
self.events.clear()
|
odrevet/isometric-map
|
level.py
|
level.py
|
py
| 5,207 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71578033787
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple check list from AllenNLP repo:
https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release:
git tag VERSION -m 'Adds tag VERSION for pypi'
Push the tag to git:
git push --tags origin master
4. Build both the sources and the wheel.
Do not change anything in setup.py between creating the wheel and the
source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level
directory. (this will build a wheel for the python version you use to
build it).
For the sources, run: "python setup.py sdist" You should now have a /dist
directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading package to test server:
twine upload dist/* -r pypitest
   (pypi suggests using twine, as other methods upload files via plaintext.)
You may have to specify the repository url,
use the following command then:
twine upload dist/* -r pypitest\
--repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github.
"""
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname: str) -> str:
""" Read and return README as str. """
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="asta",
version="0.0.7",
author="Brendan Whitaker",
description=("Shape annotations for homogeneous numpy arrays and pytorch/tensorflow tensors."),
license="GPLv3",
packages=["asta"],
long_description=read("README"),
long_description_content_type="text/plain",
install_requires=["toml", "numpy", "sympy", "oxentiel"],
package_data={"asta": ["defaults/astarc"]},
include_package_data=True,
python_requires=">=3.7.0",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering",
"Programming Language :: Python :: 3.7",
],
)
|
langfield/asta
|
setup.py
|
setup.py
|
py
| 2,854 |
python
|
en
|
code
| 14 |
github-code
|
6
|
14870918627
|
import logging
def initLogging():
# Use simple logging in this file
# See whether I can seperate logging from this program and my library
logging.basicConfig(filename='test_logging.log',level=logging.DEBUG,
format='%(asctime)s %(message)s', datefmt='%Y%m%d-%H%M%S')
logger = logging.getLogger('root')
# This will setup the default logger
# The default level for this logging system is info
# a = logging
a = logger
a.info('Hello world')
a.error('This is an error')
a.warning('This is a warning')
a.debug('Debug information')
# initLogging()
# Make sure the log output from tenon does not contaminate this log file
tenonpath = '..'
import sys; sys.path.append(tenonpath)
import tenon
tenon.run(__file__, '../demo.blend')
if tenon.inblender():
tenon.render.write('demo.png')
tenon.logging.info('Write image to demo.png')
logging.info('The execution is completed')
|
qiuwch/tenon
|
test/test_logging.py
|
test_logging.py
|
py
| 934 |
python
|
en
|
code
| 9 |
github-code
|
6
|
72532713149
|
from pathlib import Path
from .code_description import CodeDescriptionParams, CodeDescriptionXLSXDocument
from .dataset_description import (
DatasetDescriptionParams,
DatasetDescriptionXLSXDocument,
)
from .manifest import ManifestXLSXDocument
def write_xlsx_files(
base_path: Path,
dataset_description_params: DatasetDescriptionParams,
code_description_params: CodeDescriptionParams,
) -> None:
dataset_description_xlsx = DatasetDescriptionXLSXDocument()
dataset_description_xlsx.save_document(
base_path=base_path, template_data=dataset_description_params
)
code_description_xlsx = CodeDescriptionXLSXDocument()
code_description_xlsx.save_document(
base_path=base_path, template_data=code_description_params
)
manifest_xlsx = ManifestXLSXDocument()
manifest_xlsx.save_document(base_path=base_path, template_data=None)
|
ITISFoundation/osparc-simcore
|
services/web/server/src/simcore_service_webserver/exporter/_formatter/xlsx/writer.py
|
writer.py
|
py
| 892 |
python
|
en
|
code
| 35 |
github-code
|
6
|
16705170564
|
from django.shortcuts import render, redirect
from .models import Todo
from .forms import TodoForm
def tasks_list(request):
todos = Todo.objects.all()
context = {'todos': todos}
return render(request, 'tasks_list.html', context)
def add_todo(request):
if request.method == 'POST':
form = TodoForm(request.POST)
if form.is_valid():
form.save()
return redirect('add_todo')
else:
form = TodoForm()
todos = Todo.objects.all()
context = {
'form': form,
'todos': todos,
}
return render(request, 'add_todo.html', context)
|
Chikitonik/DI_Bootcamp
|
Week_12_PY/Day2/exercises_xp/todo_project/todo_list/todos/views.py
|
views.py
|
py
| 622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31546382074
|
from functools import reduce
def main():
# one liner functions
culc_sum = lambda number_list: sum(number_list)
check_palindrome = lambda number: str(number) == str(number)[::-1]
factorial = lambda number: reduce((lambda a, b: a * b), range(1, number + 1))
# check functions
print(culc_sum([1, 2, 3, 4, 5, 10]))
print(check_palindrome(12121))
print(factorial(6))
if __name__ == '__main__':
main()
|
lidorelias3/Lidor_Elias_Answers
|
python/One Liners/OneLiners.py
|
OneLiners.py
|
py
| 437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42560466022
|
# Sparse Search: Given a sorted array of strings that is interspersed with empty strings, write a
# method to find the location of a given string.
def sparse_search(l, item):
def search(start, end):
mid = start + (end - start) // 2
if l[mid] == "":
radius = 1
while l[mid] == "":
if mid+radius <= end and l[mid+radius] != "":
mid=mid+radius
elif mid-radius >= start and l[mid-radius] != "":
mid=mid-radius
if mid+radius > end and mid-radius < start:
return None
radius += 1
if l[mid] == item:
return mid
elif l[mid] > item:
return search(start, mid-1)
else:
return search(mid+1, end)
return search(0, len(l)-1)
l = ["a", "", "b", "", "", "", "c", "", ""]
print(sparse_search(l, "a"))
assert sparse_search(l, "a") == 0
print(sparse_search(l, "b"))
assert sparse_search(l, "b") == 2
print(sparse_search(l, "c"))
assert sparse_search(l, "c") == 6
|
JSchoreels/CrackingTheCodingInterview
|
Chapter_10_SortingAndSearching/ex_10_5_SparseSearch.py
|
ex_10_5_SparseSearch.py
|
py
| 1,077 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1199044805
|
import copy
import time
class Krpsim:
def __init__(self, agent, delay, verbose, random=False):
self.inventory = []
self.agent = agent.copy()
self.delay = delay
self.stock = (agent.stock)
self.verbose = verbose
self.random = random
@property
def stock(self):
return self._stock
@stock.setter
def stock(self, stock):
if isinstance(stock, dict):
self._stock = dict(stock)
else:
raise ValueError('Stock must be a dictionary')
def copy(self):
return copy.copy(self)
def generate_inventory(self):
if len(self.agent.get_available_process_lst()) == 0:
return self
i = 0
        while i < 1000:
            if not self.random:
                walk = self.agent.generate_inventory(self.inventory, self.delay)
            else:
                walk = self.agent.generate_walk(self.inventory, self.delay)
            if walk is None or len(walk) == 0:
self.stock = self.agent.stock
self.agent.stock = self.agent.initial_stock
i += 1
continue
self.inventory.extend(walk)
if self.verbose:
print('agent stock')
self.agent.print_stocks(self.agent.stock)
self.stock = self.agent.stock
self.agent.stock = self.agent.initial_stock
if self.inventory != None and len(self.inventory) != 0:
break
if i >= 1000:
self.inventory.clear()
self.inventory = list(self.agent.walk)
return self
def run(self) -> None:
if self.verbose:
print('start')
print('inventory.stock:', self.stock)
else:
print("Evaluating ", end='')
stock = self.optimize()
print(" done.")
if self.agent.finite and 'time' in self.agent.optimize:
stock = self.optimize_time(stock)
self.print_trace(stock)
def optimize(self) -> dict:
prev_indi = self.agent.copy()
prev_indi.init_stocks()
indi = self.copy()
# for time measurement
dot_interval = 0.5
start_time = time.time()
next_dot_time = start_time + dot_interval
# for finite checking:
self.agent.finite = False
while indi.stock != prev_indi.stock:
prev_indi = indi
new_indi = indi.copy()
new_indi.agent.stock = dict(indi.stock)
new_indi.agent.initial_stock = dict(indi.stock)
new_indi.generate_inventory()
if self.verbose:
print('new stock:', new_indi.stock)
indi = new_indi.copy()
current_time = time.time()
if current_time >= next_dot_time:
print('.', end='', flush=True)
next_dot_time = current_time + dot_interval
if current_time - start_time >= self.delay:
break
elif indi.stock == prev_indi.stock or len(indi.agent.get_available_process_lst()) == 0:
self.agent.finite = True
break
self.inventory = list(new_indi.inventory)
return dict(new_indi.stock)
def optimize_time(self, stock_dict: dict) -> dict:
min_total_cycle = float('inf')
inventory = []
time_stock = []
for i in range(80):
if len(self.inventory) != 0:
total_cycle = self.inventory[-1][1]
total_cycle += int(
self.agent.process[self.inventory[len(self.inventory) - 1][0]].nb_cycle)
else:
total_cycle = float('inf')
if min_total_cycle > total_cycle:
min_total_cycle = total_cycle
inventory = list(self.inventory)
time_stock = dict(stock_dict)
elif min_total_cycle == total_cycle:
for optimize in self.agent.optimize:
if optimize != 'time' and len(time_stock) != 0 and time_stock[optimize] < stock[optimize]:
inventory = list(self.inventory)
time_stock = dict(stock_dict)
self.agent.cycle = 0
self.inventory.clear()
stock = self.optimize()
self.inventory.clear()
self.inventory = list(inventory)
return time_stock
def print_trace(self, stock_dict: dict) -> None:
print("Main walk")
if len(self.inventory) != 0:
total_cycle = self.inventory[-1][1]
else:
total_cycle = 0
biggest_cycle = 0
for item in self.inventory:
if (int(self.agent.process[item[0]].nb_cycle) > biggest_cycle):
biggest_cycle = int(self.agent.process[item[0]].nb_cycle)
print(f"{item[1]}:{item[0]}")
if len(self.inventory) != 0:
total_cycle += int(
self.agent.process[self.inventory[len(self.inventory) - 1][0]].nb_cycle)
if self.agent.finite is True:
print(f"no more process doable at time {total_cycle + 1}")
print("Stock :")
for key, value in stock_dict.items():
print(f" {key} => {value}")
|
jmcheon/krpsim
|
Krpsim.py
|
Krpsim.py
|
py
| 5,278 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24854584741
|
# -*- coding: utf-8 -*-
import copy
class Population():
"""
Hold relevant information and bookkeeping functions for a population.
"""
def __init__(self, pop_name, ea_mu, ea_lambda, dmax_init, dmax_overall,
parent_selection, overselection_top, p_m, survival_selection,
tournament_size_for_survival_selection, parsimony_technique,
pppc, functions, terminals):
self.pop_name = pop_name
self.ea_mu = ea_mu
self.ea_lambda = ea_lambda
self.dmax_init = dmax_init
self.dmax_overall = dmax_overall
self.parent_selection = parent_selection
self.overselection_top = overselection_top
self.p_m = p_m
self.survival_selection = survival_selection
self.tournament_size_for_survival_selection = tournament_size_for_survival_selection
self.parsimony_technique = parsimony_technique
self.pppc = pppc
self.functions = functions
self.terminals = terminals
self.individuals = None # list of ExprTree instances
self.best_individuals = [] # list of best individuals of each generation
# Per-run bookkeeping values
self.run_high_fitness = float('-inf')
self.run_high_score = float('-inf')
self.run_best_world_data = None
self.run_best_individual = None
self.evals_with_no_change = 0
# Per-generation bookkeeping values
self.gen_high_fitness = float('-inf')
self.gen_high_score = float('-inf')
self.gen_fitness_total = 0
self.gen_score_total = 0
self.gen_best_world_data = None
self.gen_best_individual = None
self.gen_best_solution = None
self.gen_max_tree_height = -1
self.gen_tree_height_total = 0
self.gen_max_tree_size = -1
self.gen_tree_size_total = 0
def reset_run_values(self):
"""
Reset values to prepare for a new run.
"""
self.individuals = None
self.best_individuals = []
self.run_high_fitness = float('-inf')
self.run_high_score = float('-inf')
self.run_best_world_data = None
self.run_best_solution = None
self.evals_with_no_change = 0
def calc_run_stats(self):
"""
Update stats for the current run. To be called immediately
after a generation has completed including bookkeeping.
"""
# Check and handle new run high score and fitness
if (self.gen_high_score > self.run_high_score):
self.run_high_score = self.gen_high_score
if (self.gen_high_fitness > self.run_high_fitness):
self.run_high_fitness = self.gen_high_fitness
print('New run', self.pop_name, 'high fitness: ', self.run_high_fitness)
self.run_best_individual = self.gen_best_individual
# # ALTERNATE METHOD:
# # We only care about the best from the final generation, so
# # just blindly copy after each generation and when this method
# # is no longer called, that's the final generation
# self.run_high_score = self.gen_high_score
# self.run_high_fitness = self.gen_high_fitness
# self.run_best_individual = self.gen_best_individual
# print('Gen', self.pop_name, 'high fitness: ', self.run_high_fitness)
def generation_bookkeeping(self):
"""
Update stats for the current generation. To be called immediately
after a generation has completed.
"""
self.gen_high_fitness = float('-inf')
self.gen_high_score = float('-inf')
self.gen_fitness_total = 0
self.gen_score_total = 0
self.gen_best_individual = None
self.gen_max_tree_height = -1
self.gen_tree_height_total = 0
self.gen_max_tree_size = -1
self.gen_tree_size_total = 0
for individual in self.individuals:
self.gen_fitness_total += individual.fitness
if (individual.fitness > self.gen_high_fitness):
self.gen_high_fitness = individual.fitness
self.gen_best_individual = copy.deepcopy(individual)
self.gen_score_total += individual.score
if (individual.score > self.gen_high_score):
self.gen_high_score = individual.score
self.gen_tree_height_total += individual.root.height
if (individual.root.height > self.gen_max_tree_height):
self.gen_max_tree_height = individual.root.height
self.gen_tree_size_total += individual.root.size
if (individual.root.size > self.gen_max_tree_size):
self.gen_max_tree_size = individual.root.size
# Save off best individual of the generation
self.best_individuals.append(copy.deepcopy(self.gen_best_individual))
def update_logs(self, eval_count, experiment_log, parsimony_log):
"""
Update the experiment and parsimony logs
"""
# Update log
experiment_log.write(str(eval_count) + '\t' \
+ str(self.gen_fitness_total / len(self.individuals)) + '\t'
+ str(self.gen_high_fitness) + '\n')
# Update parsimony log
parsimony_log.write(str(eval_count) + '\t' \
+ str(self.gen_tree_height_total / len(self.individuals)) + '\t'
+ str(self.gen_max_tree_height) + '\t'
+ str(self.gen_tree_size_total / len(self.individuals)) + '\t'
+ str(self.gen_max_tree_size) + '\t'
+ str(self.gen_score_total / len(self.individuals)) + '\t'
+ str(self.gen_high_score) + '\n')
|
dennisgbrown/pacman-competitive-coevolutionary-genetic-programming
|
code/population.py
|
population.py
|
py
| 5,791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25349797554
|
# Import the random library so the computer can make a random choice
import random
# List of choices
options = ["kivi", "paber", "käärid"]
# Function for playing one round of the game
def play_game():
    # The computer makes a random choice
computer_choice = random.choice(options)
    # The user makes a choice
user_choice = input("Valige kivi, paber või käärid: ")
    # Check who wins the game
if computer_choice == user_choice:
print("Viik!")
elif (computer_choice == "kivi" and user_choice == "käärid") or (computer_choice == "paber" and user_choice == "kivi") or (computer_choice == "käärid" and user_choice == "paber"):
print("Arvuti võitis!")
else:
print("Kasutaja võitis!")
# Game loop for playing several rounds
while True:
play_game()
play_again = input("Kas soovid veel mängida (jah/ei)? ")
if play_again.lower() == "ei":
break
# Farewell message
print("Lõpetame mängimise, tänan, et mängisite!")
|
Joosepi/ulesanned
|
yl22.py
|
yl22.py
|
py
| 985 |
python
|
et
|
code
| 0 |
github-code
|
6
|
14177896732
|
"""Reads parameters of received http request"""
import http.client as http_client
import logging
from typing import Optional
import azure.functions as func
from .custom_error import DownloadBlobError
log = logging.getLogger(name="log." + __name__)
def main(req: func.HttpRequest, params_list: Optional[list] = None) -> dict[str, str]:
"""
Reads parameters of received http request.
Args:
req (azure.functions.HttpRequest): HTTP request sent to Azure Function's endpoint.
params_list (Optional[list], optional): list of parameters expected in the request.\
Defaults to ["invoice_id", "single_file_download", "file_format"].
Raises:
DownloadBlobError: if any of the expected parameters is not found in the request.
Returns:
dict[str, str]: dictionary of parameters and their values.
"""
log.debug(msg=f"Reading parameters of the request {req}.")
if params_list is None:
params_list = ["invoice_id", "file_format"]
params = {}
for param in params_list:
try:
params[param] = req.params[param]
except KeyError as exc:
message = f"No {param} parameter in the request."
raise DownloadBlobError(
exception_type="KeyError",
details=f"KeyError: {param}",
message=message,
status_code=http_client.BAD_REQUEST,
) from exc
log.debug(msg=f"Parameters of the request {req} read successfully.")
return params
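# A minimal usage sketch (assumes `req` is an azure.functions.HttpRequest whose query
# string carries the expected parameters; the values below are illustrative):
#
#   params = main(req)                      # {"invoice_id": "...", "file_format": "..."}
#   invoice_id = params["invoice_id"]
#   file_format = params["file_format"]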
|
wieczorekgrzegorz/ksef-krportal-communication
|
modules/download_blob/modules/read_params.py
|
read_params.py
|
py
| 1,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22329982080
|
import imp
import discord
from discord.ext import commands
import json
import os
from os import listdir
from os.path import isfile, join
from datetime import datetime
import subprocess
from discordLevelingSystem import DiscordLevelingSystem
import aiosqlite
def micsid(ctx):
return ctx.author.id == 481377376475938826 or ctx.author.id == 624076054969188363
def log(log):
now = datetime.now()
timern = now.strftime("%d/%m/%Y %H:%M:%S")
with open('./other/log.txt', 'a') as f:
f.write('\n')
f.write(f"{timern} | {log}")
cogs = []
for i in os.listdir("cogs/"):
if i == "__pycache__":
pass
else:
print(i[:-3])
class BotMakerCommands(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
@commands.check(micsid)
async def logs(self, ctx):
file = discord.File("./other/log.txt")
await ctx.author.send(file=file)
@commands.command()
@commands.check(micsid)
async def msgserver(self, ctx, id:int, *, message):
for guild in self.client.guilds:
if guild.id == id:
return await guild.text_channels[0].send(message)
await ctx.send("guild not found")
@commands.command()
@commands.check(micsid)
async def reloadall(self, ctx):
lst = [f for f in listdir("cogs/") if isfile(join("cogs/", f))]
no_py = [s.replace('.py', '') for s in lst]
        startup_extensions = ["cogs." + name for name in no_py]
startup_extensions.remove("cogs.Leveling")
try:
for cogs in startup_extensions:
self.client.reload_extension(cogs)
await ctx.send("All Reloaded")
except Exception as e:
print(e)
log(e)
@commands.command(hidden = True)
@commands.check(micsid)
async def pull(self, ctx):
gitstuff = subprocess.run(["git", "pull"], capture_output=True).stdout
await ctx.send(gitstuff.decode())
log(gitstuff.decode())
@commands.command(help="Dms all server owners")
@commands.check(micsid)
async def dm_owners(self,ctx,*, msg):
await ctx.send("Sending...")
log(f"DMing all owners with {msg}")
mins = 0
#predicts how long it will take
mins = len(self.client.guilds) * 0.1
await ctx.send(f"Estimated time: {mins} minutes")
owners = []
for server in self.client.guilds:
tosend = server.owner
owners.append(tosend)
owners = list(set(owners))
for i in owners:
try:
await i.send(msg)
except:
                await ctx.send(f"Could not send to {i}")
await ctx.send("Done")
@commands.command()
@commands.check(micsid)
async def ghoastping(self,ctx,*,member:discord.Member):
for i in ctx.guild.channels:
try:
x = await i.send(f"{member.mention}")
await x.delete()
except:
print(f"Can't send message in {i}")
@commands.command(hidden = True)
@commands.is_owner()
async def clearlog(self,ctx):
file = discord.File("./other/log.txt")
await ctx.author.send(file=file)
dirs = 'other/'
for f in os.listdir(dirs):
os.remove(os.path.join(dirs, f))
dirs = 'tempstorage/'
for f in os.listdir(dirs):
os.remove(os.path.join(dirs, f))
await ctx.send("Cleared")
await log("Cleared at " + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
@commands.command(hidden = True)
@commands.check(micsid)
async def status(self, ctx):
gitstuff = subprocess.run(["git", "status"], capture_output=True).stdout
await ctx.send(gitstuff.decode())
log(gitstuff.decode())
@commands.command()
@commands.check(micsid)
async def load(self, ctx, extension):
self.client.load_extension(f"cogs.{extension}")
embed = discord.Embed(
title='Load', description=f'{extension} successfully loaded', color=0xff00c8)
await ctx.send(embed=embed)
@commands.command()
@commands.check(micsid)
async def unload(self, ctx, extension):
self.client.unload_extension(f"cogs.{extension}")
await ctx.send(f"The module '{extension}' has been unloaded successfully!")
@commands.command()
@commands.is_owner()
async def change_status(self, ctx, *, status):
status = status.replace("[[servers]]", str(len(self.client.guilds)))
await self.client.change_presence(activity=discord.Game(name=status))
await ctx.send(f"Status changed to {status}")
@commands.command()
@commands.is_owner()
async def commandlookup(self, ctx, command):
#check if command exists
if self.client.get_command(command) == None:
await ctx.send("Command not found")
return
#find the cog
for i in self.client.cogs:
if self.client.get_command(command) in self.client.get_cog(i).get_commands():
cog = i
await ctx.send(f"Cog: {cog}\nCommand: {command}")
#when a command is used, it will be logged
@commands.Cog.listener()
async def on_command(self, ctx):
#check if file exists
if os.path.isfile(f"databases/command_usage.db"):
async with aiosqlite.connect("databases/command_usage.db") as db:
#check if command is in database
async with db.execute("SELECT * FROM command_usage WHERE command = ?", (ctx.command.name,)) as cursor:
data = await cursor.fetchall()
#if command is not in database
if len(data) == 0:
await db.execute("INSERT INTO command_usage VALUES (?, ?, ?)", (ctx.command.name, 1, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
await db.commit()
#if command is in database
else:
await db.execute("UPDATE command_usage SET times_used = ?, last_used = ? WHERE command = ?", (data[0][1] + 1, datetime.now().strftime("%d/%m/%Y %H:%M:%S"), ctx.command.name))
await db.commit()
else:
async with aiosqlite.connect("databases/command_usage.db") as db:
await db.execute("CREATE TABLE command_usage (command TEXT, times_used INTEGER, last_used TEXT)")
await db.commit()
@commands.command()
@commands.check(micsid)
async def commandusage(self, ctx, command):
if os.path.isfile(f"databases/command_usage.db"):
async with aiosqlite.connect("databases/command_usage.db") as db:
async with db.execute("SELECT * FROM command_usage WHERE command = ?", (command,)) as cursor:
data = await cursor.fetchall()
if len(data) == 0:
await ctx.send("Command not found")
else:
embed = discord.Embed(title = "Command Usage", description = f"Command: {data[0][0]}\nTimes used: {data[0][1]}\nLast used: {data[0][2]}", color = 0xff00c8)
await ctx.send(embed = embed)
else:
await ctx.send("Command not found")
@commands.command()
@commands.check(micsid)
async def commandusagelist(self, ctx):
if os.path.isfile(f"databases/command_usage.db"):
async with aiosqlite.connect("databases/command_usage.db") as db:
async with db.execute("SELECT * FROM command_usage") as cursor:
data = await cursor.fetchall()
if len(data) == 0:
await ctx.send("No commands found")
else:
embed = discord.Embed(title = "Command Usage", description = "Command: Times used: Last used:", color = 0xff00c8)
for i in data:
embed.description += f"\n{i[0]}: {i[1]}: {i[2]}"
await ctx.send(embed = embed)
else:
await ctx.send("No commands found")
@commands.command()
@commands.is_owner()
async def server_invite(self, ctx, *, server):
guild = self.client.get_guild(int(server))
if guild == None:
await ctx.send("Server not found")
return
invite = await guild.channels[0].create_invite()
await ctx.send(invite)
@commands.command()
@commands.is_owner()
async def server_look_up(self, ctx, *, server):
guild = self.client.get_guild(int(server))
if guild == None:
await ctx.send("Server not found")
return
embed = discord.Embed(title = guild.name, description = f"ID: {guild.id}", color = 0xff00c8)
embed.add_field(name = "Owner", value = f"{guild.owner.name}#{guild.owner.discriminator}")
embed.add_field(name = "Members", value = guild.member_count)
embed.add_field(name = "Channels", value = len(guild.channels))
embed.add_field(name = "Roles", value = len(guild.roles))
embed.add_field(name = "Created at", value = guild.created_at.strftime("%d/%m/%Y %H:%M:%S"))
embed.add_field(name = "Owner ID", value = guild.owner.id)
try:
embed.set_thumbnail(url = guild.icon.url)
except:
pass
await ctx.send(embed = embed)
def setup(client):
client.add_cog(BotMakerCommands(client))
|
micfun123/Simplex_bot
|
cogs/micsid.py
|
micsid.py
|
py
| 9,632 |
python
|
en
|
code
| 24 |
github-code
|
6
|
18995573707
|
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from paddleocr import PaddleOCR,draw_ocr
# Paddleocr supports Chinese, English, French, German, Korean and Japanese.
# You can set the parameter `lang` as `ch`, `en`, `fr`, `german`, `korean`, `japan`
# to switch the language model in order.
ocr = PaddleOCR(use_angle_cls=True, lang='ch') # need to run only once to download and load model into memory
img_path = 'Im6.png'
result = ocr.ocr(img_path, cls=True)
#for line in result:
# print(line)
# Each line consists of a 4 * 2 list and a tuple,
# containing coordinates of a bounding box and ocr result with confidence, respectively.
# draw result
from PIL import Image
image = Image.open(img_path).convert('RGB')
boxes = [line[0] for line in result]
txts = [line[1][0] for line in result]
scores = [line[1][1] for line in result]
im_show = draw_ocr(image, boxes, txts, scores, font_path='./fonts/simfang.ttf')
im_show = Image.fromarray(im_show)
im_show.save('result.jpg')
|
tota1Noob/autoBookmarkGen4PDF
|
moduleTryouts/ocrTest.py
|
ocrTest.py
|
py
| 983 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73955138107
|
from tkinter import *
from PIL import Image, ImageDraw
from src.Model import Model
b1 = "up"
xold, yold = None, None
image1, drawimg = None, None
model = Model()
def create_lines(canv):
canv.create_line(30, 0, 30, 140, smooth=TRUE, fill="red", width="1")
canv.create_line(110, 0, 110, 140, smooth=TRUE, fill="red", width="1")
canv.create_line(0, 30, 140, 30, smooth=TRUE, fill="red", width="1")
canv.create_line(0, 110, 140, 110, smooth=TRUE, fill="red", width="1")
def testCallback(canv):
global image1, model
#image1 = image1.resize((28,28))
image1.save("./valami.png")
model.testImage(image1)
def clearCallback(canv):
global image1, drawimg
canv.delete('all')
create_lines(canv)
drawimg.rectangle((0, 0, image1.size[0], image1.size[1]), fill=0)
def main():
global image1, drawimg
image1 = Image.new(mode="L", size=(28, 28))
drawimg = ImageDraw.Draw(image1)
root = Tk()
root.title("DRAW")
root.geometry('200x150')
drawing_area = Canvas(root)
drawing_area.grid(row=0, column=0, rowspan=2)
drawing_area.config(width=140, height=140)
drawing_area.configure(background='black')
create_lines(drawing_area)
drawing_area.bind("<Motion>", motion)
drawing_area.bind("<ButtonPress-1>", b1down)
drawing_area.bind("<ButtonRelease-1>", b1up)
B1 = Button(root, text="Test", command=lambda: testCallback(drawing_area))
B1.grid(row=0, column=1)
B2 = Button(root, text="Clear", command=lambda: clearCallback(drawing_area))
B2.grid(row=1, column=1)
root.mainloop()
def b1down(event):
global b1
b1 = "down"
def b1up(event):
global b1, xold, yold
b1 = "up"
xold = None
yold = None
def motion(event):
global drawimg
if b1 == "down":
global xold, yold
if xold is not None and yold is not None:
event.widget.create_line(xold, yold, event.x, event.y, smooth=TRUE, fill="white", width="10")
drawimg.line((xold / 5, yold / 5, event.x / 5, event.y / 5), fill=255, width=2)
xold = event.x
yold = event.y
if __name__ == "__main__":
model.gen_data()
model.train()
main()
|
Freyb/LegoAI-homework
|
src/Gui.py
|
Gui.py
|
py
| 2,185 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24133449429
|
#!/usr/bin/env python
import argparse
import csv
import logging
import math
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pylab import rcParams
def plot_bar(data_fh, target, xlabel, ylabel, zlabel, title, x_label, y_label, x_order, y_order, fig_width, fig_height, fontsize, xlabel_rotation, category, colours, stacked, z_annot):
'''
xlabel: groups on x axis
ylabel: colours
'''
logging.info('starting...')
import matplotlib.style
matplotlib.style.use('seaborn')
included = total = 0
results = {}
xvals = set()
yvals = set()
max_zval = 0.0
categories = {}
for row in csv.DictReader(data_fh, delimiter='\t'):
try:
included += 1
xval = row[xlabel] # group axis value
yval = row[ylabel] # sub-group axis value
xvals.add(xval)
yvals.add(yval)
zval = float(row[zlabel])
max_zval = max(max_zval, zval)
xy = '{},{}'.format(xval, yval)
results[xy] = zval
logging.debug('Added %s = %f', xy, zval)
if category is not None:
categories[xy] = row[category]
except:
logging.debug('Failed to include %s', row)
total += 1
logging.info('finished reading %i of %i records with max_zval %.2f', included, total, max_zval)
if len(results) == 0:
logging.warn('No data to plot')
return
if x_order is None:
xvals = sorted(list(xvals)) # groups
else:
xvals = x_order # groups
if y_order is None:
yvals = sorted(list(yvals)) # sub-groups
else:
yvals = y_order
logging.debug('xvals %s yvals %s', xvals, yvals)
#fig, ax = plt.subplots()
#fig_width = min(18, max(9, len(xvals) * len(yvals)))
fig = plt.figure(figsize=(fig_width, fig_height))
rcParams.update({'font.size': fontsize})
ax = fig.add_subplot(111)
width = fig_width / len(xvals) / len(yvals)
ind = np.arange(len(xvals)) * fig_width / len(xvals) # the x locations for the groups
logging.info('ind is %s, width is %f fig_width is %f', ind, width, fig_width)
bottom = None
for idx in range(len(yvals)): # each yval
if stacked:
offset = 0
else:
offset = idx * width * 0.9 - (len(yvals) - 1) * width / 2
vals = [results['{},{}'.format(x, yvals[idx])] for x in xvals] # each xval with that yval
if bottom is None:
bottom = [0] * len(vals)
logging.debug('adding values %s for %s at %s', vals, yvals[idx], ind + offset)
if category is None:
if stacked and bottom is not None:
rects = ax.bar(ind + offset, vals, width * 0.85, label=yvals[idx], bottom=bottom)
else:
rects = ax.bar(ind + offset, vals, width * 0.85, label=yvals[idx])
else:
rects = ax.bar(ind + offset, vals, width * 0.85)
for rect, val, b in zip(rects, xvals, bottom):
height = rect.get_height()
if z_annot is None:
if height < 0.01:
annot = ''
#annot = '{:.3e}'.format(height)
else:
annot = '{:.2f}'.format(height)
else:
annot = z_annot.format(height)
if stacked: # for stacked, put in centre of box
ax.annotate(annot,
xy=(rect.get_x() + rect.get_width() / 2, height / 2 + b),
xytext=(0, 3), # use 3 points offset
textcoords="offset points", # in both directions
ha='center', va='bottom')
else: # non-stacked, put at top of box
ax.annotate(annot,
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # use 3 points offset
textcoords="offset points", # in both directions
ha='center', va='bottom')
if category is not None:
label = '{} {}'.format(categories['{},{}'.format(val, yvals[idx])], yvals[idx])
rect.set_label(label)
if colours is not None:
for colour in colours:
cat, col = colour.split('=')
if cat == label:
rect.set_color(col)
if bottom is None:
bottom = vals
else:
bottom = [x[0] + x[1] for x in zip(bottom, vals)]
logging.debug('vals is %s bottom is %s', vals, bottom)
# Add some text for labels, title and custom x-axis tick labels, etc.
if y_label is not None:
ax.set_ylabel(y_label)
if x_label is not None:
ax.set_xlabel(x_label)
ax.set_title(title)
ax.set_xticks(ind)
ax.set_xticklabels(xvals, rotation=xlabel_rotation)
#ax.legend(loc='upper right')
# place legend at right based on https://stackoverflow.com/questions/10101700/moving-matplotlib-legend-outside-of-the-axis-makes-it-cutoff-by-the-figure-box/10154763#10154763
handles, labels = ax.get_legend_handles_labels()
labels_seen = set()
labels_u = []
handles_u = []
for handle, label in sorted(zip(handles, labels), key=lambda pair: pair[1]):
if label in labels_seen:
continue
labels_seen.add(label)
labels_u.append(label)
handles_u.append(handle)
lgd = ax.legend(handles_u, labels_u, loc='upper left', bbox_to_anchor=(1.01,1.0), borderaxespad=0)
lgd.get_frame().set_edgecolor('#000000')
#fig = plt.figure(figsize=(figsize, 1 + int(figsize * len(yvals) / len(xvals))))
#ax = fig.add_subplot(111)
logging.info('done processing %i of %i', included, total)
plt.tight_layout()
plt.savefig(target)
matplotlib.pyplot.close('all')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot a bar chart')
parser.add_argument('--x', required=True, help='x column name')
parser.add_argument('--y', required=True, help='y column name')
parser.add_argument('--z', required=True, help='z column name')
parser.add_argument('--z_annot', required=False, help='format for values (default is :.2f)')
parser.add_argument('--category', required=False, help='additional category column')
parser.add_argument('--colours', required=False, nargs='*', help='category colours')
parser.add_argument('--title', required=False, help='z column name')
parser.add_argument('--y_label', required=False, help='label on y axis')
parser.add_argument('--x_label', required=False, help='label on x axis')
parser.add_argument('--x_order', required=False, nargs='*', help='order of x axis')
parser.add_argument('--y_order', required=False, nargs='*', help='order of y axis')
parser.add_argument('--stacked', action='store_true', help='stack categories')
parser.add_argument('--verbose', action='store_true', help='more logging')
parser.add_argument('--target', required=False, default='plot.png', help='plot filename')
parser.add_argument('--height', required=False, type=float, default=8, help='height of plot')
parser.add_argument('--width', required=False, type=float, default=12, help='width of plot')
parser.add_argument('--fontsize', required=False, type=float, default=8, help='font size')
parser.add_argument('--x_label_rotation', required=False, default='horizontal', help='label rotation')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
plot_bar(sys.stdin, args.target, args.x, args.y, args.z, args.title, args.x_label, args.y_label, args.x_order, args.y_order, args.width, args.height, args.fontsize, args.x_label_rotation, args.category, args.colours, args.stacked, args.z_annot)
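# Example invocation (column and file names are illustrative; input is tab-separated and read from stdin):
#   python bar.py --x sample --y category --z value --title "Counts" --target counts.png < data.tsv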
|
supernifty/plotme
|
plotme/bar.py
|
bar.py
|
py
| 7,407 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44551517900
|
import numpy as np
# this matrix will store the data
labels = np.array ([])
# and this vector will store the labels
points = np.array ([])
# open up the input text file
with open('bc.txt') as f:
#
# read in the lines and init the data and labels
lines = f.readlines ()
labels = np.zeros (len (lines))
points = np.zeros ((len (lines), 30))
counter = 0
#
# loop through each of the lines
for line in lines:
#
# get all of the items on the line
array = [x for x in line.split (',')]
#
# get the data point
for index in range (2, 32):
points[counter,index - 2] = float (array[index])
#
# if cancerous, 1, else -1
if (array[1] == 'M'):
labels [counter] = 1
else:
labels [counter] = -1
counter = counter + 1
# evaluates the loss function and returns the loss
#
# x is the data set
# y is the labels
# w is the current set of weights
# c is the weight of the slack variables
#
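# In symbols, and judging from the implementation below, this evaluates the soft-margin
# SVM objective with lambda = 1 / (2 * n * c):
#   L(w) = lambda * ||w||^2 + (1/n) * sum_i max(0, 1 - y_i * (w . x_i))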
def f (x, y, w, c):
num_points = y.size
# print("num points %d" % num_points)
pairwise_max = pairwise_max_sum_term(x,y,w)
summation = np.sum(pairwise_max) * (1. / num_points)
# print("summation %d" % summation)
w_norm_squared = np.square(np.absolute(w)).sum()
# print("term: %f" % (num_points * c))
wns_lambda = .5 / (num_points * c)
# print("wns lambda %f " % wns_lambda)
w_norm_squared = wns_lambda * w_norm_squared
# print("w norm squared, %d" % w_norm_squared)
    # total loss: regularization term plus average hinge loss
return w_norm_squared + summation
def pairwise_max_sum_term(x, y, w):
"""
Gets us the numpy array representing the max
in the expression we're minimizing.
NOTE: this does NOT include the sum or division by n
:param x:
:param y:
:param w:
:return:
"""
second_term = get_2nd_term_in_max(x, y, w)
# print("checking value in max:")
# print(second_term)
dim = second_term.shape
zeros = np.zeros(dim)
# returns dimension (n,1) -- same dimensionality as invoked function
result = np.maximum(second_term, zeros)
# print("result of summation: %d" % result.sum())
# print(result)
return result.sum()
def get_2nd_term_in_max(x, y,w):
"""
Gets us the second term in the expression we're minimizing, not including
the max call.
:param x:
:param y:
:param w:
:return:
"""
# assume the matrices have the following dimensions
# w - (30,1)
# x - (n,30)
# y - (n,1)
num_points = y.size
# (n,1) matrix
w_times_x = np.dot(x, w)
# (1) matrix
y_w_x = np.multiply(y, w_times_x)
ones = np.full(y_w_x.shape, 1)
# returns dimension (n,1)
return np.subtract(ones, y_w_x)
def get_greater_than_0(np_arr):
"""
creates an np array of same dimension,
filling in a 0 for all elements less than 0, and 1 otherwise
:param np_arr:
:return:
"""
trim_to_zero_1 = np_arr.copy()
trim_to_zero_1[trim_to_zero_1 > 0] = 1
trim_to_zero_1[trim_to_zero_1 < 0] = 0
return trim_to_zero_1
def partial_L_dw_sum_no_max(x,y,w):
"""
gets the partial corresponding to the second term in the
minimized equation, but disregarding the maximum.
Note: we return a vector of size (n,d),
where n is our number of points and d is the dimensionality of w.
The rows can be summed up to get one vector dL/dw
:param w:
:param x:
:param y:
:return:
"""
greater_than_0 = get_greater_than_0(get_2nd_term_in_max(x,y,w))
greater_than_0_repeated = np.tile(greater_than_0, (30,1)).transpose()
# gets us dimension (n,30)
repeat_y = np.tile(y, (30,1)).transpose()
repeat_y *= -1.
# gets us dimension (n,30)
y_times_x = np.multiply(repeat_y,x)
result = (1/y.size) * np.multiply(y_times_x,greater_than_0_repeated)
# print("greater_than_0.shape")
# print(greater_than_0.shape)
# print("repeat_y.shape")
# print(repeat_y.shape)
# print("y time x shape")
# print(y_times_x.shape)
# print("result")
# print(result.shape)
return result
# evaluates and returns the gradient
#
# x is the data set
# y is the labels
# w is the current set of weights
# c is the weight of the slack variables
#
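# In symbols, and judging from the implementation below, the gradient is
#   dL/dw = (1 / (n * c)) * w - (1/n) * sum over {i : 1 - y_i * (w . x_i) > 0} of y_i * x_i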
def gradient(x, y, w, c):
# assume the matrices have the following dimensions
# w - (30,1)
# x - (n,30)
# y - (n,1)
lbda = .5 / (y.size * c)
first_term = lbda * 2 * w
incomplete_2nd_partial = partial_L_dw_sum_no_max(x,y,w)
second_partial = np.sum(incomplete_2nd_partial,0)
# Note that the gradient has 30 dims because the data has 30 dims
return np.add(first_term, second_partial)
# make predictions using all of the data points in x
# print ‘success’ or ‘failure’ depending on whether the
# prediction is correct
#
# x is the data set
# y is the labels
# w is the current set of weights
#
def predict (x, y, w):
correct = 0
claimed_positives = 0
actual_positives = 0
true_positives = 0
for index in range(len (y)):
if ((np.dot (x[index], w) > 0) and (y[index] > 0)):
# true positive
claimed_positives += 1
actual_positives += 1
true_positives += 1
print ('success - true positive')
correct = correct + 1
elif ((np.dot (x[index], w) < 0) and (y[index] < 0)):
# true negative
print ('success - true negative')
correct = correct + 1
elif ((np.dot(x[index], w) > 0) and (y[index] < 0)):
claimed_positives += 1
# false positive
            print ('failure - false positive')
else:
actual_positives += 1
# ((np.dot(x[index], w) < 0) and (y[index] > 0)):
# false negative
print('failure - false negative')
recall = true_positives * 1. / actual_positives
precision = true_positives * 1. / claimed_positives
print("True positives: %d. Actual positives: %d .claimed positives: %d"
% (true_positives, actual_positives, claimed_positives))
# print(true_positives * 1. / claimed_positives)
print("Precision: %f . Recall: %f" % (precision, recall))
f1_score = (2 * precision * recall) / (precision + recall)
print ('%d out of %d correct.' % (correct, len(y)))
print("f1 score: %f" % f1_score)
# performs gradient descent optimization, returns the learned set of weights
# uses the bold driver to set the learning rate
#
# x is the data set
# y is the labels
# w is the current set of weights to start with
# c is the weight of the slack variable
#
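# Bold-driver rule as implemented below: if the step just taken increased the loss,
# the learning rate is halved; otherwise it is grown by 10%. The step itself is kept either way.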
def gd_optimize (x, y, w, c):
rate = 1
w_last = w + np.full (30, 1.0)
# print("x dimensions")
# print(x.shape)
# print("y dimensions")
# print(y.shape)
# # print(y)
# print("w dimensions")
# print(w.shape)
# print("slack variable value: %d" % c)
while (abs(f (x, y, w, c) - f (x, y, w_last, c)) > 2e-6):
# while (abs(f (x, y, w, c) - f (x, y, w_last, c)) > 10e-4):
w_last = w
w = w - rate * gradient (x, y, w, c)
if f (x, y, w, c) > f (x, y, w_last, c):
rate = rate * .5
else:
rate = rate * 1.1
print (f (x, y, w, c))
return w
######## RUN ONCE YOU'RE READY TO TEST #########
w = np.zeros (30)
points_in = points[0:400]
labels_in = labels[0:400]
# Original is c = .1:
c = .1
# c = .02
w = gd_optimize (points_in, labels_in, w, c)
# output = f(points_in, labels_in, np.ones(30), c)
# output = f(points_in, labels_in, w, c)
# print("output of f: %f" % output)
predict (points[400:], labels[400:], w)
# OUTPUT FROM TRAINED W:
# c = .1
# error threshold = 2e-6
# Output: 46.708858
#
# TESTING RESULTS:
# True positives: 35. Actual positives: 39 .claimed positives: 48
# Precision: 0.729167 . Recall: 0.897436
# 152 out of 169 correct.
# f1 score: 0.804598
|
cjdiaz98/Comp330_HW4
|
SVM.py
|
SVM.py
|
py
| 8,225 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18520546657
|
import tensorflow as tf
class DoubleConvolutionBlock(tf.keras.layers.Layer):
def __init__(self, filter, layer_name):
super(DoubleConvolutionBlock, self).__init__(name="double_conv_3x3_" + layer_name)
self.filter = filter
self.pool = tf.keras.layers.MaxPool2D((2, 2))
self.layers = [
tf.keras.layers.Conv2D(
filter,
(3, 3),
padding="same",
kernel_initializer="he_normal",
use_bias=False,
),
tf.keras.layers.Conv2D(
filter,
(3, 3),
padding="same",
kernel_initializer="he_normal",
use_bias=False
)
]
def call(self, inputs, training=None, pool=True):
x = inputs
for layer in self.layers:
x = layer(x)
if training:
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
skip = x
return self.pool(x) if pool else x, skip
class UpSamplingBlock(tf.keras.layers.Layer):
def __init__(self, filter, layer_name):
super(UpSamplingBlock, self).__init__(name="up_sampling_"+layer_name)
self.filter = filter
self.upsample_convolution = [
tf.keras.layers.UpSampling2D((2, 2), interpolation="bilinear"),
tf.keras.layers.Conv2D(
self.filter,
(2, 2),
padding="same",
kernel_initializer="he_normal",
activation="relu"
),
]
self.double_conv_3x3 = DoubleConvolutionBlock(self.filter, layer_name)
def call(self, inputs, skip=None):
x = inputs
for layer in self.upsample_convolution:
x = layer(x)
if skip is not None:
x = tf.keras.layers.Concatenate()([x, skip])
x, _ = self.double_conv_3x3.call(x, pool=False)
return x
class UNet(tf.keras.models.Model):
def __init__(self, n_classes=1, filters=None, end_activation="sigmoid"):
super(UNet, self).__init__(name="UNet-for-semantic-segmentation")
self.filters = filters
if not self.filters:
self.filters = [64, 128, 256, 512]
self.end_activation = end_activation
self.n_classes = n_classes
self.contractions = []
for filter in self.filters:
self.contractions.append(DoubleConvolutionBlock(filter, layer_name=str(filter)))
self.latent_space = DoubleConvolutionBlock(self.filters[-1]*2, layer_name="latent_{}".format(self.filters[-1]*2))
self.expansions = []
for filter in self.filters[::-1]:
self.expansions.append(UpSamplingBlock(filter, layer_name=str(filter)))
self.conv_1x1 = tf.keras.layers.Conv2D(
self.n_classes,
(1, 1),
padding="same",
kernel_initializer="he_normal",
use_bias=False,
activation=end_activation
)
def call(self, inputs):
x = inputs
skip_connections = []
for contraction in self.contractions:
x, skip = contraction.call(x)
skip_connections.append(skip)
x, _ = self.latent_space.call(x, pool=False)
for expansion, skip_connection in zip(self.expansions, skip_connections[::-1]):
x = expansion.call(x, skip_connection)
x = self.conv_1x1(x)
return x
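# A minimal usage sketch (input shape and hyperparameters are illustrative, not taken from the original repo):
#
#   model = UNet(n_classes=1, filters=[64, 128, 256, 512], end_activation="sigmoid")
#   dummy = tf.random.uniform((1, 128, 128, 3))
#   mask = model(dummy)   # -> (1, 128, 128, 1) sigmoid mask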
|
KushGabani/Biomedical-Image-Segmentation
|
unet.py
|
unet.py
|
py
| 3,501 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22100947365
|
import sys
n,k=map(int,sys.stdin.readline().split())
trees=list(map(int,sys.stdin.readline().split()))
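# Binary search on the saw height H: the wood collected is sum(t - H for t in trees if t > H),
# which is non-increasing in H, so we look for the largest H that still yields at least k.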
start,end=1, max(trees)
while start<=end:
mid=(start+end)//2
sum=0
for i in trees:
if i>=mid:
sum+=i-mid
if sum>=k:
start=mid+1
else:
end=mid-1
print(end)
|
dhktjr0204/beakjoon
|
이분탐색/2805.py
|
2805.py
|
py
| 318 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24628062484
|
import random
sproby=1
Vgadav=False
s=random.randint(1,100)
print('I have picked a number. Try to guess it ')
a=int(raw_input())
while (sproby<9) and (Vgadav==False) :
sproby=sproby+1
    if a>s : print('Too big')
    if a<s : print('Too small')
if a==s : Vgadav=True
    else: a=int(raw_input('Try again, attempt %d\n'% sproby))
if Vgadav==True:
    print ('!!!!!You guessed it on attempt %d!!!!' % sproby)
else: print('??????? Too bad, the number I picked was %d ?????????' %s)
|
Nahtigal/PythonStudy
|
STR2/Rand.py
|
Rand.py
|
py
| 482 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38714648302
|
from rest_framework import serializers
from .models import Animal
from . import serializer_company
class AnimalSerializer(serializers.HyperlinkedModelSerializer):
company = serializer_company.CompanySerializer()
class Meta:
model = Animal
fields = [
'id',
'name',
'type',
'sub_type',
'header_image',
'profile_image',
'tag_number',
'registration_number',
'dob',
'father',
'mother',
'attachment',
'company'
]
|
pohara9720/lma-python
|
lma/api/serializer_animal.py
|
serializer_animal.py
|
py
| 591 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31126427233
|
from gpiozero import DigitalInputDevice
import time
class EncoderCounter(object):
def __init__(self,pin_number,side):
self.side = side
self.test_mode = False
self.pulse_count = 0
self.device = DigitalInputDevice(pin=pin_number)
self.device.pin.when_changed = self.count_pulses
self.previous = None
self.difs = []
def record_gap_between_high(self,_,state):
if(state == 1):
if self.previous==None:
self.previous = time.time()
else:
next = time.time()
dif = next-self.previous
self.previous = next
self.difs.append(dif)
def count_pulses(self,_,state):
self.pulse_count +=1
def reset(self):
self.pulse_count=0
def set_mode(self,mode):
if(mode=='normal'):
self.test_mode = False
            self.device.pin.when_changed = self.count_pulses
if(mode=='test'):
self.test_mode = True
self.device.pin.when_changed = self.record_gap_between_high
def report_test(self,np):
result = np.array(self.difs)
result = result[20:-20]
centred = result - result.mean()
centred = np.absolute(centred)
sd = result.std()
outliers = result[centred>sd*2]
print(f'result for side: {self.side}')
print(f'max: {result.max()}, min: {result.min()}, mean {result.mean()} , sd {result.std()}')
print('outliers',outliers)
return result
# bot.stop()
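# A minimal usage sketch (GPIO pin number and side label are illustrative):
#
#   left = EncoderCounter(pin_number=17, side="left")
#   time.sleep(1.0)
#   print(left.side, left.pulse_count)   # pulses counted during the last second
#   left.reset()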
|
gregorianrants/legobot-7
|
Encoder.py
|
Encoder.py
|
py
| 1,404 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21569497560
|
#!/usr/bin/env python
""" Script to:
Extract 3D images from a 4D image and then extract one selected slice
from each of these 3D images and combine them as a gif.
"""
# Author: Bishesh Khanal <[email protected]>
# Asclepios INRIA Sophia Antipolis
import subprocess
import sys
import argparse as ag
import bish_utils as bu
#import time as tm
def get_input_options():
''' Command line interface, get user input options.
'''
parser = ag.ArgumentParser()
parser.add_argument('in_img', help='Filename of the input 4D image')
parser.add_argument('out_gif', help='Filename of the output file')
parser.add_argument('total_tpts', help='Total number of time points to '
'extract from the input image', type=int)
parser.add_argument('slice_axis', help='Slice axis: 0,1 or 2')
parser.add_argument(dest='slice_num', help='slice number to be extracted')
parser.add_argument('resize', help='resize to maximum possible size in each'
' direction, e.g. 400x400')
parser.add_argument('-time_unit', help='unit of time for display. E.g yrs'
'\n If none, time info not overlayed over the video')
parser.add_argument(
'-time_step', help='time between tpt1 and tpt2. Used only when '
'time_unit is also used. Default 1.0', type=float)
parser.add_argument(
'delay', help='time delay between frames in milliseconds')
parser.add_argument(
'-rotate', help='If given overrides the default rotation used for '
'proper orientation of the slice.')
parser.add_argument(
'-crop', help='convert -crop ops: If given crops 2D slices before '
'combining. wxh+x+y')
ops = parser.parse_args()
return ops
def main():
'''
Extract 3D images from a 4D image and then extract one selected slice
from each of these 3D images and combine them as a gif.
'''
ops = get_input_options()
#split 4d image to get all the 3D images:
tmp3d_basename, file_ext = 'tmp3DImage', '.nii.gz'
cmd = 'ImageMath 4 %s%s TimeSeriesDisassemble %s' % (
tmp3d_basename, file_ext, ops.in_img)
bu.print_and_execute(cmd)
#ImageMath 4 tmp3DImage.nii.gz TimeSeriesDisassemble ops.in_img
#convert (from ImageMagick)
#Executables that must exist:
#ImageMath
extract_slice = "/home/bkhanal/works/tools/marcoSliceExtractor/myImgSliceExtractor"
axis = ops.slice_axis
num = 0
#print('number of time steps: %s \n' % (str(ops.total_tpts)))
while num < ops.total_tpts:
#outputs/results of the executables
index = str(num+100) #ImageMath extracted slice names start from 100.
tmp3DImage = '%s%s%s' % (tmp3d_basename, index, file_ext)
tmp2DImage = 'slice%s.png' % (index,)
cmd = '%s %s %s %s %s' % (
extract_slice, tmp3DImage, axis, ops.slice_num, tmp2DImage)
bu.print_and_execute(cmd, False)
# Rotate the image for proper orientation.
if ops.rotate is None:
cmd = 'convert -rotate 180 %s %s' % (tmp2DImage, tmp2DImage)
else:
cmd = 'convert -rotate %s %s %s' % (ops.rotate, tmp2DImage, tmp2DImage)
bu.print_and_execute(cmd, False)
if ops.crop:
cmd = 'convert %s -crop %s +repage %s' % (tmp2DImage, ops.crop, tmp2DImage)
bu.print_and_execute(cmd, False)
# Write time-point info
if ops.time_unit is not None:
if ops.time_step is not None:
tpt = float(num) * ops.time_step
else:
tpt = num
cmd = ('convert %s -gravity SouthWest -fill orange -pointsize 12 '
'-annotate +0+0 "%s %s" %s' % (
tmp2DImage, str(tpt), ops.time_unit, tmp2DImage))
bu.print_and_execute(cmd, False)
#Delete individual 3D files.
bu.print_and_execute('rm ' + tmp3DImage, False)
#Go to next file
num += 1
#Now make the animation and delete individual 2D slices:
cmd = 'convert slice1*.png -resize %s -set delay %s %s' % (
ops.resize, ops.delay, ops.out_gif)
bu.print_and_execute(cmd)
bu.print_and_execute('rm slice1*.png', False)
if __name__ == "__main__":
main()
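# Example invocation (file names and values are illustrative; positional order follows the parser above):
#   ./extractSliceVideoFrom4d.py atrophy4d.nii.gz atrophy.gif 10 2 64 400x400 20 -time_unit yrs -time_step 1.0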
|
Inria-Asclepios/simul-atrophy
|
scripts/extractSliceVideoFrom4d.py
|
extractSliceVideoFrom4d.py
|
py
| 4,284 |
python
|
en
|
code
| 7 |
github-code
|
6
|
43449602370
|
#!/usr/bin/env python3
import re
import json
import urllib.request
import pymysql.cursors
def ipToCountry(ip):
url = 'http://api.ipstack.com/' + ip + '?access_key=dfe38edcd4541577119d91e7053a584a'
data = urllib.request.urlopen(url).read().decode("utf-8")
json_data = json.loads(data)
if not json_data['country_name'] is None:
return json_data['country_name']
return 'none'
f = open('logs.txt', 'r')
#f = open('logs_lite.txt', 'r')
users = {}
product_categories = []
carts = {}
types_action = []
actions = []
users_cart_pay = []
users_products = {}
print("Processed rows:")
i = 1
for line in f:
date = re.search(r'\d{4}-\d{2}-\d{2}', line).group(0)
time = re.search(r'\d{2}:\d{2}:\d{2}', line).group(0)
action_name = re.search(r'\[\w{8}\]', line).group(0)[1:-1]
ip = re.search(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', line).group(0)
buf = re.search(r'(ttom.com).+', line).group(0)
user_action = re.sub(r'(ttom.com/)', '', buf)
type_action = 'none'
action = {}
action['category'] = 'none'
if not ip in users:
users[ip] = ipToCountry(ip)
users_products[ip] = []
if not user_action or re.match(r'pay\?', user_action):
type_action = "other"
elif re.match(r'cart\?', user_action):
type_action = "cart"
buf = re.search(r'(cart_id=).+', user_action).group(0)
cart_id = re.sub(r'(cart_id=)', '', buf)
if not cart_id in carts:
carts[cart_id] = 0
user_cart_pay = {}
user_cart_pay['user_cart'] = users_products.pop(ip)
user_cart_pay['cart_id'] = cart_id
user_cart_pay['ip'] = ip
users_cart_pay.append(user_cart_pay)
elif re.match(r'success_pay_', user_action):
type_action = "success_pay"
buf = re.search(r'(success_pay_).+', user_action).group(0)
cart_id = re.sub(r'(success_pay_)', '', buf)[:-1]
carts[cart_id] = 1
    elif user_action.count('/') == 1:
type_action = "category"
category = user_action[:-1]
if not category in product_categories:
product_categories.append(category)
action['category'] = category
    elif user_action.count('/') == 2:
type_action = "product"
category = re.split(r'/', user_action)[0]
if not category in product_categories:
product_categories.append(category)
if not ip in users_products:
users_products[ip] = []
if not category in users_products[ip]:
users_products[ip].append(action_name)
action['category'] = category
if not type_action in types_action:
types_action.append(type_action)
action['date'] = date
action['time'] = time
action['ip'] = ip
action['type_action'] = type_action
action['name'] = action_name
actions.append(action)
print('Read row #: ' + format(i))
i = i + 1
f.close()
connection = pymysql.connect(host='localhost',
user='root',
password='',
db='logs',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
try:
print("Table 'user': Adding data ...")
i = 1
with connection.cursor() as cursor:
for key, value in users.items() :
try:
sql = "INSERT INTO `user` (`ip`, `country`) VALUES (%s, %s)"
cursor.execute(sql, (key, value))
connection.commit()
print("Table 'user': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'user': Success!")
print("Table 'product_category': Adding data...")
i = 1
with connection.cursor() as cursor:
for value in product_categories :
try:
sql = "INSERT INTO `product_category` (`name`) VALUES (%s)"
cursor.execute(sql, (value))
connection.commit()
print("Table 'product_category': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'product_category': Success!")
print("Table 'action_type': Adding data...")
i = 1
with connection.cursor() as cursor:
for value in types_action :
try:
sql = "INSERT INTO `action_type` (`name`) VALUES (%s)"
cursor.execute(sql, (value))
connection.commit()
print("Table 'action_type': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'action_type': Success!")
print("Table 'cart': Adding data...")
i = 1
with connection.cursor() as cursor:
for key, value in carts.items() :
try:
sql = "INSERT INTO `cart` (`id_cart`, `success_pay_flag`) VALUES (%s, %s)"
                if value == 1:
cursor.execute(sql, (key, '1'))
else :
cursor.execute(sql, (key, '0'))
connection.commit()
print("Table 'cart': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'cart': Success!")
print("Table 'action': Adding data...")
i = 1
with connection.cursor() as cursor:
for action in actions :
sql = "SELECT `id` FROM `user` WHERE `ip`=%s"
cursor.execute(sql, (action['ip']))
_user = cursor.fetchone()['id']
sql = "SELECT `id` FROM `action_type` WHERE `name`=%s"
cursor.execute(sql, (action['type_action']))
_action_type = cursor.fetchone()['id']
_product_category = 'none'
            if action['category'] != 'none':
sql = "SELECT `id` FROM `product_category` WHERE `name`=%s"
cursor.execute(sql, (action['category']))
_product_category = cursor.fetchone()['id']
try:
                if _product_category != 'none':
sql = "INSERT INTO `action` (`datetime`, `user` , `action_type` , `product_category` , `name`) VALUES (%s, %s, %s, %s, %s)"
cursor.execute(sql, (action['date'] + ' ' + action['time'], _user, _action_type, _product_category, action['name']))
else :
sql = "INSERT INTO `action` (`datetime`, `user` , `action_type` , `name`) VALUES (%s, %s, %s, %s)"
cursor.execute(sql, (action['date'] + ' ' + action['time'], _user, _action_type, action['name']))
connection.commit()
print("Table 'action': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'action': Success!")
print("Table 'cart_to_user': Adding data...")
i = 1
with connection.cursor() as cursor:
for value in users_cart_pay :
sql = "SELECT `id` FROM `user` WHERE `ip`=%s"
cursor.execute(sql, (value['ip']))
_user = cursor.fetchone()['id']
sql = "SELECT `id` FROM `cart` WHERE `id_cart`=%s"
cursor.execute(sql, (value['cart_id']))
_cart = cursor.fetchone()['id']
for action_name in value['user_cart'] :
sql = "SELECT `id` FROM `action` WHERE `name`=%s"
cursor.execute(sql, (action_name))
_action = cursor.fetchone()['id']
try:
sql = "INSERT INTO `cart_to_user` (`user`, `cart`, `action`) VALUES (%s, %s, %s)"
cursor.execute(sql, (_user, _cart, _action))
connection.commit()
print("Table 'cart_to_user': Inserted row #: " + format(i))
i = i + 1
except pymysql.err.IntegrityError as err:
if 'Duplicate entry' in format(err) :
print("Warning: Duplicate: {}".format(err))
else :
raise pymysql.err.IntegrityError(err)
print("Table 'cart_to_user': Success!")
finally:
connection.close()
|
VadimAspirin/usml
|
back/log_mapper.py
|
log_mapper.py
|
py
| 9,005 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9638599695
|
# this class to get user information from user input
class GetUserInfor:
def __init__(self):
print("""Welcome to Elena's Flight Clue.\n
We find the best flight deals and email you.
""")
self.first_name = input("What is your first name?\n").rstrip()
self.last_name = input("What is your last name?\n").rstrip()
self.email = self.check_email().rstrip()
def check_email(self):
email_1 = input("What is your email?\n")
email_2 = input("Type your email again.\n")
if email_1 == email_2:
print("You're in the club")
return email_1
else:
print("You may type wrong email, please type again")
self.check_email()
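# A minimal usage sketch (runs interactively; attribute names follow the class above):
#
#   user = GetUserInfor()
#   print(user.first_name, user.last_name, user.email)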
|
na-lin/100-days-of-Python
|
day39-Flight-deal-Finder/get_user_information.py
|
get_user_information.py
|
py
| 769 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14779367237
|
import adsk.fusion
import unittest
# note: load_tests is required for the "pattern" test filtering functionality in loadTestsFromModule in run()
from fscad.test_utils import FscadTestCase, load_tests
from fscad.fscad import *
class Builder2DTest(FscadTestCase):
def test_line_to(self):
builder = Builder2D((0, 0))
builder.line_to((0, 1))
builder.line_to((1, 1))
builder.line_to((1, 0))
builder.line_to((0, 0))
builder.build().create_occurrence()
def test_spline_line(self):
builder = Builder2D((0, 0))
builder.fit_spline_through((0, 1))
builder.fit_spline_through((1, 1))
builder.fit_spline_through((1, 0))
builder.fit_spline_through((0, 0))
builder.build().create_occurrence()
def test_spline(self):
builder = Builder2D((0, 0))
builder.fit_spline_through((.75, 1.25), (2, 2))
builder.line_to((builder.last_point.x, 0))
builder.line_to((0, 0))
builder.build().create_occurrence()
def test_spline_loop(self):
builder = Builder2D((0, 0))
builder.fit_spline_through((0, 1), (1, 1), (1, 0), (0, 0))
builder.build().create_occurrence()
def run(context):
import sys
test_suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__],
# pattern="spline_loop",
)
unittest.TextTestRunner(failfast=True).run(test_suite)
|
JesusFreke/fscad
|
tests/builder2d_test.py
|
builder2d_test.py
|
py
| 1,550 |
python
|
en
|
code
| 44 |
github-code
|
6
|
44083406715
|
# -*- coding: utf-8 -*-
## Add uid to data gathered from qMp nodes in GuifiSants
## http://dsg.ac.upc.edu/qmpsu/index.php
## meshmon-format.py
## (c) Llorenç Cerdà-Alabern, May 2020.
## debug: import pdb; pdb.set_trace()
import json
cache = {}
graph = []
tabs = {}
def find_node_by_address(d, k, v):
"""
find address v in d[*]['addresses'][k]
k: ether, inet6, inet, inet6ll
"""
if v in cache:
return cache[v]
else:
for n in d.values():
if 'addresses' in n and k in n['addresses']:
for a in n['addresses'][k]:
if v == a:
cache.update({v: n})
return n
return None
def add_ids_to_link(d, links):
"""
"""
for l in links:
if 'llocalIp' in l:
n = find_node_by_address(d, 'inet6ll', l['llocalIp'])
if n: l.update({'id': n['id']})
def add_iwdump_to_l(d, links, ifces, w):
"""
"""
for i in w.values():
for m in i.keys():
n = find_node_by_address(d, 'ether', m)
if n and 'id' in n:
for l in links:
if 'id' in l and l['id'] == n['id'] and l['viaDev'] in ifces \
and ifces[l['viaDev']] == 'wireless':
l.update({'iwdump': i[m]})
break
def get_interfaces(ifces):
"""
"""
res = {}
for i in ifces:
if 'devName' in i and 'type' in i:
res.update({i['devName']: i['type']})
return res
def add_links(d, ng, n):
"""
"""
i = get_interfaces(n['bmx6']['interfaces'])
ng.update({'interfaces': i})
l = n['bmx6']['links']
add_ids_to_link(d, l)
if 'iwdump' in n:
add_iwdump_to_l(d, l, i, n['iwdump'])
ng.update({'links': l})
def add_net_dev(ng, nd):
"""
"""
res = {}
for k,v in nd.items():
if k in ng['interfaces']: res.update({k: v})
if res: ng.update({'net_dev': res})
def build_graph(d):
"""
build a graph with the data gathered from the mesh in dict d
"""
global graph ; graph = [] # initialize
global cache ; cache = {} # initialize
for i in sorted(d.keys(), key=lambda k: d[k]['id']):
c = {}
for w in "loadavg cpu_info cpu_meminfo hostname uid id uptime processes cpu_stat brctl vmstat".split(' '):
if w in d[i]:
c.update({w: d[i][w]})
c.update({'ipv6': i})
graph.append(c)
if 'bmx6' in d[i] and 'interfaces' in d[i]['bmx6'] and 'links' in d[i]['bmx6']:
add_links(d, graph[-1], d[i])
if 'net_dev' in d[i]: add_net_dev(graph[-1], d[i]['net_dev'])
def si2f(x):
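    """
    convert a bmx6 metric string with a K/M/G suffix to a float, e.g. '10K' -> 10000.0
    """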
n = x.find('K')
if(n >= 0):
return float(x[:n]+'e3')
n = x.find('M')
if(n >= 0):
return float(x[:n]+'e6')
n = x.find('G')
if(n >= 0):
return float(x[:n]+'e9')
def build_rt(d):
"""
build rt with the data gathered from the mesh in dict d
"""
global tabs # initialize
tabs = {}
num_nodes = len(d) ;
rt =[[None] * num_nodes for n in range(0,num_nodes)]
adj =[[0] * num_nodes for n in range(0,num_nodes)]
metric = [[None] * num_nodes for n in range(0,num_nodes)]
uid = [None] * num_nodes
for i in sorted(d.keys(), key=lambda k: d[k]['id']):
nid = d[i]['id']
uid[nid] = d[i]['uid']
if 'originators' in d[i]['bmx6']:
for o in d[i]['bmx6']['originators']:
if 'primaryIp' in o:
n = find_node_by_address(d, 'inet6', o['primaryIp'])
if n:
if 'viaIp' in o:
via = find_node_by_address(d, 'inet6ll', o['viaIp'])
if via:
rt[nid][n['id']] = via['id']
if n['id'] == via['id']: adj[nid][n['id']] = 1
if 'metric' in o:
metric[nid][n['id']] = si2f(o['metric'])
tabs.update({'uid': uid})
tabs.update({'rt': rt})
tabs.update({'adj': adj})
tabs.update({'metric': metric})
tabs.update({'out_degree': [sum(x) for x in adj]})
tabs.update({'in_degree': [sum(x) for x in zip(*adj)]})
def show(i):
""
""
print(json.dumps(graph[i], indent=2))
# Local Variables:
# mode: python
# coding: utf-8
# python-indent-offset: 4
# python-indent-guess-indent-offset: t
# End:
|
llorenc/meshmon-parser
|
meshmon-format.py
|
meshmon-format.py
|
py
| 4,474 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19475609630
|
from django.apps import AppConfig
from django.conf import settings
import os
import joblib
class SentimentConfig(AppConfig):
name = 'sentiment'
path = os.path.join(settings.MODELS, 'models.p')
path_emosi = os.path.join(settings.MODELS, 'models_emotion.p')
path_general = os.path.join(settings.MODELS, 'models_general.p')
# separation of data packed in the model joblib
with open(path, 'rb') as joblibFile:
data = joblib.load(joblibFile)
with open(path_emosi, 'rb') as joblibFile:
data_emosi = joblib.load(joblibFile)
with open(path_general, 'rb') as joblibFile:
data_general = joblib.load(joblibFile)
model = data['classifier']
vectorizer = data['vectorizer']
model_emosi = data_emosi['classifier']
vectorizer_emosi = data_emosi['vectorizer']
model_general = data_general['classifier']
vectorizer_general = data_general['vectorizer']
|
kholiqcode/skripsi
|
sentiment/apps.py
|
apps.py
|
py
| 927 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40662041520
|
import os
from tkinter import *
class AuroraOS:
def __init__(self, master):
self.master = master
master.title("AuroraOS")
        # GUI elements
self.label = Label(master, text="Witaj w AuroraOS!")
self.label.pack()
self.dir_button = Button(master, text="DIR", command=self.show_dir)
self.dir_button.pack()
self.cd_button = Button(master, text="CD", command=self.change_dir)
self.cd_button.pack()
self.type_button = Button(master, text="TYPE", command=self.show_file)
self.type_button.pack()
self.copy_button = Button(master, text="COPY", command=self.copy_file)
self.copy_button.pack()
self.del_button = Button(master, text="DEL", command=self.delete_file)
self.del_button.pack()
        # console
self.console_label = Label(master, text="Konsola:")
self.console_label.pack()
self.console = Text(master, height=10)
self.console.pack()
        # text field for entering commands
self.command_label = Label(master, text="Wpisz polecenie:")
self.command_label.pack()
self.command_entry = Entry(master)
self.command_entry.pack()
self.command_button = Button(master, text="Wykonaj", command=self.execute_command)
self.command_button.pack()
def show_dir(self):
files = os.listdir(os.getcwd())
self.console.insert(END, "\n".join(files) + "\n")
def change_dir(self):
path = self.command_entry.get()
try:
os.chdir(path)
self.console.insert(END, f"Zmieniono bieżący katalog na {path}\n")
except FileNotFoundError:
self.console.insert(END, f"Nie znaleziono katalogu {path}\n")
except NotADirectoryError:
self.console.insert(END, f"{path} nie jest katalogiem\n")
def show_file(self):
path = self.command_entry.get()
try:
with open(path, "r") as file:
content = file.read()
self.console.insert(END, f"\n{content}\n")
except FileNotFoundError:
self.console.insert(END, f"Nie znaleziono pliku {path}\n")
def copy_file(self):
paths = self.command_entry.get().split()
try:
with open(paths[0], "rb") as source:
with open(paths[1], "wb") as destination:
destination.write(source.read())
self.console.insert(END, f"Skopiowano {paths[0]} do {paths[1]}\n")
except FileNotFoundError:
self.console.insert(END, f"Nie znaleziono pliku {paths[0]}\n")
def delete_file(self):
path = self.command_entry.get()
try:
os.remove(path)
self.console.insert(END, f"Usunięto plik {path}\n")
except FileNotFoundError:
self.console.insert(END, f"Nie znaleziono pliku {path}\n")
def execute_command(self):
command = self.command_entry.get()
if command == "EXIT":
self.master.quit()
else:
self.console.insert(END, f"{command}\n")
self.command_entry.delete(0, END)
root = Tk()
my_gui = AuroraOS(root)
root.mainloop()
|
Github673/Moje-Aplikacje
|
Python/System Operacyjny.py
|
System Operacyjny.py
|
py
| 3,312 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7047537895
|
# import cv2
#
# videoCapture = cv2.VideoCapture("/home/haoyu/yuhao_video/a827.avi")
#
#
# # fps = videoCapture.get()
# # size = (int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
# # int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
# #
#
# # videoWriter = cv2.VideoWriter('./data/video_plane.avi',)
#
# print(111)
#
# success, frame = videoCapture.read()
# num=0
# while 1:
# # cv2.imshow("Oto Video", frame) #
# # cv2.waitKey(1000 / int(fps)) #
# # videoWriter.write(frame) #
# #
# cv2.imshow("imgs", frame) #
# cv2.waitKey(1) #
# # videoWriter.write(frame) #
# # if num%2==0:
# # cv2.imwrite('./imgs/{0}.jpg'.format(num), frame)
# num+=1
# success, frame = videoCapture.read() #
#
import tensorflow as tf
import matplotlib.pyplot as plt
import time
import PIL.Image as Image
import numpy as np
import os
label_to_colours = {0: [0, 0, 0],
                    1: [128, 0, 0],
                    2: [0, 28, 0],
                    3: [128, 128, 0]
                    }
#
def class_to_img(input):
new_tensor = input[:, :, :, [0]]
# new_tensor=np.expand_dims(new_tensor,axis=-1)
image_rgb = np.repeat(new_tensor, 3, axis=-1)
for num in range(len(input)):
shape=np.shape(input[num])
for i in range(shape[0]):
for j in range(shape[1]):
cls_max=np.argmax(input[num][i][j] ,axis=0)
image_rgb[num][i][j]=label_to_colours[cls_max]
# print(cls_max)
return image_rgb
# detector = Detector()0006
# path = "/home/haoyu/data_tracking_image_2/testing/image_02/0014"
path = "/home/llye/Desktop/imgcrop-ok/"#数据集合的目录
# path="../imgs22"
all_abs = []
for img_name in os.listdir(path):
abs_img = os.path.join(path, img_name)
all_abs.append(abs_img)
sort_abs_imgs = np.sort(all_abs)
print(sort_abs_imgs)
num = 0
globals_imgs_np=[]
for one_img in sort_abs_imgs:
with Image.open(one_img) as im:
num += 1
print(num)
        # resize to the network input size (128x128)
image_resize = im.resize((128, 128))
im_np = np.array(image_resize)
globals_imgs_np.append(im_np)
f, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
plt.ion()
with tf.Graph().as_default():
output_graph_def = tf.GraphDef()
# output_graph_path = "../pb/road_old.pb"
output_graph_path = "./lights_4cls.pb"
# output_graph_path = "./road_t_bn_5w.pb"
# output_graph_path = 'netsmodel/combined_modelok_pnet.pb'
    # path of the saved (frozen) model file
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
tf.import_graph_def(output_graph_def, name="")
# with tf.Session() as sess:
with tf.Session().as_default() as sess:
# print(a.eval())
# print(b.eval(session=sess))
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
input_x = sess.graph.get_tensor_by_name("Placeholder:0")
# output = sess.graph.get_tensor_by_name("generator/BatchNorm_16/FusedBatchNorm:0")
output = sess.graph.get_tensor_by_name("generator/add_10:0")
        # name of the output op saved in the graph; the ':0' suffix selects its output tensor
# for im_np in globals_imgs_np:
# print(im_np)
# # plt.clf()
# a = time.time()
# # pre_img = sess.run(output, {input_x: [np.array(image) / 255 - 0.5]})
# pre_img = sess.run(output, {input_x: [im_np/255-0.5]})
#
#
# ccc = np.argmax(pre_img[0], axis=2)
# aaa = time.time()
#
# ddd=np.multiply(im_np[:,:,2], ccc)
# # image = im_np
# ax2.imshow(ddd.astype(np.uint8))
# ax1.imshow(im_np.astype(np.uint8))
# plt.pause(0.02)
# img1=ax1.imshow(im_np.astype(np.uint8))
# img2=ax2.imshow(im_np.astype(np.uint8))
num=0
for im_np in globals_imgs_np[0:]:
# print(im_np)
# plt.clf()
a = time.time()
# pre_img = sess.run(output, {input_x: [np.array(image) / 255 - 0.5]})
aa=time.time()
pre_img = sess.run(output, {input_x: [im_np/255-0.5]})
print(time.time()-aa)
# output.eval(session=sess,input_x: [im_np/255-0.5])
ccc = np.argmax(pre_img, axis=1)
aaa = time.time()
print(pre_img)
num+=1
if ccc==0:
print("...............红色......................................")
r_img=Image.fromarray(np.uint8(im_np))
r_img.save("/home/llye/Desktop/red/{0}.jpg".format(num))
if ccc==1:
print(".................................绿色.........................................")
r_img = Image.fromarray(np.uint8(im_np))
r_img.save("/home/llye/Desktop/green/{0}.jpg".format(num))
if ccc==2:
print("....................................................黄色......................................")
r_img = Image.fromarray(np.uint8(im_np))
r_img.save("/home/llye/Desktop/yellow/{0}.jpg".format(num))
if ccc == 3:
print("..........................................................................................")
r_img = Image.fromarray(np.uint8(im_np))
r_img.save("/home/llye/Desktop/other/{0}.jpg".format(num))
# ddd=np.multiply(im_np[:,:,2], ccc)
# image = im_np
# ax2.imshow(ddd.astype(np.uint8))
# ax1.imshow(im_np.astype(np.uint8))
# img1.set_data(im_np.astype(np.uint8))
# img2.set_data(ddd.astype(np.uint8))
# plt.pause(2)
plt.clf()
# import cv2
#
# cap = cv2.VideoCapture("./2222.mp4")
# print(cap)
#
#
# success, photo = cap.read()
# print(photo)
# while True:
# # cv2.waitKey(1) #
# #
# photo = cv2.resize(photo, (256, 540), fx=0.5, fy=0.5)
# # print(np.shape(photo))
# # aaa=pnet_detect(photo)
# # b, g, r = cv2.split(photo)
# # img = cv2.merge([r, g, b])
# # im = Image.fromarray(img, "RGB")
#
# # boxes = detector.detect(im)
# # for box in boxes:
# # x1 = int(box[0])
# # y1 = int(box[1])
# # x2 = int(box[2])
# # y2 = int(box[3])
# # w = x2 - x1
# # h = y2 - y1
# # cv2.rectangle(photo, (x1, y1), (x2, y2), (0, 0, 255), 1)
#
# cv2.imshow("capture", photo)
# success, photo = cap.read()
# if cv2.waitKey(100) & 0xFF == ord('q'):
# break
|
ylltest/myscripts-github
|
traffic_lights/new_pil_pd.py
|
new_pil_pd.py
|
py
| 6,966 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8201488763
|
from flask import render_template, redirect, url_for
from flask_login import login_user, logout_user, current_user
from . import home
from ..models import User, Account, Role, Course
from ..forms import LoginForm, RegisterForm
from sha_training_app import db
import datetime
@home.route('/')
def homepage():
courses = Course.query.limit(5)
return render_template('home/index.html', courses=courses)
@home.route('/courses')
def course_listing():
courses = Course.query.all()
return render_template('home/courses.html', courses=courses, standalone=True)
@home.route('/register', methods=['GET', 'POST'])
def register():
register_form = RegisterForm(csrf_enabled=True)
# The user has submitted the form, let's make sure it's valid
if register_form.validate_on_submit():
# Create a new user from the form data
user = User(
username=register_form.username.data,
email=register_form.email.data,
password=register_form.password.data
)
role = Role.query.filter_by(role_id=1).first()
account = Account(
role_id=role.role_id,
date_joined=datetime.datetime.now(),
first_name=register_form.first_name.data,
last_name=register_form.last_name.data
)
user.account = account
db.session.add(user)
db.session.add(account)
db.session.commit()
# At this point the user has been registered and should
# have been sent a confirmation email
return render_template('home/registration_success.html')
# Show the user the registration form
return render_template('home/register.html', form=register_form)
@home.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('user.account'))
login_form = LoginForm(csrf_enabled=True)
if login_form.validate_on_submit():
user = User.query.filter_by(email=login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user)
return redirect(url_for('user.account'))
return render_template('home/login.html', form=login_form)
@home.route('/logout')
def logout():
logout_user() # This should kill the session
return redirect(url_for('home.homepage'))
|
ScottishHD/training_site
|
sha_training_app/_home/views.py
|
views.py
|
py
| 2,385 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19108144876
|
import colors
import info
from icon_path import icon_path
from tooltip import Tooltip
from scan_media_window import ScanMediaWindow
from ingest_window import IngestWindow
from open_window import OpenWindow
from info_window import InfoWindow
try:
import tkinter
except ImportError:
import Tkinter as tkinter
class MenuView():
"""Provides a frame containing munu-level control buttons.
Attributes:
frame(Frame): the containing frame for this view.
"""
def __init__(self, master, open_manager, scan_statistics_window, preferences):
"""Args:
master(a UI container): Parent.
          open_manager(OpenManager): Able to open a new dataset.
"""
# open manager
self._open_manager = open_manager
self._scan_statistics_window = scan_statistics_window
self._preferences = preferences
# make the containing frame
self.frame = tkinter.Frame(master)
# make the frame for the control buttons
button_frame = tkinter.Frame(self.frame, bg=colors.BACKGROUND)
button_frame.pack(side=tkinter.TOP, anchor="w")
# open button
self._open_icon = tkinter.PhotoImage(file=icon_path("open"))
open_button = tkinter.Button(button_frame,
image=self._open_icon, command=self._handle_open,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
open_button.pack(side=tkinter.LEFT)
Tooltip(open_button, "Open scanned output")
# scan statistics button
self._scan_statistics_icon = tkinter.PhotoImage(file=icon_path("view_scan_statistics"))
scan_statistics_button = tkinter.Button(button_frame,
image=self._scan_statistics_icon,
command=self._handle_scan_statistics_window,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
scan_statistics_button.pack(side=tkinter.LEFT, padx=(0,8))
Tooltip(scan_statistics_button, "Show scan statistics")
# ingest button
self._ingest_icon = tkinter.PhotoImage(file=icon_path("ingest"))
ingest_button = tkinter.Button(button_frame, image=self._ingest_icon,
command=self._handle_ingest,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
ingest_button.pack(side=tkinter.LEFT)
Tooltip(ingest_button, "Ingest files into a\nnew hashdb database")
# scan button
self._scan_icon = tkinter.PhotoImage(file=icon_path("scan"))
scan_button = tkinter.Button(button_frame, image=self._scan_icon,
command=self._handle_scan,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
scan_button.pack(side=tkinter.LEFT, padx=(0,8))
Tooltip(scan_button, "Scan a media image")
# info button
self._info_icon = tkinter.PhotoImage(file=icon_path(
"info"))
info_button = tkinter.Button(button_frame,
image=self._info_icon,
command=self._handle_info,
bg=colors.BACKGROUND,
activebackground=colors.ACTIVEBACKGROUND,
highlightthickness=0)
info_button.pack(side=tkinter.LEFT)
Tooltip(info_button, "About SectorScope %s" % info.VERSION)
def _handle_open(self):
OpenWindow(self.frame, self._open_manager)
def _handle_scan_statistics_window(self):
self._scan_statistics_window.show()
def _handle_ingest(self):
IngestWindow(self.frame)
# IngestWindow(self.frame, source_dir='/home/bdallen/KittyMaterial', hashdb_dir='/home/bdallen/Kitty/zzki.hdb')
def _handle_scan(self):
ScanMediaWindow(self.frame)
# ScanMediaWindow(self.frame, media='/home/bdallen/Kitty/jo-favorites-usb-2009-12-11.E01', hashdb_dir='/home/bdallen/Kitty/KittyMaterial.hdb', output_file='/home/bdallen/Kitty/zz_jo.json')
def _handle_info(self):
InfoWindow(self.frame)
|
NPS-DEEP/SectorScope
|
python/menu_view.py
|
menu_view.py
|
py
| 4,423 |
python
|
en
|
code
| 11 |
github-code
|
6
|
26038693036
|
from __future__ import annotations
import logging
import os
import re
import textwrap
from collections import defaultdict
from dataclasses import dataclass
from pants.backend.codegen.protobuf.protoc import Protoc
from pants.backend.codegen.protobuf.target_types import (
AllProtobufTargets,
ProtobufGrpcToggleField,
ProtobufSourceField,
ProtobufSourcesGeneratorTarget,
ProtobufSourceTarget,
)
from pants.backend.go import target_type_rules
from pants.backend.go.dependency_inference import (
GoImportPathsMappingAddressSet,
GoModuleImportPathsMapping,
GoModuleImportPathsMappings,
GoModuleImportPathsMappingsHook,
)
from pants.backend.go.target_type_rules import GoImportPathMappingRequest
from pants.backend.go.target_types import GoOwningGoModAddressField, GoPackageSourcesField
from pants.backend.go.util_rules import (
assembly,
build_pkg,
build_pkg_target,
first_party_pkg,
go_mod,
link,
sdk,
third_party_pkg,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.build_pkg import (
BuildGoPackageRequest,
FallibleBuildGoPackageRequest,
)
from pants.backend.go.util_rules.build_pkg_target import (
BuildGoPackageTargetRequest,
GoCodegenBuildRequest,
)
from pants.backend.go.util_rules.first_party_pkg import FallibleFirstPartyPkgAnalysis
from pants.backend.go.util_rules.go_mod import OwningGoMod, OwningGoModRequest
from pants.backend.go.util_rules.pkg_analyzer import PackageAnalyzerSetup
from pants.backend.go.util_rules.sdk import GoSdkProcess
from pants.backend.python.util_rules import pex
from pants.build_graph.address import Address
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.fs import (
AddPrefix,
CreateDigest,
Digest,
DigestContents,
Directory,
FileContent,
MergeDigests,
RemovePrefix,
Snapshot,
)
from pants.engine.internals.native_engine import EMPTY_DIGEST
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
GeneratedSources,
GenerateSourcesRequest,
HydratedSources,
HydrateSourcesRequest,
SourcesPaths,
SourcesPathsRequest,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionRule
from pants.source.source_root import (
SourceRoot,
SourceRootRequest,
SourceRootsRequest,
SourceRootsResult,
)
from pants.util.dirutil import group_by_dir
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import softwrap
_logger = logging.getLogger(__name__)
class GoCodegenBuildProtobufRequest(GoCodegenBuildRequest):
generate_from = ProtobufSourceField
class GenerateGoFromProtobufRequest(GenerateSourcesRequest):
input = ProtobufSourceField
output = GoPackageSourcesField
@dataclass(frozen=True)
class _SetupGoProtocPlugin:
digest: Digest
_QUOTE_CHAR = r"(?:'|\")"
_IMPORT_PATH_RE = re.compile(rf"^\s*option\s+go_package\s+=\s+{_QUOTE_CHAR}(.*){_QUOTE_CHAR};")
def parse_go_package_option(content_raw: bytes) -> str | None:
content = content_raw.decode()
for line in content.splitlines():
m = _IMPORT_PATH_RE.match(line)
if m:
return m.group(1)
return None
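# Illustrative example (not from the original source): a .proto line such as
#   option go_package = "github.com/example/project/gen/go/greeter";
# is matched by _IMPORT_PATH_RE, and parse_go_package_option returns
# "github.com/example/project/gen/go/greeter".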
class ProtobufGoModuleImportPathsMappingsHook(GoModuleImportPathsMappingsHook):
pass
@rule(desc="Map import paths for all Go Protobuf targets.", level=LogLevel.DEBUG)
async def map_import_paths_of_all_go_protobuf_targets(
_request: ProtobufGoModuleImportPathsMappingsHook,
all_protobuf_targets: AllProtobufTargets,
) -> GoModuleImportPathsMappings:
sources = await MultiGet(
Get(
HydratedSources,
HydrateSourcesRequest(
tgt[ProtobufSourceField],
for_sources_types=(ProtobufSourceField,),
enable_codegen=True,
),
)
for tgt in all_protobuf_targets
)
all_contents = await MultiGet(
Get(DigestContents, Digest, source.snapshot.digest) for source in sources
)
go_protobuf_mapping_metadata = []
owning_go_mod_gets = []
for tgt, contents in zip(all_protobuf_targets, all_contents):
if not contents:
continue
if len(contents) > 1:
raise AssertionError(
f"Protobuf target `{tgt.address}` mapped to more than one source file."
)
import_path = parse_go_package_option(contents[0].content)
if not import_path:
continue
owning_go_mod_gets.append(Get(OwningGoMod, OwningGoModRequest(tgt.address)))
go_protobuf_mapping_metadata.append((import_path, tgt.address))
owning_go_mod_targets = await MultiGet(owning_go_mod_gets)
import_paths_by_module: dict[Address, dict[str, set[Address]]] = defaultdict(
lambda: defaultdict(set)
)
for owning_go_mod, (import_path, address) in zip(
owning_go_mod_targets, go_protobuf_mapping_metadata
):
import_paths_by_module[owning_go_mod.address][import_path].add(address)
return GoModuleImportPathsMappings(
FrozenDict(
{
go_mod_addr: GoModuleImportPathsMapping(
mapping=FrozenDict(
{
import_path: GoImportPathsMappingAddressSet(
addresses=tuple(sorted(addresses)), infer_all=True
)
for import_path, addresses in import_path_mapping.items()
}
),
address_to_import_path=FrozenDict(
{
address: import_path
for import_path, addresses in import_path_mapping.items()
for address in addresses
}
),
)
for go_mod_addr, import_path_mapping in import_paths_by_module.items()
}
)
)
@dataclass(frozen=True)
class _SetupGoProtobufPackageBuildRequest:
"""Request type used to trigger setup of a BuildGoPackageRequest for entire generated Go
Protobuf package.
This type is separate so that a build of the full package can be cached no matter which one of
its component source files was requested. This occurs because a request to build any one of the
source files will be converted into this type and then built.
"""
addresses: tuple[Address, ...]
import_path: str
build_opts: GoBuildOptions
@rule
async def setup_full_package_build_request(
request: _SetupGoProtobufPackageBuildRequest,
protoc: Protoc,
go_protoc_plugin: _SetupGoProtocPlugin,
analyzer: PackageAnalyzerSetup,
platform: Platform,
) -> FallibleBuildGoPackageRequest:
output_dir = "_generated_files"
protoc_relpath = "__protoc"
protoc_go_plugin_relpath = "__protoc_gen_go"
transitive_targets, downloaded_protoc_binary, empty_output_dir = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses)),
Get(DownloadedExternalTool, ExternalToolRequest, protoc.get_request(platform)),
Get(Digest, CreateDigest([Directory(output_dir)])),
)
go_mod_addr = await Get(OwningGoMod, OwningGoModRequest(transitive_targets.roots[0].address))
package_mapping = await Get(
GoModuleImportPathsMapping, GoImportPathMappingRequest(go_mod_addr.address)
)
all_sources = await Get(
SourceFiles,
SourceFilesRequest(
sources_fields=(
tgt[ProtobufSourceField]
for tgt in transitive_targets.closure
if tgt.has_field(ProtobufSourceField)
),
for_sources_types=(ProtobufSourceField,),
enable_codegen=True,
),
)
source_roots, input_digest = await MultiGet(
Get(SourceRootsResult, SourceRootsRequest, SourceRootsRequest.for_files(all_sources.files)),
Get(Digest, MergeDigests([all_sources.snapshot.digest, empty_output_dir])),
)
source_root_paths = sorted({sr.path for sr in source_roots.path_to_root.values()})
pkg_sources = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(tgt[ProtobufSourceField]))
for tgt in transitive_targets.roots
)
pkg_files = sorted({f for ps in pkg_sources for f in ps.files})
maybe_grpc_plugin_args = []
if any(tgt.get(ProtobufGrpcToggleField).value for tgt in transitive_targets.roots):
maybe_grpc_plugin_args = [
f"--go-grpc_out={output_dir}",
"--go-grpc_opt=paths=source_relative",
]
gen_result = await Get(
FallibleProcessResult,
Process(
argv=[
os.path.join(protoc_relpath, downloaded_protoc_binary.exe),
f"--plugin=go={os.path.join('.', protoc_go_plugin_relpath, 'protoc-gen-go')}",
f"--plugin=go-grpc={os.path.join('.', protoc_go_plugin_relpath, 'protoc-gen-go-grpc')}",
f"--go_out={output_dir}",
"--go_opt=paths=source_relative",
*(f"--proto_path={source_root}" for source_root in source_root_paths),
*maybe_grpc_plugin_args,
*pkg_files,
],
# Note: Necessary or else --plugin option needs absolute path.
env={"PATH": protoc_go_plugin_relpath},
input_digest=input_digest,
immutable_input_digests={
protoc_relpath: downloaded_protoc_binary.digest,
protoc_go_plugin_relpath: go_protoc_plugin.digest,
},
description=f"Generating Go sources from {request.import_path}.",
level=LogLevel.DEBUG,
output_directories=(output_dir,),
),
)
if gen_result.exit_code != 0:
return FallibleBuildGoPackageRequest(
request=None,
import_path=request.import_path,
exit_code=gen_result.exit_code,
stderr=gen_result.stderr.decode(),
)
# Ensure that the generated files are in a single package directory.
gen_sources = await Get(Snapshot, Digest, gen_result.output_digest)
files_by_dir = group_by_dir(gen_sources.files)
if len(files_by_dir) != 1:
return FallibleBuildGoPackageRequest(
request=None,
import_path=request.import_path,
exit_code=1,
stderr=textwrap.dedent(
f"""
Expected Go files generated from Protobuf sources to be output to a single directory.
- import path: {request.import_path}
- protobuf files: {', '.join(pkg_files)}
"""
).strip(),
)
gen_dir = list(files_by_dir.keys())[0]
# Analyze the generated sources.
input_digest = await Get(Digest, MergeDigests([gen_sources.digest, analyzer.digest]))
result = await Get(
FallibleProcessResult,
Process(
(analyzer.path, gen_dir),
input_digest=input_digest,
description=f"Determine metadata for generated Go package for {request.import_path}",
level=LogLevel.DEBUG,
env={"CGO_ENABLED": "0"}, # protobuf files should not have cgo!
),
)
# Parse the metadata from the analysis.
fallible_analysis = FallibleFirstPartyPkgAnalysis.from_process_result(
result,
dir_path=gen_dir,
import_path=request.import_path,
minimum_go_version="",
description_of_source=f"Go package generated from protobuf targets `{', '.join(str(addr) for addr in request.addresses)}`",
)
if not fallible_analysis.analysis:
return FallibleBuildGoPackageRequest(
request=None,
import_path=request.import_path,
exit_code=fallible_analysis.exit_code,
stderr=fallible_analysis.stderr,
)
analysis = fallible_analysis.analysis
# Obtain build requests for third-party dependencies.
# TODO: Consider how to merge this code with existing dependency inference code.
dep_build_request_addrs: set[Address] = set()
for dep_import_path in (*analysis.imports, *analysis.test_imports, *analysis.xtest_imports):
# Infer dependencies on other Go packages.
candidate_addresses = package_mapping.mapping.get(dep_import_path)
if candidate_addresses:
# TODO: Use explicit dependencies to disambiguate? This should never happen with Go backend though.
if candidate_addresses.infer_all:
dep_build_request_addrs.update(candidate_addresses.addresses)
else:
if len(candidate_addresses.addresses) > 1:
return FallibleBuildGoPackageRequest(
request=None,
import_path=request.import_path,
exit_code=result.exit_code,
stderr=textwrap.dedent(
f"""
Multiple addresses match import of `{dep_import_path}`.
addresses: {', '.join(str(a) for a in candidate_addresses.addresses)}
"""
).strip(),
)
dep_build_request_addrs.update(candidate_addresses.addresses)
dep_build_requests = await MultiGet(
Get(BuildGoPackageRequest, BuildGoPackageTargetRequest(addr, build_opts=request.build_opts))
for addr in sorted(dep_build_request_addrs)
)
return FallibleBuildGoPackageRequest(
request=BuildGoPackageRequest(
import_path=request.import_path,
pkg_name=analysis.name,
digest=gen_sources.digest,
dir_path=analysis.dir_path,
go_files=analysis.go_files,
s_files=analysis.s_files,
direct_dependencies=dep_build_requests,
minimum_go_version=analysis.minimum_go_version,
build_opts=request.build_opts,
),
import_path=request.import_path,
)
@rule
async def setup_build_go_package_request_for_protobuf(
request: GoCodegenBuildProtobufRequest,
) -> FallibleBuildGoPackageRequest:
# Hydrate the protobuf source to parse for the Go import path.
sources = await Get(HydratedSources, HydrateSourcesRequest(request.target[ProtobufSourceField]))
sources_content = await Get(DigestContents, Digest, sources.snapshot.digest)
assert len(sources_content) == 1
import_path = parse_go_package_option(sources_content[0].content)
if not import_path:
return FallibleBuildGoPackageRequest(
request=None,
import_path="",
exit_code=1,
stderr=f"No import path was set in Protobuf file via `option go_package` directive for {request.target.address}.",
)
go_mod_addr = await Get(OwningGoMod, OwningGoModRequest(request.target.address))
package_mapping = await Get(
GoModuleImportPathsMapping, GoImportPathMappingRequest(go_mod_addr.address)
)
# Request the full build of the package. This indirection is necessary so that requests for two or more
# Protobuf files in the same Go package result in a single cacheable rule invocation.
protobuf_target_addrs_set_for_import_path = package_mapping.mapping.get(import_path)
if not protobuf_target_addrs_set_for_import_path:
return FallibleBuildGoPackageRequest(
request=None,
import_path=import_path,
exit_code=1,
stderr=softwrap(
f"""
No Protobuf files exists for import path `{import_path}`.
Consider whether the import path was set correctly via the `option go_package` directive.
"""
),
)
return await Get(
FallibleBuildGoPackageRequest,
_SetupGoProtobufPackageBuildRequest(
addresses=protobuf_target_addrs_set_for_import_path.addresses,
import_path=import_path,
build_opts=request.build_opts,
),
)
@rule(desc="Generate Go source files from Protobuf", level=LogLevel.DEBUG)
async def generate_go_from_protobuf(
request: GenerateGoFromProtobufRequest,
protoc: Protoc,
go_protoc_plugin: _SetupGoProtocPlugin,
platform: Platform,
) -> GeneratedSources:
output_dir = "_generated_files"
protoc_relpath = "__protoc"
protoc_go_plugin_relpath = "__protoc_gen_go"
downloaded_protoc_binary, empty_output_dir, transitive_targets = await MultiGet(
Get(DownloadedExternalTool, ExternalToolRequest, protoc.get_request(platform)),
Get(Digest, CreateDigest([Directory(output_dir)])),
Get(TransitiveTargets, TransitiveTargetsRequest([request.protocol_target.address])),
)
# NB: By stripping the source roots, we avoid having to set the value `--proto_path`
# for Protobuf imports to be discoverable.
all_sources_stripped, target_sources_stripped = await MultiGet(
Get(
StrippedSourceFiles,
SourceFilesRequest(
tgt[ProtobufSourceField]
for tgt in transitive_targets.closure
if tgt.has_field(ProtobufSourceField)
),
),
Get(
StrippedSourceFiles, SourceFilesRequest([request.protocol_target[ProtobufSourceField]])
),
)
input_digest = await Get(
Digest, MergeDigests([all_sources_stripped.snapshot.digest, empty_output_dir])
)
maybe_grpc_plugin_args = []
if request.protocol_target.get(ProtobufGrpcToggleField).value:
maybe_grpc_plugin_args = [
f"--go-grpc_out={output_dir}",
"--go-grpc_opt=paths=source_relative",
]
result = await Get(
ProcessResult,
Process(
argv=[
os.path.join(protoc_relpath, downloaded_protoc_binary.exe),
f"--plugin=go={os.path.join('.', protoc_go_plugin_relpath, 'protoc-gen-go')}",
f"--plugin=go-grpc={os.path.join('.', protoc_go_plugin_relpath, 'protoc-gen-go-grpc')}",
f"--go_out={output_dir}",
"--go_opt=paths=source_relative",
*maybe_grpc_plugin_args,
*target_sources_stripped.snapshot.files,
],
# Note: Necessary or else --plugin option needs absolute path.
env={"PATH": protoc_go_plugin_relpath},
input_digest=input_digest,
immutable_input_digests={
protoc_relpath: downloaded_protoc_binary.digest,
protoc_go_plugin_relpath: go_protoc_plugin.digest,
},
description=f"Generating Go sources from {request.protocol_target.address}.",
level=LogLevel.DEBUG,
output_directories=(output_dir,),
),
)
normalized_digest, source_root = await MultiGet(
Get(Digest, RemovePrefix(result.output_digest, output_dir)),
Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(request.protocol_target)),
)
source_root_restored = (
await Get(Snapshot, AddPrefix(normalized_digest, source_root.path))
if source_root.path != "."
else await Get(Snapshot, Digest, normalized_digest)
)
return GeneratedSources(source_root_restored)
# Note: The versions of the Go protoc and gRPC plugins are hard coded in the following go.mod. To update,
# copy the following go.mod and go.sum contents to go.mod and go.sum files in a new directory. Then update the
# versions and run `go mod download all`. Copy the go.mod and go.sum contents back into these constants,
# making sure to replace tabs with `\t`.
GO_PROTOBUF_GO_MOD = """\
module org.pantsbuild.backend.go.protobuf
go 1.17
require (
\tgoogle.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0
\tgoogle.golang.org/protobuf v1.27.1
)
require (
\tgithub.com/golang/protobuf v1.5.0 // indirect
\tgithub.com/google/go-cmp v0.5.5 // indirect
\tgolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect
)
"""
GO_PROTOBUF_GO_SUM = """\
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/grpc v1.2.0 h1:v8eFdETH8nqZHQ9x+0f2PLuU6W7zo5PFZuVEwH5126Y=
google.golang.org/grpc v1.2.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
"""
@rule
async def setup_go_protoc_plugin() -> _SetupGoProtocPlugin:
go_mod_digest = await Get(
Digest,
CreateDigest(
[
FileContent("go.mod", GO_PROTOBUF_GO_MOD.encode()),
FileContent("go.sum", GO_PROTOBUF_GO_SUM.encode()),
]
),
)
download_sources_result = await Get(
ProcessResult,
GoSdkProcess(
["mod", "download", "all"],
input_digest=go_mod_digest,
output_directories=("gopath",),
description="Download Go `protoc` plugin sources.",
allow_downloads=True,
),
)
go_plugin_build_result, go_grpc_plugin_build_result = await MultiGet(
Get(
ProcessResult,
GoSdkProcess(
["install", "google.golang.org/protobuf/cmd/[email protected]"],
input_digest=download_sources_result.output_digest,
output_files=["gopath/bin/protoc-gen-go"],
description="Build Go protobuf plugin for `protoc`.",
),
),
Get(
ProcessResult,
GoSdkProcess(
[
"install",
"google.golang.org/grpc/cmd/[email protected]",
],
input_digest=download_sources_result.output_digest,
output_files=["gopath/bin/protoc-gen-go-grpc"],
description="Build Go gRPC protobuf plugin for `protoc`.",
),
),
)
if go_plugin_build_result.output_digest == EMPTY_DIGEST:
raise AssertionError(
f"Failed to build protoc-gen-go:\n"
f"stdout:\n{go_plugin_build_result.stdout.decode()}\n\n"
f"stderr:\n{go_plugin_build_result.stderr.decode()}"
)
if go_grpc_plugin_build_result.output_digest == EMPTY_DIGEST:
raise AssertionError(
f"Failed to build protoc-gen-go-grpc:\n"
f"stdout:\n{go_grpc_plugin_build_result.stdout.decode()}\n\n"
f"stderr:\n{go_grpc_plugin_build_result.stderr.decode()}"
)
merged_output_digests = await Get(
Digest,
MergeDigests(
[go_plugin_build_result.output_digest, go_grpc_plugin_build_result.output_digest]
),
)
plugin_digest = await Get(Digest, RemovePrefix(merged_output_digests, "gopath/bin"))
return _SetupGoProtocPlugin(plugin_digest)
def rules():
return (
*collect_rules(),
UnionRule(GenerateSourcesRequest, GenerateGoFromProtobufRequest),
UnionRule(GoCodegenBuildRequest, GoCodegenBuildProtobufRequest),
UnionRule(GoModuleImportPathsMappingsHook, ProtobufGoModuleImportPathsMappingsHook),
ProtobufSourcesGeneratorTarget.register_plugin_field(GoOwningGoModAddressField),
ProtobufSourceTarget.register_plugin_field(GoOwningGoModAddressField),
# Rules needed for this to pass src/python/pants/init/load_backends_integration_test.py:
*assembly.rules(),
*build_pkg.rules(),
*build_pkg_target.rules(),
*first_party_pkg.rules(),
*go_mod.rules(),
*link.rules(),
*sdk.rules(),
*target_type_rules.rules(),
*third_party_pkg.rules(),
*pex.rules(),
)
|
pantsbuild/pants
|
src/python/pants/backend/codegen/protobuf/go/rules.py
|
rules.py
|
py
| 25,015 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
9304520532
|
from rest_framework.generics import GenericAPIView
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import BasePermission
from rest_framework import status
from game.serializers import GameSerializer, TileSerializer, NextMoveSerializer
from game.models import Tile, Game
from game.node import Node
from game.algorithm import Minimax
from game.heuristics import HeuristicSimpleTreat
from game.rules import GameRules
from game.analyzer import Analyzer
from game.internal_types import TileXY
class TilePermission(BasePermission):
def has_permission(self, request, view) -> bool:
serializer = TileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
game = Game.objects.get(pk=serializer.data["game_id"])
player = serializer.data["player"]
node = Node.from_game(game, player)
return GameRules().check_open_threes(node, TileXY.from_dict(serializer.data))
class GameView(GenericAPIView):
serializer_class = GameSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status.HTTP_201_CREATED)
class TileView(GenericAPIView):
serializer_class = TileSerializer
permission_classes = (TilePermission,)
@staticmethod
def _delete_tiles_by_captures(game, player, captures):
for capture in captures:
Tile.objects.filter(game=game, x_coordinate=capture[0].x, y_coordinate=capture[0].y).delete()
Tile.objects.filter(game=game, x_coordinate=capture[1].x, y_coordinate=capture[1].y).delete()
if player == game.player_1:
game.captures_o += 1
game.save()
elif player == game.player_2:
game.captures_x += 1
game.save()
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
tile = serializer.save()
game = tile.game
player = game.player_1 if tile.player == game.player_2 else game.player_2
node = Node.from_game(game=game, player=player)
captures = node.find_captures_to_delete(tile_xy=TileXY.from_serializer(tile))
node.update_from_captures(captures)
self._delete_tiles_by_captures(game, player, captures)
winner = GameRules().deeper_winner_check(node)
tiles = Tile.objects.filter(game=tile.game)
tiles_serializer = self.serializer_class(instance=tiles, many=True)
return Response(
{
"tiles": tiles_serializer.data,
"captures": {
'x': game.captures_x,
'o': game.captures_o,
},
"winner": winner,
},
status.HTTP_201_CREATED,
)
class NextMoveView(APIView):
serializer_class = NextMoveSerializer
def get(self, request, game_id: int, player: str): # TODO: validate if it is this user turn
serializer = self.serializer_class(data={"game": game_id, "player": player})
serializer.is_valid(raise_exception=True)
game = Game.objects.get(pk=game_id)
Analyzer.refresh()
value, chosen_node = self._get_move(game, player)
# self._print_logs(value, chosen_node)
return Response(
{
'coordinates': chosen_node.new_move if chosen_node else (9, 9),
'time': Analyzer.get(Analyzer.ALL_TIME),
},
status.HTTP_200_OK
)
@Analyzer.update_time(Analyzer.ALL_TIME)
def _get_move(self, game, player):
node = Node.from_game(game, player)
minimax = Minimax(HeuristicSimpleTreat())
value, chosen_node = minimax.calculate_minimax(node, 2)
self._print_logs(value, node)
return value, chosen_node
@staticmethod
def _print_logs(value: float, node: Node):
if not node:
return
# node.print_children(0)
Analyzer.print_results()
print(value)
|
earlyche/gomoku
|
backend/game/views.py
|
views.py
|
py
| 4,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24922743914
|
import time
import os
import sys
SDK_HOME_PATH = os.path.dirname(os.path.abspath(__file__)) + '/../../'
sys.path.append(SDK_HOME_PATH)
up_dir = os.path.dirname(os.path.abspath(__file__)) + '/../'
sys.path.append(up_dir)
from display.lcd import LCD as LCD
from ubo_keypad import * # Might have to revisit this form of import
#initialize LCD and Keypad
lcd = LCD()
class mykeypad(KEYPAD):
listIndex = 0
def __init__(self, *args, **kwargs):
super(mykeypad, self).__init__(*args, **kwargs)
self.test_result = False
def button_event(self):
self.logger.debug(self.buttonPressed)
if self.buttonPressed == "0":
items[self.listIndex][1]()
if self.buttonPressed == "1":
items[self.listIndex + 1][1]()
if self.buttonPressed == "2":
items[self.listIndex + 2][1]()
if self.buttonPressed == "up":
if self.listIndex > 0:
self.listIndex -= 1
if self.buttonPressed == "down":
if self.listIndex < 5:
self.listIndex += 1
displayItems(self.listIndex)
def action1():
item = "Item1"
print(item + " was pressed")
def action2():
item = "Item2"
print(item + " was pressed")
def action3():
item = "Item3"
print(item + " was pressed")
def action4():
item = "Item4"
print(item + " was pressed")
def action5():
item = "Item5"
print(item + " was pressed")
def action6():
item = "Item6"
print(item + " was pressed")
def action7():
item = "Item7"
print(item + " was pressed")
def action8():
item = "Item8"
print(item + " was pressed")
items = (("Item 1", action1),
("Item 2", action2),
("Item 3", action3),
("Item 4", action4),
("Item 5", action5),
("Item 6", action6),
("Item 7", action7),
("Item 8", action8))
def displayItems(i):
lcd.show_menu(title="scroll", menu_items=[items[i][0],
items[i+1][0],
items[i+2][0]])
def main():
try:
keypad = mykeypad()
except:
# did not detect keypad on i2c bus
print("failed to initialize keypad")
index = 0
displayItems(index)
while (1):
time.sleep(1)
pass
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(1)
except SystemExit:
os._exit(0)
|
ubopod/ubo-sdk
|
ubo_keypad/examples/keypad_example_scroll.py
|
keypad_example_scroll.py
|
py
| 2,605 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71765759867
|
"""
REPSVM Agresores v3, pydantic schemas
"""
from pydantic import BaseModel
from lib.schemas_base import OneBaseOut
class RepsvmAgresorOut(BaseModel):
"""Esquema para entregar agresores"""
id: int | None
distrito_id: int | None
distrito_clave: str | None
distrito_nombre: str | None
distrito_nombre_corto: str | None
consecutivo: int | None
delito_generico: str | None
delito_especifico: str | None
nombre: str | None
numero_causa: str | None
pena_impuesta: str | None
observaciones: str | None
sentencia_url: str | None
tipo_juzgado: str | None
tipo_sentencia: str | None
class Config:
"""SQLAlchemy config"""
orm_mode = True
class OneRepsvmAgresorOut(RepsvmAgresorOut, OneBaseOut):
"""Esquema para entregar un agresor"""
|
PJECZ/pjecz-plataforma-web-api-new
|
plataforma_web/v3/repsvm_agresores/schemas.py
|
schemas.py
|
py
| 824 |
python
|
es
|
code
| 0 |
github-code
|
6
|
1826616432
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage.measure import label
from skimage.io import imread, imshow
def read_image(url):
imagem = cv2.cvtColor(imread(url), cv2.COLOR_RGB2HSV)
return imagem
def apply_mask(imagem):
x = imagem.shape[0]
y = imagem.shape[1]
mask = np.zeros((x+1,y+1))
for i in range(0,x):
for j in range(0,y):
h,s,v = imagem[i,j,:]
if (0 <= h <= 20) and (25 <= s <= 180):
mask[i,j] = 1
return mask,x,y
def opening_closing(mask):
kernel = np.ones((18,18),np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
return closing
def label_image(closing,x,y):
label_image = label(closing)
flat_label_image = label_image.flatten()
    # Number of regions
num_regioes = max(flat_label_image)
regioes_dict = {}
    # does not count regions with label index 0 (background)
for i in range(1,int(num_regioes)+1):
regioes_dict[str(i)] = len(np.where(flat_label_image == i)[0])
lista_chaves = list(regioes_dict.keys())
lista_valores = list(regioes_dict.values())
position = lista_valores.index(max(lista_valores))
maior_regiao_indice = lista_chaves[position]
area_imagem = x*y
area_regiao = regioes_dict[maior_regiao_indice]
porcentagem_ocupada = (area_regiao/area_imagem)*100
return num_regioes,porcentagem_ocupada
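# Minimal usage sketch (not part of the original module); 'photo.jpg' is a hypothetical path.
# The intended pipeline is: read image -> skin-tone mask -> morphological opening/closing -> label regions.
if __name__ == '__main__':
    imagem = read_image('photo.jpg')  # hypothetical input file
    mask, x, y = apply_mask(imagem)
    closing = opening_closing(mask)
    num_regioes, porcentagem_ocupada = label_image(closing, x, y)
    print(num_regioes, porcentagem_ocupada)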
|
EricaFer/Nudity-Detection
|
utils/preprocessing.py
|
preprocessing.py
|
py
| 1,487 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
70205052348
|
import os
import argparse
import sys
import warnings
from pathlib import Path
warnings.filterwarnings('ignore')
import torch
import torchvision as tv
import pytorch_lightning as pl
import webdataset as wds
from resnet_sagemaker.models import ResNet
from resnet_sagemaker.callbacks import PlSageMakerLogger, ProfilerCallback
import torch.distributed as dist
from apex.parallel import DistributedDataParallel as DDP
local_rank = int(os.environ.get("LOCAL_RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))
torch.cuda.set_device(local_rank)
if world_size>1:
dist.init_process_group(
backend="nccl", init_method="env://",
)
def parse_args():
cmdline = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cmdline.add_argument('--train_file_dir', default='/opt/ml/input/data/train/',
help="""Path to dataset in WebDataset format.""")
cmdline.add_argument('--validation_file_dir', default='/opt/ml/input/data/validation/',
help="""Path to dataset in WebDataset format.""")
cmdline.add_argument('--max_epochs', default=20, type=int,
help="""Number of epochs.""")
cmdline.add_argument('--num_classes', default=1000, type=int,
help="""Number of classes.""")
cmdline.add_argument('--resnet_version', default=50, type=int,
help="""Resnet version.""")
cmdline.add_argument('-lr', '--learning_rate', default=1e-2, type=float,
help="""Base learning rate.""")
cmdline.add_argument('-b', '--batch_size', default=128, type=int,
help="""Size of each minibatch per GPU""")
cmdline.add_argument('--warmup_epochs', default=1, type=int,
help="""Number of epochs for learning rate warmup""")
cmdline.add_argument('--mixup_alpha', default=0.1, type=float,
help="""Extent of convex combination for training mixup""")
cmdline.add_argument('--optimizer', default='adamw', type=str,
help="""Optimizer type""")
cmdline.add_argument('--amp_backend', default='apex', type=str,
help="""Mixed precision backend""")
cmdline.add_argument('--amp_level', default='O2', type=str,
help="""Mixed precision level""")
cmdline.add_argument('--precision', default=16, type=int,
help="""Floating point precision""")
cmdline.add_argument('--profiler_start', default=128, type=int,
help="""Profiler start step""")
cmdline.add_argument('--profiler_steps', default=32, type=int,
help="""Profiler steps""")
cmdline.add_argument('--dataloader_workers', default=4, type=int,
help="""Number of data loaders""")
cmdline.add_argument('--profiler_type', default='smppy', type=str,
help="""Profiler type""")
return cmdline
def main(ARGS):
train_s3_loc = 'pipe:aws s3 cp {0}train_{{{1:04d}..{2:04d}}}.tar -'.format(ARGS.train_file_dir, 0, 2047)
val_s3_loc = 'pipe:aws s3 cp {0}val_{{{1:04d}..{2:04d}}}.tar -'.format(ARGS.validation_file_dir, 0, 127)
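    # e.g. with the default input dirs these evaluate to WebDataset shard specs such as
    # 'pipe:aws s3 cp /opt/ml/input/data/train/train_{0000..2047}.tar -'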
model_params = {'num_classes': ARGS.num_classes,
'resnet_version': ARGS.resnet_version,
'train_path': train_s3_loc,
'val_path': val_s3_loc,
'optimizer': ARGS.optimizer,
'lr': ARGS.learning_rate,
'batch_size': ARGS.batch_size,
'dataloader_workers': ARGS.dataloader_workers,
'max_epochs': ARGS.max_epochs,
'warmup_epochs': ARGS.warmup_epochs,
'mixup_alpha': ARGS.mixup_alpha,
}
trainer_params = {'gpus': [int(os.environ.get("LOCAL_RANK", 0))],
'max_epochs': ARGS.max_epochs,
'amp_backend': ARGS.amp_backend,
'amp_level': ARGS.amp_level,
'precision': ARGS.precision,
'progress_bar_refresh_rate': 0,
'logger': pl.loggers.TensorBoardLogger('logs/'),
'callbacks': [PlSageMakerLogger(),
ProfilerCallback(start_step=ARGS.profiler_start,
num_steps=ARGS.profiler_steps,
output_dir='logs/profiling/',
profiler_type=ARGS.profiler_type)]
}
model = ResNet(**model_params)
trainer = pl.Trainer(**trainer_params)
trainer.fit(model)
if __name__=='__main__':
cmdline = parse_args()
ARGS, unknown_args = cmdline.parse_known_args()
main(ARGS)
|
johnbensnyder/resnet-sagemaker
|
pytorch/train.py
|
train.py
|
py
| 4,937 |
python
|
en
|
code
| 2 |
github-code
|
6
|
72960120187
|
import string
def subtract(d1, d2):
res = dict()
for key in d1:
if key not in d2:
res[key] = None
return res
a = open('throughtelescope.txt')
b = open('word1.txt')
#c = subtract(a, b)
#for word in c.keys():
#print(word)
#print(subtract(a, b))
def linecount(filename):
count = 0
for line in open(filename):
count += 1
return count
print(linecount('subtractwords.py'))
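# Sketch (not in the original script): `subtract` expects dictionaries keyed by word,
# so the file handles opened above would first need to be converted, e.g.:
#     d1 = {word: None for line in a for word in line.split()}
#     d2 = {word: None for line in b for word in line.split()}
#     for word in subtract(d1, d2):
#         print(word)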
|
derinsola01/Projects
|
subtractwords.py
|
subtractwords.py
|
py
| 414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29572346979
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal
from transformers import AutoTokenizer
from partial_tagger.data.collators import TransformerCollator
from partial_tagger.encoders.transformer import (
TransformerModelEncoderFactory,
TransformerModelWithHeadEncoderFactory,
)
from partial_tagger.training import Trainer
if TYPE_CHECKING:
from partial_tagger.encoders.base import BaseEncoderFactory
def create_trainer(
model_name: str = "roberta-base",
dropout: float = 0.2,
tokenizer_args: dict[str, Any] | None = None,
encoder_type: Literal["default", "with_head"] = "default",
) -> Trainer:
"""Creates an instance of Trainer."""
encoder_factory: BaseEncoderFactory
if encoder_type == "default":
encoder_factory = TransformerModelEncoderFactory(model_name, dropout)
elif encoder_type == "with_head":
encoder_factory = TransformerModelWithHeadEncoderFactory(model_name, dropout)
else:
raise ValueError(f"{encoder_type} is not supported.")
collator = TransformerCollator(
AutoTokenizer.from_pretrained(model_name), tokenizer_args
)
return Trainer(collator=collator, encoder_factory=encoder_factory)
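# Usage sketch (not part of the original module); assumes the "roberta-base" weights
# can be downloaded in the current environment:
#     trainer = create_trainer(model_name="roberta-base", dropout=0.2, encoder_type="with_head")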
|
yasufumy/pytorch-partial-tagger
|
src/partial_tagger/utils.py
|
utils.py
|
py
| 1,230 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7672099342
|
import random
class Vertices:
def __init__(self,x,y, volumePedido, valorPedido, qtdPacotes):
self.volumePedido = volumePedido
self.valorPedido = valorPedido
self.qtdPacotes = qtdPacotes
self.x = x
self.y = y
def __str__(self):
return str(self.v) + " " + str(self.p) + " " + str(self.n) + "\n"
class Veiculo:
def __init__(self, volumeMáximo, valorMáximo, qtdVeiculos, vf, vd, tc, td, ph, pkm, pf):
self.volumeMáximo = volumeMáximo
self.valorMáximo = valorMáximo
self.qtdVeiculos = qtdVeiculos
self.vf = vf
self.vd = vd
self.tc = tc
self.td = td
self.ph = ph
self.pkm = pkm
self.pf = pf
|
LuisFelypeFioravanti/TrabalhoGrafos
|
classes.py
|
classes.py
|
py
| 736 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
36721611160
|
import torch
from torch import nn
from modules import ConvSC, Inception
# Module that generates the sequence of strides.
def stride_generator(N, reverse=False):
strides = [1, 2]*10
if reverse: return list(reversed(strides[:N]))
else: return strides[:N]
# Generate N_S strides, then stack ConvSC blocks
# to build an Encoder module with depth N_S.
class Encoder(nn.Module):
def __init__(self,C_in, C_hid, N_S):
super(Encoder,self).__init__()
strides = stride_generator(N_S)
self.enc = nn.Sequential(
ConvSC(C_in, C_hid, stride=strides[0]),
*[ConvSC(C_hid, C_hid, stride=s) for s in strides[1:]]
)
def forward(self,x):
enc1 = self.enc[0](x)
latent = enc1
for i in range(1,len(self.enc)):
latent = self.enc[i](latent)
return latent,enc1
# Converts the C_hid channels received from Mid_Xnet into C_out channels,
# where C_out equals the original input channel count C.
# The features are upsampled step by step while keeping C_hid channels, and at
# the last layer the spatial information extracted by the encoder is concatenated.
class Decoder(nn.Module):
def __init__(self,C_hid, C_out, N_S):
super(Decoder,self).__init__()
strides = stride_generator(N_S, reverse=True)
self.dec = nn.Sequential(
*[ConvSC(C_hid, C_hid, stride=s, transpose=True) for s in strides[:-1]],
ConvSC(2*C_hid, C_hid, stride=strides[-1], transpose=True)
)
self.readout = nn.Conv2d(C_hid, C_out, 1)
def forward(self, hid, enc1=None):
for i in range(0,len(self.dec)-1):
hid = self.dec[i](hid)
Y = self.dec[-1](torch.cat([hid, enc1], dim=1))
Y = self.readout(Y)
return Y
# Central inception network that acts as the Translator.
# The inception network also has encoder and decoder parts; through skip connections
# the codings extracted by its encoder are injected into its decoder to learn temporal information.
class Mid_Xnet(nn.Module):
def __init__(self, channel_in, channel_hid, N_T, incep_ker = [3,5,7,11], groups=8):
super(Mid_Xnet, self).__init__()
self.N_T = N_T
enc_layers = [Inception(channel_in, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups)]
for i in range(1, N_T-1):
enc_layers.append(Inception(channel_hid, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups))
enc_layers.append(Inception(channel_hid, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups))
dec_layers = [Inception(channel_hid, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups)]
for i in range(1, N_T-1):
dec_layers.append(Inception(2*channel_hid, channel_hid//2, channel_hid, incep_ker= incep_ker, groups=groups))
dec_layers.append(Inception(2*channel_hid, channel_hid//2, channel_in, incep_ker= incep_ker, groups=groups))
self.enc = nn.Sequential(*enc_layers)
self.dec = nn.Sequential(*dec_layers)
def forward(self, x):
B, T, C, H, W = x.shape
x = x.reshape(B, T*C, H, W)
# encoder
skips = []
z = x
for i in range(self.N_T):
z = self.enc[i](z)
if i < self.N_T - 1:
skips.append(z)
# decoder
z = self.dec[0](z)
for i in range(1, self.N_T):
z = self.dec[i](torch.cat([z, skips[-i]], dim=1))
y = z.reshape(B, T, C, H, W)
return y
class SimVP(nn.Module):
def __init__(self, shape_in, hid_S=16, hid_T=256, N_S=4, N_T=8, incep_ker=[3,5,7,11], groups=8):
super(SimVP, self).__init__()
T, C, H, W = shape_in
self.enc = Encoder(C, hid_S, N_S)
self.hid = Mid_Xnet(T*hid_S, hid_T, N_T, incep_ker, groups)
self.dec = Decoder(hid_S, C, N_S)
def forward(self, x_raw):
B, T, C, H, W = x_raw.shape
x = x_raw.view(B*T, C, H, W)
embed, skip = self.enc(x)
_, C_, H_, W_ = embed.shape
z = embed.view(B, T, C_, H_, W_)
hid = self.hid(z)
hid = hid.reshape(B*T, C_, H_, W_)
Y = self.dec(hid, skip)
Y = Y.reshape(B, T, C, H, W)
return Y
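# Illustrative shape check (not part of the original file). The sizes below are
# arbitrary example values; it assumes the ConvSC/Inception modules imported from
# `modules` behave as in the reference SimVP implementation.
if __name__ == "__main__":
    model = SimVP(shape_in=(10, 1, 64, 64))   # T=10 frames, C=1 channel, 64x64 images
    dummy = torch.randn(2, 10, 1, 64, 64)     # (B, T, C, H, W)
    out = model(dummy)
    print(out.shape)                          # expected: torch.Size([2, 10, 1, 64, 64])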
|
J-PARK11/Video_Prediction_using_SimVP
|
model.py
|
model.py
|
py
| 4,304 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42442738126
|
from django.shortcuts import render
from .models import Twit,Company
import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
from django.http import HttpResponse,HttpResponseRedirect,Http404
import jdatetime
from django.db.models import Q # new
import datetime
# Create your views here.
def signal_week(mdate):
week = []
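    # `week` collects up to five entries shaped like {'date': <jdatetime.date>, 'len': <twit count>}
    # (hypothetical example of the returned value: [{'date': jdatetime.date(1401, 5, 21), 'len': 3}, ...]).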
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&',mdate,type(mdate))
for i in range(5):
print(mdate+datetime.timedelta(days=-i-1))
nextday = mdate+datetime.timedelta(days=i+1)
jnextday = jdatetime.date.fromgregorian(date=nextday)
print(':::::::::::::::::::::::::::::::::::::::::::',jnextday)
query = Twit.objects.filter(created_on__year=str(nextday.year)).filter(created_on__month=str(nextday.month)).filter(created_on__day=str(nextday.day))
if query:
week.append({
'date' : jnextday,
'len' : len(query)
})
if not week :
for i in range (5):
PerivousDay = mdate+datetime.timedelta(days=-i-1)
JPerivousDay = jdatetime.date.fromgregorian(date=PerivousDay)
query = Twit.objects.filter(created_on__year=str(PerivousDay.year)).filter(created_on__month=str(PerivousDay.month)).filter(created_on__day=str(PerivousDay.day))
if query:
week.append({
'date' : JPerivousDay,
'len' : len(query)
})
print(week)
return week
def index (request):
# test = Twit.objects.filter(id =1)
print(datetime.date.today())
# twits = Twit.objects.filter(created_on__date=datetime.date.today())
today = datetime.datetime.today()
# twits = Twit.objects.filter(created_on__year=today.year, created_on__month=today.month, created_on__day=today.day)
twits = Twit.objects.filter(created_on__year=today.year, created_on__month=today.month, created_on__day=today.day,\
status=1,avaiable=True,company__status=1)
if len(twits) == 0:
# twits = Twit.objects.all().order_by('-created_on')[:20]
twits = Twit.objects.filter(status=1,avaiable=True,company__status=1).order_by('-created_on')[:20]
companeis = Company.objects.filter(status=1)
print('twits : ',len(twits))
return render(request, 'home.html',{
'twits':twits,
'companeis': companeis,
'jdate' : jdatetime.date.today(),
'week' : signal_week(today)
})
class UnavailableTiwtView(LoginRequiredMixin,View):
def get_object(self, pk):
try:
return Twit.objects.get(pk=pk)
except Twit.DoesNotExist:
raise Http404
def get(self,request,pk) :
print("get pk : ",pk)
twit = self.get_object(pk)
twit.status =0
twit.save()
return HttpResponseRedirect('/')
def post(self,request,pk):
pass
class CompanyDetailView(View):
def get(self,request):
pass
class Search(View):
def get(self,request):
return HttpResponseRedirect('/')
def post(self,request):
today = datetime.datetime.today()
if request.POST.get('search'):
search_text = request.POST.get('search')
data = Twit.objects.filter(
Q(description__contains=search_text)|
Q(company__name__contains=search_text)|
Q(category__name__contains=search_text)).filter(status=1,avaiable=True,company__status=1)
companeis = Company.objects.filter(status=1)
print('******************twits***************** : ',len(data))
return render(request, 'search.html',{
'twits':data,
'companeis': companeis,
'jdate' : jdatetime.date.today(),
'search_text': search_text,
'week' : signal_week(today)
})
else:
return HttpResponseRedirect('/')
class SearchByDate(View):
def post(self,request):
pass
def get(self,request,year,month,day):
try:
date = jdatetime.date(int(year),int(month),int(day))
mdate = date.togregorian()
year,month,day = str(mdate).split('-')
week = signal_week(mdate)
        except:
            print("error when converting date: " + str(year) + "-" + str(month) + "-" + str(day))
            return HttpResponseRedirect('/')
        else:
            # Changed from `finally:`: the original finally block also ran when the date
            # conversion failed, crashing on the undefined `date`/`week` and overriding
            # the redirect above. `else:` runs this block only on success.
data = Twit.objects.filter(created_on__year=str(year)).filter(created_on__month=str(month)).filter(created_on__day=str(day))
companeis = Company.objects.filter(status=1)
print('******************twits***************** : ',len(data))
return render(request, 'search.html',{
'twits':data,
'companeis': companeis,
'jdate' : date,
'search_text': date,
'week' : week,
})
return HttpResponseRedirect('/')
|
mhsharifi96/sursiz_ir
|
backend/bors/views.py
|
views.py
|
py
| 5,221 |
python
|
en
|
code
| 3 |
github-code
|
6
|
39556012979
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 12 10:37:33 2018
@author: Gerardo Cervantes
"""
import xml.etree.cElementTree as ET
from src.coordinates import Coordinates
from src.hotkeys import Hotkeys
class SharedPreferences():
COORDINATES_TAG = 'coordinates'
SPLIT_TAG = 'split_key'
RESET_TAG = 'reset_key'
ROUTE_TAG = 'route'
def write_preferences(self, file_name, coordinates, route_name, hotkeys):
xml_str = self.create_xml(coordinates, route_name, hotkeys)
with open(file_name, "wb") as f:
f.write(xml_str)
def create_xml(self, coordinates, route_name, hotkeys):
root = ET.Element("root")
ET.SubElement(root, self.ROUTE_TAG).text = self.to_valid_xml_str(route_name)
ET.SubElement(root, self.SPLIT_TAG).text = self.to_valid_xml_str(hotkeys.get_split_key())
ET.SubElement(root, self.RESET_TAG).text = self.to_valid_xml_str(hotkeys.get_reset_key())
xml_coordinates = ET.SubElement(root, self.COORDINATES_TAG)
x, y, width, height = coordinates.get_coordinates()
ET.SubElement(xml_coordinates, "x").text = str(x)
ET.SubElement(xml_coordinates, "y").text = str(y)
ET.SubElement(xml_coordinates, "width").text = str(width)
ET.SubElement(xml_coordinates, "height").text = str(height)
xml_str = ET.tostring(root, encoding='utf8', method='xml')
return xml_str
def to_valid_xml_str(self, text):
if text == '':
return ' '
return text
def parse_xml(self, file_name):
try:
tree = ET.parse(file_name)
except FileNotFoundError:
return None, None, None
root = tree.getroot()
route_name = root.find(self.ROUTE_TAG).text
split_key = root.find(self.SPLIT_TAG).text
reset_key = root.find(self.RESET_TAG).text
coordinates_xml = root.find(self.COORDINATES_TAG)
x = coordinates_xml.find("x").text
y = coordinates_xml.find("y").text
width = coordinates_xml.find("width").text
height = coordinates_xml.find("height").text
coordinates = Coordinates()
coordinates.set_coordinates(x, y, width, height)
hotkeys = Hotkeys()
hotkeys.set_split_key(split_key)
hotkeys.set_reset_key(reset_key)
return coordinates, route_name, hotkeys
if __name__ == "__main__":
shared_prefs = SharedPreferences()
coordinates = Coordinates()
coordinates.set_coordinates(20, 25, 50, 30)
hotkeys = Hotkeys()
file_name = 'example_pref_file.zd'
shared_prefs.write_preferences(file_name, coordinates, '', hotkeys)
coordinates, route_name, hotkeys = shared_prefs.parse_xml(file_name)
print(coordinates)
print(hotkeys.get_split_key())
print(hotkeys.get_reset_key())
print(route_name)
# xml_str = shared_prefs.create_xml(coordinates, 'Home', 'Other', 'Route')
# shared_prefs.xml_print(xml_str)
|
gcervantes8/Star-Classifier-For-Mario-64
|
src/shared_preferences.py
|
shared_preferences.py
|
py
| 3,050 |
python
|
en
|
code
| 8 |
github-code
|
6
|
111612310
|
import json
f = open('dados.json')
Dados = json.load(f)
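# Assumed (hypothetical) structure of dados.json: a list of daily records such as
# [{"dia": 1, "valor": 0.0}, {"dia": 2, "valor": 1500.75}, ...]; only the "valor"
# key is used below.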
Dados = [x for x in Dados if x['valor'] > 0]
menor = float("inf")
# lowest daily revenue in the month
for x in Dados:
atual = x['valor']
if(menor > atual):
menor = atual
print('O menor valor de faturamento ocorrido em um dia do mês foi de ', menor)
# highest daily revenue in the month
maior = 0.0
for x in Dados:
atual = x['valor']
if(maior < atual):
maior = atual
print('O maior valor de faturamento ocorrido em um dia do mês foi de ', maior)
# number of days whose daily revenue exceeded the monthly average
totalValor = 0.0
for x in Dados:
atual = x['valor']
totalValor = totalValor + float(atual)
media = totalValor/30
dias = 0
for x in Dados:
atual = x['valor']
if(atual > media):
dias = dias + 1
print('O número de dias no mês em que o valor do faturamento diário superou à média mensal foi de ', dias)
f.close()
|
CaioPyro/Target_Sistemas
|
Faturamento/main.py
|
main.py
|
py
| 890 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
6742136861
|
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier("face.xml")
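# "face.xml" is assumed to be a Haar cascade file shipped alongside this script
# (e.g. OpenCV's haarcascade_frontalface_default.xml, available under cv2.data.haarcascades).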
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
    # Our operations on the frame come here:
    # convert to grayscale for the cascade detector (the original assigned the raw
    # BGR frame despite naming the variable `gray`).
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# find face
# Detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.CASCADE_SCALE_IMAGE
)
#print ('Found {0} faces!'.format(len(faces)))
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
    cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
khanab85/FaceDetectors
|
start.py
|
start.py
|
py
| 866 |
python
|
en
|
code
| 0 |
github-code
|
6
|