seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt
---|---|---|---|---|---|---|---|---|---|---|---|---|
42641515679
|
# -*- coding: utf-8 -*-
# @Time : 2020/12/13 11:04
# @Author : Joker
# @Site :
# @File : draw.py
# @Software: PyCharm
import numpy as np
import matplotlib.pyplot as plt
m = 20 # number of rows (data points)
n = 2 # number of columns (dimensions)
c = 5 # number of clusters
test_point = [2, 6] # coordinates of the test point
if __name__ == '__main__':
# path of the data file
path = "C:/Users/99259/source/repos/k-means/k-means/point.txt"
# raw lines read from the file
file = []
# array of source points
data = np.zeros((m + c, n))
# read the point file
for line in open(path, "r"):
# strip the newline
line = line.strip()
file.append(line)
# store the file data in the array
for i in range(m + c):
for j in range(n):
data[i][j] = float(file[i].split(' ')[j])
# same as above, but this time for the category file
cate_path = "C:/Users/99259/source/repos/k-means/k-means/category.txt"
cate_file = []
cate = np.zeros(m)
for line in open(cate_path, 'r'):
# strip the newline
line = line.strip()
cate_file.append(line)
for i in range(m):
cate[i] = int(cate_file[i])
# configure matplotlib so Chinese labels are not garbled
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('数据点分布')
# color palette
color = ['red', 'blue', 'pink', 'yellow', 'green', 'purple']
# plot the source data points
# points of different classes are kept in separate lists
x = [[] for i in range(c)] # x-axis data
y = [[] for i in range(c)] # y-axis data
for i in range(m):
for j in range(c):
if cate[i] == j:
x[j].append(data[i][0])
y[j].append(data[i][1])
# plot the points of each class separately
for i in range(c):
plt.scatter(x[i], y[i], color=color[i], label=("类别%d" % (i + 1)))
# plot the cluster centers
point_x = []
point_y = []
for i in range(c):
point_x.append([data[m + i][0]])
point_y.append([data[m + i][1]])
plt.scatter(point_x, point_y, color='black', marker='*', label="中心点")
# radius of each cluster
radius = np.zeros(c)
# iterate over the classes
for i in range(c):
# track the largest distance r along the x and y axes
# if a class contains a single point, use a radius of 0.2
max_dis = 0.2
# iterate over the points
for j in range(len(x[i])):
dis_x = x[i][j] - point_x[i]
dis_y = y[i][j] - point_y[i]
# compute the Euclidean distance
dis = np.sqrt(pow(dis_x, 2) + pow(dis_y, 2))
# update the maximum radius
if dis > max_dis:
max_dis = dis
# the maximum distance becomes the class radius
radius[i] = max_dis
# draw the radius circle of each class
for i in range(c):
# define the circle center and radius
x = point_x[i][0]
y = point_y[i][0]
r = radius[i]
# x coordinates of the circle points are a
a = np.arange(x - r, x + r, 0.0001)
# y offsets of the circle points are b
b = np.sqrt(pow(r, 2) - pow((a - x), 2))
# draw the upper half of the circle
plt.plot(a, y + b, color=color[i], linestyle='-')
# draw the lower half of the circle
plt.plot(a, y - b, color=color[i], linestyle='-')
# t temporarily stores the class of (2, 6)
# the test point coordinates can be changed at the top of the file
t = 0
# start with a very large value
d = 100
# iterate over the classes
for i in range(c):
# distance from the test point to each center
dis = np.sqrt(pow((test_point[0] - point_x[i][0]), 2) + pow((test_point[1] - point_y[i][0]), 2))
# keep the smallest distance
if dis < d:
d = dis
t = i
# plot the test point
plt.scatter(test_point[0], test_point[1], c=color[t], marker='x', label='(2,6)')
plt.legend()
# save the figure
plt.savefig(r'C:/Users/99259/source/repos/k-means/k-means/show.png', dpi=300)
plt.show()
|
Chimaeras/Data_Mining_ex
|
src/category_draw.py
|
category_draw.py
|
py
| 3,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2856090188
|
import unittest
from conans.test.tools import TestClient
from conans.util.files import load
import os
import platform
class ConanEnvTest(unittest.TestCase):
def conan_env_deps_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
def package_info(self):
self.env_info.var1="bad value"
self.env_info.var2.append("value2")
self.env_info.var3="Another value"
self.env_info.path = "/dir"
'''
files = {}
files["conanfile.py"] = conanfile
client.save(files)
client.run("export lasote/stable")
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello2"
version = "0.1"
def config(self):
self.requires("Hello/0.1@lasote/stable")
def package_info(self):
self.env_info.var1="good value"
self.env_info.var2.append("value3")
'''
files["conanfile.py"] = conanfile
client.save(files, clean_first=True)
client.run("export lasote/stable")
client.run("install Hello2/0.1@lasote/stable --build -g virtualenv")
ext = "bat" if platform.system() == "Windows" else "sh"
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "activate.%s" % ext)))
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "deactivate.%s" % ext)))
activate_contents = load(os.path.join(client.current_folder, "activate.%s" % ext))
deactivate_contents = load(os.path.join(client.current_folder, "deactivate.%s" % ext))
self.assertNotIn("bad value", activate_contents)
self.assertIn("var1=good value", activate_contents)
if platform.system() == "Windows":
self.assertIn("var2=value3;value2;%var2%", activate_contents)
else:
self.assertIn("var2=value3:value2:$var2", activate_contents)
self.assertIn("Another value", activate_contents)
self.assertIn("PATH=/dir", activate_contents)
self.assertIn('var1=', deactivate_contents)
self.assertIn('var2=', deactivate_contents)
|
AversivePlusPlus/AversivePlusPlus
|
tools/conan/conans/test/integration/conan_env_test.py
|
conan_env_test.py
|
py
| 2,180 |
python
|
en
|
code
| 31 |
github-code
|
6
|
40796544139
|
import pydantic
from pydantic import validator
import typing
from uuid import UUID, uuid4
class SchemaCustomer(pydantic.BaseModel):
id: str
name: str
last_name: str
email: pydantic.EmailStr
age: pydantic.PositiveInt
@validator('id', pre=True, always=True)
def convert_id_to_str(cls, v):
return str(v)
class SchemaCustomerCreation(pydantic.BaseModel):
name: str
last_name: str
email: pydantic.EmailStr
age: pydantic.PositiveInt
class SchemaCustomerUpdate(pydantic.BaseModel):
name: typing.Union[str, None]
last_name: typing.Union[str, None]
email: typing.Union[pydantic.EmailStr, None]
age: typing.Union[pydantic.PositiveInt, None]
|
edmon1024/workshop-api-ejemplo-fastapi
|
app/schemas.py
|
schemas.py
|
py
| 708 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26671808814
|
# Dictionary holding the data
clientes={45471:["Luis Perez",45,"BJX", True], 8944411:["FernandaGarcia",25,"JAL", True],
5223:["Alejandra Ortiz",33,"JDL", True]}
# Function to add clients
def Agregar():
# Loop to avoid errors with the INE input
while True:
# try/except so the input can only be an int
try:
INE = int(input("INE del pasajero: "))
break
except:
print("Error con la INE ")
# Ask for the name
name = input("Nombre del pasajero: ")
# Loop to avoid errors with the age input
while True:
# try/except so the input can only be an int
try:
age = int(input("Edad del pasajero: "))
print("\n")
break
except:
print("Error con la edad \n")
print(
"Destino | Código IATA \n",
"Guanajuato | BJX \n",
"Guadalajara | GDL \n",
"Veracruz | JAL \n"
)
# IATA code input
iata = input("IATA del pasajero: ")
iata = iata.upper()
# If block so the input is only one of the 3 IATA codes
if iata == "BJX":
iata = "BJX"
elif iata == "GDL":
iata = "GDL"
elif iata == "JAL" :
iata = "JAL"
else:
print("Error con el IATA \n")
# While loop to avoid errors with the client input
while True:
prefer = input("Cliente preferente (Si/No): ")
prefer = prefer.upper()
# If block so the input is only 'SI' or 'NO'
if prefer == "SI":
preferential = True
break
elif prefer == "NO":
preferential = False
break
else:
print("Error con la preferencia \n")
# Add the data to the dictionary: the key is the INE and the rest are the values
clientes [INE] = [name, age, iata, preferential]
print("Pasajero agregado \n")
# while loop to avoid errors
while True:
# Input for adding another client
salir = input("Quiere agrecar otro cliente (Si/No): ")
salir = salir.upper()
if salir == "SI":
# Call the function again so it works as a loop
print("\n")
Agregar()
elif salir == "NO":
# end the function
print("\n")
print("Volviendo al menu")
print("\n")
break
else:
print("Error con la eleccion \n")
# Function to delete clients
def eliminar():
while True:
# Input asking whether the user wants to delete
do = input("Quiere eliminar un cliente (Si/No): ")
do = do.upper()
# If block so the only valid inputs are "SI" and "NO"
if do == "SI":
# Input to know which client will be deleted
del_Key = int(input("INE del pasajero que quiere eliminar: "))
# If block to check whether the client is in the dictionary
if del_Key in clientes:
# Delete the client
del clientes[del_Key]
print("Se ha eliminado al cliente \n")
break
else:
print("No se ha encontrado al cliente \n")
elif do == "NO":
# End the function
print("\n")
break
else:
print("Error con la eleccion \n")
# Function to view the clients
def Mostrar():
print(" Mostrar todos los clientes ('1') \n",
"Mostar los cliente preferente ('2') \n",
"Mostar los clientes normales ('3')")
# input for the choice
hacer = input(":")
if hacer == "2":
print("\n")
# for loop over the dictionary
for key in clientes:
# for loop over the list stored under the key
for a in clientes[key]:
# print the client if they are preferential
if a == True:
print(clientes[key])
print("\n")
elif hacer == "3":
print("\n")
for key in clientes:
for a in clientes[key]:
# print the client if they are not preferential
if a == False:
print(clientes[key])
print("\n")
elif hacer == "1":
print("\n")
for key in clientes:
# Print every client
print(clientes[key])
print("\n")
else:
print("Error con la eleccion \n")
# Function to view the clients' average age
def edad():
edad_total = 0
print(" Edad promedio de todos los clientes ('1') \n",
"Edad promedio delos cliente preferentes ('2') \n")
# input for the choice
do = input(": ")
if do == "2":
print("\n")
cont = 0
for key in clientes:
for a in clientes[key]:
# if the client is preferential, add their age to the total
if a == True:
edad_total += clientes[key][1]
cont += 1
print("\n")
# Print the average
print("Edad promedio de los clientes preferentes: ", edad_total/cont)
elif do == "1":
print("\n")
for key in clientes:
# Add each client's age to the total
edad_total += clientes[key][1]
# Print the average
print("Edad promedio de todos los clientes: ", edad_total/(len(clientes)))
print("\n")
else:
print("Error con la eleccion \n")
|
JoseCarlosLugo/Ejercicio-retadores-6-7-8
|
Ejercicio_8_func.py
|
Ejercicio_8_func.py
|
py
| 6,125 |
python
|
es
|
code
| 0 |
github-code
|
6
|
11670856973
|
# See subject at https://www.ilemaths.net/sujet-suite-864999.html
"""
The Conway look-and-say sequence
"""
from itertools import groupby, islice
def gen_conway(germe):
"""Generate the Conway sequence from the seed"""
while True:
yield germe
germe = ''.join(f"{len(tuple(g))}{c}" for c, g in groupby(germe))
def main():
"""Main entry point of the program"""
germe = input("Donner le premier terme de la suite de Conway : ")
n = int(input("Combien de termes voulez-vous calculer ? "))
for i, terme in enumerate(islice(gen_conway(germe), n+1)):
print(f"terme numéro {i}: \t{terme}")
if __name__ == "__main__":
main()
|
bdaene/ilemaths
|
suite-864999.py
|
suite-864999.py
|
py
| 667 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
32094902662
|
''' Problem 1251: Splitting a Word
Problem
A word made up of lowercase letters.
Split the word into three smaller words, each of length at least 1.
Reverse each of the three smaller words, then join them back together in the original order.
Word: arrested
Split into three words: ar / rest / ed
Reverse each part: ra / tser / de
Join: ratserde
Given a word, write a program that prints the lexicographically smallest word that can be built this way.
Input
The first line contains a word of lowercase English letters. Its length is between 3 and 50.
Output
Print the answer on the first line.'''
'''
(Instructor's solution, morning of 02/03)
# Find the index pair for every way of cutting the string twice.
index   0 1 2 3 4 5 6 7
string [a r r e s t e d]
N = 8
i : position of the first cut
i >> minimum min = 1 # cut just before index [1]
maximum max = N-2 # cut just before index [N-1]
range(1, N-1)
: 1 2 3 4 5 6
j : position of the second cut
j >> minimum min = i+1 # cut just before index [i+1]
maximum max = N-1 # cut just before index [N]
range(i+1, N)
'''
"""mobitel"""
import sys
sys.stdin = open("input.txt", "r")
import heapq
lst = []
word = str(input()) # mobitel
N = len(word)
# Find the indices of the two cut positions (the word is cut twice in total)
# For the first cut, i runs from just before index [1] (= just after index [0]) up to just before the last character
for i in range(1,N-1): # i = 1
for j in range(i+1,N): # j = range(2,N)
w1 = word[0:i] # w1 = m
w2 = word[i:j] # w2 = o
w3 = word[j:N] # w3 = bitel
# The three substrings obtained by cutting at the two indices found above
# Reverse each one by slicing, then push it onto the list with heapq so the list stays ordered
heapq.heappush(lst, f'{w1[::-1]+ w2[::-1]+ w3[::-1]}')
print(lst[0]) # the heap keeps the smallest string at index 0, so print the first element
'''
from pprint import pprint
import heapq
for _ in word:
heapq.heappush(lst, ord(_))
print(ord(_))
print(lst)
for w in lst:
print(chr(w), end = ' ')
print()
print()
'''
'''
# 0 1
# 0 2
# 1 3
# 1 4
# 2 4
# 2 5
# 4 6
# adjacency matrix
# 7 * 7
# a matrix representing the relations between vertices
# its size is determined by N, the number of vertices
N = 7
graph = [ [0] * N for _ in range (N)]
pprint(graph)
'''
|
doll2gom/TIL
|
KDT/week6/02.03/4_1251.py
|
4_1251.py
|
py
| 2,583 |
python
|
ko
|
code
| 2 |
github-code
|
6
|
74537456828
|
import numpy as np, cv2
def draw_histo(hist, shape=(200, 256)):
hist_img = np.full(shape, 255, np.uint8) # initialize so the background is white
cv2.normalize(hist, hist, 0, shape[0], cv2.NORM_MINMAX) # rescale the bin counts so the minimum is 0 and the maximum equals the graph height
gap = hist_img.shape[1]/hist.shape[0]
for i, h in enumerate(hist):
x = int(round(i*gap))
w = int(round(gap))
cv2.rectangle(hist_img, (x, 0, w, int(h)), 0, cv2.FILLED)
return cv2.flip(hist_img, 0)
|
binlee52/OpenCV-python
|
Common/histogram.py
|
histogram.py
|
py
| 539 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10699282838
|
import tensorflow as tf
import os
from xdnlp.utils import default_logger as logging
def load_data_from_directory(_path: str, batch_size, validation_split=0.1, seed=123, label_mode='categorical',
train=True):
"""train_dir: the train data dir
test_dir: the test data dir
Just set the directory:
```
main_directory/
...class_a/
......a_text_1.txt
......a_text_2.txt
...class_b/
......b_text_1.txt
......b_text_2.txt
```
"""
train_ds = None
val_ds = None
class_names = None
if train:
train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(_path, 'train'), batch_size=batch_size, validation_split=validation_split,
subset='training', seed=seed, label_mode=label_mode)
class_names = train_ds.class_names
train_ds = train_ds.cache().prefetch(tf.data.AUTOTUNE)
val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(_path, 'train'), batch_size=batch_size, validation_split=validation_split,
subset='validation', seed=seed, label_mode=label_mode)
val_ds = val_ds.cache().prefetch(tf.data.AUTOTUNE)
test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(_path, 'test'), batch_size=batch_size, label_mode=label_mode)
if class_names is None:
class_names = test_ds.class_names
test_ds = test_ds.cache().prefetch(tf.data.AUTOTUNE)
logging.info(f"Load data from directory successfully, class_names: {class_names}")
return train_ds, val_ds, test_ds, class_names
def get_vectorize_layer(max_features, max_len, train_ds: tf.data.Dataset = None,
vocabulary=None, output_mode='int', split='whitespace') -> tf.keras.layers.TextVectorization:
vectorize_layer = tf.keras.layers.TextVectorization(
max_tokens=max_features,
split=split,
output_mode=output_mode,
output_sequence_length=max_len,
pad_to_max_tokens=True)
if train_ds is not None:
text_ds = train_ds.map(lambda x, y: x)
vectorize_layer.adapt(text_ds)
else:
assert vocabulary is not None, "if train_ds is None, vocabulary can not be None"
vectorize_layer.adapt(tf.data.Dataset.from_tensor_slices(["just for init weights"]))
vectorize_layer.set_vocabulary(vocabulary)
logging.info(f"Generate vectorize layer successfully, and adapt: {vectorize_layer.is_adapted}")
return vectorize_layer
def get_bert_tokenizer(vocab):
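# Build a static lookup table mapping each vocabulary token to its index,
# with one extra bucket for out-of-vocabulary tokens.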
lookup_table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(
keys=vocab,
key_dtype=tf.string,
values=tf.range(tf.size(vocab, out_type=tf.int64), dtype=tf.int64),
value_dtype=tf.int64),
num_oov_buckets=1,
lookup_key_dtype=tf.string
)
def train_format_data(filename):
pass
|
mikuh/xdnlp
|
xdnlp/classify/utils.py
|
utils.py
|
py
| 2,972 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71484039228
|
class Cube:
def __init__(self, x, y, z, s):
self.x, self.y, self.z = x, y, z
self.s = s
def is_in_cube(self, x, y, z):
return self.x <= x <= self.x + self.s and self.y <= y <= self.y + self.s and self.z <= z <= self.z + self.s
def intersect(self, C):
dxyz = [(0, 0, 0),
(C.s, 0, 0), (0, C.s, 0), (0, 0, C.s),
(C.s, C.s, 0), (C.s, 0, C.s), (0, C.s, C.s),
(C.s, C.s, C.s)]
for dx1, dy1, dz1 in dxyz:
nx1, ny1, nz1 = C.x + dx1, C.y + dy1, C.z + dz1
if self.is_in_cube(nx1, ny1, nz1):
for dx2, dy2, dz2 in dxyz:
nx2, ny2, nz2 = self.x + dx2, self.y + dy2, self.z + dz2
if C.is_in_cube(nx2, ny2, nz2):
a, b, c = abs(nx1 - nx2), abs(ny1 - ny2), abs(nz1 - nz2)
if a * b * c == 0:
continue
# print(a, b, c, end=':')
return 2 * (a * b + b * c + c * a)
return 0
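# edges[i] lists the cubes whose surface touches cube i; inters[(i, j)] stores
# the surface area shared between cubes i and j.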
edges = list()
inters = dict()
def calc_overlap(vs):
ret = sum(inters.get((vs[i], vs[i + 1]), 0) for i in range(len(vs) - 1))
if len(vs) > 2:
ret += inters.get((vs[-1], vs[0]), 0)
return ret
def dfs(v, par, vs, res):
if res == 0:
return calc_overlap(vs)
ret = -1
for e in edges[v]:
if e != par:
vs.append(e)
ret = max(ret, dfs(e, v, vs, res - 1))
vs.pop()
return ret
while True:
N, K, S = map(int, input().split())
# print((N, K, S))
if not (N | K | S):
break
cubes = []
for _ in range(N):
x, y, z = map(int, input().split())
cubes.append(Cube(x, y, z, S))
# cubes = [Cube(*map(int, input().split()), S) for _ in range(N)]
edges = [[] for _ in range(N)]
inters = dict()
for i in range(N):
for j in range(i + 1, N):
sur = cubes[i].intersect(cubes[j])
if sur > 0:
# print(i, j, cubes[i].intersect(cubes[j]))
inters[i, j] = inters[j, i] = sur
edges[i].append(j)
edges[j].append(i)
# print(edges, inters)
ans = -1
for i in range(N):
ans = max(ans, dfs(i, -1, [i], K - 1))
print(-1 if ans == -1 else S * S * 6 * K - ans)
|
knuu/competitive-programming
|
aoj/16/aoj1612.py
|
aoj1612.py
|
py
| 2,362 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70416778427
|
from config import config
import random
import requests
import chardet
from db.db_select import sqlhelper
import threading
lock = threading.Lock()
class Downloader(object):
@staticmethod
def download(url):
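# Try a plain request first; on failure, retry through random proxies taken
# from the database, up to config.RETRY_TIME attempts.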
try:
r = requests.get(url=url, headers=config.get_header(), timeout=config.TIMEOUT)
r.encoding = chardet.detect(r.content)['encoding']
if (not r.ok) or len(r.content) < 500:
raise ConnectionError
else:
return r.text
except:
count = 0 # retry counter
lock.acquire()
proxylist = sqlhelper.select(10)
lock.release()
if not proxylist:
return None
while count < config.RETRY_TIME:
try:
proxy = random.choice(proxylist)
ip = proxy[0]
port = proxy[1]
proxies = {"http": "http://{}:{}".format(ip, port), "https": "http://{}:{}".format(ip, port)}
r = requests.get(url=url, headers=config.get_header(), timeout=config.TIMEOUT, proxies=proxies)
r.encoding = chardet.detect(r.content)['encoding']
if (not r.ok) or len(r.content) < 500:
raise ConnectionError
else:
return r.text
except:
count += 1
return None
|
queenswang/IpProxyPool
|
spider/HtmlDownloader.py
|
HtmlDownloader.py
|
py
| 1,469 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6460673982
|
import logging
from pprint import pprint # noqa
from olefile import isOleFile, OleFileIO
from ingestors.support.timestamp import TimestampSupport
from ingestors.support.encoding import EncodingSupport
log = logging.getLogger(__name__)
class OLESupport(TimestampSupport, EncodingSupport):
"""Provides helpers for Microsoft OLE files."""
def decode_meta(self, meta, prop):
try:
value = getattr(meta, prop, None)
if not isinstance(value, bytes):
return
encoding = "cp%s" % meta.codepage
return self.decode_string(value, encoding)
except Exception:
log.warning("Could not read metadata: %s", prop)
def extract_ole_metadata(self, file_path, entity):
with open(file_path, "rb") as fh:
if not isOleFile(fh):
return
fh.seek(0)
try:
ole = OleFileIO(fh)
self.extract_olefileio_metadata(ole, entity)
except (RuntimeError, IOError):
# OLE reading can go fully recursive, at which point it's OK
# to just eat this runtime error quietly.
log.warning("Failed to read OLE data: %r", entity)
except Exception:
log.exception("Failed to read OLE data: %r", entity)
def extract_olefileio_metadata(self, ole, entity):
try:
entity.add("authoredAt", self.parse_timestamp(ole.root.getctime()))
except Exception:
log.warning("Failed to parse OLE ctime.")
try:
entity.add("modifiedAt", self.parse_timestamp(ole.root.getmtime()))
except Exception:
log.warning("Failed to parse OLE mtime.")
meta = ole.get_metadata()
entity.add("title", self.decode_meta(meta, "title"))
entity.add("author", self.decode_meta(meta, "author"))
entity.add("author", self.decode_meta(meta, "last_saved_by"))
entity.add("author", self.decode_meta(meta, "company"))
entity.add("summary", self.decode_meta(meta, "notes"))
entity.add("generator", self.decode_meta(meta, "creating_application"))
entity.add("authoredAt", self.decode_meta(meta, "create_time"))
entity.add("modifiedAt", self.decode_meta(meta, "last_saved_time"))
entity.add("language", self.decode_meta(meta, "language"))
|
alephdata/ingest-file
|
ingestors/support/ole.py
|
ole.py
|
py
| 2,390 |
python
|
en
|
code
| 45 |
github-code
|
6
|
19993528742
|
"""
This script crawls data about Malaysian stock indices and stores the output in a csv file.
"""
import requests
from bs4 import BeautifulSoup
import time
#Website to get the indices
base_url = 'https://www.investing.com/indices/malaysia-indices?'
print('Scraping: ' + base_url)
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/72.0.3626.109 Safari/537.36'}
html_doc = requests.get(base_url, headers=headers).text
# parse the HTML contents using BeautifulSoup parser
soup = BeautifulSoup(html_doc, 'html.parser')
#KLCI
indiceKLCI = soup.select_one('#pair_29078 > td.bold.left.noWrap.elp.plusIconTd > a').text
LastA = soup.select_one('#pair_29078 > td.pid-29078-last').text
LastA = LastA.replace(",","")
HighA = soup.select_one('#pair_29078 > td.pid-29078-high').text
HighA = HighA.replace(",","")
LowA = soup.select_one('#pair_29078 > td.pid-29078-low').text
LowA = LowA.replace(",","")
#Malaysia ACE
indiceMalaysiaACE = soup.select_one('#pair_29075 > td.bold.left.noWrap.elp.plusIconTd > a').text
LastB = soup.select_one('#pair_29075 > td.pid-29075-last').text
LastB = LastB.replace(",","")
HighB = soup.select_one('#pair_29075 > td.pid-29075-high').text
HighB = HighB.replace(",","")
LowB = soup.select_one('#pair_29075 > td.pid-29075-low').text
LowB = LowB.replace(",","")
#FTSE BM Mid 70
indiceFTSEBMMid70 = soup.select_one('#pair_29076 > td.bold.left.noWrap.elp.plusIconTd > a').text
LastC = soup.select_one('#pair_29076 > td.pid-29076-last').text
LastC = LastC.replace(",","")
HighC = soup.select_one('#pair_29076 > td.pid-29076-high').text
HighC = HighC.replace(",","")
LowC = soup.select_one('#pair_29076 > td.pid-29076-low').text
LowC = LowC.replace(",","")
#Malaysia Top 100
indiceMalaysiaTop100 = soup.select_one('#pair_29077 > td.bold.left.noWrap.elp.plusIconTd > a').text
LastD = soup.select_one('#pair_29077 > td.pid-29077-last').text
LastD = LastD.replace(",","")
HighD = soup.select_one('#pair_29077 > td.pid-29077-high').text
HighD = HighD.replace(",","")
LowD = soup.select_one('#pair_29077 > td.pid-29077-low').text
LowD = LowD.replace(",","")
indice_name = [indiceKLCI, indiceMalaysiaACE,
indiceFTSEBMMid70, indiceMalaysiaTop100]
Last = [LastA, LastB, LastC, LastD]
High = [HighA, HighB, HighC, HighD]
Low = [LowA, LowB, LowC, LowD]
Time = [time.strftime('%H:%M'),time.strftime('%H:%M') ,
time.strftime('%H:%M'), time.strftime('%H:%M')]
Date = [time.strftime('%d-%b-%Y'),time.strftime('%d-%b-%Y'),
time.strftime('%d-%b-%Y'),time.strftime('%d-%b-%Y')]
# save the scraped prices to a file whose name contains the
# current datetime
file_name = 'indices_' + time.strftime('%d-%b-%Y_%H-%M') + '.csv'
with open(file_name, 'w') as f:
for A, B, C, D, G, H in zip(indice_name, Last, High,
Low, Date, Time):
f.write(A + ',' + B + ',' + C + ',' + D + ',' + '[' + G + '|' + H + ']' + '\n')
|
ammar1y/Data-Mining-Assignment
|
Web crawlers/Malaysian stock indices crawler.py
|
Malaysian stock indices crawler.py
|
py
| 3,548 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20972621530
|
import sys
import random
import math
from tools.model import io
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from detection import box, anchors, display, evaluate, loss
import argparse
from detection.models import models
from tools.image import cv
def random_box(dim, num_classes):
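# Generate a random box in (cx, cy, w, h) form inside an image of size dim.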
cx = random.uniform(0, dim[0])
cy = random.uniform(0, dim[1])
sx = random.uniform(0.1, 0.2) * dim[0]
sy = random.uniform(0.1, 0.2) * dim[1]
return (cx, cy, sx, sy)
if __name__ == '__main__':
random.seed(0)
torch.manual_seed(0)
parser = argparse.ArgumentParser(description='Test model')
parser.add_argument('--model', action='append', default=[],
help='model type and sub-parameters e.g. "unet --dropout 0.1"')
args = parser.parse_args()
print(args)
num_classes = 2
model_args = {'num_classes':num_classes, 'input_channels':3}
creation_params = io.parse_params(models, args.model)
model, encoder = io.create(models, creation_params, model_args)
print(model)
batches = 1
dim = (512, 512)
images = Variable(torch.FloatTensor(batches, 3, dim[1], dim[0]).uniform_(0, 1))
loc_preds, class_preds = model.cuda()(images.cuda())
def random_target():
num_boxes = random.randint(1, 50)
boxes = torch.Tensor ([random_box(dim, num_classes) for b in range(0, num_boxes)])
boxes = box.point_form(boxes)
label = torch.LongTensor(num_boxes).random_(0, num_classes)
return (boxes, label)
target_boxes = [random_target() for i in range(0, batches)]
target = [encoder.encode(dim, boxes, label) for boxes, label in target_boxes]
loc_target = Variable(torch.stack([loc for loc, _ in target]).cuda())
class_target = Variable(torch.stack([classes for _, classes in target]).cuda())
# print((loc_target, class_target), (loc_preds, class_preds))
print(loss.total_loss( (loc_target, class_target), (loc_preds, class_preds) ))
detections = encoder.decode_batch(images.detach(), loc_preds.detach(), class_preds.detach())
classes = {}
for i, (boxes, label, confs), (target_boxes, target_label) in zip(images.detach(), detections, target_boxes):
score = evaluate.mAP(boxes, label, confs, target_boxes.type_as(boxes), target_label.type_as(label), threshold = 0.1)
print(score)
# noise = target_boxes.clone().uniform_(-20, 30)
# score = evaluate.mAP(target_boxes + noise, target_label, torch.arange(target_label.size(0)), target_boxes, target_label, threshold=0.5)
# print(score)
# i = i.permute(1, 2, 0)
# key = cv.display(display.overlay(i, boxes, label, confidence=confs))
# if(key == 27):
# break
#print(boxes)
#loss = MultiBoxLoss(num_classes)
#target = (Variable(boxes.cuda()), Variable(label.cuda()))
#print(loss(out, target))
|
oliver-batchelor/detection
|
models/test.py
|
test.py
|
py
| 2,957 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6905516706
|
"""
Python Tutorial: https://docs.python.org/3/tutorial/errors.html
Errors & Exceptions
"""
def myf(x, y):
x/y
try:
# raise ZeroDivisionError("text of an exc...")
myf(4, 0)
# except BaseException as err:
# print(f"Base! {err}")
except ZeroDivisionError as err:
print(f"zero! {err}")
except TypeError as err:
print(f"I won't reach this stage! {err}")
else:
print('all went fine')
finally:
print('\nCLEANING THIS MESS')
|
hqpiotr/learning-python
|
0. Python Tutorial - docs/exceptions.py
|
exceptions.py
|
py
| 454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1498361105
|
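# Generate the first n+1 Fibonacci numbers, keep those whose value does not
# exceed n, and print the sum of the even ones.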
n = int(input("Enter an integer: "))
a = 0
b = 1
count = 0
sum1 = 0
lst = list()
listfib = list()
listeven = list()
sum2 = 0
while count <= n:
lst.append(sum1)
count += 1
a = b
b = sum1
sum1 = a + b
for j in lst:
if j <= n:
listfib.append(j)
for k in listfib:
if k % 2 == 0:
listeven.append(k)
for l in listeven:
sum2 += l
print("The sum of even numbers of fibonacci sequence {} is: {}".format(
listfib, sum2))
|
BRAVO68WEB/codetantra-py-labs
|
Lab4b.py
|
Lab4b.py
|
py
| 463 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74471362747
|
from faculty import Faculty
from student import Student
class University:
"""A class representing a university.
Attributes:
faculties (list[Faculty]): A list of faculties in the university.
"""
def __init__(
self,
uni_dict_data: dict = None,
):
"""Initializes a new instance of the University class.
Args:
uni_dict_data (dict): The dictionary data of the university.
"""
self.faculties = []
self.students = []
if uni_dict_data:
self.load_university(uni_dict_data)
def __str__(
self,
):
"""Returns a string representation of the University object."""
return f"Technical University of Moldova with {len(self.faculties)} faculties"
def create_faculty(
self,
name: str,
abbreviation: str,
study_field: str,
) -> Faculty:
"""Creates a new faculty with the given name, abbreviation and study field and adds it to the list of faculties.
Args:
name (str): The name of the faculty.
abbreviation (str): The abbreviation or short name of the faculty.
study_field (str): The field of study or specialization of the faculty.
Returns:
Faculty: The newly created faculty.
"""
faculty = Faculty(name, abbreviation, study_field)
self.faculties.append(faculty)
return faculty
def create_student(
self,
faculty_id: str,
first_name: str,
last_name: str,
email: str,
enrollment_date: str,
graduation_status: bool,
birth_date: str,
) -> Student:
"""Creates a new student with the given first name, last name, email, enrollment date, graduation status and
birth date and adds it to the list of students.
Args:
faculty_id (str): The ID of the faculty to add the student to.
first_name (str): The first name of the student.
last_name (str): The last name of the student.
email (str): The email address of the student.
enrollment_date (str): The date when the student was enrolled.
graduation_status (bool): The graduation status of the student.
birth_date (str): The date of birth of the student.
Returns:
Student: The newly created student.
"""
student = Student(first_name, last_name, email, enrollment_date, graduation_status, birth_date)
self.students.append(student)
self.faculties[int(faculty_id)].students.append(student)
return student
def load_university(
self,
uni_dict_data: dict,
) -> None:
"""Loads university data from the given location.
Args:
uni_dict_data (dict): The dictionary data of the university.
"""
for index, faculty in enumerate(uni_dict_data["faculties"]):
self.faculties.append(Faculty(faculty_dict_data=faculty))
self.students.extend(self.faculties[index].students)
def to_dict(
self,
) -> dict:
"""Returns the university as a dictionary.
Returns:
dict: The university as a dictionary.
"""
return {
"faculties": [faculty.to_dict() for faculty in self.faculties],
}
|
pyramixofficial/OOP
|
second_lab/university.py
|
university.py
|
py
| 3,390 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37379394096
|
try:
import Image
import ImageDraw
except:
from PIL import Image
from PIL import ImageDraw
import glob
import numpy as np
import os
import sys
def image_clip(img_path, size):
# convert the image to an array for tiling and compute how many full rows and columns fit
imarray = np.array(Image.open(img_path))
imshape = imarray.shape
image_col = int(imshape[1]/size[1])
image_row = int(imshape[0]/size[0])
img_name= img_path.split(".")[0].split("\\")[1]
# two for loops cut the fully covered region into tiles and save each tile image
for row in range(image_row):
for col in range(image_col):
clipArray = imarray[row*size[0]:(row+1)*size[0],col*size[1]:(col+1)*size[1]]
clipImg = Image.fromarray(clipArray)
folder = os.path.exists("E:/wangyu_file/GID/Fine Land-cover Classification_15classes/image_RGB/clip")
# check whether the output folder exists and create it if it does not
if not folder: # create the folder when it does not exist
os.makedirs("E:/wangyu_file/GID/Fine Land-cover Classification_15classes/image_RGB/clip") # makedirs also creates missing parent directories in the path
img_filepath = 'E:/wangyu_file/GID/Fine Land-cover Classification_15classes/image_RGB/clip/' + img_name + "_" +str(row) + "_" + str(col) + ".tif"
clipImg.save(img_filepath)
if __name__=='__main__':
img_dir = 'E:/wangyu_file/GID/Fine Land-cover Classification_15classes/image_RGB/'
# img_dir = 'E:/wangyu_file/GID/Fine Land-cover Classification_15classes/label_15classes/'
imgs = glob.glob('{}*.tif'.format(img_dir))
for img in imgs:
image_clip(img, [512, 512])
|
faye0078/RS-ImgShp2Dataset
|
train_example/model/Fast_NAS/data/slip_img.py
|
slip_img.py
|
py
| 1,752 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
14896890650
|
"""empty message
Revision ID: 97dd2d43d5f4
Revises: d5e28ae20d48
Create Date: 2018-05-30 00:51:39.536518
"""
# revision identifiers, used by Alembic.
revision = '97dd2d43d5f4'
down_revision = 'd5e28ae20d48'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('exam', sa.Column('hidden', sa.Boolean(), server_default=sa.literal(False), nullable=False))
op.add_column('exam_version', sa.Column('hidden', sa.Boolean(), server_default=sa.literal(False), autoincrement=False, nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('exam_version', 'hidden')
op.drop_column('exam', 'hidden')
# ### end Alembic commands ###
|
duvholt/memorizer
|
migrations/versions/97dd2d43d5f4_.py
|
97dd2d43d5f4_.py
|
py
| 828 |
python
|
en
|
code
| 16 |
github-code
|
6
|
25069435045
|
from typing import List, Any, Tuple
from ups_lib.av_request import AddressValidationRequest
from purplship.core.utils import (
XP,
DP,
request as http,
exec_parrallel,
Serializable,
Deserializable,
Envelope,
Pipeline,
Job,
)
from purplship.api.proxy import Proxy as BaseProxy
from purplship.mappers.ups.settings import Settings
class Proxy(BaseProxy):
settings: Settings
def _send_request(self, path: str, request: Serializable[Any]) -> str:
return http(
url=f"{self.settings.server_url}{path}",
data=bytearray(request.serialize(), "utf-8"),
headers={"Content-Type": "application/xml"},
method="POST",
)
def validate_address(
self, request: Serializable[AddressValidationRequest]
) -> Deserializable[str]:
response = self._send_request("/webservices/AV", request)
return Deserializable(response, XP.to_xml)
def get_rates(self, request: Serializable[Envelope]) -> Deserializable[str]:
response = self._send_request("/webservices/Rate", request)
return Deserializable(response, XP.to_xml)
def get_tracking(
self, request: Serializable[List[str]]
) -> Deserializable[List[Tuple[str, dict]]]:
"""
get_tracking makes parallel requests for each tracking number
"""
def get_tracking(tracking_number: str):
return tracking_number, http(
url=f"{self.settings.server_url}/track/v1/details/{tracking_number}",
headers={
"Accept": "application/json",
"Content-Type": "application/json",
"AccessLicenseNumber": self.settings.access_license_number,
"Username": self.settings.username,
"Password": self.settings.password,
},
method="GET",
)
responses: List[str] = exec_parrallel(get_tracking, request.serialize())
return Deserializable(
responses,
lambda res: [
(num, DP.to_dict(track)) for num, track in res if any(track.strip())
],
)
def create_shipment(self, request: Serializable[Envelope]) -> Deserializable[str]:
response = self._send_request("/webservices/Ship", request)
return Deserializable(response, XP.to_xml)
def cancel_shipment(self, request: Serializable) -> Deserializable[str]:
response = self._send_request("/webservices/Ship", request)
return Deserializable(response, XP.to_xml)
def schedule_pickup(self, request: Serializable[Pipeline]) -> Deserializable[str]:
def process(job: Job):
if job.data is None:
return job.fallback
return self._send_request("/webservices/Pickup", job.data)
pipeline: Pipeline = request.serialize()
response = pipeline.apply(process)
return Deserializable(XP.bundle_xml(response), XP.to_xml)
def modify_pickup(self, request: Serializable[Pipeline]) -> Deserializable[str]:
def process(job: Job):
if job.data is None:
return job.fallback
return self._send_request("/webservices/Pickup", job.data)
pipeline: Pipeline = request.serialize()
response = pipeline.apply(process)
return Deserializable(XP.bundle_xml(response), XP.to_xml)
def cancel_pickup(self, request: Serializable[Envelope]) -> Deserializable[str]:
response = self._send_request("/webservices/Pickup", request)
return Deserializable(response, XP.to_xml)
|
danh91/purplship
|
sdk/extensions/ups/purplship/mappers/ups/proxy.py
|
proxy.py
|
py
| 3,654 |
python
|
en
|
code
| null |
github-code
|
6
|
15512949000
|
#This is for the introduction and Asking user info
#Asking user their name and checking if it is correct
def name():
name_1=input("What is your name?")
right_name=input("Your name is {}. Is this correct? press [y/n]".format(name_1))
if right_name == 'y':
print("Hi {}. Welcome to my Car theft prevention app.".format(name_1))
else:
right_name_2 = input("Please enter your correct name.")
print("Hi {}. Welcome to my Car theft prevention app.".format(right_name_2))
name()
|
karthik-create/Car_theft-
|
intro.py
|
intro.py
|
py
| 513 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70211001788
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from cer_manager.views import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'cer_manager.views.home', name='home'),
# url(r'^cer_manager/', include('cer_manager.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^index/$', cer_list),
url(r'^canshu/(.+)/$',insert),
url(r'^test/$',test),
url(r'^insert/$',insert),
url(r'^modify/(.+)/$',modify),
url('^css/(?P<path>.*)$','django.views.static.serve',{'document_root':settings.STATIC_ROOT_CSS}),
url('^js/(?P<path>.*)$','django.views.static.serve',{'document_root':settings.STATIC_ROOT_JS}),
)
|
colive/cer_manager
|
urls.py
|
urls.py
|
py
| 1,030 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19583758283
|
"""
This file is used to perform a random hyperparameter search on the Coco dataset using the baseline image captioner.
For more info on the ImageCaptionerBaseline class, please check out the docstrings in the image_captioning.py file.
"""
# Package loading
import argparse
import os
import sys
sys.path.append('..')
# Depending on the platform/IDE used, the home directory might be the socraticmodels or the
# socraticmodels/scripts directory. The following ensures that the current directory is the scripts folder.
try:
os.chdir('scripts')
except FileNotFoundError:
pass
# Local imports
from scripts.image_captioning import ImageCaptionerBaseline
def parse_arguments():
"""
Parses the arguments for the baseline COCO captioning hyperparameter tuning.
:return:
"""
# init argparser
parser = argparse.ArgumentParser(description='Baseline Image Captioning Hyperparameter tuning')
# Additional variables
parser.add_argument('--n-images', type=int, default=50, help='# images to include in the dataset')
parser.add_argument('--set-type', type=str, default='train', help='train/valid/test set')
parser.add_argument('--n-iterations', type=int, default=100, help='# of run iterations')
parser.add_argument('--n-captions', type=int, default=10, help='# captions the LM should generate')
parser.add_argument('--lm-max-length', type=int, default=40, help='max output length the LM should generate')
parser.add_argument('--lm-do-sample', type=bool, default=True, help='whether to use sampling during generation')
parser.add_argument('--lm-temp-min', type=float, default=0.5, help='minimum temperature param for the lm')
parser.add_argument('--lm-temp-max', type=float, default=1, help='maximum temperature param for the lm')
parser.add_argument('--n-objects-min', type=int, default=5, help='minimum number of objects in the LM prompt')
parser.add_argument('--n-objects-max', type=int, default=15, help='maximum number of objects in the LM prompt')
parser.add_argument('--n-places-min', type=int, default=1, help='minimum number of places in the LM prompt')
parser.add_argument('--n-places-max', type=int, default=6, help='maximum number of places in the LM prompt')
parser.add_argument('--caption-strategies', nargs="+", default=None)
# parse args
args = parser.parse_args()
return args
if __name__ == '__main__':
# Parse the arguments.
args = parse_arguments()
# Instantiate the baseline image captioner class.
image_captioner = ImageCaptionerBaseline(n_images=args.n_images, set_type=args.set_type)
# Run the hyperparameter search
image_captioner.random_parameter_search(
n_iterations=args.n_iterations, n_captions=args.n_captions, lm_max_length=args.lm_max_length,
lm_do_sample=args.lm_do_sample, lm_temp_min=args.lm_temp_min, lm_temp_max=args.lm_temp_max,
n_objects_min=args.n_objects_min, n_objects_max=args.n_objects_max, n_places_min=args.n_places_min,
n_places_max=args.n_places_max, caption_strategies=args.caption_strategies
)
|
milenakapralova/socraticmodels
|
scripts/coco_caption_base_hp_tune.py
|
coco_caption_base_hp_tune.py
|
py
| 3,103 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34131870149
|
import time
import numpy as np
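# Project Euler 64: expand sqrt(n) as a periodic continued fraction for each n up to N
# and count how many expansions have an odd period.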
def num_with_sqr_lt_n(n):
while not np.sqrt(n).is_integer():
n-=1
return int(np.sqrt(n))
def process(n, state):
num = state[0]
sub = state[1]
den = n - sub**2
if den%num==0:
den = den/num
sub = -1*sub
num = np.sqrt(n)+sub
#print('\n',den,sub,num,'\n')
a = 0
while num/den > 1:
a += 1
sub -= den
num -= den
#print('\n',den,sub,num,'\n')
return (den,sub,a)
def print_fn(n,state):
print('\n '+str(int(state[0])))
print('-----------')
print(u'\u221A'+str(n)+' + ('+str(int(state[1]))+')\n')
if __name__ == '__main__':
N = 13
odd_count = 0
stime = time.time()
for n in range(2,N+1):
print("\nN = %d "%(n))
a0 = num_with_sqr_lt_n(n)
a = [a0]
if a0 == np.sqrt(n):
#print(a)
continue
states = [(1,-1*a0)]
#print_fn(n,states[0])
currState = None
while True:
if currState == None:
currState = states[-1]
currState = process(n,currState)
a.append(currState[-1])
currState = (currState[0],currState[1])
if currState in states:
break
states.append(currState)
#print_fn(n,currState)
if len(a)%2==0:
odd_count+=1
print('['+str(a[0])+';('+','.join(list(map(str,a[1:])))+')]')
print("\nNumber of Continues Fractions having Odd Period : %d"%odd_count)
print("\nTotal Time Taken : %.3f seconds\n"%(time.time()-stime))
|
sadimanna/project_euler
|
p64.py
|
p64.py
|
py
| 1,386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10173968880
|
from configparser import ConfigParser
# get the configparser object
config_object = ConfigParser()
# set config
config_object["SERVERCONFIG_BROWSER"] = {
"host": "127.0.0.1",
"port": "8888",
"web_directory": "www/"
}
config_object["SERVERCONFIG"] = {
"host": "127.0.0.1",
"port": "8080",
}
# Write the above sections to config.ini file
with open('config.ini', 'w') as conf:
config_object.write(conf)
|
kaumnen/diy-http-server
|
config/config.py
|
config.py
|
py
| 428 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17287700821
|
import yaml
import argparse
from jinja2 import Environment, FileSystemLoader, Template
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--jobs',
required=True)
parser.add_argument('--job_config',
required=True)
return parser.parse_args()
def get_commandline(args):
config_data = yaml.load(open(args.job_config))
job_data = config_data[args.jobs]
env = Environment(loader=FileSystemLoader('Templates'), trim_blocks=True, lstrip_blocks=True)
template = env.get_template(args.jobs)
return template.render(job_data)
def main():
args = get_args()
commandline = get_commandline(args)
print(commandline)
if __name__ == '__main__':
main()
|
Chappers1992/Variability
|
run.py
|
run.py
|
py
| 759 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13155871471
|
import serial
import time
# pass in upper and lower 8 bit values
# returns the 16 bit value as an int
# def PrintContcatBytes(valueOne, valueTwo):
# print bin(valueOne)[2:].rjust(8,'0')
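# Container for one parsed sensor frame: particulate matter readings
# (pm10, pm25, pm100) and particle counts per size bin (num3 ... num100).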
class ReturnValue(object):
def __init__(self, valid, pm10, pm25, pm100, num3, num5, num10, num25, num50, num100):
self.valid = valid
self.pm10 = pm10
self.pm25 = pm25
self.pm100 = pm100
self.num3 = num3
self.num5 = num5
self.num10 = num10
self.num25 = num25
self.num50 = num50
self.num100 = num100
def ConcatBytes(valueOne, valueTwo):
return int(bin(valueOne)[2:].rjust(8, '0') + bin(valueTwo)[2:].rjust(8, '0'), 2)
def readlineCRtest(port):
for i in range(256):
for j in range(256):
if i * 256 + j != ConcatBytes(i, j):
print (i, j, i * 256 + j, ConcatBytes(i, j))
print (i)
def readlineCR(port):
"""
Output values are explained here: https://www.dfrobot.com/wiki/index.php/PM2.5_laser_dust_sensor_SKU:SEN0177#Communication_protocol
:param port:
:return:
"""
data = []
summation = 0
data.append(ord(port.read()))
data.append(ord(port.read()))
# data.append(22)
# data.append(17)
# print (int("42", 16), int("4d", 16))
# print int(bin(32)[2:].rjust(8, '0'),2)
# print (data[0], data[1])
while (data[0] != int("42", 16) or data[1] != int("4d", 16)): # scan until the 0x42 0x4d frame header is aligned
print("failed - scooting over")
data.pop(0)
data.append(ord(port.read()))
summation += data[0] + data[1]
while len(data) < 17:
upperVal = ord(port.read())
lowerVal = ord(port.read())
if len(data) < 16:
summation += upperVal
summation += lowerVal
data.append(ConcatBytes(upperVal, lowerVal))
# for message in data:
# print message
# print "Last num should be: ", summation
if data[16] != summation:
return ReturnValue(False, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# print (int(message[0], 16))
# print (len(message), message)
# if (int(message[0], 16) != int("42", 16) or len(message) > 1 and int(message[1], 16) != int("4d", 16)):
#     print("character deleted to scoot over")
#     message = message[1:]
return ReturnValue(True, data[3], data[4], data[5], data[9], data[10], data[11], data[12], data[13], data[14])
# if ch == '\r' or ch == chr(66) or ch == '':
# return rv
port = serial.Serial("/dev/serial0", baudrate=9600, timeout=2)
while True:
boxOfStuff = readlineCR(port)
port.write(b"I typed stuff")
if boxOfStuff.valid:
print(boxOfStuff.pm10, boxOfStuff.pm25, boxOfStuff.pm100, boxOfStuff.num3, boxOfStuff.num5, boxOfStuff.num10,
boxOfStuff.num25, boxOfStuff.num50, boxOfStuff.num100)
else:
print("message failed")
|
learnlafayette/sensors
|
sensors/sensors/test/samples/pm_sample.py
|
pm_sample.py
|
py
| 2,794 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32412548511
|
### Spine by Chris Alexander
# Standard imports
# Custom imports
import DataFormat
import Interface
# Interface Input class
class Input(Interface.I, DataFormat.Format):
# The max number of bytes to receive from the socket
maxBytesReceive = 2048
# Initialise the Input Interface
def __init__(self, host = None, port = None):
# Init the parent Interface
super(Input, self).__init__(host, port)
# Initialisation for the Input
def initialise(self):
# Initialise the parent interface
super(Input, self).initialise()
# Bind the socket
self.socket.bind((self.host, self.port))
# Receive a single packet from the socket
def receive(self, ignoreFormat = 0, ignoreTransform = 0):
data, addr = self.socket.recvfrom(self.maxBytesReceive)
if self.dataformat and not ignoreFormat:
data = self.dataformat.inputConvert(data)
if self.transform and not ignoreTransform:
data = self.applyTransform(data)
return data
|
arnie-robot/Spine
|
src/Interface/input.py
|
input.py
|
py
| 1,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21340780197
|
import sqlite3
from recipe import Recipe, Quantity, stringsToQuantities
class Database:
def __init__(self,database):
self.connection = sqlite3.connect(database)
self.c = self.connection.cursor()
# self.c.execute("""DROP TABLE IF EXISTS ingredients""")
# self.c.execute("""DROP TABLE IF EXISTS recipes""")
# self.c.execute("""DROP TABLE IF EXISTS instructions""")
# self.c.execute("""DROP TABLE IF EXISTS recipeingredients""")
# self.c.execute("""DROP TABLE IF EXISTS recipeinstructions""")
#Create ingredients
self.c.execute("""CREATE TABLE IF NOT EXISTS ingredients (
ingredientID INTEGER PRIMARY KEY,
name TEXT,
UNIQUE(name)
)
""")
self.c.execute("""CREATE TABLE IF NOT EXISTS recipes (
recipeID INTEGER PRIMARY KEY,
name TEXT,
UNIQUE(name)
)
""")
self.c.execute("""CREATE TABLE IF NOT EXISTS instructions (
instructionID INTEGER PRIMARY KEY,
instruction TEXT,
num INTEGER
)
""")
self.c.execute("""CREATE TABLE IF NOT EXISTS recipeingredients (
recipeID INTEGER NOT NULL,
ingredientID INTEGER NOT NULL,
quantity TEXT,
FOREIGN KEY (recipeID) REFERENCES recipes(recipeID),
FOREIGN KEY (ingredientID) REFERENCES ingredients(ingredientID)
)
""")
self.c.execute("""CREATE TABLE IF NOT EXISTS recipeinstructions (
recipeID INTEGER NOT NULL,
instructionID INTEGER NOT NULL,
FOREIGN KEY (recipeID) REFERENCES recipes(recipeID),
FOREIGN KEY (instructionID) REFERENCES instructions(instructionID)
)
""")
def addRecipe(self,recipe):
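# Insert the recipe, its ingredients and its instructions, then link them through
# the recipeingredients and recipeinstructions join tables.
# Returns None if a recipe with the same name already exists.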
#self.c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (?)",(recipe.name,))
hold = self.c.execute("SELECT recipeID FROM recipes WHERE recipes.name = ?",(recipe.name,)).fetchone()
if hold is not None:
return None
self.c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (?)",(recipe.name,))
recipeID = hold = self.c.execute("SELECT recipeID FROM recipes WHERE recipes.name = ?",(recipe.name,)).fetchone()[0]
ingredientIDList = []
if type(recipe.ingredients) is not str:
for ingredient in recipe.ingredients:
self.c.execute("INSERT OR IGNORE INTO ingredients (name) VALUES (?)",(ingredient,))
hold2 = self.c.execute("SELECT ingredientID FROM ingredients WHERE ingredients.name = ?",(ingredient,)).fetchone()
ingredientIDList.extend(hold2)
else:
self.c.execute("INSERT OR IGNORE INTO ingredients (name) VALUES (?)",(recipe.ingredients,))
hold2 = self.c.execute("SELECT ingredientID FROM ingredients WHERE ingredients.name = ?",(recipe.ingredients,)).fetchone()
ingredientIDList.extend(hold2)
instructionIDList = []
if type(recipe.instructions) is not str:
for index, instruction in enumerate(recipe.instructions):
self.c.execute("INSERT OR IGNORE INTO instructions (instruction,num) VALUES (?,?)",(instruction,index+1))
hold3 = self.c.execute("SELECT instructionID FROM instructions WHERE instructions.instruction = ?",(instruction,)).fetchone()
instructionIDList.extend(hold3)
else:
self.c.execute("INSERT OR IGNORE INTO instructions (instruction,num) VALUES (?,?)",(recipe.instructions,1))
hold2 = self.c.execute("SELECT instructionID FROM instructions WHERE instructions.instruction = ?",(recipe.instructions,)).fetchone()
instructionIDList.extend(hold2)
for instructionID in instructionIDList:
self.c.execute("INSERT OR IGNORE INTO recipeinstructions (recipeID,instructionID) VALUES (?,?)",(recipeID,instructionID))
for index, ingredientID in enumerate(ingredientIDList):
self.c.execute("INSERT OR IGNORE INTO recipeingredients (recipeID,ingredientID,quantity) VALUES (?,?,?)",(recipeID,ingredientID,recipe.quantities[index].getStorageString()))
return recipe
def deleteRecipe(self,recipe_name):
#returns None if not found
check = self.c.execute("SELECT * FROM recipes WHERE recipes.name = ?",(recipe_name,)).fetchone()
self.c.execute("DELETE FROM recipes WHERE recipes.name = ?",(recipe_name,))
return check
def getRecipe(self,recipe_name): #return either list or single answer, if list then print options
recipeID = self.c.execute("SELECT recipes.recipeID FROM recipes WHERE recipes.name = ?",(recipe_name,)).fetchone()
if recipeID == None:
return None
elif len(recipeID) == 1:
ingredientsQuantities = self.c.execute("SELECT ingredients.name, recipeingredients.quantity FROM ingredients INNER JOIN recipeingredients ON ingredients.ingredientID = recipeingredients.ingredientID AND recipeingredients.recipeID = ?",(recipeID[0],)).fetchall()
instructions = self.c.execute("SELECT instructions.instruction, instructions.num FROM instructions INNER JOIN recipeinstructions ON recipeinstructions.instructionID = instructions.instructionID AND recipeinstructions.recipeID = ?",(recipeID[0],)).fetchall()
ingredients, str_quantities = zip(*ingredientsQuantities)
recipe = Recipe(recipe_name, instructions, ingredients, stringsToQuantities(str_quantities))
return [recipe]
else: # == 0, return None
return None
def keyWordSearchRecipes(self, keyword):
names = [name[0] for name in self.c.execute("SELECT recipes.name FROM recipes").fetchall()]
includes_keyword = []
for name in names:
if keyword in name:
includes_keyword.append(name)
if len(includes_keyword) == 0:
return None
return includes_keyword
def keyWordSearchIngredients(self, keyword):
test = self.c.execute("SELECT ingredients.name FROM ingredients").fetchall()
ingredient_names = []
[ingredient_names.append(name[0]) for name in test if keyword in name[0] ]
recipe_names = set([])
for ingredient_name in ingredient_names:
ingredientID = self.c.execute("SELECT ingredients.ingredientID FROM ingredients WHERE ingredients.name = ?",(ingredient_name,)).fetchone()
recipe_name_list = self.c.execute("SELECT recipes.name FROM recipes INNER JOIN recipeingredients ON recipeingredients.ingredientID = ? AND recipes.recipeID = recipeingredients.recipeID",(ingredientID[0],)).fetchall()
[recipe_names.add(name[0]) for name in recipe_name_list]
return list(recipe_names)
def getRecipeList(self):
return [name[0] for name in self.c.execute("SELECT recipes.name FROM recipes").fetchall()]
def getAllRecipes(self):
return [self.getRecipe(name[0])[0] for name in self.c.execute("SELECT recipes.name FROM recipes").fetchall()]
def close(self):
self.connection.commit()
self.connection.close()
# c.execute("INSERT OR IGNORE INTO recipes (name) VALUES ('dicks')")
# c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (2)")
# c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (3)")
# c.execute("INSERT OR IGNORE INTO recipes (name) VALUES (1)")
# test = c.execute("SELECT recipeID, * FROM recipes""")
# for row in test:
# print(row[0])
# addRecipe("lovely dicks",["dicks","penis","cock"],["2","3","4"],["cook the cokes","eat my ass"])
# print(getRecipeList())
# print(getRecipe("lovely dicks"))
# print(getRecipe("lovely dick"))
# connection.commit()
# connection.close()
|
fcopp/RecipeApplication
|
backend/backend.py
|
backend.py
|
py
| 7,842 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1480464469
|
from jinja2 import DebugUndefined
from app.models import db, Order
from datetime import datetime
def seed_orders():
christian = Order(
userId=1,
gigId=2,
gigImage='https://nerdrr.s3.amazonaws.com/fruits-basket.jpg',
deliveryInstructions='Please mail directly to me.',
placed=datetime(2022, 6, 5, 8, 10, 10, 10),
due=datetime(2022, 6, 12, 8, 10, 10, 10)
)
james = Order(
userId=4,
gigId=4,
gigImage='https://nerdrr.s3.amazonaws.com/dnd-mini.jpg',
deliveryInstructions='Please mail directly to me.',
placed=datetime(2022, 5, 29, 8, 10, 10, 10),
due=datetime(2022, 6, 8, 8, 10, 10, 10)
)
sherman = Order(
userId=2,
gigId=1,
gigImage='https://nerdrr.s3.amazonaws.com/indie-game.jpg',
deliveryInstructions='Please mail directly to me.',
placed=datetime(2022, 3, 11, 8, 10, 10, 10),
due=datetime(2022, 4, 10, 8, 10, 10, 10)
)
brian = Order(
userId=3,
gigId = 3,
gigImage='https://nerdrr.s3.amazonaws.com/demon-slayer.jpg',
deliveryInstructions='Please mail directly to me.',
placed=datetime(2022, 6, 7, 8, 10, 10, 10),
due=datetime(2022, 6, 10, 8, 10, 10, 10)
)
db.session.add(christian)
db.session.add(james)
db.session.add(sherman)
db.session.add(brian)
db.session.commit()
def undo_orders():
db.session.execute('TRUNCATE orders RESTART IDENTITY CASCADE;')
db.session.commit()
|
Amlovern/nerdrr
|
app/seeds/orders.py
|
orders.py
|
py
| 1,402 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41791316904
|
import pytessy as pt
from PIL import ImageFilter, Image
if __name__ == "__main__":
# Create pytessy instance
ocrReader = pt.PyTessy()
files = ["cell_pic.jpg"]
for file in files:
# Load Image
img = Image.open(file)
# Scale up image
w, h = img.size
img = img.resize((2 * w, 2 * h))
# Sharpen image
img = img.filter(ImageFilter.SHARPEN)
# Convert to ctypes
imgBytes = img.tobytes()
bytesPerPixel = int(len(imgBytes) / (img.width * img.height))
# Use OCR on Image
imageStr = ocrReader.read(img.tobytes(), img.width, img.height, bytesPerPixel, raw=True, resolution=600)
print(file, imageStr)
|
TheNova22/OurVision
|
legacy1/testtessy.py
|
testtessy.py
|
py
| 628 |
python
|
en
|
code
| null |
github-code
|
6
|
24117960481
|
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
import numpy as np
from skimage.transform import resize
# hyper params
gamma = 0.98
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.data = []
self.lr = 0.002
# define architecture/layer parameters
self.input_channels = 3
self.conv_ch_l1 = 8
self.conv_ch_l2 = 12
self.height = 210
self.width = 160
self.kernel_size = 3
self.pool_size = 2
self.conv_out = 23256
self.fc1_size = 16
self.fc_out = 4
# define the actual layers
# define first convolutional layer
self.conv1 = nn.Conv2d(in_channels = self.input_channels,
out_channels = self.conv_ch_l1,
kernel_size = self.kernel_size)
# add batch normalization layer
self.batch_norm1 = nn.BatchNorm2d(self.conv_ch_l1)
# define max-pool layer
self.pool = nn.MaxPool2d(self.pool_size, self.pool_size)
# define second convolution layer
self.conv2 = nn.Conv2d(in_channels = self.conv_ch_l1,
out_channels = self.conv_ch_l2,
kernel_size = self.kernel_size)
# define batch normalization layer
self.batch_norm2 = nn.BatchNorm2d(self.conv_ch_l2)
# define fully connected layers
self.fc1 = nn.Linear(self.conv_out, self.fc1_size)
self.fc2 = nn.Linear(self.fc1_size, self.fc_out)
# define optimizer
self.optimizer = optim.Adam(self.parameters() , lr = self.lr)
def forward(self, x):
# pass input through conv layer
out = self.pool(F.relu(self.conv1(x)))
out = self.batch_norm1(out)
out = self.pool(F.relu(self.conv2(out)))
# print(out.size())
# exit()
out = self.batch_norm2(out)
# reshape the conv out before passing it to fully connected layer
_,b,c,d = out.size()
fc_shape = b*c*d
# print("FC input size : ", fc_shape)
out = out.view(-1, fc_shape)
# pass input through fully connected layer
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
return out
# save data for training
def put_data(self, item):
self.data.append(item)
    # once the episode is complete we train the policy on the recorded (reward, log_prob) pairs
def train_policy(self):
R = 0
for r, log_prob in self.data[::-1]:
R = r + gamma * R
loss = -log_prob * R
# clean the previous gradients
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.data = []
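# A worked sketch of the update above (illustrative, not from the original
# script): train_policy() walks the stored (reward, log_prob) pairs in reverse
# and accumulates the discounted return R = r_t + gamma * R_{t+1}.
# With gamma = 0.98 and rewards [1, 0, 2] collected over three steps:
#   R_2 = 2
#   R_1 = 0 + 0.98 * 2    = 1.96
#   R_0 = 1 + 0.98 * 1.96 = 2.9208
# Each step's loss is then -log_prob_t * R_t, pushing up the probability of
# actions that preceded large discounted returns.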
def main():
# create the environment
env = gym.make('Breakout-v0')
pi = Policy()
score = 0.0
print_interval = 20
num_episodes = 100
for n_epi in range(num_episodes):
state = env.reset()
for t in range(100000):
# state is an image with channel last.
# pre-processing steps:
# 1. make image grayscale
# 2. resize image
# 3. add first dimension for batch
# 4. convert image to tensor
#img = np.dot(state[:,:,:3], [0.2989, 0.5870, 0.1140])
#img = resize(img, (63, 48), anti_aliasing=True)
# now image is converted to single channel, add dimension for channel
#img = np.expand_dims(img, axis=0)
img = np.rollaxis(state, 2, 0)
prob = pi(torch.from_numpy(img).unsqueeze(0).float())
m = Categorical(prob)
a = m.sample()
state_prime, r, done, _ = env.step(a.item())
# print(prob.size())
# print(prob)
# print(a)
# print(a.size())
# exit()
print("Output : ", prob)
print("Action : ", a.item())
print("Reward : ", r)
pi.put_data((r,torch.log(prob[0,a])))
state = state_prime
score += r
if done:
print("Episode ended : ", n_epi+1)
break
# if the episode is completed, train policy on recorded observations
pi.train_policy()
if (n_epi+1)%print_interval == 0 and n_epi > 0 :
print("Episode : {}, avg_score : {}".format(n_epi,
score/print_interval)
)
score = 0
env.close()
if __name__ == '__main__':
main()
|
sachinumrao/pytorch_tutorials
|
cnn_breakout_rl.py
|
cnn_breakout_rl.py
|
py
| 4,757 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14150647036
|
import json
import re
from requests_toolbelt import MultipartEncoder
from todayLoginService import TodayLoginService
from liteTools import *
class AutoSign:
    # Initialise the sign-in helper
def __init__(self, todayLoginService: TodayLoginService, userInfo):
self.session = todayLoginService.session
self.host = todayLoginService.host
self.userInfo = userInfo
self.taskInfo = None
self.task = None
self.form = {}
self.fileName = None
    # Fetch the tasks that have not been signed in yet
def getUnSignTask(self):
LL.log(1, '获取未签到的任务')
headers = self.session.headers
headers['Content-Type'] = 'application/json'
        # First request: call the endpoint to obtain cookies (MOD_AUTH_CAS)
url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'
self.session.post(url, headers=headers,
data=json.dumps({}), verify=False)
        # Second request: actually retrieve the concrete tasks
res = self.session.post(url, headers=headers,
data=json.dumps({}), verify=False)
res = DT.resJsonEncode(res)
signLevel = self.userInfo.get('signLevel', 1)
if signLevel >= 0:
            taskList = res['datas']['unSignedTasks']  # tasks not yet signed
if signLevel >= 1:
            taskList += res['datas']['leaveTasks']  # tasks exempt from signing (leave)
if signLevel == 2:
            taskList += res['datas']['signedTasks']  # tasks already signed
        # Check whether the unsigned-task list is empty
LL.log(1, '获取到的签到任务列表', taskList)
if len(taskList) < 1:
LL.log(1, '签到任务列表为空')
raise TaskError('签到任务列表为空')
        # Automatically take the last unsigned task (when title == 0)
if self.userInfo['title'] == 0:
latestTask = taskList[0]
self.taskName = latestTask['taskName']
LL.log(1, '最后一个未签到的任务', latestTask['taskName'])
self.taskInfo = {'signInstanceWid': latestTask['signInstanceWid'],
'signWid': latestTask['signWid'], 'taskName': latestTask['taskName']}
return self.taskInfo
        # Otherwise look for the task whose title matches
for righttask in taskList:
if righttask['taskName'] == self.userInfo['title']:
self.taskName = righttask['taskName']
LL.log(1, '匹配标题的任务', righttask['taskName'])
self.taskInfo = {'signInstanceWid': righttask['signInstanceWid'],
'signWid': righttask['signWid'], 'taskName': righttask['taskName']}
return self.taskInfo
LL.log(1, '没有匹配标题的任务')
raise TaskError('没有匹配标题的任务')
    # Fetch the details of a historical sign-in task
def getHistoryTaskInfo(self):
        '''Fetch the details of a historical sign-in task'''
headers = self.session.headers
headers['Content-Type'] = 'application/json;charset=UTF-8'
        # Get the sign-in month calendar
url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuIntervalMonths'
res = self.session.post(url, headers=headers,
data=json.dumps({}), verify=False)
res = DT.resJsonEncode(res)
monthList = [i['id'] for i in res['datas']['rows']]
        monthList.sort(reverse=True)  # sort the months in descending order
        # Iterate month by month
for month in monthList:
            # Get the sign-in records for that historical month
req = {"statisticYearMonth": month}
url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuSignInfosByWeekMonth'
res = self.session.post(
url, headers=headers, data=json.dumps(req), verify=False)
res = DT.resJsonEncode(res)
monthSignList = list(res['datas']['rows'])
            # Walk through each day of the month and its sign-in records
monthSignList.sort(
                key=lambda x: x['dayInMonth'], reverse=True)  # sort the days in descending order
for daySignList in monthSignList:
                # Look for a historical signed task that matches the current one
for task in daySignList['signedTasks']:
if task['signWid'] == self.taskInfo['signWid']:
                        # Found a matching historical signed task; start updating the form
historyTaskId = {
"wid": task['signInstanceWid'], "content": task['signWid']}
                        # Refresh the cookie
url = f'{self.host}wec-counselor-sign-apps/stu/sign/getUnSeenQuestion'
self.session.post(url, headers=headers, data=json.dumps(
historyTaskId), verify=False)
                        # Fetch the historical task details
historyTaskId = {
"signInstanceWid": task['signInstanceWid'], "signWid": task['signWid']}
url = f'{self.host}wec-counselor-sign-apps/stu/sign/detailSignInstance'
res = self.session.post(
url, headers=headers, data=json.dumps(historyTaskId), verify=False)
res = DT.resJsonEncode(res)
                        # Additional requests to mimic the app's behaviour
url = f'{self.host}wec-counselor-sign-apps/stu/sign/queryNotice'
self.session.post(url, headers=headers,
data=json.dumps({}), verify=False)
url = f'{self.host}wec-counselor-sign-apps/stu/sign/getQAconfigration'
self.session.post(url, headers=headers,
data=json.dumps({}), verify=False)
                        # Some data post-processing
result = res['datas']
result['longitude'] = float(result['longitude'])
result['latitude'] = float(result['latitude'])
self.userInfo['lon'] = result['longitude']
self.userInfo['lat'] = result['latitude']
result['photograph'] = result['photograph'] if len(
result['photograph']) != 0 else ""
result['extraFieldItems'] = [{"extraFieldItemValue": i['extraFieldItem'],
"extraFieldItemWid": i['extraFieldItemWid']} for i in result['signedStuInfo']['extraFieldItemVos']]
                        # Return the result
LL.log(1, '历史签到情况的详情', result)
self.historyTaskInfo = result
return result
        # Nothing matched after traversing the whole history
LL.log(2, "没有找到匹配的历史任务")
return "没有找到匹配的历史任务"
def getDetailTask(self):
LL.log(1, '获取具体的签到任务详情')
url = f'{self.host}wec-counselor-sign-apps/stu/sign/detailSignInstance'
headers = self.session.headers
headers['Content-Type'] = 'application/json;charset=UTF-8'
res = self.session.post(url, headers=headers, data=json.dumps(
self.taskInfo), verify=False)
res = DT.resJsonEncode(res)
LL.log(1, '签到任务的详情', res['datas'])
self.task = res['datas']
    # Upload the photo to Aliyun OSS
def uploadPicture(self):
url = f'{self.host}wec-counselor-sign-apps/stu/oss/getUploadPolicy'
res = self.session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps({'fileType': 1}),
verify=False)
datas = DT.resJsonEncode(res).get('datas')
fileName = datas.get('fileName')
policy = datas.get('policy')
accessKeyId = datas.get('accessid')
signature = datas.get('signature')
policyHost = datas.get('host')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0'
}
multipart_encoder = MultipartEncoder(
            fields={  # set the form fields as required here
'key': fileName, 'policy': policy, 'OSSAccessKeyId': accessKeyId, 'success_action_status': '200',
'signature': signature,
'file': ('blob', open(RT.choicePhoto(self.userInfo['photo']), 'rb'), 'image/jpg')
})
headers['Content-Type'] = multipart_encoder.content_type
res = self.session.post(url=policyHost,
headers=headers,
data=multipart_encoder)
self.fileName = fileName
    # Get the URL of the uploaded picture
def getPictureUrl(self):
url = f'{self.host}wec-counselor-sign-apps/stu/sign/previewAttachment'
params = {'ossKey': self.fileName}
res = self.session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps(params),
verify=False)
photoUrl = DT.resJsonEncode(res).get('datas')
return photoUrl
    # Fill in the form
def fillForm(self):
LL.log(1, '填充表单')
if self.userInfo['getHistorySign']:
self.getHistoryTaskInfo()
hti = self.historyTaskInfo
self.form['isNeedExtra'] = self.task['isNeedExtra']
self.form['signInstanceWid'] = self.task['signInstanceWid']
            self.form['signPhotoUrl'] = hti['photograph']  # WARNING: uncertain whether this is the right field
self.form['extraFieldItems'] = hti['extraFieldItems']
self.form['longitude'], self.form['latitude'] = RT.locationOffset(
hti['longitude'], hti['latitude'], self.userInfo['global_locationOffsetRange'])
            # Check whether the location falls inside the sign-in area
self.form['isMalposition'] = 1
for place in self.task['signPlaceSelected']:
if MT.geoDistance(self.form['longitude'], self.form['latitude'], place['longitude'], place['latitude']) < place['radius']:
self.form['isMalposition'] = 0
break
self.form['abnormalReason'] = hti.get(
                'abnormalReason', '回家')  # WARNING: not found in the history data
self.form['position'] = hti['signAddress']
self.form['uaIsCpadaily'] = True
self.form['signVersion'] = '1.0.0'
else:
            # Decide whether the sign-in requires a photo
if self.task['isPhoto'] == 1:
self.uploadPicture()
self.form['signPhotoUrl'] = self.getPictureUrl()
else:
self.form['signPhotoUrl'] = ''
            # Check whether extra fields are required
self.form['isNeedExtra'] = self.task['isNeedExtra']
if self.task['isNeedExtra'] == 1:
extraFields = self.task['extraField']
userItems = self.userInfo['forms']
extraFieldItemValues = []
for i in range(len(extraFields)):
userItem = userItems[i]['form']
extraField = extraFields[i]
if self.userInfo['checkTitle'] == 1:
if userItem['title'] != extraField['title']:
raise Exception(
f'\r\n第{i + 1}个配置出错了\r\n您的标题为:{userItem["title"]}\r\n系统的标题为:{extraField["title"]}')
extraFieldItems = extraField['extraFieldItems']
flag = False
for extraFieldItem in extraFieldItems:
if extraFieldItem['isSelected']:
data = extraFieldItem['content']
if extraFieldItem['content'] == userItem['value']:
flag = True
extraFieldItemValue = {'extraFieldItemValue': userItem['value'],
'extraFieldItemWid': extraFieldItem['wid']}
                            # "Other" option: free-text item
if extraFieldItem['isOtherItems'] == 1:
flag = True
extraFieldItemValue = {'extraFieldItemValue': userItem['value'],
'extraFieldItemWid': extraFieldItem['wid']}
extraFieldItemValues.append(extraFieldItemValue)
if not flag:
raise Exception(
f'\r\n第{ i + 1 }个配置出错了\r\n表单未找到你设置的值:{userItem["value"]}\r\n,你上次系统选的值为:{ data }')
self.form['extraFieldItems'] = extraFieldItemValues
self.form['signInstanceWid'] = self.task['signInstanceWid']
self.form['longitude'] = self.userInfo['lon']
self.form['latitude'] = self.userInfo['lat']
            # Check whether the location falls inside the sign-in area
self.form['isMalposition'] = 1
for place in self.task['signPlaceSelected']:
if MT.geoDistance(self.form['longitude'], self.form['latitude'], place['longitude'], place['latitude']) < place['radius']:
self.form['isMalposition'] = 0
break
self.form['abnormalReason'] = self.userInfo['abnormalReason']
self.form['position'] = self.userInfo['address']
self.form['uaIsCpadaily'] = True
self.form['signVersion'] = '1.0.0'
LL.log(1, "填充完毕的表单", self.form)
def getSubmitExtension(self):
        '''Build the various extra parameters for submission'''
extension = {
"lon": self.userInfo['lon'],
"lat": self.userInfo['lat'],
"model": self.userInfo['model'],
"appVersion": self.userInfo['appVersion'],
"systemVersion": self.userInfo['systemVersion'],
"userId": self.userInfo['username'],
"systemName": self.userInfo['systemName'],
"deviceId": self.userInfo['deviceId']
}
self.cpdailyExtension = CpdailyTools.encrypt_CpdailyExtension(
json.dumps(extension))
self.bodyString = CpdailyTools.encrypt_BodyString(
json.dumps(self.form))
self.submitData = {
"lon": self.userInfo['lon'],
"version": self.userInfo['signVersion'],
"calVersion": self.userInfo['calVersion'],
"deviceId": self.userInfo['deviceId'],
"userId": self.userInfo['username'],
"systemName": self.userInfo['systemName'],
"bodyString": self.bodyString,
"lat": self.userInfo['lat'],
"systemVersion": self.userInfo['systemVersion'],
"appVersion": self.userInfo['appVersion'],
"model": self.userInfo['model'],
}
self.submitData['sign'] = CpdailyTools.signAbstract(self.submitData)
    # Submit the sign-in data
def submitForm(self):
LL.log(1, '提交签到信息')
self.getSubmitExtension()
headers = {
'User-Agent': self.session.headers['User-Agent'],
'CpdailyStandAlone': '0',
'extension': '1',
'Cpdaily-Extension': self.cpdailyExtension,
'Content-Type': 'application/json; charset=utf-8',
'Accept-Encoding': 'gzip',
'Host': re.findall('//(.*?)/', self.host)[0],
'Connection': 'Keep-Alive'
}
LL.log(1, '即将提交的信息', headers, self.submitData)
res = self.session.post(f'{self.host}wec-counselor-sign-apps/stu/sign/submitSign', headers=headers,
data=json.dumps(self.submitData), verify=False)
res = DT.resJsonEncode(res)
LL.log(1, '提交后返回的信息', res['message'])
return '[%s]%s' % (res['message'], self.taskInfo['taskName'])
|
zuiqiangdexianyu/ruoli-sign-optimization
|
actions/autoSign.py
|
autoSign.py
|
py
| 16,038 |
python
|
en
|
code
| null |
github-code
|
6
|
9837240055
|
from urllib.parse import urljoin
import requests
import json
from fake_useragent import UserAgent
from lxml import html
import re
from pymongo import MongoClient
ua = UserAgent()
movie_records = []
first = True
base_url = "https://www.imdb.com/"
url = "https://www.imdb.com/search/title/?genres=drama&groups=top_250&sort=user_rating,desc&ref_=adv_prv"
def scrape(url):
global first
resp = requests.get(url = url,headers={'User-Agent':ua.random})
tree = html.fromstring(resp.content)
movie_data = tree.xpath("//div[@class = 'lister-item-content']")
for movie in movie_data:
p = {
'name':movie.xpath(".//h3/a/text()")[0],
'year' : re.findall('\d+',movie.xpath(".//h3/span[@class='lister-item-year text-muted unbold']/text()")[0])[0],
'duration' : re.findall('\d+',movie.xpath(".//p/span[@class='runtime']/text()")[0])[0],
'rating' : movie.xpath(".//div[@class='ratings-bar']/div[contains(@class,'inline-block ratings-imdb-rating')]/@data-value")[0]
}
movie_records.append(p)
if first:
next_page = tree.xpath("//div[@class = 'desc']/a/@href")
first = False
else:
next_page = tree.xpath("//div[@class='desc']/a[2]/@href")
if len(next_page) != 0:
surl = urljoin(base = base_url,url=next_page[0])
print(surl)
scrape(surl)
def insert_to_db(list_records):
client = MongoClient("mongodb://<user_name>:<pwd>@cluster0-shard-00-00.rsxac.mongodb.net:27017,cluster0-shard-00-01.rsxac.mongodb.net:27017,cluster0-shard-00-02.rsxac.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-3xsr69-shard-0&authSource=admin&retryWrites=true&w=majority")
db = client['imdb_movies']
collection = db['movies']
    for m in list_records:
exists = collection.find_one({'name': m['name']})
if exists:
if exists['year'] != m['year'] :
collection.replace_one({'name': exists['name']}, m)
print(f"Old item: {exists} New Item: {m}")
else:
collection.insert_one(m)
client.close()
scrape(url)
insert_to_db(movie_records)
print('number of movies ',len(movie_records))
|
shreyashettyk/DE
|
Imdb_data_extraction/imdb.py
|
imdb.py
|
py
| 2,184 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1369504657
|
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time, datetime
from lcd import *
from Email import *
import server
lcd_init ()
GPIO.setmode(GPIO.BOARD)
print('System start/restart - ' + str(datetime.datetime.now()))
#Switch for Bin 1 to be connected to pin 18 and 3.3v pin
#Switch for Bin 2 to be connected to pin 16 and 3.3v pin
GPIO.setup(16, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(18, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
lcd_string(" Dust-O-Matic ",LCD_LINE_1)
#This function will run when the button is triggered
def Notifier(channel):
if channel==18:
print('Bin 1 Full - '+ str(datetime.datetime.now()))
lcd_string(' TRAILER #1 FULL ',LCD_LINE_2)
SendEmail('TRAILER 1 FULL - PLEASE COLLECT', "")
lcd_string(' TRAILER #2 Filling ',LCD_LINE_3)
elif channel==16:
print('Bin 2 Full - ' + str(datetime.datetime.now()))
lcd_string(' TRAILER #2 FULL ',LCD_LINE_2)
SendEmail('TRAILER 2 FULL - PLEASE COLLECT', "")
lcd_string(' TRAILER #1 Filling ',LCD_LINE_3)
GPIO.add_event_detect(18, GPIO.RISING)
GPIO.add_event_detect(16, GPIO.RISING)
while True:
#print('Looping')
lcd_string("LAN: " + get_ip_address('eth0'),LCD_LINE_4)
#lcd_string("WLAN: " + get_ip_address('wlan0'),LCD_LINE_4)
if GPIO.event_detected(18):
time.sleep(0.005) # debounce for 5mSec
# only show valid edges
if GPIO.input(18)==1:
#lcd_string('TRAILER #1 TRIGGERED',LCD_LINE_2)
Notifier(18)
if GPIO.event_detected(16):
time.sleep(0.005)
if GPIO.input(16)==1:
Notifier(16)
time.sleep(0.5)
GPIO.cleanup()
|
CraigHissett/TM_Timber
|
BinSensor/BinSensor.py
|
BinSensor.py
|
py
| 1,875 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11900486194
|
import dash
from dash import html
from matplotlib import container
from navbar import create_navbar
import dash_bootstrap_components as dbc
from dash import Dash, html, dcc, Input, Output
import plotly.express as px
import pandas as pd
f_sb2021 = pd.read_csv("f_sb2021.csv", on_bad_lines='skip', sep=';')
f_sb2022 = pd.read_csv("f_sb2022.csv", on_bad_lines='skip', sep=';')
C2021 = pd.read_csv("C2021.csv", on_bad_lines='skip', sep=',')
C2022 = pd.read_csv("C2022.csv", on_bad_lines='skip', sep=',')
Delitos_2010_2021 = pd.read_csv("Delitos_2010_2021.csv", on_bad_lines='skip', sep=',')
Violencia_G_2015_2022 = pd.read_csv("Violencia_G_2015_2022.csv", on_bad_lines='skip', sep=',')
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
nav = create_navbar()
nivel_sisben= f_sb2021['nivel_sisben']
grupo_sisben= f_sb2022["Grupo"]
values = Delitos_2010_2021['GENERO'].value_counts()
Genero = Delitos_2010_2021['GENERO'].unique()
values2 = Delitos_2010_2021['DIA_SEMANA'].value_counts()
armas = Delitos_2010_2021['DIA_SEMANA'].unique()
delitos_ano_mes= pd.DataFrame({'count' : Delitos_2010_2021.groupby( [ "ANO", "MES"] ).size()}).reset_index()
#gb21_sex = f_sb2021.groupby("sexo_persona")['sexo_persona'].count()
#fig = px.histogram(f_sb2021, x=gb21_sex.index, y=gb21_sex, histfunc='sum')
def generate_table(dataframe, max_rows=16):
return html.Table([
html.Thead(
html.Tr([html.Th(col) for col in dataframe.columns])
),
html.Tbody([
html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))
])
])
app.layout=html.Div([
html.H1('Data Visualization',style={'textAlign':'center'}),
html.Div([
html.P('Alcaldia de Bucaramanga',style={'textAlign':'center'}),
]),
html.Div([
html.Table(style={'width':'90%'},
children=[
html.Tr(style={'width':'50%'},
children=[
html.Td(
children=[
html.H1('Grupo de delitos por mes',
style={'textAlign':'center'}),
dcc.Graph(id='linegraph',
figure = px.line(delitos_ano_mes, x="MES", y='count', color='ANO'))
]
),html.Td(
children=[
html.H1('Delitos por Género',
style={'textAlign':'center'}),
dcc.Graph(id='piegraph',
figure = px.pie(Delitos_2010_2021, values=values, names=Genero))
]
)
]
),
html.Tr(style={'width':'50%'},
children=[
html.Td(style={'width':'50%'},
children=[
html.H1('Nivel de Sisben Año 2021',
style={'textAlign':'center'}),
dcc.Graph(id='bargraph',
figure = px.histogram(f_sb2021, x=nivel_sisben, color=nivel_sisben, barmode='group'))
]
),html.Td(style={'width':'50%'},
children=[
html.H1('Grupo de Sisben Año 2022',
style={'textAlign':'center'}),
dcc.Graph(id='bargraph2',
figure = px.histogram(f_sb2022, x=grupo_sisben, color=grupo_sisben, barmode='group'))
]
)
]
),
]
),
html.Table(style={'width':'90%'},
children=[html.Tr(
children=[
html.Td(style={'width':'100%'},
children=[
html.H1('Días de la semana vs Delitos',
style={'textAlign':'center'}),
dcc.Graph(id='piegraph2',
figure = px.pie(Delitos_2010_2021, values=values2, names=armas))
]
)
]
),]),
]),
# End of all content DIV
])
def create_page_home():
layout = html.Div([
nav,
#header,
app.layout
])
return layout
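# Hedged usage note (not in the original file): generate_table() is defined but
# never placed in app.layout; if a raw-data preview is wanted it could be added
# to the layout as, e.g., html.Div(generate_table(C2021)), which renders the
# first max_rows rows of that dataframe as an HTML table.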
|
jeanpierec/ljpiere_projects
|
DataScience_projects/Proyecto5_DS4ABucaramanga/home.py
|
home.py
|
py
| 4,956 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18100941624
|
"""
739. Daily Temperatures
https://leetcode.com/problems/daily-temperatures/
"""
from typing import List
from unittest import TestCase, main
class Solution:
def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
stack: List[int] = []
# List of indexes, not temperatures
answer = [0 for _ in range(len(temperatures))]
# Pick up index and temperature from temperatures one by one
        for idx, temperature in enumerate(temperatures):
            # Loop while the stack has an item and the current temperature is
            # greater than the peak in the stack.
            while len(stack) != 0 and temperatures[stack[-1]] < temperature:
peak_idx = stack.pop()
# idx - peak_idx will be the num of days you have to wait to get a warmer temperature.
answer[peak_idx] = idx - peak_idx
# Now the stack is empty or the peak in the stack is less than or equal to the current one,
# just push it to the stack
stack.append(idx)
return answer
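
# Hedged usage sketch (not part of the original solution file): it exercises the
# stack-based solution with the canonical LeetCode example via the unittest
# machinery already imported above.
class SolutionTest(TestCase):
    def test_example(self):
        # For [73,74,75,71,69,72,76,73] the expected waits are [1,1,4,2,1,1,0,0]
        self.assertEqual(
            Solution().dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]),
            [1, 1, 4, 2, 1, 1, 0, 0],
        )


if __name__ == "__main__":
    main()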
|
hirotake111/leetcode_diary
|
leetcode/739/solution.py
|
solution.py
|
py
| 1,081 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8271520226
|
##problem 16
import random
rolls = 10
success = 0
failure = 0
for i in range(rolls):
coinchoice = random.randint(1,3)
if (coinchoice == 1): ##heads in both faces
failure = failure+1
elif (coinchoice == 2): ##heads and tails
success = success+1
elif (coinchoice == 3): ##tails on both faces
failure = failure+1
probability = (success / failure)
print("The probability is", probability * 100)
##problem 22
k = 5
m = 10
n = 5
for i in range(k):
k = k-1
total = (m+n+1)
probability = m / total
print("The probability is" , probability)
|
jkiyak/CS355-Probability-and-Statistics-in-Computer-Science-
|
SP2019 CS 355_555-2E Probability/homework2.py
|
homework2.py
|
py
| 604 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20893678055
|
# I don't understand the question, so this answer was not mine, it was from reddit.
recipes = '084601'
score = '37'
elf1 = 0
elf2 = 1
while recipes not in score[-7:]:
score += str(int(score[elf1]) + int(score[elf2]))
elf1 = (elf1 + int(score[elf1]) + 1) % len(score)
elf2 = (elf2 + int(score[elf2]) + 1) % len(score)
print('Part 1:', score[int(recipes):int(recipes)+10])
print('Part 2:', score.index(recipes))
|
EricKim987/adventOfCode2018
|
day14/day14.py
|
day14.py
|
py
| 423 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14175319666
|
import numpy as np
__author__ = 'punki'
class LinearRegresion:
def __init__(self, reg_lambda, transofrmation):
self.transofrmation = transofrmation
self.reg_lambda = reg_lambda
self.w = []
def fit(self, training_data_set):
x = np.array([self.transofrmation(z[0],z[1]) for z in training_data_set.get_x()])
y = training_data_set.get_y()
a1 = x.T.dot(x)
a2 = self.reg_lambda * np.identity(len(a1))
a3 = a1 + a2
b1 = x.T.dot(y)
self.w = np.linalg.inv(a3).dot(b1)
def error(self, data_set):
t_x = np.array([self.transofrmation(z[0],z[1]) for z in data_set.get_x()])
predicted = [1 if x>=0 else -1 for x in t_x.dot(self.w)]
correct = data_set.get_y()
return len(correct[correct != predicted])/float(len(correct))
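# Sketch of the closed form implemented in fit() above (ridge / regularised
# least squares): w = (X^T X + reg_lambda * I)^(-1) X^T y.
# Hypothetical usage (names invented for illustration; the data set object is
# assumed to expose get_x() returning two-feature rows and get_y() returning labels):
#   quadratic = lambda x1, x2: np.array([1.0, x1, x2, x1 * x2, x1 ** 2, x2 ** 2])
#   model = LinearRegresion(reg_lambda=0.1, transofrmation=quadratic)
#   model.fit(training_set)
#   in_sample_error = model.error(training_set)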
|
tomasz-pankowski/LinearRegresion
|
common/LinearRegresion.py
|
LinearRegresion.py
|
py
| 835 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72729319867
|
# External dependencies
import openai
import io
import os
import tempfile
from datetime import datetime
from flask import render_template, request, url_for, redirect, flash, Response, session, send_file, Markup
from flask_login import login_user, login_required, logout_user, current_user
from flask_mail import Message
# Internal dependencies
from models import User, Log
from forms import SignupForm, LoginForm, RequestResetForm, ResetPasswordForm
from app import app, db, bcrypt, mail, login_manager, limiter
from prompt_template import prompt_template
# Security measures for the Heroku production environment
@app.before_request
def enforce_https():
if request.headers.get('X-Forwarded-Proto') == 'http' and not app.debug:
request_url = request.url.replace('http://', 'https://', 1)
return redirect(request_url, code=301)
@app.after_request
def set_hsts_header(response):
if request.url.startswith('https://'):
response.headers['Strict-Transport-Security'] = 'max-age=31536000' # One year
return response
@login_manager.user_loader
@limiter.limit("10/minute")
def load_user(user_id):
return User.query.get(int(user_id))
@app.route('/cleverletter/', methods=['GET', 'POST'])
@limiter.limit("10/minute")
def signup():
if current_user.is_authenticated:
return redirect(url_for('generator'))
form = SignupForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created! You are now able to log in', 'success')
return redirect(url_for('login'))
return render_template('signup.html', title='Sign Up', form=form)
@app.route('/cleverletter/login', methods=['GET', 'POST'])
@limiter.limit("10/minute")
def login():
if current_user.is_authenticated:
return redirect(url_for('generator'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for('dashboard'))
else:
flash('Login Unsuccessful. Please make sure you used the correct credentials', 'warning')
return render_template('login.html', form=form)
@app.route('/cleverletter/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/cleverletter/generator', methods=['GET', 'POST'])
@limiter.limit("5/minute")
def generator():
user_authenticated = current_user.is_authenticated
response = ""
job_title = ""
job_description = ""
employer_name = ""
employer_description = ""
additional_instructions = ""
if not current_user.is_authenticated:
flash(Markup('<a href="{}">Sign up</a> or <a href="{}">Login</a> to keep your CV and cover letter history'.format(url_for('signup'), url_for('login'))), 'warning')
# Retrieve CV from session for unauthenticated users or from the database for authenticated users
if current_user.is_authenticated and current_user.cv:
cv = current_user.cv
else:
cv = session.get('cv', "Your CV goes here")
if request.method == 'POST':
job_title = request.form.get('job_title')
job_description = request.form.get('job_description')
employer_name = request.form.get('employer_name')
employer_description = request.form.get('employer_description')
additional_instructions = request.form.get('additional_instructions')
session_cv = request.form.get('cv') # Assuming the CV is submitted as a form field
# Update CV in session for unauthenticated users
if not current_user.is_authenticated and session_cv:
session['cv'] = session_cv
cv = session_cv
if 'generate' in request.form:
if cv == "Your CV goes here":
flash('Please set your CV before generating a cover letter.', 'warning')
return render_template('dashboard.html', job_title=job_title, job_description=job_description,
employer_name=employer_name, employer_description=employer_description,
additional_instructions=additional_instructions)
prompt = prompt_template.format(cv=cv, job_title=job_title, job_description=job_description,
employer_name=employer_name, employer_description=employer_description,
additional_instructions=additional_instructions)
try:
response = get_completion(prompt)
except Exception as e:
flash('Error generating cover letter: {}'.format(str(e)), 'error')
return redirect(url_for('generator'))
# Save the response in the user's session
session['response'] = response
# Create a log entry only for authenticated users
if current_user.is_authenticated:
log = Log(job_title=job_title, employer_name=employer_name, user_id=current_user.id)
db.session.add(log)
try:
db.session.commit()
except Exception as e:
flash('Error saving log: {}'.format(str(e)), 'error')
return redirect(url_for('generator'))
# Save the response to a txt file in a temporary directory
filename = '{} - {} - {}.txt'.format(employer_name, job_title, datetime.now().strftime('%d-%b-%Y'))
temp_dir = tempfile.gettempdir()
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'w') as f:
f.write(response)
# Save the filename in the session
session['filename'] = file_path
elif 'clear' in request.form:
job_title = ""
job_description = ""
employer_name = ""
employer_description = ""
additional_instructions = ""
session['response'] = ""
elif 'download' in request.form:
# Get the filename from the session
file_path = session.get('filename')
if file_path and os.path.exists(file_path):
download_response = send_file(file_path, as_attachment=True)
os.remove(file_path) # delete the file after sending it
return download_response
else:
flash('No cover letter available for download.', 'warning')
return redirect(url_for('generator'))
return render_template('generator.html', response=response, job_title=job_title, job_description=job_description,
employer_name=employer_name, employer_description=employer_description,
additional_instructions=additional_instructions, cv=cv, user_authenticated=user_authenticated, user=current_user)
@app.route('/cleverletter/dashboard', methods=['GET', 'POST'])
@limiter.limit("5/minute")
def dashboard():
# Initialize CV with a default value
cv = "Your CV goes here"
logs = None
user_authenticated = current_user.is_authenticated
if request.method == 'POST':
# Handle CV form submission
new_cv = request.form.get('cv')
if new_cv:
if current_user.is_authenticated:
current_user.cv = new_cv
db.session.commit()
flash('CV updated successfully.', 'success')
else:
session['cv'] = new_cv
flash('CV saved to session successfully.', 'success')
# Fetch CV from the authenticated user or from the session
if current_user.is_authenticated:
cv = current_user.cv if current_user.cv else cv
# Fetch the logs from the database
page = request.args.get('page', 1, type=int)
per_page = 10
logs = Log.query.filter_by(user_id=current_user.id).order_by(Log.timestamp.desc()).paginate(page=page, per_page=per_page)
else:
cv = session.get('cv', cv) # Use the session value if available, otherwise use the default
return render_template('dashboard.html', user_authenticated=user_authenticated, user=current_user, cv=cv, logs=logs)
@app.route('/cleverletter/reset_request', methods=['GET', 'POST'])
@limiter.limit("5/minute")
def reset_request():
form = RequestResetForm()
message = None
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
send_reset_email(user)
message = 'An e-mail has been sent with instructions to reset your password.'
return render_template('reset_request.html', form=form, message=message)
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Password Reset Request',
sender='[email protected]',
recipients=[user.email])
msg.body = f'''To reset your password, visit the following link:
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route('/cleverletter/reset_request/<token>', methods=['GET', 'POST'])
@limiter.limit("5/minute")
def reset_token(token):
user = User.verify_reset_token(token)
if not user:
# If the token is invalid or expired, redirect the user to the `reset_request` route.
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data)
user.password = hashed_password
db.session.commit()
return redirect(url_for('login'))
return render_template('reset_token.html', form=form)
@app.route('/cleverletter/delete_account', methods=['POST'])
@limiter.limit("5/minute")
@login_required
def delete_account():
user = User.query.get(current_user.id)
db.session.delete(user)
db.session.commit()
flash('Your account has been deleted.', 'success')
return redirect(url_for('signup'))
def get_completion(prompt, model="gpt-3.5-turbo"):
# Always use the development API key
api_key = app.config['OPENAI_API_KEY_DEV']
# Set the API key for this request
openai.api_key = api_key
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0.5, # this is the degree of randomness of the model's output
)
return response.choices[0].message["content"]
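# Hedged usage note (not part of the original routes): get_completion() wraps a
# single-turn chat request, e.g. get_completion("Summarise this CV in two
# sentences: ..."); temperature=0.5 trades strict determinism for some
# variation between generated cover letters.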
|
joaomorossini/Clever-Letter-Generator
|
routes.py
|
routes.py
|
py
| 10,934 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28800521491
|
"""Function to calculate the enrichment score for a given similarity matrix."""
import numpy as np
import pandas as pd
from typing import List, Union
import scipy
from cytominer_eval.utils.operation_utils import assign_replicates
def enrichment(
similarity_melted_df: pd.DataFrame,
replicate_groups: List[str],
percentile: Union[float, List[float]],
) -> pd.DataFrame:
"""Calculate the enrichment score. This score is based on the fisher exact odds score.
Similar to the other functions, the closest connections are determined and checked with the replicates.
This score effectively calculates how much better the distribution of correct connections is compared to random.
Parameters
----------
similarity_melted_df : pandas.DataFrame
An elongated symmetrical matrix indicating pairwise correlations between
samples. Importantly, it must follow the exact structure as output from
:py:func:`cytominer_eval.transform.transform.metric_melt`.
replicate_groups : List
a list of metadata column names in the original profile dataframe to use as
replicate columns.
percentile : List of floats
Determines what percentage of top connections used for the enrichment calculation.
Returns
-------
dict
percentile, threshold, odds ratio and p value
"""
result = []
replicate_truth_df = assign_replicates(
similarity_melted_df=similarity_melted_df, replicate_groups=replicate_groups
)
# loop over all percentiles
if type(percentile) == float:
percentile = [percentile]
for p in percentile:
# threshold based on percentile of top connections
threshold = similarity_melted_df.similarity_metric.quantile(p)
# calculate the individual components of the contingency tables
v11 = len(
replicate_truth_df.query(
"group_replicate==True and similarity_metric>@threshold"
)
)
v12 = len(
replicate_truth_df.query(
"group_replicate==False and similarity_metric>@threshold"
)
)
v21 = len(
replicate_truth_df.query(
"group_replicate==True and similarity_metric<=@threshold"
)
)
v22 = len(
replicate_truth_df.query(
"group_replicate==False and similarity_metric<=@threshold"
)
)
v = np.asarray([[v11, v12], [v21, v22]])
r = scipy.stats.fisher_exact(v, alternative="greater")
result.append(
{
"enrichment_percentile": p,
"threshold": threshold,
"ods_ratio": r[0],
"p-value": r[1],
}
)
result_df = pd.DataFrame(result)
return result_df
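# Illustrative sketch (not part of the original module): the odds ratio above
# comes from a 2x2 contingency table of replicate/non-replicate pairs split at
# the percentile threshold. With a hypothetical table
#   [[20, 80],
#    [10, 890]]
# (20 replicate pairs above threshold, 80 non-replicate above, 10 replicate
# below, 890 non-replicate below), scipy.stats.fisher_exact(..., "greater")
# returns an odds ratio of (20 * 890) / (80 * 10) = 22.25 together with a
# one-sided p-value.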
|
cytomining/cytominer-eval
|
cytominer_eval/operations/enrichment.py
|
enrichment.py
|
py
| 2,845 |
python
|
en
|
code
| 7 |
github-code
|
6
|
25571472390
|
import logging
# fmt = "%(name)s----->%(message)s----->%(asctime)s"
# logging.basicConfig(level="DEBUG",format=fmt)
# logging.debug("这是debug信息")
# logging.info('这是info信息')
# logging.warning('这是警告信息')
# logging.error('这是错误信息')
# logging.critical('这是cri信息')
logger = logging.getLogger('heihei') # The default level is WARNING; when it differs from the console handler's level, the higher (stricter) level wins.
logger.setLevel('INFO')
console_handler = logging.StreamHandler()
# level threshold for the console handler
console_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
console_handler.setLevel(level='INFO')
logger.addHandler(console_handler)
file_handler = logging.FileHandler('1.txt', encoding='utf-8', mode='a')
file_handler.setLevel('INFO')
logger.addHandler(file_handler)
logging.debug("这是debug信息")
logging.info('这是info信息')
logging.warning('这是警告信息')
logging.error('这是错误信息')
logging.critical('这是cri信息')
USER_AGENTS
|
lishuangbo0123/basic
|
history_study/test.py
|
test.py
|
py
| 1,059 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
21490193625
|
#
# PyBank
# Ryan Eccleston-Murdock
# 28 November 2020
#
# Purpose: Analyze .csv financial data
#
# Sources:
import os
import csv
in_path = 'Resources'
in_file_name = 'budget_data.csv'
in_csvpath = os.path.join(in_path, in_file_name)
out_path = 'analysis'
out_file_name = 'financial_summary.csv'
out_csvpath = os.path.join(out_path, out_file_name)
with open(in_csvpath, 'r') as inFile:
# Create reader obj for budget data and skip header/field row
budget_sheet = csv.reader(inFile, delimiter=',')
header = next(budget_sheet)
total_months = 0
net = 0
months = []
profits = []
changes = []
# Gets count of total number of months, net profit/loss and months/profit
# pairs
for month, profit in budget_sheet:
total_months += 1
net += int(profit)
months.append(month)
profits.append(int(profit))
# Get month to month change
for i in range(len(profits) - 1):
change = profits[i + 1] - profits[i]
changes.append(change)
# Gets average change over period
average_change = sum(changes) / len(changes)
# Gets extrema
great_increase = max(changes)
great_increase_month = changes.index(great_increase)
great_decrease = min(changes)
great_decrease_month = changes.index(great_decrease)
# Summary
print('Financial Analysis')
print('--------------------------')
print('Total months: ', total_months)
print('Total: $', net)
print('Average Change: $', round(average_change, 2))
print('Greatest Increase in Profits: ', months[great_increase_month], '($', great_increase, ')')
print('Greatest Decrease in Profits: ', months[great_decrease_month], '($', great_decrease, ')')
print('--------------------------')
with open(out_csvpath, 'w', newline='') as outFile:
# Write analysis to .csv
summary = csv.writer(outFile, delimiter=',')
summary.writerow(['Financial Analysis'])
summary.writerow(['--------------------------'])
summary.writerow(['Total months', total_months])
summary.writerow(['Total', net])
summary.writerow(['Average Change', round(average_change, 2)])
summary.writerow(['Greatest Increase in Profits', months[great_increase_month], great_increase])
summary.writerow(['Greatest Decrease in Profits', months[great_decrease_month], great_decrease])
summary.writerow(['--------------------------'])
|
reccleston/python-challenge
|
PyBank/main.py
|
main.py
|
py
| 2,272 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43371065244
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class Spider:
try:
page = webdriver.Chrome()
url = "https://music.163.com/#/song?id=31654747"
page.get(url)
search = page.find_element_by_id("srch")
search.send_keys("aaa")
search.send_keys(Keys.ENTER)
except Exception as e:
print(e)
|
frebudd/python
|
autoinput.py
|
autoinput.py
|
py
| 382 |
python
|
en
|
code
| 2 |
github-code
|
6
|
25097408304
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 15 09:34:07 2022
@author: maria
"""
import numpy as np
import pandas as pd
from numpy import zeros, newaxis
import matplotlib.pyplot as plt
import scipy as sp
from scipy.signal import butter,filtfilt,medfilt
import csv
import re
#getting the F traces which are classified as cells by Suite2P (manually curated ROIs should be automatically saved)
def getcells(filePathF, filePathiscell):
"""
This function returns the ROIs that are classified as cells.
Careful, only use this if you have manually curated the Suite2P data!
Parameters
----------
filePathF : string
The path of where the fluorescence traces from Suite2P are located.
It will load the file as an array within the function.
This should be an array of shape [x,y] where x is the number of ROIs and y the corresponding values of F intensity
filePathiscell : string
        The path where the iscell file from Suite2P is located.
iscell should be an array of shape [x,y] where x is the number of ROIs and y is the classification confidence
(values are boolean, 0 for not a cell, 1 for cell)
cells is a 1D array [x] with the identify of the ROIs classified as cells in iscell
Returns
-------
F_cells : array of float32
array of shape [x,y] where x is the same as the one in cells and y contains the corresponding F intensities
"""
iscell = np.load(filePathiscell, allow_pickle=True)
F = np.load(filePathF, allow_pickle=True)
cells = np.where(iscell == 1)[0]
F_cells = F[cells,:]
return F_cells
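# Hedged usage sketch (the paths below are placeholders, not from the original
# analysis): Suite2P writes F.npy and iscell.npy into each plane folder, so a
# typical call would look like
#   F_cells = getcells(r"...\suite2p\plane0\F.npy", r"...\suite2p\plane0\iscell.npy")
# giving an array of shape (number of curated cells, number of frames).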
#%%Liad's functions slightly adapted
#code from Liad, returns the metadata, remember to change the number of channels
def GetNidaqChannels(niDaqFilePath, numChannels):
"""
Parameters
----------
niDaqFilePath : string
the path of the nidaq file.
numChannels : int, optional
Number of channels in the file. The default is 7.
Returns
-------
niDaq : matrix
the matrix of the niDaq signals [time X channels]
"""
niDaq = np.fromfile(niDaqFilePath, dtype= np.float64)
niDaq = np.reshape(niDaq,(int(len(niDaq)/numChannels),numChannels))
return niDaq
def AssignFrameTime(frameClock,th = 0.5,plot=False):
"""
The function assigns a time in ms to a frame time.
Parameters:
frameClock: the signal from the nidaq of the frame clock
    th : the threshold for the tick peaks, default : 0.5, which seems to work
plot: plot to inspect, default = False
returns frameTimes (ms)
"""
#Frame times
# pkTimes,_ = sp.signal.find_peaks(-frameClock,threshold=th)
# pkTimes = np.where(frameClock<th)[0]
# fdif = np.diff(pkTimes)
# longFrame = np.where(fdif==1)[0]
# pkTimes = np.delete(pkTimes,longFrame)
# recordingTimes = np.arange(0,len(frameClock),0.001)
# frameTimes = recordingTimes[pkTimes]
# threshold = 0.5
pkTimes = np.where(np.diff(frameClock > th, prepend=False))[0]
# pkTimes = np.where(np.diff(np.array(frameClock > 0).astype(int),prepend=False)>0)[0]
if (plot):
f,ax = plt.subplots(1)
ax.plot(frameClock)
ax.plot(pkTimes,np.ones(len(pkTimes))*np.min(frameClock),'r*')
ax.set_xlabel('time (ms)')
ax.set_ylabel('Amplitude (V)')
return pkTimes
#function from Liad, detecting photodiode change
def DetectPhotodiodeChanges_old(photodiode,plot=True,lowPass=30,kernel = 101,fs=1000, waitTime=10000):
"""
The function detects photodiode changes using a 'Schmitt Trigger', that is, by
detecting the signal going up at an earlier point than the signal going down,
    the signal is filtered and smoothed to prevent noisy bursts from distorting the detection.
Parameters:
photodiode: the signal from the nidaq of the photodiode
lowPass: the low pass signal for the photodiode signal, default: 30,
kernel: the kernel for median filtering, default = 101.
fs: the frequency of acquisiton, default = 1000
plot: plot to inspect, default = False
waitTime: the delay time until protocol start, default = 5000
returns: st,et (ms) (if acq is 1000 Hz)
***** WHAT DOES ST, ET STAND FOR???*****
"""
b,a = sp.signal.butter(1, lowPass, btype='low', fs=fs)
# sigFilt = photodiode
sigFilt = sp.signal.filtfilt(b,a,photodiode)
sigFilt = sp.signal.medfilt(sigFilt,kernel)
maxSig = np.max(sigFilt)
minSig = np.min(sigFilt)
thresholdU = (maxSig-minSig)*0.2
thresholdD = (maxSig-minSig)*0.4
threshold = (maxSig-minSig)*0.5
    # find threshold crossings
crossingsU = np.where(np.diff(np.array(sigFilt > thresholdU).astype(int),prepend=False)>0)[0]
crossingsD = np.where(np.diff(np.array(sigFilt > thresholdD).astype(int),prepend=False)<0)[0]
# crossingsU = np.delete(crossingsU,np.where(crossingsU<waitTime)[0])
# crossingsD = np.delete(crossingsD,np.where(crossingsD<waitTime)[0])
crossings = np.sort(np.unique(np.hstack((crossingsU,crossingsD))))
if (plot):
f,ax = plt.subplots(1,1,sharex=True)
ax.plot(photodiode,label='photodiode raw')
ax.plot(sigFilt,label = 'photodiode filtered')
ax.plot(crossings,np.ones(len(crossings))*threshold,'g*')
ax.hlines([thresholdU],0,len(photodiode),'k')
ax.hlines([thresholdD],0,len(photodiode),'k')
# ax.plot(st,np.ones(len(crossingsD))*threshold,'r*')
ax.legend()
ax.set_xlabel('time (ms)')
ax.set_ylabel('Amplitude (V)')
return crossings
def DetectPhotodiodeChanges_new(photodiode,plot=False,kernel = 101,upThreshold = 0.2, downThreshold = 0.4,fs=1000, waitTime=5000):
"""
The function detects photodiode changes using a 'Schmitt Trigger', that is, by
detecting the signal going up at an earlier point than the signal going down,
    the signal is filtered and smoothed to prevent noisy bursts from distorting the detection.
Parameters:
photodiode: the signal from the nidaq of the photodiode
lowPass: the low pass signal for the photodiode signal, default: 30,
kernel: the kernel for median filtering, default = 101.
fs: the frequency of acquisiton, default = 1000
plot: plot to inspect, default = False
waitTime: the delay time until protocol start, default = 5000
    returns: diode changes (s); it is up to the user to decide which crossings mean on and which mean off
"""
# b,a = sp.signal.butter(1, lowPass, btype='low', fs=fs)
sigFilt = photodiode
# sigFilt = sp.signal.filtfilt(b,a,photodiode)
sigFilt = sp.signal.medfilt(sigFilt,kernel)
maxSig = np.max(sigFilt)
minSig = np.min(sigFilt)
thresholdU = (maxSig-minSig)*upThreshold
thresholdD = (maxSig-minSig)*downThreshold
threshold = (maxSig-minSig)*0.5
    # find threshold crossings
crossingsU = np.where(np.diff(np.array(sigFilt > thresholdU).astype(int),prepend=False)>0)[0]
crossingsD = np.where(np.diff(np.array(sigFilt > thresholdD).astype(int),prepend=False)<0)[0]
crossingsU = np.delete(crossingsU,np.where(crossingsU<waitTime)[0])
crossingsD = np.delete(crossingsD,np.where(crossingsD<waitTime)[0])
crossings = np.sort(np.unique(np.hstack((crossingsU,crossingsD))))
if (plot):
f,ax = plt.subplots(1,1,sharex=True)
ax.plot(photodiode,label='photodiode raw')
ax.plot(sigFilt,label = 'photodiode filtered')
ax.plot(crossings,np.ones(len(crossings))*threshold,'g*')
ax.hlines([thresholdU],0,len(photodiode),'k')
ax.hlines([thresholdD],0,len(photodiode),'k')
# ax.plot(st,np.ones(len(crossingsD))*threshold,'r*')
ax.legend()
ax.set_xlabel('time (ms)')
ax.set_ylabel('Amplitude (V)')
return crossings
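# Minimal illustration (not part of the original analysis code) of the
# threshold-crossing idiom used above: np.diff on a boolean trace marks where
# the signal first exceeds the threshold.
#   sig = np.array([0.0, 0.1, 0.9, 1.0, 0.2, 0.05, 0.8])
#   ups = np.where(np.diff(np.array(sig > 0.5).astype(int), prepend=False) > 0)[0]
#   # ups -> array([2, 6]): the trace rises past 0.5 at samples 2 and 6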
def GetStimulusInfo(filePath,props):
"""
Parameters
----------
filePath : str
the path of the log file.
props : array-like
the names of the properties to extract.
Returns
-------
StimProperties : list of dictionaries
the list has all the extracted stimuli, each a dictionary with the props and their values.
"""
StimProperties = []
with open(filePath, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in reader:
a = []
for p in range(len(props)):
# m = re.findall(props[p]+'=(\d*)', row[np.min([len(row)-1,p])])
m = re.findall(props[p]+'=([a-zA-Z0-9_.-]*)', row[np.min([len(row)-1,p])])
if (len(m)>0):
a.append(m[0])
if (len(a)>0):
stimProps = {}
for p in range(len(props)):
stimProps[props[p]] = a[p]
StimProperties.append(stimProps)
return StimProperties
def AlignStim(signal, time, eventTimes, window,timeUnit=1,timeLimit=1):
aligned = [];
t = [];
dt = np.median(np.diff(time,axis=0))
if (timeUnit==1):
w = np.rint(window / dt).astype(int)
else:
w = window.astype(int)
maxDur = signal.shape[0]
if (window.shape[0] == 1): # constant window
mini = np.min(w[:,0]);
maxi = np.max(w[:,1]);
tmp = np.array(range(mini,maxi));
w = np.tile(w,((eventTimes.shape[0],1)))
else:
if (window.shape[0] != eventTimes.shape[0]):
print('No. events and windows have to be the same!')
return
else:
mini = np.min(w[:,0]);
maxi = np.max(w[:,1]);
tmp = range(mini,maxi);
t = tmp * dt;
aligned = np.zeros((t.shape[0],eventTimes.shape[0],signal.shape[1]))
for ev in range(eventTimes.shape[0]):
# evInd = find(time > eventTimes(ev), 1);
wst = w[ev,0]
wet = w[ev,1]
evInd = np.where(time>=eventTimes[ev])[0]
if (len(evInd)==0):
continue
else :
# None
# if dist is bigger than one second stop
if (np.any((time[evInd[0]]-eventTimes[ev])>timeLimit)):
continue
st = evInd[0]+ wst #get start
et = evInd[0] + wet #get end
alignRange = np.array(range(np.where(tmp==wst)[0][0],np.where(tmp==wet-1)[0][0]+1))
sigRange = np.array(range(st,et))
valid = np.where((sigRange>=0) & (sigRange<maxDur))[0]
aligned[alignRange[valid],ev,:] = signal[sigRange[valid],:];
return aligned, t
def DetectWheelMove_old(moveA, moveB, rev_res=1024, total_track=598.47, plot=True):
"""
The function detects the wheel movement.
At the moment uses only moveA.
Parameters:
moveA,moveB: the first and second channel of the rotary encoder
    rev_res: the rotary encoder resolution, default =1024
total_track: the total length of the track, default = 598.47 (mm)
kernel: the kernel for median filtering, default = 101.
plot: plot to inspect, default = False
returns: distance
"""
# make sure all is between 1 and 0
moveA /= np.max(moveA)
moveA -= np.min(moveA)
moveB /= np.max(moveB)
moveB -= np.min(moveB)
# detect A move
ADiff = np.diff(moveA)
Ast = np.where(ADiff >0.5)[0]
Aet = np.where(ADiff <-0.5)[0]
# detect B move
BDiff = np.diff(moveB)
Bst = np.where(BDiff >0.5)[0]
Bet = np.where(BDiff <-0.5)[0]
#Correct possible problems for end of recording
if (len(Ast)>len(Aet)):
Aet = np.hstack((Aet,[len(moveA)]))
elif (len(Ast)<len(Aet)):
Ast = np.hstack(([0],Ast))
dist_per_move = total_track/rev_res
# Make into distance
track = np.zeros(len(moveA))
track[Ast] = dist_per_move
distance = np.cumsum(track)
if (plot):
f,ax = plt.subplots(3,1,sharex=True)
ax[0].plot(moveA)
# ax.plot(np.abs(ADiff))
ax[0].plot(Ast,np.ones(len(Ast)),'k*')
ax[0].plot(Aet,np.ones(len(Aet)),'r*')
ax[0].set_xlabel('time (ms)')
ax[0].set_ylabel('Amplitude (V)')
ax[1].plot(distance)
ax[1].set_xlabel('time (ms)')
ax[1].set_ylabel('distance (mm)')
ax[2].plot(track)
ax[2].set_xlabel('time (ms)')
ax[2].set_ylabel('Move')
# movFirst = Amoves>Bmoves
return distance
def running_info(filePath, th = 3, plot=False):
with open(filePath) as file_name:
csvChannels = np.loadtxt(file_name, delimiter=",")
arduinoTime = csvChannels[:,-1]
arduinoTimeDiff = np.diff(arduinoTime,prepend=True)
normalTimeDiff = np.where(arduinoTimeDiff>-100)[0]
csvChannels = csvChannels[normalTimeDiff,:]
# convert time to second (always in ms)
arduinoTime = csvChannels[:,-1]/1000
# Start arduino time at zero
arduinoTime-=arduinoTime[0]
csvChannels = csvChannels[:,:-1]
numChannels = csvChannels.shape[1]
if (plot):
f,ax = plt.subplots(numChannels,sharex=True)
for i in range(numChannels):
ax[i].plot(arduinoTime,csvChannels[:,i])
return csvChannels,arduinoTime
def DetectWheelMove(moveA,moveB,timestamps,rev_res = 1024, total_track = 59.847, plot=False):
"""
The function detects the wheel movement.
At the moment uses only moveA.
    [[Altered the minimum from 0 to 5 because of the data from 04/08/22 -M]]
Parameters:
moveA,moveB: the first and second channel of the rotary encoder
    rev_res: the rotary encoder resolution, default =1024
total_track: the total length of the track, default = 59.847 (cm)
kernel: the kernel for median filtering, default = 101.
plot: plot to inspect, default = False
returns: velocity[cm/s], distance [cm]
"""
    # introducing thresholding in case the non-movement values are not 0; 5 was the largest such value so far
th_index = moveA<5
moveA[th_index] = 0
th_index = moveB<5
moveB[th_index] = 0
moveA = np.round(moveA).astype(bool)
moveB = np.round(moveB).astype(bool)
counterA = np.zeros(len(moveA))
counterB = np.zeros(len(moveB))
# detect A move
risingEdgeA = np.where(np.diff(moveA>0,prepend=True))[0]
risingEdgeA = risingEdgeA[moveA[risingEdgeA]==1]
risingEdgeA_B = moveB[risingEdgeA]
counterA[risingEdgeA[risingEdgeA_B==0]]=1
counterA[risingEdgeA[risingEdgeA_B==1]]=-1
# detect B move
risingEdgeB = np.where(np.diff(moveB>0,prepend=True))[0]#np.diff(moveB)
risingEdgeB = risingEdgeB[moveB[risingEdgeB]==1]
risingEdgeB_A = moveB[risingEdgeB]
counterA[risingEdgeB[risingEdgeB_A==0]]=-1
counterA[risingEdgeB[risingEdgeB_A==1]]=1
dist_per_move = total_track/rev_res
instDist = counterA*dist_per_move
distance = np.cumsum(instDist)
averagingTime = int(np.round(1/np.median(np.diff(timestamps))))
sumKernel = np.ones(averagingTime)
tsKernel = np.zeros(averagingTime)
tsKernel[0]=1
tsKernel[-1]=-1
# take window sum and convert to cm
distWindow = np.convolve(instDist,sumKernel,'same')
# count time elapsed
timeElapsed = np.convolve(timestamps,tsKernel,'same')
velocity = distWindow/timeElapsed
# if (plot):
# f,ax = plt.subplots(3,1,sharex=True)
# ax[0].plot(moveA)
# # ax.plot(np.abs(ADiff))
# ax[0].plot(Ast,np.ones(len(Ast)),'k*')
# ax[0].plot(Aet,np.ones(len(Aet)),'r*')
# ax[0].set_xlabel('time (ms)')
# ax[0].set_ylabel('Amplitude (V)')
# ax[1].plot(distance)
# ax[1].set_xlabel('time (ms)')
# ax[1].set_ylabel('distance (mm)')
# ax[2].plot(track)
# ax[2].set_xlabel('time (ms)')
# ax[2].set_ylabel('Move')
# movFirst = Amoves>Bmoves
return velocity, distance
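# Worked figure (illustrative only): with the defaults above, one encoder tick
# corresponds to total_track / rev_res = 59.847 / 1024 ≈ 0.0584 cm of travel,
# and velocity is that per-tick distance summed over a ~1 s window divided by
# the elapsed time.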
def Get_Stim_Identity(log, reps, protocol_type, types_of_stim):
"""
Parameters
----------
log : array
contains the log of stimuli, assumes the order of the columns is "Ori", "SFreq", "TFreq", "Contrast".
reps : integer
how many times a stimulus was repeated.
protocol_type : string
DESCRIPTION. The options are :
- "simple" which refers to the protocol which only shows 12 types of orietnations
- "TFreq": protocol with different temp frequencies
- "SFreq": protocol with different spatial frequencies
= "Contrast": protocol with different contrasts.
types_of_stim : integer
DESCRIPTION.
Refers to the different types of stimuli shown.
        Assumes that for "simple", types of stim is 12 because 12 different orientations are shown.
        For all the others, it is assumed to be 24 because there are 4 different orientations and 6 different variations of parameters
Returns
-------
an array of shape (types_of_stim, reps) if protocol was "simple"
an array of shape(4, reps, 6) for all other protocols (if 4 different orientations and 6 different other parameters).
"""
#the angles of the stim
#in the case of 20 iterations, given that for simple gratings protocol 12 orientations are shown, the total stimuli shown is 240
if types_of_stim == 12:
angles = np.array([30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360])
TFreq =np.array([2])
SFreq = np.array([0.08])
contrast = np.array([1])
#for other gratings protocols such as temp freq etc, this number should be double
elif types_of_stim == 24:
angles = np.array([0, 90, 180, 270])
TFreq = np.array([0.5, 1, 2, 4, 8, 16])
SFreq = np.array([0.01, 0.02, 0.04, 0.08, 0.16, 0.32])
contrast = np.array([0, 0.125, 0.25, 0.5, 0.75, 1])
#what each angle means
# 0 degrees is vertical to the left,
#90 is horizontal down,
#180 is vertical to the right and
#270 is horizontal up
#with these 4 orientations can test orientation and direction selectivity
#reps = how many repetitions of the same stim we have
#getting a 3D array with shape(orientation, repeats, TFreq/SFreq)
#all_TFreq = np.zeros((angles.shape[0], reps, TFreq.shape[0])).astype(int)
#all_SFreq = np.zeros((angles.shape[0], reps, SFreq.shape[0])).astype(int)
all_parameters = np.zeros((angles.shape[0], TFreq.shape[0], reps)).astype(int)
#all_oris = np.zeros((angles.shape[0], reps)).astype(int)
for angle in range(angles.shape[0]):
if protocol_type == "TFreq":
for freq in range(TFreq.shape[0]):
specific_TF = np.where((log[:,0] == angles[angle]) & (log[:,2] == TFreq[freq]) & (log[:,3] == 1)) [0]
all_parameters[angle, freq, :] = specific_TF
if protocol_type == "SFreq":
for freq in range(SFreq.shape[0]):
specific_SF = np.where((log[:,0] == angles[angle]) & (log[:,1] == SFreq[freq]) & (log[:,3] == 1)) [0]
all_parameters[angle, freq, :] = specific_SF
if protocol_type == "Contrast":
for freq in range(TFreq.shape[0]):
specific_contrast = np.where((log[:,0] == angles[angle]) & (log[:,3] == contrast[freq])) [0]
all_parameters[angle, freq, :] = specific_contrast
# if protocol_type == "simple":
# specific_P = np.where((log[:,0] == angles[angle])) [0]
# all_oris[angle, :] = specific_P
#return all_oris
return all_parameters
def behaviour_reps (log, types_of_stim,reps, protocol_type, speed, time, stim_on, stim_off):
"""
Takes the stim on values and the stim off values which tell you the exact time
Then uses this to find the value in the running data which gives you a vector that contains all the values within that period
Decides within the loop if 90% of the values are above a certain threshold then assign to each rep a 0 or 1 value
Make separate arrays which contain the indices like in all_oris but split into running and rest arrays
Then can use these values to plot separate parts of the traces (running vs not running)
Parameters
----------
log : array
contains the log of stimuli, assumes the order of the columns is "Ori", "SFreq", "TFreq", "Contrast".
types_of_stim : integer
DESCRIPTION:
Refers to the different types of stimuli shown.
Assumes that for "simple", types of stim is 12 becuase 12 different orientations are shown.
For all the others, it is assumed to be 24 because there are 4 different orientartions and 6 different variations of parameters
reps : integer
how many times a stimulus was repeated.
protocol_type : string
The options are :
- "simple" which refers to the protocol which only shows 12 types of orietnations
- "TFreq": protocol with different temp frequencies
- "SFreq": protocol with different spatial frequencies
= "contrast": protocol with different contrasts.
speed : 1D array
the speed throughout the whole experiment.
time : 1D array
        The corrected time at which the behaviour occurred.
Both of the above are outputs from Liad's function "DetectWheelMove" and duinoDelayCompensation
stim_on : 1D array
from photodiode, time at which stimulus appears.
stim_off : 1D array
same as above but when stim disappears.
Returns
-------
    two lists of trial-index arrays, one for running trials and one for rest trials:
    one entry per orientation if protocol was "simple",
    one entry per (orientation, parameter) combination for all other protocols
    (e.g. 4 orientations x 6 parameter values).
"""
stim_on_round = np.around(stim_on, decimals = 2)
stim_off_round = np.around(stim_off, decimals = 2)
speed_time = np.stack((time, speed)).T
for rep in range(stim_on.shape[0]-1):
start = np.where(stim_on_round[rep] == speed_time[:,0])[0]
stop = np.where(stim_off_round[rep] == speed_time[:,0])[0]
interval = speed_time[start[0]:stop[0], 1]
running_bool = np.argwhere(interval>1)
plt.plot(interval)
if running_bool.shape[0]/interval.shape[0]>0.9:
a = 1
else:
a = 0
        if a == 1:
            # now append 1 to the final column of the log if the above turns out true
            log[rep, 4] = 1
if types_of_stim == 12:
angles = np.array([30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360])
TFreq =np.array([2])
SFreq = np.array([0.08])
contrast = np.array([1])
#for other gratings protocols such as temp freq etc, this number should be double
elif types_of_stim == 24:
angles = np.array([0, 90, 180, 270])
TFreq = np.array([0.5, 1, 2, 4, 8, 16])
SFreq = np.array([0.01, 0.02, 0.04, 0.08, 0.16, 0.32])
contrast = np.array([0, 0.125, 0.25, 0.5, 0.75, 1])
"""
for running
"""
#running = np.ones((4, 6, 30))*np.nan
running = []
#creates a list of arrays by looking in the log file and sorting the indices based on the desired angles, freq
#and if there is a 0 or a 1 in the final column
for angle in range(angles.shape[0]):
if protocol_type == "SFreq":
for freq in range(TFreq.shape[0]):
specific_SF_r = np.where((log[:,0] == angles[angle]) & (log[:,1] == SFreq[freq]) & (log[:,3] == 1) & (log[:,4] ==1)) [0]
#running[angle, freq,:] = specific_SF_r
running.append(specific_SF_r)
if protocol_type == "TFreq":
for freq in range(TFreq.shape[0]):
specific_TF_r = np.where((log[:,0] == angles[angle]) & (log[:,2] == TFreq[freq]) & (log[:,3] == 1) & (log[:,4] ==1)) [0]
running.append(specific_TF_r)
#running[angle, freq,:] = specific_TF_r
if protocol_type == "Contrast":
            for freq in range(contrast.shape[0]):
                specific_contrast_r = np.where((log[:,0] == angles[angle]) & (log[:,3] == contrast[freq]) & (log[:,4] ==1)) [0]
running.append(specific_contrast_r)
#running[angle, freq, :] = specific_contrast_r
elif protocol_type == "simple":
specific_P_r = np.where((log[:,0] == angles[angle]) & (log[:,4] ==1)) [0]
running.append(specific_P_r)
"""
for rest
"""
rest = []
for angle in range(angles.shape[0]):
if protocol_type == "SFreq":
for freq in range(TFreq.shape[0]):
specific_SF_re = np.where((log[:,0] == angles[angle]) & (log[:,1] == SFreq[freq]) & (log[:,4] ==0)) [0]
rest.append(specific_SF_re)
if protocol_type == "TFreq":
for freq in range(TFreq.shape[0]):
specific_TF_re = np.where((log[:,0] == angles[angle]) & (log[:,2] == TFreq[freq]) & (log[:,4] ==0)) [0]
rest.append(specific_TF_re)
if protocol_type == "Contrast":
for freq in range(TFreq.shape[0]):
specific_contrast_re = np.where((log[:,0] == angles[angle]) & (log[:,3] == contrast[freq]) & (log[:,4] ==0)) [0]
rest.append(specific_contrast_re)
elif protocol_type == "simple":
specific_P_re = np.where((log[:,0] == angles[angle]) & (log[:,4] ==0)) [0]
rest.append(specific_P_re)
return running, rest
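# Illustrative usage sketch (assumption: `traces` is a hypothetical per-trial
# trace array, not defined in this file). `running` and `rest` are lists of
# trial-index arrays, one entry per (orientation, parameter) combination in the
# order the loops above visit them (one entry per orientation for "simple"),
# so the first condition could be split as:
#   running_traces = traces[running[0]]
#   rest_traces = traces[rest[0]]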
|
mariacozan/Analysis_and_Processing
|
functions/functions2022_07_15.py
|
functions2022_07_15.py
|
py
| 26,567 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19239185532
|
# stack! The assignments never end!
# No regard for efficiency: push one, pop one
import sys
from collections import deque
input = sys.stdin.readline
N = int(input())
S = deque() # stack that holds pending assignments
tot = 0 # total score
for _ in range(N):
    W = list(map(int, input().split()))
    if W[0]: # if a new assignment arrives
        if W[2] == 1: # if it can be finished right now, add its score immediately
            tot += W[1]
        else: # otherwise subtract 1 from its remaining time and push it onto S
            S.append([W[1], W[2]-1])
    else: # if no new assignment arrives
        if S: # when there are assignments left
            n_score, n_time = S.pop()
            if n_time == 1: # if it can be finished now, add its score
                tot += n_score
            else: # if not, just subtract 1 from its remaining time
                S.append([n_score, n_time-1])
print(tot)
|
sdh98429/dj2_alg_study
|
BAEKJOON/stack/b17952.py
|
b17952.py
|
py
| 878 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
22933846621
|
from cryptopals.set1.common import recover_xor_key
def test(hex_strings, expected):
english_score = {
score: text for key, score, text in [
recover_xor_key(hex_string.decode('hex'))
for hex_string in hex_strings
]
}
best_score = min(english_score)
return english_score[best_score]
|
ericnorris/cryptopals-solutions
|
cryptopals/set1/challenge_04.py
|
challenge_04.py
|
py
| 341 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17441173344
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import streamlit as st
import ptitprince as pt
def scatter_plot(df,fig):
hobbies = []
for col in df.columns:
hobbies.append(col)
print(col)
st.title(" Scatter Plot")
hobby = st.selectbox("X-axis: ", hobbies)
# print the selected hobby
st.write("You have selected X-axis: ", hobby)
hobby1 = st.selectbox("Y-axis: ", hobbies)
st.write("You have selected Y-axis: ", hobby1)
if (st.button("scatter plot")):
st.text("scatter plot")
ax = sns.regplot(x=hobby, y=hobby1, data=df)
st.pyplot(fig)
def group_histogram(df,fig):
st.title("Grouped Histogram")
if (st.button("Grouped Histogram")):
st.text("Grouped Histogram")
for condition in df.TrialType.unique():
cond_data = df[(df.TrialType == condition)]
ax = sns.distplot(cond_data.RT, kde=False)
ax.set(xlabel='Response Time', ylabel='Frequency')
st.pyplot(fig)
def bar_plot(df,fig,hobbies):
st.title("Bar Plot")
hobby = st.selectbox("X-axis for barplot: ", hobbies)
# print the selected hobby
st.write("You have selected X-axis: ", hobby)
hobby1 = st.selectbox("Y-axis for barplot: ", hobbies)
st.write("You have selected Y-axis: ", hobby1)
if (st.button("Bar plot")):
st.text("Bar plot")
sns.barplot(x=hobby, y=hobby1, data=df)
st.pyplot(fig)
def box_plot1(df,fig,hobbies):
st.title("Box Plot")
hobby = st.selectbox("X-axis for boxplot: ", hobbies)
# print the selected hobby
st.write("You have selected X-axis: ", hobby)
hobby1 = st.selectbox("Y-axis for boxplot: ", hobbies)
st.write("You have selected Y-axis: ", hobby1)
if (st.button("Box plot")):
st.text("Box plot")
        sns.boxplot(x=hobby, y=hobby1, data=df)
st.pyplot(fig)
def heat_map(df,fig,hobbies):
st.title("Heatmap Plot")
col2 = st.multiselect(
"Blah:", sorted(list(hobbies)), sorted(list(hobbies))
)
if (st.button("Heatmap plot")):
st.text("Heatmap plot")
ax = sns.heatmap(df[col2])
st.pyplot(fig)
def violine_plot(df,fig,hobbies):
st.title("Violin Plot")
hobby = st.selectbox("X-axis for Violinplot: ", hobbies)
# print the selected hobby
st.write("X-axis for Violinplot: ", hobby)
hobby1 = st.selectbox("Y-axis for Violinplot: ", hobbies)
st.write("You have selected Y-axis: ", hobby1)
if (st.button("Violin plot")):
st.text("Violin plot")
sns.violinplot(x=hobby, y=hobby1, data=df)
st.pyplot(fig)
def rain_cloudplot(df,fig,hobbies):
st.title("Raincloud Plot")
hobby = st.selectbox("X-axis for Raincloudplot: ", hobbies)
# print the selected hobby
st.write("You have selected X-axis: ", hobby)
hobby1 = st.selectbox("Y-axis for Raincloudplot: ", hobbies)
st.write("You have selected Y-axis: ", hobby1)
if (st.button("Raincloud plot")):
st.text("Raincloud plot")
ax = pt.RainCloud(x=hobby, y=hobby1,
data=df,
width_viol=.8,
width_box=.4,
figsize=(12, 8), orient='h',
move=.0)
st.pyplot(fig)
def app():
filename = st.text_input('Enter a file path:')
try:
df = pd.read_csv(filename)
except:
        pass
uploaded_files = st.file_uploader("Upload CSV", type="csv", accept_multiple_files=True)
if uploaded_files:
for file in uploaded_files:
file.seek(0)
uploaded_data_read = [pd.read_csv(file) for file in uploaded_files]
df = pd.concat(uploaded_data_read)
hobbies=[]
try:
fig = plt.figure(figsize=(12, 8))
for col in df.columns:
hobbies.append(col)
print(col)
fig = plt.figure(figsize=(12, 8))
st.dataframe(data=df, width=None, height=None)
scatter_plot(df,fig)
group_histogram(df, fig)
bar_plot(df, fig, hobbies)
heat_map(df, fig, hobbies)
violine_plot(df, fig, hobbies)
box_plot1(df, fig, hobbies)
rain_cloudplot(df, fig, hobbies)
except:
        pass
|
imsanjoykb/Data-Analytics-Tool-Development
|
apps/graphs.py
|
graphs.py
|
py
| 4,249 |
python
|
en
|
code
| 3 |
github-code
|
6
|
10159507438
|
# 0 1 2 3 4
# 5 6 7 8 9
# 0 1 2 3 4
# 5 6 7 8 9
def rosy():
counter=0
for rows in range(1,3):
for col in range(0, 5):
print(counter,end=' ')
counter += 1
print()
rosy()
rosy()
|
suchishree/django_assignment1
|
python/looping/assignment 3/no6.py
|
no6.py
|
py
| 228 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32470859712
|
# Number Cards 2
from bisect import bisect_left, bisect_right
def binary(x):
start = bisect_left(data_n, x)
end = bisect_right(data_n, x)
return end - start
n = int(input())
data_n = sorted(list(map(int, input().split())))
m = int(input())
data_m = list(map(int, input().split()))
for x in data_m:
print(binary(x), end= ' ')
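# Worked example (illustrative only): with data_n = [1, 2, 3, 3, 3, 7], binary(3)
# returns bisect_right(...) - bisect_left(...) = 5 - 2 = 3, i.e. the number of
# cards carrying the value 3.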
|
JinDDung2/python-pratice
|
BOJ/binary/10816.py
|
10816.py
|
py
| 345 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38701948852
|
#coding=utf8
import config
import json
import sys, time
py3k = sys.version_info.major > 2
import os.path
import urllib
if py3k:
from urllib import parse as urlparse
else:
import urlparse
def get_one():
return config.dbconn().fetch_rows('http', condition={'checked': 0}, order="id asc", limit="1", fetchone=True)
def check_key(key):
'''
    Decide whether this key should be kept.
'''
blacklist = ['t', 'r', 'submit']
if key.lower() in blacklist:
return False
return True
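# Illustrative behaviour of the blacklist above: check_key('Submit') and
# check_key('t') return False, while check_key('user_id') returns True.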
def check_value(value, vtype):
'''
    Decide whether this value should be kept.
'''
if vtype == 'array': return False
return True
def get_type(key, value):
if type(value) == type([]): return 'array'
value = value[0]
if value.isdigit(): return 'int'
try:
float(value)
return 'float'
except:
pass
# url check
u = urlparse.urlparse(value)
if u.scheme and u.netloc:
return 'url'
try:
j = json.loads(value)
if type(j) == type([]) or type(j) == type({}):
return 'json'
except:
pass
return 'str'
while True:
http = get_one()
if not http:
time.sleep(3)
continue
req = json.loads(http['req'])
if req['rtype'] not in ['qs', 'rewrite']:
config.dbconn().insert('requests', {'requestid': http['id'], 'method': req['method'], 'key': '', 'type': 'special|'+req['rtype']})
else:
# support array like a[]=1&a[]=2
parsed = urlparse.urlparse(req['uri'])
get_parts = urlparse.parse_qs(parsed.query)
if get_parts:
for k,v in get_parts.items():
v = v[0] if len(v) == 1 else v
vtype = get_type(k, v)
if check_key(k) and check_value(v, vtype):
config.dbconn().insert('requests', {'requestid': http['id'], 'method': "GET", 'key': k, 'type': vtype})
if not parsed.query and not os.path.splitext(parsed.path)[1] and len(parsed.path.split('/')) > 3:
path_parts = parsed.path.split('/')
for i in range(3, len(path_parts)):
vtype = 'rewrite|'+get_type('rewrite', path_parts[i])
config.dbconn().insert('requests', {'requestid': http['id'], 'method': "GET", 'key': str(i), 'type': vtype})
if req['method'] == "POST":
post_parts = urlparse.parse_qs(urlparse.urlparse(req['body']).query)
if post_parts:
for k,v in post_parts.items():
v = v[0] if len(v) == 1 else v
vtype = get_type(k, v)
if check_key(k) and check_value(v, vtype):
config.dbconn().insert('requests', {'requestid': http['id'], 'method': "POST", 'key': k, 'type': vtype})
config.dbconn().update('http', {'checked': 1}, {'id': http['id']})
|
5alt/ZeroExploit
|
parser.py
|
parser.py
|
py
| 2,452 |
python
|
en
|
code
| 4 |
github-code
|
6
|
26829773618
|
#######################################
# This file computes several characteristics of the portage graph
#######################################
import math
import sys
import core_data
import hyportage_constraint_ast
import hyportage_data
import utils
import graphs
import host.scripts.utils
from host.scripts import hyportage_db
data = {}
######################################################################
# GENERIC STATISTICS EXTRACTION FUNCTION
######################################################################
def map_base_statistics(value_number, total_size, map_size):
average = total_size / float(value_number)
variance = 0
for key, value in map_size.iteritems():
tmp = average - key
tmp = tmp * tmp * len(value)
variance = variance + tmp
variance = math.sqrt(variance / value_number)
return average, variance
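# Worked example (illustrative only): with value_number=4, total_size=10 and
# map_size={1: {'a'}, 3: {'b', 'c', 'd'}}, the average is 10/4 = 2.5 and the
# returned "variance" (really a population standard deviation of the sizes) is
# sqrt(((2.5-1)^2*1 + (2.5-3)^2*3) / 4) ~= 0.866.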
def generics(input_iterator, extract_data, extract_key, filter_function=host.scripts.utils.filter_function_simple, store_data_map=False):
value_number = 0
map_data = {}
map_size = {}
total_size = 0
max_size = 0
min_size = sys.maxint
for element in input_iterator:
if filter_function(element):
value_number = value_number + 1
data = extract_data(element)
key = extract_key(element)
size = len(data)
if store_data_map:
if data in map_data: map_data[data].add(key)
else: map_data[data] = {key}
if size in map_size: map_size[size].add(key)
else: map_size[size] = {key}
total_size = total_size + size
if size > max_size: max_size = size
if size < min_size: min_size = size
average, variance = map_base_statistics(value_number, total_size, map_size)
return {
'number': value_number,
'map_data': map_data,
'map_size': map_size,
'total_size': total_size,
'average': average,
'variance': variance,
'max_size': max_size,
'min_size': min_size
}
def generic_map(input_map, extraction_function, filter_function=host.scripts.utils.filter_function_simple, store_data_map=False):
return generics(input_map.iteritems(), lambda el: extraction_function(el[1]), lambda el: el[0], filter_function, store_data_map)
def generic_list(input_list, extraction_function, filter_function=host.scripts.utils.filter_function_simple, store_data_map=False):
return generics(input_list, extraction_function, lambda el: tuple(el), filter_function, store_data_map)
######################################################################
# FEATURES
######################################################################
def features(filter_function=host.scripts.utils.filter_function_simple):
utils.phase_start("Computing the USE Flags Core Statistics.")
required = sum([len(spl.required_iuses) for spl in hyportage_db.mspl.itervalues() if filter_function(spl)])
local = sum([len(spl.iuses_default) for spl in hyportage_db.mspl.itervalues() if filter_function(spl)])
global data
data['features'] = generic_map(hyportage_db.mspl, hyportage_data.spl_get_iuses_full, filter_function)
data['features']['average_required'] = required / float(data['features']['number'])
data['features']['average_local'] = local / float(data['features']['number'])
utils.phase_end("Computation Completed")
def features_usage(filter_function=host.scripts.utils.filter_function_simple):
utils.phase_start("Computing the USE Flags Core Statistics.")
map_features = {}
for key, value in hyportage_db.mspl.iteritems():
if filter_function(value):
for feature in hyportage_data.spl_get_required_iuses(value):
if feature in map_features: map_features[feature].add(key)
else: map_features[feature] = {key}
global data
data['feature_usage'] = generic_map(map_features, core_data.identity, filter_function)
data['feature_usage']['map_data'] = map_features
utils.phase_end("Computation Completed")
"""
def statistics_features(filter_function=db.filter_function_simple):
utils.phase_start("Computing the USE Flags Core Statistics.")
features_number = 0
features_max = 0
features_min = 100
spl_min = []
spl_max = []
spl_number = 0
for spl in db.mspl.itervalues():
if filter_function(spl):
spl_number = spl_number + 1
use_flag_size = len(hyportage_data.spl_get_required_iuses(spl))
if use_flag_size < features_min:
features_min = use_flag_size
spl_min = [spl.name]
elif use_flag_size == features_min: spl_min.append(spl.name)
if use_flag_size > features_max:
features_max = use_flag_size
spl_max = [spl.name]
elif use_flag_size == features_max: spl_max.append(spl.name)
features_number = features_number + use_flag_size
res = {
'min': features_min,
'min_spl_list': sorted(spl_min),
'max': features_max,
'max_spl_list': sorted(spl_max),
'number': features_number,
'spl_number': spl_number,
'average': features_number / spl_number
}
global statistics
statistics['features'] = res
utils.phase_end("Computation Completed")
"""
######################################################################
# DEPENDENCIES
######################################################################
class GETGuardedDependenciesVisitor(hyportage_constraint_ast.ASTVisitor):
def __init__(self):
super(hyportage_constraint_ast.ASTVisitor, self).__init__()
self.res = {}
self.guards = 0
def visitDependCONDITION(self, ctx):
self.guards = self.guards + 1
map(self.visitDependEL, ctx['els'])
self.guards = self.guards - 1
def visitDependSIMPLE(self, ctx):
pattern = ctx['atom']
if pattern in self.res:
if self.guards == 0: self.res[pattern]['guarded'] = False
if "selection" in ctx: self.res[pattern]['selects'] = True
else: self.res[pattern] = {'guarded': self.guards > 0, 'selects': "selection" in ctx}
def visitSPL(self, spl):
self.visitDepend(spl.fm_combined)
res = self.res
self.res = {}
self.guards = 0
return res
def dependencies(filter_function=host.scripts.utils.filter_function_simple):
utils.phase_start("Computing the Dependencies Statistics.")
visitor = GETGuardedDependenciesVisitor()
local_map = {spl.name: visitor.visitSPL(spl) for spl in hyportage_db.mspl.itervalues()}
def extraction_function_all(data): return data.keys()
def extraction_function_guarded(data): return {pattern for pattern in data.iterkeys() if data[pattern]['guarded']}
def extraction_function_selects(data): return {pattern for pattern in data.iterkeys() if data[pattern]['selects']}
global data
data['dependencies_all'] = generic_map(local_map, extraction_function_all, filter_function)
data['dependencies_guarded'] = generic_map(local_map, extraction_function_guarded, filter_function)
data['dependencies_selects'] = generic_map(local_map, extraction_function_selects, filter_function)
utils.phase_end("Computation Completed")
def lone_packages(filter_function=host.scripts.utils.filter_function_simple):
referenced_spls = {
spl
for el in hyportage_db.flat_pattern_repository.itervalues()
for spl in el.__generate_matched_spls(hyportage_db.mspl, hyportage_db.spl_groups)
}
spls = filter(filter_function, hyportage_db.mspl.itervalues())
spls = filter(lambda spl: len(spl.dependencies) == 0, spls)
spls = filter(lambda spl: spl not in referenced_spls, spls)
global data
data['lone_packages'] = spls
"""
def statistics_dependencies(filter_function=db.filter_function_simple):
utils.phase_start("Computing the Dependencies Core Statistics.")
dependencies_number = 0
dependencies_max = 0
dependencies_min = 100
dependencies_guarded_number = 0
dependencies_guarded_max = 0
dependencies_guarded_min = 100
spl_number = 0
spl_max = []
spl_min = []
spl_guarded_number = 0
spl_guarded_max = []
spl_guarded_min = []
visitor = GETDependenciesVisitor()
for spl in db.mspl.itervalues():
if filter_function(spl):
spl_number = spl_number + 1
deps = visitor.visitSPL(spl)
#print(" " + spl.name + ": " + str(deps))
dependencies_size = len(deps)
if dependencies_size < dependencies_min:
dependencies_min = dependencies_size
spl_min = [spl.name]
elif dependencies_size == dependencies_min:
spl_min.append(spl.name)
if dependencies_size > dependencies_max:
dependencies_max = dependencies_size
spl_max = [spl.name]
elif dependencies_size == dependencies_max:
spl_max.append(spl.name)
dependencies_number = dependencies_number + dependencies_size
deps_guarded = {k for k, v in deps.iteritems() if v}
dependencies_guarded_size = len(deps_guarded)
if dependencies_guarded_size < dependencies_guarded_min:
dependencies_guarded_min = dependencies_guarded_size
spl_guarded_min = [spl.name]
elif dependencies_guarded_size == dependencies_guarded_min:
spl_guarded_min.append(spl.name)
if dependencies_guarded_size > dependencies_guarded_max:
dependencies_max = dependencies_guarded_size
dependencies_guarded_max = [spl.name]
elif dependencies_guarded_size == dependencies_guarded_max:
spl_guarded_max.append(spl.name)
dependencies_guarded_number = dependencies_guarded_number + dependencies_guarded_size
if dependencies_guarded_size > 0: spl_guarded_number = spl_guarded_number + 1
res = {
'min': dependencies_min,
'min_spl_list': sorted(spl_min),
'max': dependencies_max,
'max_spl_list': sorted(spl_max),
'number': dependencies_number,
'spl_number': spl_number,
'average': dependencies_number / spl_number,
'guarded_min': dependencies_guarded_min,
'guarded_min_spl_list': sorted(spl_guarded_min),
'guarded_max': dependencies_guarded_max,
'guarded_max_spl_list': sorted(spl_guarded_max),
'guarded_number': dependencies_guarded_number,
'guarded_spl_number': spl_guarded_number,
'guarded_average': dependencies_guarded_number / spl_guarded_number
}
global statistics
statistics['dependencies'] = res
utils.phase_end("Computation Completed")
"""
######################################################################
# PATTERNS (ABSTRACT SPL)
######################################################################
def pattern_refinement(filter_function=host.scripts.utils.filter_function_simple):
utils.phase_start("Computing the Pattern (refinement) Statistics.")
def extraction_function(element): return element.matched_spls(hyportage_db.mspl, hyportage_db.spl_groups)
global data
data['pattern_refinement'] = generic_map(hyportage_db.flat_pattern_repository, extraction_function, filter_function)
utils.phase_end("Computation Completed")
def statistics_pattern(filter_function=host.scripts.utils.filter_function_simple):
utils.phase_start("Computing the Pattern Core Statistics.")
pattern_number = 0
pattern_usage = {}
pattern_usage_max = 0
for pattern_element in hyportage_db.flat_pattern_repository.itervalues():
if filter_function(pattern_element):
pattern_number = pattern_number + 1
size = len(pattern_element.containing_spl)
if pattern_usage_max < size:
pattern_usage_max = size
if size in pattern_usage:
pattern_usage[size].extend(pattern_element)
else: pattern_usage[size] = [pattern_element]
pattern_abstraction_number = 0
pattern_abstraction_max = [0, []]
pattern_abstraction_min = [100, []]
for pattern_element in hyportage_db.flat_pattern_repository.itervalues():
if filter_function(pattern_element):
pattern_abstraction_size = len(pattern_element.matched_spls(hyportage_db.mspl, hyportage_db.spl_groups))
if pattern_abstraction_size < pattern_abstraction_min[0]:
pattern_abstraction_min[0] = pattern_abstraction_size
pattern_abstraction_min[1] = [pattern_element]
elif pattern_abstraction_size == pattern_abstraction_min[0]:
pattern_abstraction_min[1].append(pattern_element)
if pattern_abstraction_size > pattern_abstraction_max[0]:
pattern_abstraction_max[0] = pattern_abstraction_size
pattern_abstraction_max[1] = [pattern_element]
elif pattern_abstraction_size == pattern_abstraction_max[0]:
pattern_abstraction_max[1].append(pattern_element)
pattern_abstraction_number = pattern_abstraction_number + pattern_abstraction_size
res = {
'number': pattern_number,
'usage': pattern_usage,
'usage_max': pattern_usage_max,
'usage_average': pattern_usage_max / pattern_number,
'total_abstraction_number': pattern_abstraction_number,
'abstraction_min': pattern_abstraction_min[0],
'abstraction_min_list': pattern_abstraction_min[1],
'abstraction_max': pattern_abstraction_max[0],
'abstraction_max_list': pattern_abstraction_max[1]
}
global data
data['patterns'] = res
utils.phase_end("Computation Completed")
######################################################################
# CYCLES
######################################################################
def graph(filter_function=host.scripts.utils.filter_function_simple):
utils.phase_start("Computing the Graph Core Statistics.")
graph_mspl, spl_nodes = graphs.mspl(filter_function, keep_self_loop=True)
nodes_spl = {node: spl for spl, node in spl_nodes.iteritems()}
visited = graph_mspl.getBooleanProperty("visited")
for n in graph_mspl.getNodes():
visited.setNodeValue(n, False)
shairplay_len = sys.maxint
cycles = []
for n in graph_mspl.getNodes():
if not visited.getNodeValue(n):
visited.setNodeValue(n, True)
path = [n]
branches = [graph_mspl.getOutNodes(n)]
if "shairplay" in nodes_spl[n].name: shairplay_len = 1
while path:
if len(path) >= shairplay_len: print(str([nodes_spl[node].name for node in path]))
if branches[-1].hasNext():
succ = branches[-1].next()
if len(path) >= shairplay_len: print(" found: " + nodes_spl[succ].name)
if succ in path:
if len(path) >= shairplay_len: print(" loop found: " + str([nodes_spl[node].name for node in path[path.index(succ):]]))
cycles.append([nodes_spl[node].name for node in path[path.index(succ):]])
elif not visited.getNodeValue(succ):
visited.setNodeValue(succ, True)
path.append(succ)
branches.append(graph_mspl.getOutNodes(succ))
if "shairplay" in nodes_spl[succ].name: shairplay_len = len(path)
else:
path.pop()
branches.pop()
if len(path) < shairplay_len: shairplay_len = sys.maxint
res = generic_map({tuple(v): v for v in cycles}, core_data.identity, host.scripts.utils.filter_function_simple)
res['cycles'] = cycles
global data
data['graph'] = res
utils.phase_end("Computation Completed")
|
HyVar/gentoo_to_mspl
|
host/statistics/statistics.py
|
statistics.py
|
py
| 14,279 |
python
|
en
|
code
| 10 |
github-code
|
6
|
32646505991
|
#!/usr/bin/python
class NodeVisitor(object):
def visit(self, node):
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node): # Called if no explicit visitor function exists for a node.
print("(%d,%d): %s(%s)" %(node.lineno, node.lineno, node.type, node.value))
print("UNKNOWN STRUCTURE")
error_found = 1
class TypeChecker(NodeVisitor):
error_found = 0;
returnedType = {'int' : {}, 'float' : {}, 'string' : {}}
for i in returnedType.keys():
returnedType[i] = {}
for j in returnedType.keys():
returnedType[i][j] = {}
for k in ['+','-','/','*','%']:
                returnedType[i][j][k] = 'err'  # default: operation not allowed for this type pair
returnedType['int']['float']['+'] = 'float'
returnedType['int']['int']['+'] = 'int'
returnedType['float']['float']['+'] = 'float'
returnedType['float']['int']['+'] = 'float'
returnedType['string']['string']['+'] = 'string'
returnedType['int']['float']['-'] = 'float'
returnedType['int']['int']['-'] = 'int'
returnedType['float']['float']['-'] = 'float'
returnedType['float']['int']['-'] = 'float'
returnedType['int']['float']['*'] = 'float'
returnedType['int']['int']['*'] = 'int'
returnedType['float']['float']['*'] = 'float'
returnedType['float']['int']['*'] = 'float'
returnedType['string']['int']['*'] = 'string'
returnedType['int']['float']['/'] = 'float'
returnedType['int']['int']['/'] = 'int'
returnedType['float']['float']['/'] = 'float'
returnedType['float']['int']['/'] = 'float'
returnedType['int']['int']['%'] = 'int'
returnedTypeRelative = {'int' : {}, 'float' : {}, 'string' : {}}
for i in returnedTypeRelative.keys():
returnedTypeRelative[i] = {}
for j in returnedTypeRelative.keys():
returnedTypeRelative[i][j] = 'err'
returnedTypeRelative['int']['float'] = 'int'
returnedTypeRelative['int']['int'] = 'int'
returnedTypeRelative['float']['float'] = 'int'
returnedTypeRelative['float']['int'] = 'int'
returnedTypeRelative['string']['string'] = 'int'
funs = {}
where_declared = {}
current_function = '*'
scope_number = 0
loop_number = 0
comp = 0
tmp_dec = {}
def visit_Program(self, node):
self.visit(node.insts)
return self.where_declared
def visit_Instructions(self, node):
for ins in node.instrs:
self.visit(ins)
def visit_Print(self, node):
if self.visit(node.expr) not in ('int','float','string'):
print("CANNOT PRINT", node.expr)
self.error_found = 1
def visit_Assignment(self, node):
if node.id in self.where_declared.keys():
if self.current_function in self.where_declared[node.id].keys():
type1 = self.where_declared[node.id][self.current_function]
elif '*' in self.where_declared[node.id].keys():
type1 = self.where_declared[node.id]['*']
else:
type1 = 'undeclared'
# print("UNDECLARED VARIABLE",node.id)
# self.error_found = 1
else:
type1 = 'undeclared'
# print("UNDECLARED VARIABLE",node.id)
# self.error_found = 1
type2 = self.visit(node.expr)
if type1 != 'undeclared' and type1 != type2:
print("TYPE MISMATCH IN ASSIGNMENT\n")
self.error_found = 1
def visit_Expressions(self, node):
tmp = []
for expr in node.exprs:
tmp.append(self.visit(expr))
return tmp
def visit_Matrix_function(self, node):
if self.visit(node.arg) != 'int':
print("Matrix function takes not int")
self.error_found = 1
def visit_Const(self, node):
if type(node.value) == str:
return 'string'
if type(node.value) == int:
return 'int'
if type(node.value) == float:
return 'float'
def visit_While(self, node):
self.visit(node.cond)
self.loop_number += 1
self.visit(node.stmt)
self.loop_number -= 1
def visit_For(self, node):
self.visit(node.id)
self.visit(node.range)
self.loop_number += 1
self.visit(node.inst)
self.loop_number -= 1
def visit_Range(self, node):
from_type = self.visit(node.range_from)
if from_type != 'undeclared' and from_type != 'int':
print("Range from should evaluate to int")
self.error_found = 1
to_type = self.visit(node.range_to)
if to_type != 'undeclared' and to_type != 'int':
print("Range to should evaluate to int")
self.error_found = 1
def visit_Continue(self, node):
if self.loop_number <= 0:
print("Continue used outside loop")
self.error_found = 1
def visit_Break(self, node):
if self.loop_number <= 0:
print("Break used outside loop")
self.error_found = 1
def visit_Condition(self, node):
if self.returnedTypeRelative[self.visit(node.left)][self.visit(node.right)] == 'err':
print("TYPE MISMATCH IN CONDITION\n")
self.error_found = 1
if self.returnedTypeRelative[self.visit(node.left)][self.visit(node.right)] != 'int':
print("CONDITION MUST BE INT")
self.error_found = 1
def visit_ComInstructions(self, node):
# self.tmp_dec = self.where_declared
# self.comp = 1
self.visit(node.instrs)
# self.comp = 0
# self.where_declared = self.tmp_dec
def visit_BinExpr(self, node):
type1 = self.visit(node.left)
type2 = self.visit(node.right)
op = node.op;
if self.returnedType[type1][type2][op] == 'err':
print("TYPE MISMATCH IN BIN EXPR\n")
self.error_found = 1;
return self.returnedType[type1][type2][op]
# def visit_Variable(self, node):
# if node.ID in self.where_declared.keys():
# if self.current_function in self.where_declared[node.ID].keys():
# return self.where_declared[node.ID][self.current_function]
# elif '*' in self.where_declared[node.ID].keys():
# return self.where_declared[node.ID]['*']
# else:
# print("UNDECLARED VARIABLE IN THIS FUN")
# self.error_found = 1
# else:
# print("UNDECLARED VARIABLE", node.ID)
# self.error_found = 1
    # this needs to be fixed
def visit_Variable(self, node):
return 'int'
def visit_Return(self, node):
self.visit(node.ret)
|
Andrzej97/kompilatory
|
TypeChecker_v3.py
|
TypeChecker_v3.py
|
py
| 6,834 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33680275570
|
import numpy as np
from numpy import dtype, uint8
class lena(object):
def __init__(self, pallete):
self.pallete = pallete
# Open the file for reading
def read_file(self, my_file):
stream = open(my_file, 'rb')
img = np.fromfile(stream, dtype=(uint8, 3))
return img
# Create a dither matrix
def dither_matrix_row(self, row, mat):
my_row = []
for i in range(128):
for j in mat[row]:
my_row.append(j)
return my_row
# Span the 4 X 4 dither matrix from the book in 512 X 512
def dither_matrix(self, first, second, third, fourth):
dither = []
total_length = 0
all_rows = [first, second, third, fourth]
for i in range(len(all_rows)):
total_length += len(all_rows[i])
for i in range(128):
dither.append(first)
dither.append(second)
dither.append(third)
dither.append(fourth)
dither = np.reshape(dither, (-1, 512))
return dither
# Transform the image
def quantize(self, img_array, dither_array):
quantized_array = []
for i in range(512):
for j in range(512):
if img_array[i][j] > dither_array[i][j]:
quantized_array.append(255)
else:
quantized_array.append(0)
temp_array = np.zeros(len(quantized_array), dtype=(uint8))
for i in range(len(quantized_array)):
temp_array[i] = quantized_array[i]
return temp_array
# Combine all three red, green and blue
def combine_rgb(self, r, g, b):
rgb = np.zeros(len(r), dtype=(uint8, 3))
for i in range(len(r)):
rgb[i][0] = r[i]
rgb[i][1] = g[i]
rgb[i][2] = b[i]
return rgb
# Scale dither matrix from 0 to 255 (8 bit) red and green
# Add 1 to the dither multiply by 16 and subtract 1
def scaled_dither(self, dither):
dither = np.add(dither, 1)
dither = np.dot(dither, 16)
dither = np.add(dither, -1)
return dither
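    # Worked example (illustrative): scaled_dither maps each 4-bit dither level d
    # to (d + 1) * 16 - 1, so level 0 becomes 15 and level 15 becomes 255,
    # spreading the original 0..15 thresholds across the full 8-bit range.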
if __name__ == '__main__':
# 4 X 4 dither matrix from the book
pallete = np.array([
[0, 8, 2, 10],
[12, 4, 14, 6],
[3, 11, 1, 9],
[15, 7, 13, 5]]
)
# Instantiate lena with the pallete
lena = lena(pallete)
# File must be a .data extension
file_to_open = raw_input('Enter a .data file: ')
#img = lena.read_file('LennaRGB512.data')
img = lena.read_file(file_to_open)
# Initialize red, green, blue arrays with zeros
red = np.zeros(len(img), dtype=(uint8))
green = np.zeros(len(img), dtype=(uint8))
blue = np.zeros(len(img), dtype=(uint8))
# Create the three channels for red, green and blue
for i in range(len(img)):
red[i] = img[i][0] # Red channel
green[i] = img[i][1] # Green channel
blue[i] = img[i][2] # Blue channel
# Convert linear arrays to 512 X 512
red = np.reshape(red, (-1, 512))
green = np.reshape(green, (-1, 512))
blue = np.reshape(blue, (-1, 512))
# Populate pallete along rows
first = lena.dither_matrix_row(0, pallete)
second = lena.dither_matrix_row(1, pallete)
third = lena.dither_matrix_row(2, pallete)
fourth = lena.dither_matrix_row(3, pallete)
# Populate entire matrix using rows
my_dither = lena.dither_matrix(first, second, third, fourth)
# Scale dither matrix from 0 to 255 (8 bit)
my_scaled_dither = lena.scaled_dither(my_dither)
# Quantize the arrays
quantized_lena_red = lena.quantize(red, my_scaled_dither)
quantized_lena_green = lena.quantize(green, my_scaled_dither)
quantized_lena_blue = lena.quantize(blue, my_scaled_dither)
# Combine three layers of red, green and blue
quantized_lena_rgb = lena.combine_rgb(
quantized_lena_red,
quantized_lena_green,
quantized_lena_blue
)
# Save the new file
quantized_lena_rgb.tofile(file_to_open + '_3bit.data')
|
ortizub41/lena
|
lena/lena.py
|
lena.py
|
py
| 4,071 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33846054954
|
from jwst.stpipe import Step
from jwst import datamodels
from ..datamodels import TMTDarkModel
from . import dark_sub
from ..utils.subarray import get_subarray_model
__all__ = ["DarkCurrentStep"]
class DarkCurrentStep(Step):
"""
DarkCurrentStep: Performs dark current correction by subtracting
dark current reference data from the input science data model.
"""
spec = """
dark_output = output_file(default = None) # Dark model subtracted
"""
reference_file_types = ["dark"]
def process(self, input):
# Open the input data model
with datamodels.open(input) as input_model:
# Get the name of the dark reference file to use
self.dark_name = self.get_reference_file(input_model, "dark")
self.log.info("Using DARK reference file %s", self.dark_name)
# Check for a valid reference file
if self.dark_name == "N/A":
self.log.warning("No DARK reference file found")
self.log.warning("Dark current step will be skipped")
result = input_model.copy()
result.meta.cal_step.dark = "SKIPPED"
return result
# Create name for the intermediate dark, if desired.
dark_output = self.dark_output
if dark_output is not None:
dark_output = self.make_output_path(
None, basepath=dark_output, ignore_use_model=True
)
# Open the dark ref file data model - based on Instrument
dark_model = TMTDarkModel(self.dark_name)
dark_model = get_subarray_model(input_model, dark_model)
# Do the dark correction
result = dark_sub.do_correction(input_model, dark_model, dark_output)
dark_model.close()
return result
|
oirlab/iris_pipeline
|
iris_pipeline/dark_current/dark_current_step.py
|
dark_current_step.py
|
py
| 1,857 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35116816269
|
# Import the libraries
import cv2
import numpy as np
import math as m
from matplotlib import pyplot as plt
#-- PRE-PROCESSING --
# Read the image
nimg = 'image1' # Change 'image1' for the name of your image
image = cv2.imread(nimg + '.jpg')
# Extract the RGB layers of the image
rgB = np.matrix(image[:,:,0]) # Blue
rGb = np.matrix(image[:,:,1]) # Green
Rgb = np.matrix(image[:,:,2]) # Red
# Define the combination RGB
II = cv2.absdiff(rGb,rgB)
I = II*255
cv2.imshow('Images with layers extracted', I)
cv2.waitKey(0)
# Initial binarization of the image
[fil, col] = I.shape
for o in range(0,fil):
for oo in range(0,col):
if I[o, oo]<80: # Pixel less than 80 will be 0
I[o,oo]=0
for o in range(0,fil):
for oo in range(0,col):
if I[o, oo]>0: # Pixel more than 0 will be 1
I[o,oo]=1
# Morphological transformations
# Create square structuring elements (strels): se for closing and se2 for dilation
se = np.ones((50, 50), np.uint8)
se2 = np.ones((10, 10), np.uint8)
closing = cv2.morphologyEx(I,cv2.MORPH_CLOSE,se) # Closing
dilation = cv2.dilate(closing,se2,1) # Dilation
# Find the contours
contours,hierarchy=cv2.findContours(dilation,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Extract the contours
cnt = contours[:]
num = len(cnt)
# print(num)
# print(contours)
# print(hierarchy)
# Calculate the bigger contour
box = np.zeros((num,4))
for j in range(0, num):
box[j,:]=cv2.boundingRect(cnt[j])
L = np.zeros((num,4))
Max=[0,0]
for j in range(0, num):
L[j,:]=box[j]
if L[j,2]>Max[1]:
Max=[j,L[j,2]]
BOX = box[Max[0],:]
# Mask
b = image[int(BOX[1]):int(BOX[1]+BOX[3]),int(BOX[0]):int(BOX[0]+BOX[2]),:]
#-- SEGMENTATION --
[fil,col,cap] = b.shape
# Extract the RGB layers of the image with the mask
rgB = b[:,:,0] # Blue
rGb = b[:,:,1] # Green
Rgb = b[:,:,2] # Red
# Normalizate the layers
R = Rgb/255.0
G = rGb/255.0
B = rgB/255.0
# Build the color K space
K = np.zeros((fil,col)) # Black layer
for o in range(0,fil):
for oo in range(0,col):
MAX = max(R[o,oo],G[o,oo],B[o,oo]) # Calculate the maximum value R-G-B
K[o,oo] = 1-MAX
# Save the image in .bmp format
cv2.imwrite('imgbmp_' + nimg + '.bmp', K)
# Read the image
k = cv2.imread('imgbmp_' + nimg + '.bmp')
# Apply the Laplacian operator to extract edges
BW1 = cv2.Laplacian(k, cv2.CV_8UC1)
# Extract layers
imgk = BW1[:,:,0]+BW1[:,:,1]+BW1[:,:,2]
# Save the image
cv2.imwrite('result_' + nimg + '.png', imgk*255)
|
selenebpradop/basic_exercises-computer_vision
|
contours_of_an_image_v2.py
|
contours_of_an_image_v2.py
|
py
| 2,461 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34965304781
|
"""
Recursive dynamic programming (plain recursion)
"""
class Solution(object):
def canJump(self,nums):
if len(nums) == 1:
return True
for i in range(1,nums[0]+1):
if i <= len(nums):
if self.canJump(nums[i:]):
return True
else:
return True
return False
"""
Memoized (top-down) recursive dynamic programming
"""
class Solution(object):
def canJump(self,nums):
n = len(nums)
if n == 1:
return True
a = [0] * n
a[n-1] = 1
position = 0
return self.canJumps(nums,a,position)
def canJumps(self,nums,a,position):
print(nums[position])
if a[position] != 0:
print(1)
if a[position] == 1:
return True
else:
return False
print(position)
furjump = min(nums[position]+position,len(nums)-1)
for i in range(position+1,furjump+1):
if i <= len(nums):
if self.canJumps(nums,a,i):
a[i] = 1
return True
else:
return True
a[position]=-1
return False
"""
Bottom-up dynamic programming
"""
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
n = len(nums)
if n == 1:
return True
memo = [0] * n
memo[n-1] = 1
m = n - 2
while m>= 0:
furjump = min(m+nums[m],n)
print(m,furjump)
for j in range(m+1,furjump+1):
if (memo[j] == 1):
memo[m]= 1
break
m = m -1
print(memo)
return memo[0] == 1
"""
Greedy algorithm
"""
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
n = len(nums) - 2
lasted = len(nums)- 1
while n >= 0:
if (n + nums[n]>= lasted):
lasted= n
n = n - 1
return lasted== 0
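# Illustrative usage of the greedy version above (a hypothetical driver, not part
# of the submitted solution):
if __name__ == "__main__":
    s = Solution()
    print(s.canJump([2, 3, 1, 1, 4]))  # True: 0 -> 1 -> 4
    print(s.canJump([3, 2, 1, 0, 4]))  # False: every path gets stuck at index 3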
|
qingyuannk/phoenix
|
dp/jumpgame.py
|
jumpgame.py
|
py
| 2,117 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9200799444
|
from fastapi import status, HTTPException, Depends
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from datetime import datetime, timedelta
from sqlalchemy.orm import Session
from .schema import TokenData
from app import database, models
from .config import env
SECRET_KEYS = env.SECRET_KEY
ALGORITHM = env.ALGORITHM
ACCESS_TOKEN_EXPIRE_MINUTES = env.ACCESS_TOKEN_EXPIRE_MINUTES
oauth2_schema = OAuth2PasswordBearer(tokenUrl="users/login")
def create_access_token(payload: dict):
to_encode = payload.copy()
expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
to_encode.update({"exp": expire})
token = jwt.encode(to_encode, SECRET_KEYS, algorithm=ALGORITHM)
return token
def verify_access_token(token: str, credentials_exception):
try:
payload = jwt.decode(token, SECRET_KEYS, algorithms=[ALGORITHM])
id: str = payload.get("user_id")
if id is None:
raise credentials_exception
token_data = TokenData(id=id)
except JWTError:
raise credentials_exception
return token_data
def get_current_user(
token: str = Depends(oauth2_schema),
db: Session = Depends(database.get_db)
):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Auth": "Bearer"}
)
token = verify_access_token(token, credentials_exception)
user = db.query(models.User).filter(models.User.id == token.id).first()
return user
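# Hedged usage sketch (hypothetical endpoint, not defined in this module): a
# route can require authentication by depending on get_current_user, e.g.
#
#   from fastapi import APIRouter, Depends
#   router = APIRouter()
#
#   @router.get("/me")
#   def read_me(current_user = Depends(get_current_user)):
#       return {"id": current_user.id}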
|
Ichi-1/FastAPI-example-api
|
app/oauth2.py
|
oauth2.py
|
py
| 1,581 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25575181895
|
from typing import List
class Solution:
    def rotate(self, nums: List[int], k: int) -> List[int]:
new_array = [1]*len(nums)
for i in range(len(nums)):
new_p = (i - k)%len(nums)
new_array[i] = nums[new_p]
return new_array
s = Solution()
l = [1,2,3,4,5,6,7]
x = s.rotate(l,3)
print(x)
|
ThadeuFerreira/python_code_challengers
|
rotate_array.py
|
rotate_array.py
|
py
| 331 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72332661307
|
#!/usr/bin/env python3
import os
import configparser
from mongoengine.connection import connect
from .data_model import Post
from .render_template import render
from .mailgun_emailer import send_email
def email_last_scraped_date():
## mongodb params (using configparser)
config = configparser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'settings.cfg'))
mlab_uri = config.get('MongoDB', 'mlab_uri')
# connect to db
MONGO_URI = mlab_uri
connect('sivji-sandbox', host=MONGO_URI)
## get the last date the webscraper was run
for post in Post.objects().fields(date_str=1).order_by('-date_str').limit(1):
day_to_pull = post.date_str
## pass in variables, render template, and send
context = {
'day_to_pull': day_to_pull,
'Post': Post,
}
html = render("template.html", context)
send_email(html)
|
alysivji/reddit-top-posts-scrapy
|
top_post_emailer/__init__.py
|
__init__.py
|
py
| 917 |
python
|
en
|
code
| 14 |
github-code
|
6
|
26239060381
|
#!/usr/bin/env python3
'''
This script will increment the major version number of the specified products.
It is assumed that the version number in the label itself is correct, and the version
just needs to be added on to the filename.
Usage:
versioning.py <label_file>...
'''
import re
import os
import sys
from bs4 import BeautifulSoup
LABELFILE_PARSE_VERSIONED_REGEX = r'(.+)_(\d+)_(\d+)\.xml'
LABELFILE_PARSE_UNVERSIONED_REGEX = r'(.+)\.xml'
DATAFILE_PARSE_VERSIONED_REGEX = r'(.+)_(\d+)\.([a-z0-9]+)'
DATAFILE_PARSE_UNVERSIONED_REGEX = r'(.+)\.([a-z0-9]+)'
def main(argv=None):
''' Entry point into the script '''
if argv is None:
argv = sys.argv
filepaths = argv[1:]
for filepath in filepaths:
dirname, filename = os.path.split(filepath)
increment_product(dirname, filename)
def increment_product(path, labelfile):
'''
Increments the version number of the specified product
'''
label = read_label(path, labelfile)
datafile = extract_datafile(label)
new_labelfile = increment_labelfile(labelfile)
if datafile:
new_datafile = increment_datafile(datafile)
contents = inject_datafile(label, datafile, new_datafile)
with open(new_labelfile, "w") as outfile:
outfile.write(contents)
rename(path, datafile, new_datafile)
else:
rename(path, labelfile, new_labelfile)
def read_label(path, labelfile):
'''
Reads in a product label file as a string
'''
with open(os.path.join(path, labelfile)) as infile:
return infile.read()
def extract_datafile(label):
''' Finds the data filename referenced in a product '''
soup = BeautifulSoup(label, 'lxml-xml')
if soup.Product_Observational:
return extract_observational_datafile(soup.Product_Observational)
if soup.Product_Collection:
return extract_collection_datafile(soup.Product_Collection)
if soup.Product_Document:
return extract_document_datafile(soup.Product_Document)
return None
def extract_collection_datafile(product):
''' Finds the inventory filename referenced in a collection product '''
file_area = product.File_Area_Inventory if product else None
file_element = file_area.File if file_area else None
file_name = file_element.file_name if file_element else None
return file_name.string
def extract_observational_datafile(product):
''' Finds the data filename referenced in a product '''
file_area = product.File_Area_Observational if product else None
file_element = file_area.File if file_area else None
file_name = file_element.file_name if file_element else None
return file_name.string
def extract_document_datafile(product):
''' Finds the document filename referenced in a document product. '''
document = product.Document if product else None
document_edition = document.Document_Edition if document else None
document_file = document_edition.Document_File if document_edition else None
file_name = document_file.document_file if document_file else None
return file_name.string
def increment_labelfile(labelfile):
''' Creates the filename for a label file with a new version number '''
(filebase, major, _) = parse_labelfile_name(labelfile)
newmajor, newminor = major + 1, 0
return "{}_{}_{}.xml".format(filebase, newmajor, newminor)
def increment_datafile(datafile):
''' Creates the filename for a data file with the new version number '''
(filebase, major, extension) = parse_datafile_name(datafile)
newmajor = major + 1
return "{}_{}.{}".format(filebase, newmajor, extension)
def inject_datafile(label, datafile, new_datafile):
''' Replaces the filename reference in a label with the specified file '''
return label.replace(datafile, new_datafile)
def rename(dirname, filename, newfilename):
''' Renames a file '''
src = os.path.join(dirname, filename)
dst = os.path.join(dirname, newfilename)
    if os.path.exists(dst):
print("File already exists: " + newfilename)
else:
os.rename(src, dst)
def parse_datafile_name(name):
''' Extract the version number from a data file, if available '''
versioned_match = re.match(DATAFILE_PARSE_VERSIONED_REGEX, name)
if versioned_match:
(filebase, major, extension) = versioned_match.groups()
return (filebase, int(major), extension)
unversioned_match = re.match(DATAFILE_PARSE_UNVERSIONED_REGEX, name)
(filebase, extension) = unversioned_match.groups()
return (filebase, 1, extension)
def parse_labelfile_name(name):
''' Extract the version number from a label file, if available '''
versioned_match = re.match(LABELFILE_PARSE_VERSIONED_REGEX, name)
if versioned_match:
(filebase, major, minor) = versioned_match.groups()
return (filebase, int(major), int(minor))
unversioned_match = re.match(LABELFILE_PARSE_UNVERSIONED_REGEX, name)
filebase = unversioned_match.groups()[0]
return (filebase, 1, 0)
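# Worked examples (illustrative; 'spec' is a made-up filename): the call
# parse_labelfile_name('spec_2_1.xml') returns ('spec', 2, 1), while an
# unversioned name such as 'spec.xml' falls back to ('spec', 1, 0);
# increment_labelfile therefore turns them into 'spec_3_0.xml' and
# 'spec_2_0.xml' respectively.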
def increment_major(major, _):
'''
Returns the version number with the major version incremented,
and the minor version reset to 1
'''
return (major + 1, 0)
def increment_minor(major, minor):
''' Returns the version number with the minor version incremented '''
return (major, minor + 1)
def attach_version_to_datafile(filebase, extension, major):
''' Creates a version of a filename that includes the version number '''
return '{filebase}_{major}.{extension}'.format(
major=major,
filebase=filebase,
extension=extension
)
def attach_version_to_labelfile(filebase, major, minor):
''' Creates a version of a label filename that includes the version number '''
return '{filebase}_{major}_{minor}.xml'.format(
filebase=filebase,
major=major,
minor=minor
)
if __name__ == '__main__':
sys.exit(main())
|
sbn-psi/data-tools
|
orex/pds4-tools/versioning.py
|
versioning.py
|
py
| 5,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35484692669
|
import os
import string
def file_check(filepath, mode):
if os.path.exists(filepath):
if os.path.isfile(filepath):
f = open("%s" % filepath, "%s" % mode)
return f
else:
return "Incorrect file"
else:
return "Incorrect file"
print(file_check("kivy_test.py", "r"))
def decorator(func_to_decorate):
def f():
print(func_to_decorate.__name__)
os.uname()
return func_to_decorate()
return f
@decorator
def adding():
return 5 + 9
print(adding())
def formatting(dir_path, exit_format):
    # strip the digits from the target pattern (e.g. "image000.jpg" -> ["image", "jpg"])
    digits = str.maketrans("", "", string.digits)
    a = exit_format.translate(digits).split(".")
    if os.path.isdir(dir_path):
        arr = os.listdir(dir_path)
        # zero-pad the counter to the width needed for the number of files
        width = len(str(len(arr)))
        for i in range(len(arr)):
            # only rename files that do not already follow the target pattern
            if arr[i].translate(digits).split(".") != a:
                new_name = a[0] + str(i + 1).zfill(width) + "." + a[1]
                os.rename(os.path.join(dir_path, arr[i]), os.path.join(dir_path, new_name))
formatting("/home/sirius/fotos", "image000.jpg")
|
TaffetaEarth/homework_python
|
os_work.py
|
os_work.py
|
py
| 1,849 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4143416632
|
'''A module for demo-ing exceptions'''
import sys
from math import log
def convert_to_int(s):
    try:
        x = int(s)
        print("Conversion succeeded! x =", x)
        return x
except (ValueError,TypeError) as e:
print("Conversion error: {}".format(str(e)), file=sys.stderr)
return -1
def string_log(s):
v = convert_to_int(s)
return log(v)
def square_root(x):
guess = x
i = 0
while guess * guess != x and i < 20:
guess = (guess + x / guess) / 2.0
i += 1
return guess
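# Worked example (illustrative): square_root(9) iterates Heron's rule
# guess -> (guess + 9/guess) / 2, giving 9 -> 5 -> 3.4 -> 3.0235... -> 3.00009...,
# converging on 3 well within the 20-iteration cap.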
def main():
print(square_root(9))
print(square_root(2))
print(square_root(64))
try:
print(square_root(-1))
except ZeroDivisionError:
print("Cannot compute the square_root of a negative numero")
print("Program execution continues normally here")
if __name__ == '__main__':
main()
|
gitsana/Python_Tutorial
|
M6-Exception Handling/exceptional2.py
|
exceptional2.py
|
py
| 764 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74750166267
|
from src.components import summarizer
from celery import Celery
from celery.utils.log import get_task_logger
from EmailSender import send_email
logger = get_task_logger(__name__)
celery = Celery(
__name__, backend="redis://127.0.0.1:6379", broker="redis://127.0.0.1:6379"
)
@celery.task(name="summarizer")
def GmailSummarizer(gmails, email_address):
responses = []
for gmail in gmails:
gmail_summary = summarizer.summarize(gmail)
responses.append(gmail_summary)
send_email(responses, email_address)
return True
"""
Run Redis and a Celery worker alongside the app.
Start the worker with the --pool=solo argument (important):
    celery -A flask_celery.celery worker -l info --pool=solo
Example: celery -A your-application worker -l info --pool=solo
"""
|
SVijayB/Gist
|
scripts/flask_celery.py
|
flask_celery.py
|
py
| 816 |
python
|
en
|
code
| 4 |
github-code
|
6
|
21025178712
|
#!/usr/bin/env python3
import logging
import sys
from ev3dev2.motor import OUTPUT_A, OUTPUT_B, OUTPUT_C, MediumMotor
from ev3dev2.control.rc_tank import RemoteControlledTank
log = logging.getLogger(__name__)
class TRACK3R(RemoteControlledTank):
"""
Base class for all TRACK3R variations. The only difference in the child
classes are in how the medium motor is handled.
To enable the medium motor toggle the beacon button on the EV3 remote.
"""
def __init__(self, medium_motor, left_motor, right_motor):
RemoteControlledTank.__init__(self, left_motor, right_motor)
self.medium_motor = MediumMotor(medium_motor)
self.medium_motor.reset()
class TRACK3RWithBallShooter(TRACK3R):
def __init__(self, medium_motor=OUTPUT_A, left_motor=OUTPUT_B, right_motor=OUTPUT_C):
TRACK3R.__init__(self, medium_motor, left_motor, right_motor)
self.remote.on_channel1_beacon = self.fire_ball
def fire_ball(self, state):
if state:
self.medium_motor.run_to_rel_pos(speed_sp=400, position_sp=3*360)
else:
self.medium_motor.stop()
class TRACK3RWithSpinner(TRACK3R):
def __init__(self, medium_motor=OUTPUT_A, left_motor=OUTPUT_B, right_motor=OUTPUT_C):
TRACK3R.__init__(self, medium_motor, left_motor, right_motor)
self.remote.on_channel1_beacon = self.spinner
def spinner(self, state):
if state:
self.medium_motor.run_forever(speed_sp=50)
else:
self.medium_motor.stop()
class TRACK3RWithClaw(TRACK3R):
def __init__(self, medium_motor=OUTPUT_A, left_motor=OUTPUT_B, right_motor=OUTPUT_C):
TRACK3R.__init__(self, medium_motor, left_motor, right_motor)
self.remote.on_channel1_beacon = self.move_claw
def move_claw(self, state):
if state:
self.medium_motor.run_to_rel_pos(speed_sp=200, position_sp=-75)
else:
self.medium_motor.run_to_rel_pos(speed_sp=200, position_sp=75)
|
ev3dev/ev3dev-lang-python-demo
|
robots/TRACK3R/TRACK3R.py
|
TRACK3R.py
|
py
| 2,002 |
python
|
en
|
code
| 59 |
github-code
|
6
|
25293805211
|
import unittest
from task import fix_encoding
expected_content = """Roses are räd.
Violets aren't blüe.
It's literally in the name.
They're called violets.
"""
filename = "example.txt"
output = "output.txt"
class TestCase(unittest.TestCase):
def setUp(self) -> None:
with open(filename, "w") as f:
f.write(expected_content)
def test_fix_encoding(self):
fix_encoding(filename, output)
with open(filename, "r", encoding="utf-8") as file:
actual_content = file.read()
self.assertEqual(actual_content, expected_content, "wrong answer")
|
DoctorManhattan123/edotools-python-course
|
Strings, inputs and files/file encoding/tests/test_task.py
|
test_task.py
|
py
| 604 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71344821309
|
# While-loop initialisation
opcao = -1
# Withdrawal variables: maximum of 500 per withdrawal, up to 3 withdrawals per day
limiteSaque = 500
saqueDia = 3
valorSacado = 0
# Balance variables
saldo = float(0)
# Deposit variables
deposito = float(0)
while opcao != 0:
opcao = int(input(" [1] Para sacar \n [2] Para depositar \n [3] Para ver saldo \n [0] Para sair \n :"))
    # Withdrawal option
if opcao == 1 :
saque = float(input("Digite o valor a ser sacado: "))
if saldo <= 0 :
print ("Conta zerado, não é possivel realizar o saque")
continue
if saldo < saque:
print("Não há fundos suficientes para realizar o saque!!!")
continue
if saque > 500:
print ("Valor indisponivel para saque, somente saques abaixo de 500")
continue
        if saque <= 0:
            print("Erro, valor impossivel de sacar")
            continue
if saqueDia <= 0:
print("Sem saques diarios restantes")
continue
else:
saqueDia -= 1
saldo -= saque
print("Valor sacado com sucesso.")
    # Deposit option
elif opcao == 2 :
deposito = float(input("Digite o valor do deposito: "))
if deposito > 0:
saldo += deposito
print("Deposito realizado com sucesso.")
else:
print("Deposito falhou, valor invalido!!!")
    # Statement option
elif opcao == 3 :
print("Imprimindo extrato, só um momento:")
print(saldo)
else:
print("Obrigado por usar nosso sistema, operação concluida")
|
Dnx0/trilha-python-dio
|
sistemaBancario.py
|
sistemaBancario.py
|
py
| 1,631 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
30969861276
|
import matplotlib.pyplot as plt
import time
import numpy as np
from PIL import Image
class graphic_display():
def __init__(self):
self.um_per_pixel = 0.5
self.cm_hot = plt.get_cmap('hot')
self.cm_jet = plt.get_cmap('jet')
self.cm_vir = plt.get_cmap('viridis')
self.cm_mag = plt.get_cmap('magma')
# self.cm_grn = plt.get_cmap('Greens')
self.cm_raw = plt.get_cmap('gray')
self.fps_counter = np.array([time.time(),time.time(),time.time()])
self.img_rs = None
self.img_norm = None
self.img_gamma = None
self.img_p = None
self.img_cm = None
self.img_sb = None
self.img_fin = None
self.win = None
self.cam = None
return
def update_win(self, win):
self.win = win
def update_cam(self, cam):
self.cam = cam
def update_image(self):
print('update image function')
self.img_rs = np.array(Image.fromarray(self.cam.img_raw).resize(size=(958, 638)),dtype = 'float64')/255
if self.win.zoom_factor > 1:
r1 = self.img_rs.shape[0]
c1 = self.img_rs.shape[1]
r2 = int(np.round(r1/self.win.zoom_factor))
c2 = int(np.round(c1/self.win.zoom_factor))
self.img_rs = self.img_rs[int((r1-r2)/2):int((r1-r2)/2)+r2, int((c1-c2)/2):int((c1-c2)/2)+c2]
# update and process the image for display from the camera
self.update_image_gamma()
self.normalise_img()
self.update_colormap()
self.display_saturated_pixels_purple() ### error
self.burn_scalebar_into_image()
# gui functions
self.win.repaint_image() ### may zoom in twice for raw image, need double check
self.win.update_hist()
# self.win.image_histogram.update_histogram() # method in histogram_canvas class
self.win.status_text_update_image()
# fps counter
self.fps_counter = np.append(self.fps_counter,time.time())
self.fps_counter = np.delete(self.fps_counter, 0)
self.win.status_fps_number.setText(str(np.round(1/np.mean(np.diff(self.fps_counter)),5)))
print('current saved value for fps is: ' + str(self.cam.fps) + ' current timer value is: ' + str(self.cam.timer_value))
return
def update_image_gamma(self):
if self.win.gamma == 1:
self.img_gamma = self.img_rs
else:
self.img_gamma = self.img_rs**self.win.gamma
return
def normalise_img(self):
print('normalise function')
if self.win.cbox_normalise.isChecked():
            # min over the non-zero pixel values (np.nonzero alone would return indices, not values)
            imgnormmin = np.min(self.img_gamma[np.nonzero(self.img_gamma)])
            imgnormmax = np.max(self.img_gamma)
            self.img_norm = (self.img_gamma - imgnormmin) / (imgnormmax - imgnormmin)
else:
self.img_norm = self.img_gamma
return
def update_colormap(self):
print('update colormap function')
# convert from gray to colormap magma selection
if self.win.combobox_colourmap.currentIndex() == 0:
self.img_cm = self.cm_mag(self.img_norm)
# convert from gray to colormap green selection
elif self.win.combobox_colourmap.currentIndex() == 1:
self.img_cm = np.zeros(np.hstack([np.shape(self.img_norm),4]))
self.img_cm[:,:,1] = self.img_norm
self.img_cm[:,:,3] = 255
## or use Greens colormap directly
# self.img_cm = self.cm_grn(self.img_norm)
# convert from gray to colormap viridis (3 channel) selection
elif self.win.combobox_colourmap.currentIndex() == 2:
self.img_cm = self.cm_vir(self.img_norm)
# convert from gray to colormap jet selection
elif self.win.combobox_colourmap.currentIndex() == 3:
self.img_cm = self.cm_jet(self.img_norm)
elif self.win.combobox_colourmap.currentIndex() == 4:
# self.img_cm = np.zeros(np.hstack([np.shape(self.img_norm),4]))
# self.img_cm[:,:,0] = self.img_norm
# self.img_cm[:,:,1] = self.img_norm
# self.img_cm[:,:,2] = self.img_norm
# self.img_cm[:,:,3] = 1
# print(self.img_cm)
# print(self.cam.img_raw)
## or use gray colormap directly
self.img_cm = self.cm_raw(self.img_norm)
return
def display_saturated_pixels_purple(self):
print('saturated pxls purple function')
# saturated pixels show up purple if check box is selected
# if self.win.combobox_colourmap.currentIndex() != 4:
self.img_p = self.img_cm
if self.win.cbox_saturated.isChecked():
ind = self.img_norm > 254
self.img_p[ind,0] = 255
self.img_p[ind,1] = 0
self.img_p[ind,2] = 255
return
def burn_scalebar_into_image(self):
print('burn scalebar function')
self.img_sb = self.img_p
if self.win.cbox_show_scalebar.isChecked():
s = self.img_sb.shape
if self.win.combobox_colourmap.currentIndex() == 1:
self.img_sb[int(s[0]*0.95):int(s[0]*0.955), int(s[1]*0.05):int(s[1]*0.05+100/self.um_per_pixel), 0] = 255
self.img_sb[int(s[0]*0.95):int(s[0]*0.955), int(s[1]*0.05):int(s[1]*0.05+100/self.um_per_pixel), 1] = 0
self.img_sb[int(s[0]*0.95):int(s[0]*0.955), int(s[1]*0.05):int(s[1]*0.05+100/self.um_per_pixel), 2] = 255
else:
self.img_sb[int(s[0]*0.95):int(s[0]*0.955), int(s[1]*0.05):int(s[1]*0.05+100/self.um_per_pixel), 0] = 0
self.img_sb[int(s[0]*0.95):int(s[0]*0.955), int(s[1]*0.05):int(s[1]*0.05+100/self.um_per_pixel), 1] = 255
self.img_sb[int(s[0]*0.95):int(s[0]*0.955), int(s[1]*0.05):int(s[1]*0.05+100/self.um_per_pixel), 2] = 0
self.img_fin = self.img_sb
self.img_fin = np.array(self.img_fin*255,dtype='uint8')
return
| peterlionelnewman/flow_lithographic_printer | Graphic_display.py | Graphic_display.py | py | 6,222 | python | en | code | 1 | github-code | 6 |
33800125828 |
import sys
sys.setrecursionlimit(10000)
def dfs(d, v, visited):
visited[v]= True
for i in d[v]:
if not visited[i]:
dfs(d, i, visited)
n,m = map(int, input().split())
d = [[] for _ in range(n+1)]
visited = [False]*(n+1)
result = 0
for _ in range(m):
u,v = map(int, input().split())
d[u].append(v)
d[v].append(u)
for i in range(1,n+1):
if not visited[i]:
dfs(d,i,visited)
result += 1
print(result)
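# Illustrative run (assumed example, not necessarily the judge's sample input):
# for the input
#   6 5
#   1 2
#   2 5
#   5 1
#   3 4
#   4 6
# the graph splits into the components {1, 2, 5} and {3, 4, 6},
# so the program prints 2.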
| devAon/Algorithm | BOJ-Python/boj-11724_연결요소의개수.py | boj-11724_연결요소의개수.py | py | 459 | python | en | code | 0 | github-code | 6 |
14594327515 |
import tensorflow as tf
import json
from model_provider import get_model
from utils.create_gan_tfrecords import TFRecordsGAN
from utils.augment_images import augment_autoencoder
import os
import tensorflow.keras as K
import datetime
import string
from losses import get_loss, gradient_penalty
import argparse
physical_devices = tf.config.experimental.list_physical_devices("GPU")
for gpu in physical_devices:
tf.config.experimental.set_memory_growth(gpu, True)
mirrored_strategy = tf.distribute.MirroredStrategy()
args = argparse.ArgumentParser(description="Train a network with specific settings")
args.add_argument("-d", "--dataset", type=str, default="zebra2horse",
help="Name a dataset from the tf_dataset collection",
choices=["zebra2horse"])
args.add_argument("-opt", "--optimizer", type=str, default="Adam", help="Select optimizer",
choices=["SGD", "RMSProp", "Adam"])
args.add_argument("-lrs", "--lr_scheduler", type=str, default="constant", help="Select learning rate scheduler",
choices=["poly", "exp_decay", "constant"])
args.add_argument("-gm", "--gan_mode", type=str, default="normal", help="Select training mode for GAN",
                  choices=["normal", "wgan_gp"])
args.add_argument("-e", "--epochs", type=int, default=1000, help="Number of epochs to train")
args.add_argument("--lr", type=float, default=2e-4, help="Initial learning rate")
args.add_argument("--momentum", type=float, default=0.9, help="Momentum")
args.add_argument("-bs", "--batch_size", type=int, default=16, help="Size of mini-batch")
args.add_argument("-si", "--save_interval", type=int, default=5, help="Save interval for model")
args.add_argument("-m", "--model", type=str, default="cyclegan", help="Select model")
args.add_argument("-logs", "--logdir", type=str, default="./logs", help="Directory to save tensorboard logdir")
args.add_argument("-l_m", "--load_model", type=str,
default=None,
help="Load model from path")
args.add_argument("-s", "--save_dir", type=str, default="./cyclegan_runs",
help="Save directory for models and tensorboard")
args.add_argument("-tfrecs", "--tf_record_path", type=str, default="/data/input/datasets/tf2_gan_tfrecords",
help="Save directory that contains train and validation tfrecords")
args.add_argument("-sb", "--shuffle_buffer", type=int, default=1024, help="Size of the shuffle buffer")
args.add_argument("--width", type=int, default=286, help="Width to resize input images to")
args.add_argument("--height", type=int, default=286, help="Height to resize input images to")
args.add_argument("--c_width", type=int, default=256, help="Crop width")
args.add_argument("--c_height", type=int, default=256, help="Crop height")
args.add_argument("--random_seed", type=int, default=1, help="Set random seed to this if true")
args = args.parse_args()
tf_record_path = args.tf_record_path
dataset = args.dataset
BUFFER_SIZE = args.shuffle_buffer
BATCH_SIZE = args.batch_size
IMG_WIDTH = args.width
IMG_HEIGHT = args.height
CROP_HEIGHT = args.c_height if args.c_height < IMG_HEIGHT else IMG_HEIGHT
CROP_WIDTH = args.c_width if args.c_width < IMG_WIDTH else IMG_WIDTH
LAMBDA = 10
EPOCHS = args.epochs
LEARNING_RATE = args.lr
LEARNING_RATE_SCHEDULER = args.lr_scheduler
save_interval = args.save_interval
save_dir = args.save_dir
load_model_path = args.load_model
MODEL = args.model
gan_mode = args.gan_mode
time = str(datetime.datetime.now())
time = time.translate(str.maketrans('', '', string.punctuation)).replace(" ", "-")[:-8]
logdir = "{}_{}_e{}_lr{}_{}x{}_{}".format(time, MODEL, EPOCHS, LEARNING_RATE, IMG_HEIGHT, IMG_WIDTH, gan_mode)
train_A, train_B = \
TFRecordsGAN(
tfrecord_path=
"{}/{}_train.tfrecords".format(tf_record_path, dataset + "_a")).read_tfrecords(), \
TFRecordsGAN(
tfrecord_path=
"{}/{}_train.tfrecords".format(tf_record_path, dataset + "_b")).read_tfrecords()
with open(f"{args.tf_record_path}/data_samples.json") as f:
data = json.load(f)
num_samples_ab = [data[dataset + "_a"], data[dataset + "_b"]]
if num_samples_ab[0] > num_samples_ab[1]:
total_samples = num_samples_ab[0]
train_B = train_B.repeat()
else:
total_samples = num_samples_ab[1]
train_A = train_A.repeat()
augmentor = lambda batch: augment_autoencoder(batch, size=(IMG_HEIGHT, IMG_WIDTH), crop=(CROP_HEIGHT, CROP_WIDTH))
train_A = train_A.map(
augmentor, num_parallel_calls=tf.data.AUTOTUNE).cache().shuffle(
BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
train_B = train_B.map(
augmentor, num_parallel_calls=tf.data.AUTOTUNE).cache().shuffle(
BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
train_A = mirrored_strategy.experimental_distribute_dataset(train_A)
train_B = mirrored_strategy.experimental_distribute_dataset(train_B)
if gan_mode == "wgan_gp":
gan_loss_obj = get_loss(name="Wasserstein")
else:
gan_loss_obj = get_loss(name="binary_crossentropy")
cycle_loss_obj = get_loss(name="MAE")
id_loss_obj = get_loss(name="MAE")
def discriminator_loss(real, generated):
if gan_mode == "wgan_gp":
real_loss = gan_loss_obj(-tf.ones_like(real), real)
generated_loss = gan_loss_obj(tf.ones_like(generated), generated)
else:
real_loss = gan_loss_obj(tf.ones_like(real), real)
generated_loss = gan_loss_obj(tf.zeros_like(generated), generated)
total_disc_loss = generated_loss + real_loss
return tf.reduce_mean(total_disc_loss) * 0.5
def generator_loss(generated):
return tf.reduce_mean(
gan_loss_obj(-tf.ones_like(generated), generated)) if gan_mode == "wgan_gp" else tf.reduce_mean(
gan_loss_obj(tf.ones_like(generated), generated))
def calc_cycle_loss(real_image, cycled_image):
loss1 = cycle_loss_obj(real_image, cycled_image)
return loss1
def identity_loss(real_image, same_image):
loss = id_loss_obj(real_image, same_image)
return LAMBDA * 0.5 * loss
if LEARNING_RATE_SCHEDULER == "poly":
lrs = K.optimizers.schedules.PolynomialDecay(LEARNING_RATE,
decay_steps=EPOCHS,
end_learning_rate=1e-8, power=0.8)
elif LEARNING_RATE_SCHEDULER == "exp_decay":
lrs = K.optimizers.schedules.ExponentialDecay(LEARNING_RATE,
decay_steps=EPOCHS,
decay_rate=0.5)
else:
lrs = LEARNING_RATE
with mirrored_strategy.scope():
generator_g = get_model("{}_gen".format(MODEL), type="gan")
generator_f = get_model("{}_gen".format(MODEL), type="gan")
discriminator_x = get_model("{}_disc".format(MODEL), type="gan")
discriminator_y = get_model("{}_disc".format(MODEL), type="gan")
tmp = tf.cast(tf.random.uniform((1, CROP_HEIGHT, CROP_WIDTH, 3), dtype=tf.float32, minval=0, maxval=1),
dtype=tf.float32)
generator_g(tmp), generator_f(tmp), discriminator_x(tmp), discriminator_y(tmp)
generator_g_optimizer = tf.keras.optimizers.Adam(lrs, beta_1=0.5)
generator_f_optimizer = tf.keras.optimizers.Adam(lrs, beta_1=0.5)
discriminator_x_optimizer = tf.keras.optimizers.Adam(lrs, beta_1=0.5)
discriminator_y_optimizer = tf.keras.optimizers.Adam(lrs, beta_1=0.5)
def load_models(models_parent_dir):
assert os.path.exists(models_parent_dir), "The path {} is not valid".format(models_parent_dir)
p_gen_g = K.models.load_model(os.path.join(models_parent_dir, "gen_g"))
p_gen_f = K.models.load_model(os.path.join(models_parent_dir, "gen_f"))
p_disc_x = K.models.load_model(os.path.join(models_parent_dir, "disc_x"))
p_disc_y = K.models.load_model(os.path.join(models_parent_dir, "disc_y"))
generator_g.set_weights(p_gen_g.get_weights())
print("Generator G loaded successfully")
generator_f.set_weights(p_gen_f.get_weights())
print("Generator F loaded successfully")
discriminator_x.set_weights(p_disc_x.get_weights())
print("Discriminator X loaded successfully")
discriminator_y.set_weights(p_disc_y.get_weights())
print("Discriminator Y loaded successfully")
if load_model_path is not None:
load_models(load_model_path)
START_EPOCH = int(load_model_path.split("/")[-1])
else:
START_EPOCH = 0
def write_to_tensorboard(g_loss_g, g_loss_f, d_loss_x, d_loss_y, c_step, writer):
with writer.as_default():
tf.summary.scalar("G_Loss_G", g_loss_g.numpy(), c_step)
tf.summary.scalar("G_Loss_F", g_loss_f.numpy(), c_step)
tf.summary.scalar("D_Loss_X", tf.reduce_mean(d_loss_x).numpy(), c_step)
tf.summary.scalar("D_Loss_Y", tf.reduce_mean(d_loss_y).numpy(), c_step)
if len(physical_devices) > 1:
o_img_a = tf.cast(image_x.values[0], dtype=tf.float32)
o_img_b = tf.cast(image_y.values[0], dtype=tf.float32)
img_a, img_b = o_img_a, o_img_b
else:
img_a = image_x
img_b = image_y
# img_size_a, img_size_b = img_a.shape[1] * img_a.shape[2] * img_a.shape[3], img_b.shape[1] * img_b.shape[2] * \
# img_b.shape[3]
# mean_a, mean_b = tf.reduce_mean(img_a, axis=[1, 2, 3], keepdims=True), tf.reduce_mean(img_b, axis=[1, 2, 3],
# keepdims=True)
# adjusted_std_a = tf.maximum(tf.math.reduce_std(img_a, axis=[1, 2, 3], keepdims=True),
# 1 / tf.sqrt(img_size_a / 1.0))
# adjusted_std_b = tf.maximum(tf.math.reduce_std(img_b, axis=[1, 2, 3], keepdims=True),
# 1 / tf.sqrt(img_size_b / 1.0))
f_image_y = generator_g(img_a, training=True)
f_image_x = generator_f(img_b, training=True)
confidence_a = discriminator_x(f_image_x, training=True)
confidence_b = discriminator_y(f_image_y, training=True)
tf.summary.image("img_a", tf.cast(127.5 * (img_a + 1), dtype=tf.uint8), step=c_step)
tf.summary.image("img_b", tf.cast(127.5 * (img_b + 1), dtype=tf.uint8), step=c_step)
tf.summary.image("fake_img_a", tf.cast((f_image_x + 1) * 127.5, dtype=tf.uint8), step=c_step)
tf.summary.image("fake_img_b", tf.cast((f_image_y + 1) * 127.5, dtype=tf.uint8), step=c_step)
tf.summary.image("confidence_a", confidence_a, step=c_step)
tf.summary.image("confidence_b", confidence_b, step=c_step)
@tf.function
def train_step(real_x, real_y, n_critic=5):
# real_x = tf.image.per_image_standardization(real_x)
# real_y = tf.image.per_image_standardization(real_y)
with tf.GradientTape(persistent=True) as tape:
fake_y = generator_g(real_x, training=True)
cycled_x = generator_f(fake_y, training=True)
fake_x = generator_f(real_y, training=True)
cycled_y = generator_g(fake_x, training=True)
# same_x and same_y are used for identity loss.
same_x = generator_f(real_x, training=True)
same_y = generator_g(real_y, training=True)
disc_real_x = discriminator_x(real_x, training=True)
disc_real_y = discriminator_y(real_y, training=True)
disc_fake_x = discriminator_x(fake_x, training=True)
disc_fake_y = discriminator_y(fake_y, training=True)
# calculate the loss
gen_g_loss = generator_loss(disc_fake_y)
gen_f_loss = generator_loss(disc_fake_x)
total_cycle_loss = calc_cycle_loss(real_x, cycled_x) + calc_cycle_loss(real_y, cycled_y)
# Total generator loss = adversarial loss + cycle loss
total_gen_g_loss = LAMBDA * total_cycle_loss + identity_loss(real_y, same_y) + gen_g_loss
total_gen_f_loss = LAMBDA * total_cycle_loss + identity_loss(real_x, same_x) + gen_f_loss
if gan_mode != "wgan_gp":
disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x)
disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y)
# ------------------- Disc Cycle -------------------- #
if gan_mode == "wgan_gp":
disc_x_loss, disc_y_loss = wgan_disc_apply(fake_x, fake_y, n_critic, real_x, real_y)
# Calculate the gradients for generator and discriminator
generator_g_gradients = tape.gradient(total_gen_g_loss,
generator_g.trainable_variables)
generator_f_gradients = tape.gradient(total_gen_f_loss,
generator_f.trainable_variables)
if gan_mode != "wgan_gp":
discriminator_x_gradients = tape.gradient(disc_x_loss,
discriminator_x.trainable_variables)
discriminator_y_gradients = tape.gradient(disc_y_loss,
discriminator_y.trainable_variables)
# Apply the gradients to the optimizer
generator_g_optimizer.apply_gradients(zip(generator_g_gradients,
generator_g.trainable_variables))
generator_f_optimizer.apply_gradients(zip(generator_f_gradients,
generator_f.trainable_variables))
if gan_mode != "wgan_gp":
discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients,
discriminator_x.trainable_variables))
discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients,
discriminator_y.trainable_variables))
return total_gen_g_loss, total_gen_f_loss, disc_x_loss, disc_y_loss
def wgan_disc_apply(fake_x, fake_y, n_critic, real_x, real_y):
for _ in range(n_critic):
with tf.GradientTape(persistent=True) as disc_tape:
disc_real_x = discriminator_x(real_x, training=True)
disc_real_y = discriminator_y(real_y, training=True)
disc_fake_x = discriminator_x(fake_x, training=True)
disc_fake_y = discriminator_y(fake_y, training=True)
disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x) + 10 * gradient_penalty(real_x, fake_x,
discriminator_x)
disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y) + 10 * gradient_penalty(real_y, fake_y,
discriminator_y)
discriminator_x_gradients = disc_tape.gradient(disc_x_loss,
discriminator_x.trainable_variables)
discriminator_y_gradients = disc_tape.gradient(disc_y_loss,
discriminator_y.trainable_variables)
discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients,
discriminator_x.trainable_variables))
discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients,
discriminator_y.trainable_variables))
return disc_x_loss, disc_y_loss
@tf.function
def distributed_train_step(dist_inputs_a, dist_inputs_b):
per_replica_gen_g_losses, per_replica_gen_f_losses, per_replica_disc_x_losses, per_replica_disc_y_losses = \
mirrored_strategy.run(train_step, args=(dist_inputs_a, dist_inputs_b))
reduced_gen_g_loss, reduced_gen_f_loss = mirrored_strategy.reduce(tf.distribute.ReduceOp.MEAN,
per_replica_gen_g_losses,
axis=None), mirrored_strategy.reduce(
tf.distribute.ReduceOp.MEAN, per_replica_gen_f_losses,
axis=None)
reduced_disc_x_loss, reduced_disc_y_loss = mirrored_strategy.reduce(tf.distribute.ReduceOp.MEAN,
per_replica_disc_x_losses,
axis=None), mirrored_strategy.reduce(
tf.distribute.ReduceOp.MEAN, per_replica_disc_y_losses,
axis=None)
return reduced_gen_g_loss, reduced_gen_f_loss, reduced_disc_x_loss, reduced_disc_y_loss
train_writer = tf.summary.create_file_writer(os.path.join(args.logdir, logdir))
def save_models():
K.models.save_model(generator_g, os.path.join(save_dir, MODEL, str(epoch + 1), "gen_g"))
K.models.save_model(generator_f, os.path.join(save_dir, MODEL, str(epoch + 1), "gen_f"))
K.models.save_model(discriminator_x, os.path.join(save_dir, MODEL, str(epoch + 1), "disc_x"))
K.models.save_model(discriminator_y, os.path.join(save_dir, MODEL, str(epoch + 1), "disc_y"))
print("Model at Epoch {}, saved at {}".format(epoch, os.path.join(save_dir, MODEL, str(epoch))))
for epoch in range(START_EPOCH, EPOCHS):
print("\n ----------- Epoch {} --------------\n".format(epoch + 1))
n = 0
with train_writer.as_default():
tf.summary.scalar("Learning Rate", lrs(epoch).numpy(),
epoch) if LEARNING_RATE_SCHEDULER != "constant" else tf.summary.scalar("Learning Rate", lrs,
epoch)
for image_x, image_y in zip(train_A, train_B):
gen_g_loss, gen_f_loss, disc_x_loss, disc_y_loss = distributed_train_step(image_x, image_y)
print(
"Epoch {} \t Gen_G_Loss: {}, Gen_F_Loss: {}, Disc_X_Loss: {}, Disc_Y_Loss: {}".format(epoch + 1, gen_g_loss,
gen_f_loss,
disc_x_loss,
disc_y_loss))
n += 1
if n % 20 == 0:
write_to_tensorboard(gen_g_loss, gen_f_loss, disc_x_loss, disc_y_loss,
(epoch * total_samples // BATCH_SIZE) + n, train_writer)
if (epoch + 1) % save_interval == 0:
save_models()
| AhmedBadar512/Badr_AI_Repo | cycle_gan_train.py | cycle_gan_train.py | py | 18,298 | python | en | code | 2 | github-code | 6 |
43005467228 |
"""
Utility functions
"""
import torch
import matplotlib as mpl
import numpy as np
import math
mpl.use('Agg')
from matplotlib import pyplot as plt
def sin_data(n_train, n_test, noise_std, sort=False):
"""Create 1D sine function regression dataset
:n_train: Number of training samples.
:n_test: Number of testing samples.
    :noise_std: Standard deviation of observation noise.
:returns: x_train, y_train, x_test, y_test
"""
def ground_truth(x):
return torch.sin(math.pi * x)
xn = torch.rand(n_train, 1) * 2 - 1 # Uniformly random in [-1, 1]
yn = ground_truth(xn) + noise_std * torch.randn(n_train, 1)
if sort:
indices = torch.argsort(xn, axis=0)
xn = xn[indices.squeeze()]
yn = yn[indices.squeeze()]
xt = torch.linspace(-1.1, 1.1, n_test).view(-1, 1)
yt = ground_truth(xt) + noise_std * torch.randn(n_test, 1)
return xn, yn, xt, yt
def plot_lengthscale(xt, lengthscale, uncertainty, name=None):
"""
Visualize lengthscale function and its corresponding uncertainty.
:lengthscale: Lengthscale mean.
:uncertainty: Standard deviation of lengthscale prediction.
"""
lengthscale = lengthscale.numpy().ravel()
uncertainty = uncertainty.numpy().ravel()
lower = lengthscale - 2.0 * uncertainty
upper = lengthscale + 2.0 * uncertainty
xt = xt.numpy().ravel()
fig, ax = plt.subplots()
ax.plot(xt, lengthscale, 'b', lw=2, alpha=0.8, label='Lengthscale')
ax.fill_between(xt, lower, upper,
facecolor='b', alpha=0.3, label='95% CI')
ax.set_xlim([xt.min(), xt.max()])
ax.legend(loc='lower left', bbox_to_anchor=(0, 1.02, 1, 0.2), ncol=3,
borderaxespad=0, frameon=False)
if name is not None:
fig.savefig('../results/prediction/' + name + '.svg')
def plot_pytorch(dataset, preds, name=None):
dataset = [tensor.numpy().ravel() for tensor in dataset]
xn, yn, xt, ft = dataset
mean = preds.mean.cpu().numpy().ravel()
lower, upper = preds.confidence_region()
lower = lower.cpu().numpy().ravel()
upper = upper.cpu().numpy().ravel()
fig, ax = plt.subplots()
ax.plot(xn, yn, 'k.', label='Training data')
ax.plot(xt, ft, 'r--', lw=2, alpha=0.8, label='Function')
ax.plot(xt, mean, 'b', lw=2, alpha=0.8, label='Prediction')
ax.fill_between(xt, lower, upper,
facecolor='b', alpha=0.3, label='95% CI')
ax.set_xlim([xt.min(), xt.max()])
ax.legend(loc='lower left', bbox_to_anchor=(0, 1.02, 1, 0.2), ncol=3,
borderaxespad=0, frameon=False)
if name is not None:
fig.savefig('../results/prediction/' + name + '.svg')
def plot_1d_results(dataset, mean, std, title=None, name=None):
"""
Visualize training data, ground-truth function, and prediction.
:dataset: A tuple containing (Xn, Yn, Xt, Ft)
:mean: Mean of predictive Gaussian distribution.
:std: Standard deviation of predictive Gaussian distribution.
"""
dataset = [tensor.cpu().numpy().ravel() for tensor in dataset]
xn, yn, xt, ft = dataset
mean, std = mean.cpu().numpy().ravel(), std.cpu().numpy().ravel()
lower = mean - 2.0 * std
upper = mean + 2.0 * std
fig, ax = plt.subplots()
ax.plot(xn, yn, 'k.', label='Training data')
ax.plot(xt, ft, 'r.', lw=2, alpha=0.8, label='Test data')
ax.plot(xt, mean, 'b', lw=2, alpha=0.8, label='Prediction')
ax.fill_between(xt, lower, upper,
facecolor='b', alpha=0.3, label='95% CI')
ax.set_xlim([xt.min(), xt.max()])
ax.legend(loc='lower left', bbox_to_anchor=(0, 1.02, 1, 0.2), ncol=3,
borderaxespad=0, frameon=False)
if title is not None:
ax.set_title(title, loc='center')
if name is not None:
fig.savefig(name + '.pdf')
def train(model, optimizer, n_iter, verbose=True, name=None, Xn=None, yn=None, tol=None):
"""
Training helper function.
"""
n_train = Xn.size(0) if Xn is not None else model.Xn.size(0)
losses = []
for i in range(n_iter):
optimizer.zero_grad()
if Xn is None and yn is None:
loss = model.loss()
else:
loss = model.loss(Xn, yn)
loss.backward()
optimizer.step()
losses.append(loss.item())
if tol is not None:
# if the result is stable for over 50 iteration, then we consider it converges.
n = 50
if len(losses) > n:
last = losses[-n:]
if max(last) - min(last) <= tol:
if verbose:
print("Converges at iteration: ", i)
break
if verbose:
print('Iteration: {0:04d} Loss: {1: .6f}'.format(i, loss.item() / n_train))
if name is not None:
plt.figure()
plt.plot(losses, lw=2)
plt.ylabel('Loss')
plt.xlabel('Number of iteration')
plt.savefig(name + '.svg')
| weiyadi/dlm_sgp | conjugate/utils.py | utils.py | py | 4,966 | python | en | code | 2 | github-code | 6 |
24603935810 |
with open("inputs/day14.txt", 'r') as fh:
lines = fh.readlines()
schedules = {}
for line in lines:
parts = line.split()
name = parts[0]
speed = int(parts[3])
duration = int(parts[6])
rest = int(parts[13])
schedule = []
while len(schedule) < 2503:
schedule += [speed, ] * duration
schedule += [0, ] * rest
schedules[name] = schedule
for reindeer in schedules:
print("{} flew {} in 2503 seconds".format(reindeer, sum(schedules[reindeer][:2503])))
scores = {}
for reindeer in schedules:
scores.update({reindeer: {"dist": 0, "score": 0}})
for sec in range(2503):
for reindeer in scores:
scores[reindeer]["dist"] += schedules[reindeer][sec]
furthest = 0
winning = []
for reindeer in scores:
if scores[reindeer]["dist"] == furthest:
winning.append(reindeer)
elif scores[reindeer]["dist"] > furthest:
furthest = scores[reindeer]["dist"]
winning = [reindeer, ]
for reindeer in winning:
scores[reindeer]["score"] += 1
from pprint import pprint as pp
pp(scores)
| neilo40/adventofcode2015 | day14.py | day14.py | py | 1,111 | python | en | code | 0 | github-code | 6 |
20186345178 |
# Import packages
import torch
import torch.nn as nn
import gym
import os
import torch.nn.functional as F
import torch.multiprocessing as mp
import numpy as np
# Import python files
from utils import v_wrap, set_init, push_and_pull, record
from shared_adam import SharedAdam
os.environ["OMP_NUM_THREADS"] = "1"
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Setting hyperparameters
UPDATE_GLOBAL_ITER = 10 #
GAMMA = 0.99
MAX_EP = 500
hidden_dim_pi = 16
hidden_dim_v = 16
env = gym.make('CartPole-v0')
N_S = env.observation_space.shape[0]
N_A = env.action_space.n
# Define basic neural network(It will be same for each worker)
class Net(nn.Module):
def __init__(self, s_dim, a_dim):
super(Net, self).__init__()
self.s_dim = s_dim # 4
self.a_dim = a_dim # 2
self.pi1 = nn.Linear(s_dim, hidden_dim_pi) # (N, 4) -> (N, hidden_dim_pi)
self.pi2 = nn.Linear(hidden_dim_pi, a_dim) # (N, hidden_dim_pi) -> (N, 2)
self.v1 = nn.Linear(s_dim, hidden_dim_v) # (N, 4) -> (N, hidden_dim_v)
self.v2 = nn.Linear(hidden_dim_v, 1) # (N, hidden_dim_v) -> (N, 1)
set_init([self.pi1, self.pi2, self.v1, self.v2])
self.distribution = torch.distributions.Categorical # It means that [a, b, c, ...] -> 0:a, 1:b, 2:c, ...
# forward returns output of model
# Return : softmax^(-1)(probability) and V(s) (Note. During using crossentropy loss in pytorch, network must not contain softmax layer)
def forward(self, x):
pi1 = torch.tanh(self.pi1(x))
logits = self.pi2(pi1)
v1 = torch.tanh(self.v1(x))
values = self.v2(v1)
return logits, values
# choose_action returns action from state s
# Return : action
def choose_action(self, s):
self.eval()
logits, _ = self.forward(s)
prob = F.softmax(logits, dim=1).data # We need to change to probability
m = self.distribution(prob) # take actions by given probability
return m.sample().numpy()[0]
# evaluate loss function
# v_t : r+gamma*v_(t+1)
def loss_func(self, s, a, v_t):
self.train()
logits, values = self.forward(s)
td = v_t - values
c_loss = td.pow(2)
probs = F.softmax(logits, dim=1)
m = self.distribution(probs)
exp_v = m.log_prob(a) * td.detach().squeeze()
a_loss = -exp_v
total_loss = (c_loss + a_loss).mean()
return total_loss
class Worker(mp.Process):
def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, name):
super(Worker, self).__init__()
self.name = 'w%02i' % name
self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
self.gnet, self.opt = gnet, opt
self.lnet = Net(N_S, N_A) # local network
self.env = gym.make('CartPole-v0').unwrapped
def run(self):
total_step = 1
while self.g_ep.value < MAX_EP:
s = self.env.reset()
buffer_s, buffer_a, buffer_r = [], [], []
ep_r = 0.
            while True:  # current time step t
if self.name == 'w00':
self.env.render()
a = self.lnet.choose_action(v_wrap(s[None, :]))
s_, r, done, _ = self.env.step(a)
if done: r = -1
ep_r += r
buffer_a.append(a)
buffer_s.append(s)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
# sync
push_and_pull(self.opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA)
buffer_s, buffer_a, buffer_r = [], [], []
if done: # done and print information
ep_r = min(ep_r, 200)
record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.name)
break
s = s_
total_step += 1
self.res_queue.put(None)
if __name__ == "__main__":
gnet = Net(N_S, N_A) # global network
gnet.share_memory() # share the global parameters in multiprocessing
opt = SharedAdam(gnet.parameters(), lr=5e-4, betas=(0.92, 0.999)) # global optimizer
global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()
# parallel training
workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())]
[w.start() for w in workers]
res = [] # record episode reward to plot
while True:
r = res_queue.get()
if r is not None:
res.append(r)
else:
break
[w.join() for w in workers]
import matplotlib.pyplot as plt
res = np.array(res)
np.save("discrete_result.npy", res)
plt.plot(res)
plt.ylabel('ep reward')
plt.xlabel('Episode')
plt.show()
| smfelixchoi/MATH-DRL-study | 6.A3C/discrete_A3C.py | discrete_A3C.py | py | 4,973 | python | en | code | 1 | github-code | 6 |
3129533999 |
import numpy as np
data = [[]]
with open("data.txt","r") as fichier:
for line in fichier.read().splitlines():
if line:
data[-1].append(line)
else:
data.append([])
nb_stacks = int(data[0][-1][-2])
stacks = [[]]
pile_max = len(data[0])-1
for i in range(nb_stacks):
stacks.append([])
for j in range(pile_max):
lettre = data[0][pile_max-j-1][4*i+1]
if lettre!=" ":
stacks[i].append([])
stacks[i][j] = lettre
stacks.pop(-1)
def deplacer(tas,nombre,pile1,pile2):
taille1 = len(tas[pile1])
taille2 = len(tas[pile2])
for i in range(nombre):
tas[pile2].append([])
tas[pile2][taille2+i] = tas[pile1][taille1-i-1]
tas[pile1].pop(-1)
return tas
def deplacer2(tas,nombre,pile1,pile2):
taille1 = len(tas[pile1])
taille2 = len(tas[pile2])
for i in range(nombre):
tas[pile2].append([])
tas[pile2][taille2+i] = tas[pile1][taille1-nombre+i]
for i in range(nombre):
tas[pile1].pop(-1)
return tas
for ligne in data[1]:
instruction = ligne.split(" ")
deplacer2(stacks,int(instruction[1]),int(instruction[3])-1,int(instruction[5])-1)
dessus = ""
for ligne in stacks:
dessus += ligne[-1]
print(dessus)
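# Small illustration of the two move routines (assumed example, not part of the
# original solution): moving 2 crates from pile 0 to pile 1.
#   deplacer([['A', 'B', 'C'], ['D']], 2, 0, 1)  -> [['A'], ['D', 'C', 'B']]   (one crate at a time, order reversed)
#   deplacer2([['A', 'B', 'C'], ['D']], 2, 0, 1) -> [['A'], ['D', 'B', 'C']]   (whole slice at once, order kept)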
| Schtroumpfissime/AdventOfCode2022 | 5/main.py | main.py | py | 1,276 | python | en | code | 0 | github-code | 6 |
42742926031 |
from models.models import VatsimPilot
from data_reader import reader
def main():
print("VATSIM LIB")
json_data = reader.init()
vgs = reader.get_vatsim_general(json_data)
# print(vgs)
pilots = reader.get_vatsim_pilots(json_data)
print(f"number of pilots this update: {len(pilots)}")
flight_plans = reader.get_flight_plans(pilots)
print(f"number of flight plans this update: {len(flight_plans)}")
if __name__ == "__main__":
main()
| ahuimanu/vatsimlib | run.py | run.py | py | 472 | python | en | code | 1 | github-code | 6 |
2654980341 |
from Tkinter import *
root = Tk()
frame = Frame(root, bd=2, relief=SUNKEN)
frame.grid_rowconfigure(0, weight=1)
frame.grid_columnconfigure(0, weight=1)
xscrollbar = Scrollbar(frame, orient=HORIZONTAL)
xscrollbar.grid(row=1, column=0, sticky=E+W)
yscrollbar = Scrollbar(frame)
yscrollbar.grid(row=0, column=1, sticky=N+S)
canvas = Canvas(frame, bd=0,
xscrollcommand=xscrollbar.set,
yscrollcommand=yscrollbar.set)
canvas.grid(row=0, column=0, sticky=N+S+E+W)
xscrollbar.config(command=canvas.xview)
yscrollbar.config(command=canvas.yview)
frame.pack()
mainloop()
| sbobovyc/DCS | legacy/DCS2_py/examples/canvas_scrollbox.py | canvas_scrollbox.py | py | 600 | python | en | code | 1 | github-code | 6 |
71470012027 |
from lxml import etree
from xml.etree import ElementTree
def get_text_from_file(xml_file):
tree = etree.parse(xml_file)
root = tree.getroot()
for element in root.iterfind('.//para'):
for ele in element.findall('.//display'):
parent = ele.getparent()
parent.remove(ele)
ElementTree.dump(element)
| ayandeephazra/Natural_Language_Processing_Research | PaperDownload/papers/process_xml.py | process_xml.py | py | 349 | python | en | code | 2 | github-code | 6 |
13461686812 |
"""
Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces ' ' when necessary so that each line has exactly L characters.
Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted between words.
For example,
words: ["This", "is", "an", "example", "of", "text", "justification."]
L: 16.
Return the formatted lines as:
[
"This is an",
"example of text",
"justification. "
]
Note: Each word is guaranteed not to exceed L in length.
click to show corner cases.
Corner Cases:
A line other than the last line might contain only one word. What should you do in this case?
In this case, that line should be left-justified.
"""
class Solution(object):
def fullJustify(self, words, maxWidth):
"""
:type words: List[str]
:type maxWidth: int
:rtype: List[str]
"""
if len(words) == 0:
return []
if maxWidth == 0:
return [v for v in words if len(v) == 0]
pos = 0
ans = []
curr_words = []
while True:
while sum(map(len, curr_words)) + len(curr_words) + len(words[pos]) <= maxWidth:
curr_words.append(words[pos])
pos += 1
if pos >= len(words):
last_line = ' '.join(curr_words)
last_line += (maxWidth - len(last_line)) * " "
ans.append(last_line)
return ans
words_length = sum(map(len, curr_words))
if len(curr_words) == 1:
ans.append(curr_words[0] + (maxWidth - words_length) * " ")
else:
join_spaces = (maxWidth - words_length) // (len(curr_words) - 1)
extra_spaces = (maxWidth - words_length) % (len(curr_words) - 1)
for i in range(extra_spaces):
curr_words[i] += " "
ans.append((' ' * join_spaces).join(curr_words))
curr_words = []
# better way: ' '.join words, then chop off each group of words based on the indices of the spaces
# for each chunk, split, do same thing as above to calculate the spaces to join with
# append to ans, end when len(joined_string) <= 16
ans = Solution()
print(ans.fullJustify([""], 16))
print(ans.fullJustify([""], 0))
print(ans.fullJustify(["this", "is"], 0))
print(ans.fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16))
print(ans.fullJustify(["This", "is", "an", "example", "of", "text", "justification.", "exhaustively"], 16))
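# Worked example of the spacing arithmetic above (values taken from the problem statement):
# the first line greedily packs ["This", "is", "an"] for maxWidth = 16:
#   words_length = 4 + 2 + 2 = 8, gaps = len(curr_words) - 1 = 2
#   join_spaces  = (16 - 8) // 2 = 4, extra_spaces = (16 - 8) % 2 = 0
#   -> "This    is    an"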
| szhongren/leetcode | 68/main.py | main.py | py | 3,017 | python | en | code | 0 | github-code | 6 |
23704854533 |
#!/usr/bin/env python3
import json
import os
import requests
import datetime
base_url="https://raw.githubusercontent.com/threatstop/crl-ocsp-whitelist/master/"
uri_list=['crl-hostnames.txt','crl-ipv4.txt','crl-ipv6.txt','ocsp-hostnames.txt','ocsp-ipv4.txt','ocsp-ipv6.txt']
dict=dict()
dict['list']=list()
def source_read_and_add(input_file):
output_list=list()
for item in input_file:
item=item.rstrip()
output_list.append(item)
return output_list
for uri in uri_list:
url = base_url + uri
r=requests.get(url)
dict['list'] += source_read_and_add(r.text)
dict['type'] = "string"
dict['matching_attributes']=["hostname","domain","ip-dst","ip-src","url", "domain|ip"]
dict['name']="CRL Warninglist"
dict['version']= int(datetime.date.today().strftime('%Y%m%d'))
dict['description']="CRL Warninglist from threatstop (https://github.com/threatstop/crl-ocsp-whitelist/)"
dict['list']=list(set(dict['list']))
print(json.dumps(dict))
| 007Alice/misp-warninglists | tools/generate-crl-ip-list.py | generate-crl-ip-list.py | py | 943 | python | en | code | null | github-code | 6 |
16704497954 |
import pickle
import numpy as np
import scipy.io as sio
from library.error_handler import Error_Handler
class Data_Loader:
def load_data_from_pkl(self, filepath_x, filepath_y, ordering="True"):
with open(filepath_x, "rb") as file_x:
x_data = pickle.load(file_x)
with open(filepath_y, "rb") as file_y:
y_data = pickle.load(file_y)
x_data = np.asarray(x_data)
y_data = np.asarray(y_data)
if np.min(y_data) > 0:
y_data = y_data - np.min(y_data)
reordered_data = Data_Loader.__reorder(x_data, ordering)
return reordered_data, y_data
def load_data_from_npy(self, filepath_x, filepath_y, ordering="True"):
x_data = np.load(filepath_x)
y_data = np.load(filepath_y)
x_data = np.asarray(x_data)
y_data = np.asarray(y_data)
if np.min(y_data) > 0:
y_data = y_data - np.min(y_data)
reordered_data = Data_Loader.__reorder(x_data, ordering)
return reordered_data, y_data
def load_data_from_mat(self, filepath, x_key, y_key, ordering):
mat_dict = sio.loadmat(filepath)
x_data = mat_dict[x_key]
y_data = mat_dict[y_key]
x_data = np.asarray(x_data)
y_data = np.asarray(y_data)
if np.min(y_data) > 0:
y_data = y_data - np.min(y_data)
reordered_data = Data_Loader.__reorder(x_data, ordering)
return reordered_data, y_data
def __reorder(x, ordering):
if ordering == "SWHC":
return x
elif ordering == "CWHS":
x = np.swapaxes(x, 3, 0)
return x
elif ordering == "WHCS":
x = np.rollaxis(x, 2, 0)
x = np.swapaxes(x, 0, 3)
return x
elif ordering == "WHSC":
x = np.rollaxis(x, 3, 0)
x = np.swapaxes(x, 0, 3)
return x
elif ordering == "SCWH":
x = np.rollaxis(x, 1, 4)
return x
elif ordering == "CSWH":
x = np.swapaxes(x, 0, 1)
x = np.rollaxis(x, 1, 4)
return x
else:
Error_Handler.error_in_data_ordering()
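# Hypothetical shape check (illustrative sketch, not part of the original file):
# the "CWHS" branch should turn a (channels, width, height, samples) array into
# the "SWHC" layout (samples, width, height, channels).
if __name__ == "__main__":
    _demo = np.zeros((3, 32, 64, 10))  # C=3, W=32, H=64, S=10
    assert np.swapaxes(_demo, 3, 0).shape == (10, 32, 64, 3)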
| tzee/EKDAA-Release | library/data_loader.py | data_loader.py | py | 2,380 | python | en | code | 2 | github-code | 6 |
70602414269 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 11:06:45 2020
@author: xchen
"""
## required packages
# system imports
import os
import sys
from termcolor import colored
from colorama import init
# data manipulation and data clean
from nltk.corpus import stopwords
# sklearn
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB
# self-defined
import pipeline
# default data path
DATA_PATH = '../data'
GLOVE_PATH = '../glove.6B'
# default parameters
stop_words = stopwords.words('english')
stop_words = stop_words + ['would','could','may','also', 'one', 'two', 'three',
'first', 'second' ,'third',
'someone', 'anyone', 'something', 'anything',
'subject', 'organization', 'lines',
'article', 'writes', 'wrote']
tokenize_regex1 = r"\w+|\$[\d\.]+"
tokenize_regex2 = r"[a-zA-Z_]+"
def main_test(path):
dir_path = path or DATA_PATH
TRAIN_DIR = os.path.join(dir_path, "train")
TEST_DIR = os.path.join(dir_path, "test")
# load data
print (colored('Loading files into memory', 'green', attrs=['bold']))
train_path_list, ylabel_train = pipeline.parse_files(TRAIN_DIR)
test_path_list, ylabel_test = pipeline.parse_files(TEST_DIR)
train_documents = [pipeline.load_document(path = path, label = y) for \
path, y in zip(train_path_list, ylabel_train)]
test_documents = [pipeline.load_document(path = path, label = y) for \
path, y in zip(test_path_list, ylabel_test)]
# clean all documents
print (colored('Cleaning all files', 'green', attrs=['bold']))
pipeline.clean_all_documents(train_documents,
word_split_regex = tokenize_regex1,
stop_words = stop_words,
contraction_dict = 'default')
pipeline.clean_all_documents(test_documents,
word_split_regex = tokenize_regex1,
stop_words = stop_words,
contraction_dict = 'default')
# encode labels
print (colored('Encoding labels', 'green', attrs=['bold']))
y_train, y_test, category = pipeline.label_encoder(ylabel_train, ylabel_test, 'ordinal')
## *************************** machine learning ***************************
# calculate the BOW representation
print (colored('Calculating BOW', 'green', attrs=['bold']))
X_train_bow = pipeline.BagOfWord.fit_transform(train_documents)
X_test_bow = pipeline.BagOfWord.transform(test_documents)
print ("The shape of X after processing is: \ntrain: %s, test: %s"%(X_train_bow.shape, X_test_bow.shape))
# calculate the tf-idf representation
print (colored('Calculating Tf-idf', 'green', attrs=['bold']))
X_train_tfidf = pipeline.Tfidf.fit_transform(train_documents)
X_test_tfidf = pipeline.Tfidf.transform(test_documents)
print ("The shape of X after processing is: \ntrain: %s, test: %s"%(X_train_tfidf.shape, X_test_tfidf.shape))
# scale
scaler = preprocessing.Normalizer()
X_train_scaled = scaler.fit_transform(X_train_bow)
X_test_scaled = scaler.transform(X_test_bow)
## models
# naive bayes
clf_nb = MultinomialNB()
# logistic regression
clr_lr = LogisticRegression(penalty='l2', C=12, solver='lbfgs', max_iter=500, random_state=42)
# svm
clf_svm = SGDClassifier(penalty = 'l2',alpha = 5e-5, random_state=42)
# model selection
print (colored('Selecting model using 10-fold cross validation', 'magenta', attrs=['bold']))
clf_list = [clf_nb, clr_lr, clf_svm]
clf_optimal, clf_f1 = pipeline.model_selection(X_train_tfidf, y_train, clf_list, cv=5, scoring='f1_macro')
# test the optimal classifier with train-test-split
print (colored('Testing the optimal classifier with train-test split', 'magenta', attrs=['bold']))
f1 = pipeline.test_classifier(X_train_tfidf, y_train, clf_optimal, test_size=0.2, y_names=category, confusion=True)
print('Train score (macro f1):%.4f, test score (macro f1):%.4f'%(f1[1],f1[0]))
# predict test set
print (colored('Predicting test dataset', 'magenta', attrs=['bold']))
y_pred_ml = pipeline.model_prediction(clf_optimal, X_train_tfidf, y_train, X_test_tfidf)
pipeline.model_report(y_test, y_pred_ml, y_names=category, confusion=True)
def main():
init()
# get the dataset
print (colored("Where is the dataset?", 'cyan', attrs=['bold']))
print (colored('Press return with default path', 'yellow'))
ans = sys.stdin.readline()
# remove any newlines or spaces at the end of the input
path = ans.strip('\n')
if path.endswith(' '):
path = path.rstrip(' ')
print ('\n\n')
# do the main test
main_test(path)
if __name__ == '__main__':
main()
| linnvel/text-classifier-master | ML.py | ML.py | py | 5,072 | python | en | code | 0 | github-code | 6 |
21379925078 |
from bogos import ScrapeBogos
import configparser
import twitter
def lambda_handler(event, context):
config = configparser.ConfigParser()
config.read('config.ini');
keywords = ''
keywordMultiWord = False
url = ''
prefixText = ''
postfixText = ''
noBogoText = ''
print('Config values:')
if 'BOGO' not in config:
print("No BOGO config found")
return
else:
bogoConfig = config['BOGO']
if 'keywords' not in bogoConfig or 'url' not in bogoConfig:
            print("'keywords' or 'url' was not provided in the config")
return
else:
keywords = bogoConfig['keywords'].split(',')
print('keywords: ' + str(keywords))
url = bogoConfig['url']
print('url: ' + url)
if 'keywordMultiWord' in bogoConfig:
keywordMultiWord = bogoConfig['keywordMultiWord'].lower() == 'true'
print('keywordMultiWord: ' + str(keywordMultiWord))
if 'prefixText' in bogoConfig:
prefixText = bogoConfig['prefixText']
print('prefixText: ' + prefixText)
if 'postfixText' in bogoConfig:
postfixText = bogoConfig['postfixText']
print('postfixText: ' + postfixText)
if 'noBogoText' in bogoConfig:
noBogoText = bogoConfig['noBogoText']
print('noBogoText: ' + noBogoText)
consumer_key = ''
consumer_secret = ''
access_token_key = ''
access_token_secret = ''
if 'TwitterApi' in config:
twitterConfig = config['TwitterApi']
consumer_key = twitterConfig['consumer_key']
consumer_secret = twitterConfig['consumer_secret']
access_token_key = twitterConfig['access_token_key']
access_token_secret = twitterConfig['access_token_secret']
print('End of config values')
print('====================\n')
bogos = ScrapeBogos(url, keywords, keywordMultiWord, prefixText, postfixText)
bogos.initialize()
tweetBogo(bogos.getItemsFound(), noBogoText, consumer_key, consumer_secret, access_token_key, access_token_secret)
def tweetBogo(itemsFound, noBogoText, consumer_key, consumer_secret, access_token_key, access_token_secret):
twitterApi = None
if consumer_key and consumer_secret and access_token_key and access_token_secret:
twitterApi = twitter.Api(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=access_token_key,
access_token_secret=access_token_secret)
if itemsFound:
for item in itemsFound:
print(item);
if twitterApi:
print('posting to twitter: ' + item)
twitterApi.PostUpdate(item)
elif noBogoText:
print(noBogoText);
if twitterApi:
print('posting to twitter: ' + noBogoText)
twitterApi.PostUpdate(noBogoText)
else:
print("nothing found");
| DFieldFL/publix-bogo-notification | BogoMain.py | BogoMain.py | py | 2,732 | python | en | code | 2 | github-code | 6 |
41236509775 |
from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
urlpatterns = [
]
router = DefaultRouter()
router.register("porcelain", viewset=views.PorcelainView, basename="porcelain")
router.register("dynasty", viewset=views.DynastyView, basename="dynasty")
router.register("EmperorYear", viewset=views.EmperorYearView, basename="EmperorYear")
urlpatterns += router.urls
| beishangongzi/porcelain-backend | predict_model/urls.py | urls.py | py | 412 | python | en | code | 0 | github-code | 6 |
36064906771 |
'''
Created on 2017-1-13
@author: xuls
'''
from PIL import Image
import os
PATH2=os.path.dirname(os.getcwd())
def classfiy_histogram(image1,image2,size = (256,256)):
image1 = image1.resize(size).convert("RGB")
g = image1.histogram()
image2 = image2.resize(size).convert("RGB")
s = image2.histogram()
assert len(g) == len(s),"error"
data = []
for index in range(0,len(g)):
if g[index] != s[index]:
data.append(1 - abs(g[index] - s[index])/max(g[index],s[index]) )
else:
data.append(1)
print(sum(data)/len(g))
def compare(image):
image1 = Image.open(PATH2+"\\aw\\image\\expected\\"+image+".png")
image2 = Image.open(PATH2+"\\aw\\image\\actual\\"+image+".png")
print(image+"-differ:")
classfiy_histogram(image1,image2,size = (256,256))
if __name__ == "__main__":
'''Search'''
compare("image01")
compare("image02")
compare("image03")
compare("image04")
compare("image05")
compare("image06")
compare("image07")
compare("image08")
#
'''BusinessChance'''
compare("image11")
compare("image12")
compare("image13")
compare("image14")
'''CarContrast'''
compare("image21")
compare("image22")
'''FriendsToHelp'''
compare("image31")
compare("image32")
compare("image33")
compare("image34")
compare("image35")
compare("image36")
compare("image37")
compare("image38")
compare("image39")
compare("image3a")
compare("image3b")
compare("image3c")
compare("image3d")
compare("image3e")
'''SendTopic'''
compare("image41")
compare("image42")
compare("image43")
compare("image44")
compare("image45")
compare("image46")
compare("image47")
compare("image48")
| xulishuang/qichebaojiadaquan | src/script/sameas.py | sameas.py | py | 1,917 | python | en | code | 0 | github-code | 6 |
162022841 |
import time
from selenium import webdriver
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from .pages.login import LoginPage
class ManageUserTestCase(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(20)
self.browser.maximize_window()
def tearDown(self):
if self.browser:
self.browser = None
def test_user_interaction_on_manage_user_page(self):
temp_page = LoginPage(self.browser)
temp_page.first_visit('{}/account/login/'.format(self.live_server_url), 'email')
temp_page = temp_page.login_user('[email protected]', 'admin')
temp_page.first_visit(self.live_server_url)
self.page = temp_page.visit_manage_user()
self.assertIn('Manage Users', self.page.get_body_content())
user_info = self.page.add_new_user()
self.page.wait_for_element_with_class_name('stickit_name')
tbody = self.browser.find_element_by_id('tbody')
tbody_text = tbody.text.lower()
email = user_info.get('email').lower()
name = user_info.get('name').lower()
phone = user_info.get('phone').lower()
department = user_info.get('department').lower()
self.assertIn(email, tbody_text)
self.assertIn(name, tbody_text)
self.assertIn(phone, tbody_text)
self.assertIn(department, tbody_text)
edited_user_info = self.page.edit_user()
self.page.wait_for_element_with_class_name('stickit_name')
tbody = self.browser.find_element_by_id('tbody')
tbody_text = tbody.text.lower()
self.assertNotIn(name, tbody_text)
self.assertNotIn(phone, tbody_text)
self.assertNotIn(department, tbody_text)
self.assertIn(edited_user_info.get('name').lower(), tbody_text)
self.assertIn(edited_user_info.get('phone').lower(), tbody_text)
self.assertIn(edited_user_info.get('department').lower(), tbody_text)
self.page.delete_user()
time.sleep(3)
tbody = self.browser.find_element_by_id('tbody')
tbody_text = tbody.text.lower()
self.assertNotIn(edited_user_info.get('name').lower(), tbody_text)
self.assertNotIn(edited_user_info.get('phone').lower(), tbody_text)
self.assertNotIn(edited_user_info.get('department').lower(), tbody_text)
self.assertNotIn(email, tbody_text)
| pophils/TaskManagement | yasanaproject/tests/functional/test_manage_user.py | test_manage_user.py | py | 2,447 | python | en | code | 1 | github-code | 6 |
34838799506 |
import csv
import pandas as pd
cerealFile = open('cereal.csv')
cerealReader = csv.reader(cerealFile)
cerealList = list(cerealReader)
df = pd.read_csv('cereal.csv')
for row in cerealList:
print(row[0])
#print(df.info())
print(df['calories'].dtypes)
| kamiltrzcinski/python | zad7.py | zad7.py | py | 257 | python | en | code | 0 | github-code | 6 |
1175929683 |
#!/usr/bin/env python3
"""
T9 Spelling problem
for Google Code Jam Africa 2010
Qualification
Link to problem description:
http://code.google.com/codejam/contest/351101/dashboard#s=p2
author:
Chris Nitsas
(nitsas)
language:
Python 3.2.1
date:
April, 2012
usage:
$ python3 runme.py sample.in
or
$ runme.py sample.in
(where sample.in is the input file and $ the prompt)
"""
import sys
# non-standard modules:
from helpful import read_int
class T9Translator:
"""
Translates strings of lowercase characters a-z and space
characters to T9 strings.
"""
def __init__(self, characters=None, t9_phrases=None):
if characters is not None:
self.characters = characters
else:
self.characters = "abcdefghijklmnopqrstuvwxyz "
if t9_phrases is not None:
self.t9_phrases = t9_phrases
else:
self.t9_phrases = ["2", "22", "222", "3", "33", "333", "4", "44",
"444", "5", "55", "555", "6", "66", "666", "7",
"77", "777", "7777", "8", "88", "888", "9",
"99", "999", "9999", "0"]
self.character_to_t9 = dict(zip(self.characters, self.t9_phrases))
def toT9(self, string):
result = self.character_to_t9[string[0]]
for letter in string[1:]:
# check if we're going to have to press the same key again
if result[-1] != self.character_to_t9[letter][0]:
# add the new letter's t9 translation to result
result += self.character_to_t9[letter]
else:
# we must pause (insert a space) before the next keypress
result += " " + self.character_to_t9[letter]
return result
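# Usage sketch for T9Translator (illustrative, not part of the original solution):
# consecutive letters on the same key need a pause, encoded as a space.
#   >>> T9Translator().toT9("hi")
#   '44 444'    # 'h' and 'i' both live on key 4
#   >>> T9Translator().toT9("no")
#   '66 666'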
def main(filename=None):
if filename is None:
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
print("Usage: runme.py input_file")
return 1
with open(filename, "r") as f:
num_cases = read_int(f)
translator = T9Translator()
for i, line in enumerate(f, 1):
print("Case #" + str(i) + ": " + translator.toT9(line.rstrip("\n")))
return 0
if __name__ == "__main__":
status = main()
sys.exit(status)
| nitsas/codejamsolutions | T9 Spelling/runme.py | runme.py | py | 2,291 | python | en | code | 1 | github-code | 6 |
20832937788 |
import re
def open_fasta_file(file_address):
file = open(file_address, 'r')
text = file.read()
file.close()
return text
def record_counter(file_address):
txt = open_fasta_file(file_address)
counter = txt.count('>')
return counter
def dna_dict_creator(file_address):
txt = open_fasta_file(file_address)
dna_list = re.split('>', txt)
dna_list = list(filter(None, dna_list))
list_of_keys, new_dna_list = [], []
totall_dna_in_fasta = {}
for item in dna_list:
dna = (re.split('\n', (item)))
dna = list(filter(None, dna))
list_of_keys.append(dna[0])
dna = dna[1:]
dnastr = ''
dnastr = ''.join(dna)
new_dna_list.append(dnastr)
for i in range(0, len(list_of_keys)):
totall_dna_in_fasta[list_of_keys[i][:30]] = new_dna_list[i]
return totall_dna_in_fasta
def length_calculater(file_address):
dna_dict = dna_dict_creator(file_address)
dict_of_length = {}
for item in dna_dict.keys():
dict_of_length[item] = len(dna_dict[item])
return dict_of_length
def longest_shortest(file_address):
dna_length_dict = length_calculater(file_address)
sorted_dna_length = sorted(dna_length_dict.items(), key=lambda x: x[1])
longest = (sorted_dna_length[-1][0][:30], sorted_dna_length[-1][1])
shortest = (sorted_dna_length[0][0][:30], sorted_dna_length[0][1])
identical_shortest_len, identical_longest_len = [], []
for i in range(0, len(sorted_dna_length)):
longest_len = sorted_dna_length[-1][1]
if sorted_dna_length[-i][1] == longest_len:
identical_longest_len.append(sorted_dna_length[-i][0][:25])
else:
break
for i in range(0, len(sorted_dna_length)):
shortest_len = sorted_dna_length[0][1]
if sorted_dna_length[i][1] == shortest_len:
identical_shortest_len.append(sorted_dna_length[i][0][:25])
else:
break
return shortest, longest, identical_shortest_len, identical_longest_len
def orf_finder(file_address, reading_frames):
fasta_file = dna_dict_creator(file_address)
dnas_keys = fasta_file.keys()
dna_orf = {}
for item in dnas_keys:
dna = fasta_file[item]
start_position = reading_frames - 1
mark = 0
start_index, stop_index, orf = [], [], []
for i in range(start_position, len(dna), 3):
if dna[i:i + 3] == 'ATG':
start_index.append(i)
for i in range(start_position, len(dna), 3):
if dna[i:i + 3] in ["TAA", "TGA", "TAG"]:
stop_index.append(i)
for i in range(0, len(start_index)):
for j in range(0, len(stop_index)):
if start_index[i] < stop_index[j] and start_index[i] > mark:
orf.append(dna[start_index[i]:stop_index[j] + 3])
mark = stop_index[j] + 3
break
dna_orf[item] = orf
return dna_orf
def longest_orf_length(file_address, reading_frames):
all_orfs = orf_finder(file_address, reading_frames)
orf_keys = all_orfs.keys()
orf_lengths = {}
for identifier in orf_keys:
orf = all_orfs[identifier]
length = []
for item in orf:
length.append(len(item))
length.sort(reverse=True)
if len(length) > 0:
orf_lengths[identifier] = length[0]
else:
orf_lengths[identifier] = 0
return orf_lengths, max(orf_lengths.values())
def longest_orf_position(file_address, reading_frame):
longest_orf_len = longest_orf_length(file_address, reading_frame)
long_orfs = longest_orf_len[0]
longest_orf_len = longest_orf_len[1]
longest_orf_identifier = ''
for item in long_orfs.keys():
if long_orfs[item] == longest_orf_len:
longest_orf_identifier = item
all_dna = dna_dict_creator(file_address)
all_orfs = orf_finder(file_address, reading_frame)
longest_orf_in_fasta = all_orfs[longest_orf_identifier][0]
for i in range(0, len(all_orfs[longest_orf_identifier])):
if (len(all_orfs[longest_orf_identifier][i]) > len(longest_orf_in_fasta)):
longest_orf_in_fasta = all_orfs[longest_orf_identifier][i]
dna = all_dna[longest_orf_identifier]
start_pos = dna.rfind(longest_orf_in_fasta)
#
# start_positions = {}
#
# for item in all_dna.keys():
# dna = all_dna[item]
# orfs = all_orfs[item]
# longest = ''
# for orf in orfs:
# if len(orf) > len(longest):
# longest = orf
# start_positions[item] = dna.rfind(longest)
# return start_positions
return start_pos + 1
def all_repeats(file_address, length):
fasta_file = dna_dict_creator(file_address)
dnas_keys = fasta_file.keys()
repeats_dict = {}
for item in dnas_keys:
repeats_list, fragments = [], []
dna = fasta_file[item]
for i in range(0, len(dna)):
fragments.append(dna[i:i + length])
for piece in fragments:
if len(piece) == length and dna.count(piece) > 1:
repeats_list.append(dna.count(piece))
repeats_dict[piece] = (dna.count(piece))
return repeats_dict
def most_frequent_repeat(file_address, length):
    # Return the repeat(s) with the highest occurrence count, together with that count.
    repeat_dict = all_repeats(file_address, length)
    if not repeat_dict:
        return [], 0
    max_count = max(repeat_dict.values())
    most_frequent = [seq for seq, count in repeat_dict.items() if count == max_count]
    return most_frequent, max_count
# print(longest_orf_length('dna2.fasta', 3)[1])
# print(longest_orf_position('dna2.fasta',3))
# print(len(dna_dict_creator('dna.example.fasta')))
# print(longest_orf_position('dna.example.fasta',0))
# print(longest_orf_position('dna.example.fasta',0))
# print(most_frequent_repeat('dna.example.fasta',6))
# print(all_repeats('dna.example.fasta',6))
# orf = 'gi|142022655|gb|EQ086233.1|16'
# long_orfs = longest_orf_length('dna2.fasta',3)[0]
# for item in long_orfs.keys():
# if orf in item:
# print(long_orfs[item])
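# Hedged usage sketch (not part of the original analysis): ties the helpers above
# together for a hypothetical FASTA file. The path 'example.fasta' is a placeholder,
# not a file that ships with this repository.
def example_usage(file_address='example.fasta'):
    records = dna_dict_creator(file_address)                    # {identifier: sequence}
    orfs_frame1 = orf_finder(file_address, 1)                   # ORFs found in reading frame 1
    per_record_lengths, max_orf_len = longest_orf_length(file_address, 1)
    repeats, top_count = most_frequent_repeat(file_address, 6)  # 6-mers repeated most often
    print(len(records), max_orf_len, top_count)
    return orfs_frame1, repeats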
|
saeedrafieyan/bioinformatics
|
final.py
|
final.py
|
py
| 6,224 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34608371364
|
import os.path
import json
import os
def readDIPfile(parent_path):
edges = {}
index = 0
xmlfilepath = os.path.join(parent_path, r'data\Hsapi20170205CR.txt')
f = open(xmlfilepath)
lines = f.readlines()
for line in lines:
line_list = line.strip("\n").split("\t")
if line_list[9] == "taxid:9606(Homo sapiens)" and line_list[10] == "taxid:9606(Homo sapiens)":
source = line_list[0].split("|")[0]
target = line_list[1].split("|")[0]
if source != target:
edges[index] = [source, target]
index += 1
print(len(edges))
    result_path = os.path.join(parent_path, 'data', 'uploads', 'resultEdges.json')
with open(result_path, 'w') as fw:
json.dump(edges, fw)
if __name__ == '__main__':
ROOT_DIR = os.path.dirname(os.path.abspath('__file__'))
parent_path = os.path.dirname(ROOT_DIR)
readDIPfile(parent_path)
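# Hedged illustration (not part of the original script): readDIPfile assumes a
# tab-separated MITAB-style line in which columns 0/1 hold pipe-delimited
# interactor IDs (DIP ID first) and columns 9/10 hold the species taxids.
# The sample below is made up and only fills the columns the script reads.
def _parse_sample_line():
    sample = "\t".join([
        "DIP-1N|uniprotkb:P00001",   # column 0: interactor A
        "DIP-2N|uniprotkb:P00002",   # column 1: interactor B
        "", "", "", "", "", "", "",  # columns 2-8: unused by readDIPfile
        "taxid:9606(Homo sapiens)",  # column 9: species of A
        "taxid:9606(Homo sapiens)",  # column 10: species of B
    ])
    fields = sample.strip("\n").split("\t")
    return fields[0].split("|")[0], fields[1].split("|")[0]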
|
LittleBird120/DiseaseGenePredicition
|
DiseaseGenePredicition/Human_COVID_node2vec20210315/data_processing/readHumanProtein.py
|
readHumanProtein.py
|
py
| 919 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1435864274
|
import math
import numpy as np
import cv2
from matplotlib import pyplot as plt
def Euclidean_Distance(pointA, pointB):
ans = ((pointA[0] - pointB[0])**2+(pointA[1] - pointB[1])**2)**0.5
return ans
def Flat_Kernel(distance, bandwidth, point_number):
inRange = []
weight = np.zeros((point_number, 1))
for i in range (distance.shape[0]):
if distance[i] <= bandwidth:
inRange.append(distance[i])
weight[i] = 1
inRange = np.array(inRange)
return weight
def Gaussian_Kernel(distance, bandwidth, point_number):
left = 1.0/(bandwidth * math.sqrt(2*math.pi))
right = np.zeros((point_number, 1))
for i in range(point_number):
right[i, 0] = (-0.5 * distance[i] * distance[i]) / (bandwidth * bandwidth)
right[i, 0] = np.exp(right[i, 0])
return left * right
def Get_Mono_Histogram(image_dir):
img = cv2.imread(image_dir)
hist = cv2.calcHist([img],[0],None,[256],[0,256])
plt.hist(img.ravel(), 256, [0, 256])
plt.show()
def Get_RGB_Histogram(image_dir):
img = cv2.imread(image_dir)
color = ('b', 'g', 'r')
for i, col in enumerate(color):
histr = cv2.calcHist([img], [i], None, [256], [0, 256])
plt.plot(histr, color=col)
plt.xlim([0, 256])
plt.show()
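# Hedged sketch (not part of the original utilities): one weighted mean-shift
# update for a single point, built on the kernels above. `points` is assumed to
# be an (N, 2) NumPy array and `center` a length-2 array.
def shift_point(center, points, bandwidth):
    distances = np.array([Euclidean_Distance(center, p) for p in points])
    weights = Gaussian_Kernel(distances, bandwidth, points.shape[0])  # shape (N, 1)
    # The new center is the kernel-weighted average of all points.
    return (weights * points).sum(axis=0) / weights.sum()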
|
laitathei/algorithm_implemention
|
machine_learning/Mean_Shift/utils.py
|
utils.py
|
py
| 1,331 |
python
|
en
|
code
| 0 |
github-code
|
6
|
474990887
|
"""
*Author : Revanth Sai Nandamuri
*GitHUB : https://github.com/RevanthNandamuri1341b0
*Date of update : 25 August 2021
*Project name : Finding missing number
*Domain : PYTHON
*Description : You are given an array of positive numbers
from 1 to n, such that all numbers from
1 to n are present except one number x.
You have to find x.The input array is not sorted.
Runtime Complexity: O(n)
*File Name : amazon_interview_question1.py
*File ID : 799173
*Modified by : #your name#
"""
def amazon_unique(n,arr):
a_list = list(range(1, n+1))
print(a_list)
for i in a_list:
if i not in arr:
print(i)
n=int(input())
arr= list(map(int,input().strip().split()))
#print(arr)
amazon_unique(n,arr)
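# Hedged alternative sketch (not part of the original submission): the loop above
# is O(n*m) because of the `in` test on a list; the sum-based version below gives
# the O(n) runtime stated in the header. The name `missing_number` is an assumption.
def missing_number(n, arr):
    # Sum of 1..n minus the sum of the given numbers is the missing value.
    return n * (n + 1) // 2 - sum(arr)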
|
RevanthNandamuri1341b0/PYTHON-COMPY
|
amazon_interview_question1.py
|
amazon_interview_question1.py
|
py
| 791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73900222588
|
# coding: utf-8
import unittest
import os
from django.conf import settings
from studitemps_storage.path import guarded_join
from studitemps_storage.path import guarded_safe_join
from studitemps_storage.path import guarded_join_or_create
from studitemps_storage.path import FileSystemNotAvailable
ABSPATH = os.path.abspath(".")
TEST_DIR = os.path.join("studitemps_storage", "tests", "test_dir")
"""
Using unittest.TestCase because we don't need the Django database or server
"""
class GuardedJoinTestCase(unittest.TestCase):
def test_file_exists(self):
"""
it should act like os.path.join
"""
self.assertEqual(
guarded_join(ABSPATH, 'studitemps_storage'),
os.path.join(ABSPATH, 'studitemps_storage')
)
self.assertEqual(
guarded_join(ABSPATH, TEST_DIR, 'check.txt'),
os.path.join(ABSPATH, TEST_DIR, 'check.txt')
)
def test_file_not_exists(self):
"""
It should raise IOError for not existing file/folder
"""
self.assertRaises(IOError, guarded_join, ABSPATH, 'files-does-not-exists')
def test_file_system_not_available(self):
"""
Manually activates GUARDED_JOIN_TEST to raise FileSystemNotAvailable
"""
settings.GUARDED_JOIN_TEST = True
self.assertRaises(FileSystemNotAvailable, guarded_join, ABSPATH)
settings.GUARDED_JOIN_TEST = False
class GuardedSafeJoin(unittest.TestCase):
def test_file_exists(self):
"""
        It should act like os.path.join with a base folder
"""
self.assertEqual(
guarded_safe_join(TEST_DIR, 'check.txt'),
os.path.join(ABSPATH, TEST_DIR, 'check.txt')
)
def test_outside_project(self):
"""
        It should raise an exception when trying to access files outside the project
"""
self.assertRaises(ValueError, guarded_safe_join, "..", "..", "..")
def test_not_exists(self):
"""
        It should act like os.path.join.
        If the file/folder doesn't exist, the joined path is still returned.
"""
self.assertEqual(
guarded_safe_join(TEST_DIR, "file-does-not-exists"),
os.path.join(ABSPATH, TEST_DIR, "file-does-not-exists")
)
class GuardedJoinOrCreate(unittest.TestCase):
def test_file_exists(self):
"""
        It should return the path and not create a new folder
"""
self.assertEqual(
guarded_join_or_create(ABSPATH, 'README.md'),
os.path.join(ABSPATH, 'README.md')
)
def test_create_dir(self):
"""
        Dir does not exist - create a new one
"""
path = os.path.join(TEST_DIR, "new-dir")
        # The folder shouldn't exist yet
self.assertFalse(os.path.exists(path))
# The folder should be created
guarded_join_or_create(path)
        # The folder should have been created successfully
self.assertTrue(os.path.exists(path))
# Remove
os.rmdir(path)
self.assertFalse(os.path.exists(path))
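# Hedged usage sketch (not part of the test suite): summarizes the behaviour the
# tests above assert. The paths are illustrative assumptions.
def _example_usage():
    existing = guarded_join(ABSPATH, TEST_DIR, 'check.txt')  # like os.path.join, but raises IOError if missing
    anchored = guarded_safe_join(TEST_DIR, 'check.txt')      # joined onto the project base, refuses to escape it
    created = guarded_join_or_create(TEST_DIR, 'new-dir')    # creates the directory if it does not exist
    return existing, anchored, created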
|
STUDITEMPS/studitools_storages
|
studitemps_storage/tests/suites/path.py
|
path.py
|
py
| 3,073 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21114723474
|
import os
from dotenv import load_dotenv, dotenv_values
# FOR LOG
import logging
from logging.handlers import RotatingFileHandler
import datetime
import math
import json
# Load environmental variable
config = dotenv_values(".env")
# --------------------------------------------------- LOGGING ---------------------------------------------------------
# Create new log folder if not exist
LOG_FOLDER_NAME = config.get('LOG_FOLDER_NAME')
LOG_FOLDER = os.path.join(os.getcwd(), LOG_FOLDER_NAME)
LOG_FILE = os.path.join(LOG_FOLDER, 'log_{datetime}.log'.format(datetime=datetime.datetime.now().strftime('%Y-%m-%d')))
MAXBYTES = (config.get('MAXBYTES'))
BACKUP_COUNT = config.get('BACKUP_COUNT')
# Set up logging basic config
try:
handler_rfh = RotatingFileHandler(LOG_FILE, maxBytes=int(MAXBYTES), backupCount=int(BACKUP_COUNT))
handler_rfh.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p')
logging.getLogger('CRAWL_TELEGRAM').addHandler(handler_rfh)
except Exception as e:
logging.exception(e)
# ----------------------------------------------------------------------------------------------------------------------
from entities.Account import Account
from entities.User import User
from BatchProcessor import BatchProcessor
if __name__ == "__main__":
bpro = BatchProcessor()
num_mem_per_acc = 4
list_members = []
for i in range(3):
member = User(f'user_id_{i}', f'access_hash_{i}')
list_members.append(member)
list_accounts = []
for i in range(3):
acc = Account(f'phone_no_{i}', f'api_id_{i}', f'api_hash_{i}', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
list_accounts.append(acc)
logging.info(list_accounts)
logging.info(list_members)
list_use_accounts, dict_batch, is_lack_acc, max_mem_process = bpro.divide_into_batch(list_accounts, list_members, num_mem_per_acc)
logging.info(', '.join([acc.phone_no for acc in list_use_accounts]))
logging.info(is_lack_acc)
logging.info(max_mem_process)
for key in dict_batch:
print(key)
print('Account:' ,dict_batch[key][0])
print('List members:')
print(*dict_batch[key][1], sep='\n')
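# Hedged sketch (not part of the original test): BatchProcessor.divide_into_batch
# is defined elsewhere in this project, so the chunking below is only an
# illustrative guess at the behaviour under test - splitting members into groups
# of num_mem_per_acc and pairing each group with one account.
def illustrative_divide(list_accounts, list_members, num_mem_per_acc):
    batches = {}
    for idx in range(0, len(list_members), num_mem_per_acc):
        acc_index = idx // num_mem_per_acc
        if acc_index >= len(list_accounts):
            break  # not enough accounts for the remaining members
        batches[acc_index] = (list_accounts[acc_index], list_members[idx:idx + num_mem_per_acc])
    return batches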
|
Splroak/add_member_telegram
|
src/test_BatchProcessor.py
|
test_BatchProcessor.py
|
py
| 2,365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13489533801
|
import json
import requests
resource = requests.post('http://216.10.245.166/Library/DeleteBook.php',
json = {"ID" : "ashish123227"}, headers={'Content-Type' : 'application/json' }
)
assert resource.status_code == 200, f'the API call failed with error message: {resource.text}'
response_json = json.loads(resource.text)
print(response_json)
assert response_json['msg'] == 'book is successfully deleted', 'book is not deleted'
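# Hedged sketch (not part of the original script): wraps the same request in a
# reusable helper with basic error handling. The endpoint and payload are taken
# from the call above; the function name is an assumption.
def delete_book(book_id):
    response = requests.post(
        'http://216.10.245.166/Library/DeleteBook.php',
        json={'ID': book_id},
        headers={'Content-Type': 'application/json'},
    )
    response.raise_for_status()  # raises for non-2xx responses
    return response.json()       # e.g. {'msg': 'book is successfully deleted'}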
|
bhagatashish/APT_Testing
|
delete_book.py
|
delete_book.py
|
py
| 462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73795089468
|
import json
import os
from flask import current_app, redirect, request, Response
from . import blueprint
@blueprint.route("/routes")
def routes():
data = {
"name": current_app.config["name"],
"version": current_app.config["version"],
"routes": {
"api": [
"/api/documentation",
"/api/shutdown",
"/api/version"
],
"igv": [
"/igv/demo",
"/igv/custom",
"/igv/session"
]
}
}
js = json.dumps(data, indent=4, sort_keys=True)
resp = Response(js, status=200, mimetype="application/json")
return resp
@blueprint.route("/api/documentation")
def documentation():
return redirect("https://github.com/igvteam/igv.js", code=302)
@blueprint.route("/api/shutdown")
def shutdown():
try:
request.environ.get("werkzeug.server.shutdown")()
except Exception:
raise RuntimeError("Not running with the Werkzeug Server")
return "Shutting down..."
@blueprint.route("/api/version")
def api_version():
data = {
"tool_version": current_app.config["tool_version"],
"igv_version": current_app.config["igv_version"]
}
js = json.dumps(data, indent=4, sort_keys=True)
resp = Response(js, status=200, mimetype="application/json")
return resp
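# Hedged usage sketch (not part of the original module): exercises the routes above
# with Flask's test client. How the real application is created and configured
# lives elsewhere in this package, so `app` is assumed to be a Flask app with this
# blueprint registered and the config keys used above already set.
def _smoke_test_routes(app):
    with app.test_client() as client:
        assert client.get("/routes").status_code == 200
        assert client.get("/api/version").status_code == 200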
|
cumbof/igv-flask
|
igv/routes/basics.py
|
basics.py
|
py
| 1,385 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1066446639
|
"""
This module defines the interface for communicating with the sound module.
.. autoclass:: _Sound
:members:
:undoc-members:
:show-inheritance:
"""
import glob
import os
import platform
import subprocess
from functools import partial
from opsoro.console_msg import *
from opsoro.sound.tts import TTS
from opsoro.users import Users
get_path = partial(os.path.join, os.path.abspath(os.path.dirname(__file__)))
class _Sound(object):
def __init__(self):
"""
Sound class, used to play sound and speak text.
"""
# List of search folders for sound files
self.sound_folders = ["../data/sounds/"]
self.playProcess = None
self.jack = False
self._platform = platform.system()
def _play(self, filename):
"""
Play any local file, used internally by other methods
:param string filename: full filename to play
"""
FNULL = open(os.devnull, "w")
if self._platform == "Darwin":
# OSX playback, used for development
self.playProcess = subprocess.Popen(
["afplay", filename], stdout=FNULL, stderr=subprocess.STDOUT)
elif not self.jack:
self.playProcess = subprocess.Popen(
["aplay", filename], stdout=FNULL, stderr=subprocess.STDOUT)
else:
# self.playProcess = subprocess.Popen(["aplay", "-D", "hw:0,0", full_path], stdout=FNULL, stderr=subprocess.STDOUT)
self.playProcess = subprocess.Popen(
["aplay", "-D", "hw:0,0", filename],
stdout=FNULL,
stderr=subprocess.STDOUT)
def say_tts(self, text, generate_only=False):
"""
Converts a string to a soundfile using Text-to-Speech libraries
:param string text: text to convert to speech
:param bool generate_only: do not play the soundfile once it is created
"""
if text is None:
return
full_path = TTS.create(text)
if generate_only:
return
# Send sound to virtual robots
Users.broadcast_robot({'sound': 'tts', 'msg': text})
self.stop_sound()
self._play(full_path)
def play_file(self, filename):
"""
Plays an audio file according to the given filename.
:param string filename: file to play
:return: True if sound is playing.
:rtype: bool
"""
self.stop_sound()
path = None
if os.path.splitext(filename)[1] == '':
filename += '.*'
for folder in self.sound_folders:
f = os.path.join(get_path(folder), filename)
files = glob.glob(f)
if files:
path = files[0]
break
if path is None:
print_error("Could not find soundfile \"%s\"." % filename)
return False
# Send sound to virtual robots
name, extension = os.path.splitext(os.path.basename(filename))
Users.broadcast_robot({'sound': 'file', 'msg': name})
self._play(path)
return True
def get_file(self, filename, tts=False):
"""
Returns audio file data according to the given filename.
:param string filename: file to return the data from
:return: Soundfile data.
:rtype: var
"""
path = None
data = None
if tts:
path = TTS.create(filename)
else:
if os.path.splitext(filename)[1] == '':
filename += '.*'
for folder in self.sound_folders:
f = os.path.join(get_path(folder), filename)
files = glob.glob(f)
if files:
path = files[0]
break
if path is None:
print_error("Could not find soundfile \"%s\"." % filename)
return data
try:
with open(get_path(path)) as f:
data = f.read()
except Exception as e:
print_error(e)
# Send sound to virtual robots
return data
def stop_sound(self):
"""
Stop the played sound.
"""
        if self.playProcess is None:
return
self.playProcess.terminate()
self.playProcess = None
def wait_for_sound(self):
"""
Wait until the played sound is done.
"""
        if self.playProcess is None:
return
self.playProcess.wait()
self.playProcess = None
# Global instance that can be accessed by apps and scripts
Sound = _Sound()
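# Hedged usage sketch (not part of the original module): the calls below use only
# methods defined above. The sound name "beep" is a placeholder and must exist in
# one of the configured sound folders; wrapped in a function so importing this
# module does not trigger playback.
def _demo_sound():
    Sound.say_tts("Hello, I am an OPSORO robot")  # speak a sentence via TTS
    Sound.play_file("beep")                       # play ../data/sounds/beep.* if present
    Sound.wait_for_sound()                        # block until playback finishes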
|
OPSORO/OS
|
src/opsoro/sound/__init__.py
|
__init__.py
|
py
| 4,683 |
python
|
en
|
code
| 9 |
github-code
|
6
|
25754911493
|
import os
from multiprocessing import freeze_support,set_start_method
import multiprocessing
from Optimization import Optimization
from GA import RCGA
from PSO import PSO
if __name__=='__main__':
from datetime import datetime
start = datetime.now()
print('start:', start.strftime("%m.%d.%H.%M"))
multiprocessing.freeze_support()
lower = [0.9, 0.9, 0.9,0.9,0.9,0.9]
upper = [1.1,1.1,1.1,1.1,1.1,1.1]
pso = PSO(func=Optimization, n_dim=6, pop=72, max_iter=30, w=0.8, lb=lower, ub=upper, c1=1.49, c2=1.49,verbose=True)
#freeze_support()
#set_start_method('forkserver')
pso.record_mode=True
pso.run(precision=1e-5)
print('best_x',pso.pbest_x,'\n','best_y',pso.pbest_y)
    f = open('best_opt.txt', 'a+')
    f.write(str(pso.best_x))
    f.close()
    f = open('updating_processing.txt', 'a+')
    f.write(str(pso.pbest_x))
    f.write('\n')
    f.write(str(pso.pbest_y))
    f.close()
end=datetime.now()
print('end',end.strftime("%m.%d.%H.%M"))
os.system('MAC.py')
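# Hedged sketch (not part of the original script): `Optimization` above is the
# project's model-updating objective defined in Optimization.py. The stand-in
# below only illustrates the call signature the PSO expects - a function of the
# six scaling factors that returns a scalar to minimise.
def dummy_objective(x):
    # Sphere-like surrogate centred on 1.0, the nominal scaling factor.
    return sum((xi - 1.0) ** 2 for xi in x)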
|
zhengjunhao11/model-updating-framework
|
program_framework/Input.py
|
Input.py
|
py
| 1,002 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6830398340
|
#!/usr/bin/env python
#!/usr/bin/python
from tkinter import *
root = Tk() # creates a blank window named root
top_frame = Frame(root)
top_frame.pack()
bottom_frame = Frame(root)
bottom_frame.pack(side=BOTTOM) # since the bottom frame is packed at the bottom, the top frame stays at the top
button1 = Button(top_frame, text='button 1', highlightbackground='red', fg='yellow') # creating a Button widget instead of a plain text label
button2 = Button(top_frame, text='button 2', bg='blue', fg='green') # button placement: the parent frame (top_frame) is the first argument; the displayed text and the colors (bg=, fg=) follow as keyword options
button3 = Button(top_frame, text='button 3', fg='red') # fg foreground bg background
# button4 = Button(bottom_frame, text='button 4', highlightcolor='purple') # at the bottom frame
button4 = Button(bottom_frame, text="Press!", highlightbackground='blue', fg="green") # still the fg color option does not work
button1.pack(side=LEFT) # this tells the program what and where to display
button2.pack(side=LEFT)
button3.pack(side=LEFT, fill=BOTH, expand=True)
button4.pack(side=BOTTOM) # The parameters could be left blank since there are no other objects in the bottom frame
root.mainloop() # mainloop keeps the root looping infinitely or until closed, so the window remains visible on the screen.
'''bottomFrame = Frame(root).pack(side=BOTTOM) ?
btn1 = Button(bottomFrame, text="okay", fg="red").pack()'''
'''from Tkinter import *
Label(None, text='label', fg='green', bg='black').pack()
Button(None, text='button', fg='green', bg='black').pack()
mainloop()
# **************** With ttk:
import tkinter as tk
from tkinter import ttk
root = tk.Tk()
# background="..." doesn't work...
ttk.Style().configure('green/black.TLabel', foreground='green', background='black')
ttk.Style().configure('green/black.TButton', foreground='green', background='black')
label = ttk.Label(root, text='I am a ttk.Label with text!', style='green/black.TLabel')
label.pack()
button = ttk.Button(root, text='Click Me!', style='green/black.TButton')
button.pack()
root.mainloop()'''
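# Hedged add-on example (not part of the original tutorial file): a tiny,
# self-contained demonstration of how fill and expand change pack() behaviour.
# Kept inside a function so running this file does not open a second window.
def _fill_expand_demo():
    demo = Tk()
    Button(demo, text='expands with the window').pack(side=TOP, fill=BOTH, expand=True)
    Button(demo, text='keeps its natural size').pack(side=BOTTOM)
    demo.mainloop()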
|
judas79/TKinter-git-theNewBoston
|
Tkinter - 02 - Organizing your Layout/Tkinter - 02 - Organizing your Layout.py
|
Tkinter - 02 - Organizing your Layout.py
|
py
| 2,170 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73416700989
|
class Verity:
def input_boolean():
print("X", "Y", "Z", "Rezult" )
print("*"*15)
for X in range(2):
for Y in range(2):
for Z in range(2):
                    rezult = (not (X or Y or Z)) == ((not X) and (not Y) and (not Z))
print(f"{X} {Y} {Z} - {rezult}")
input_boolean()
# examination_verity()
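# Hedged alternative sketch (not part of the original exercise): the same
# De Morgan check written with itertools.product instead of nested loops.
import itertools

def demorgan_table():
    for X, Y, Z in itertools.product(range(2), repeat=3):
        result = (not (X or Y or Z)) == ((not X) and (not Y) and (not Z))
        print(X, Y, Z, "-", result)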
|
DenisBaicurov/PracticaPython
|
exercise2.py
|
exercise2.py
|
py
| 410 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38938434821
|
#!/usr/bin/env python
from sys import argv
fin = open("include/hiponodes.h")
fout0 = open("include/node_declaration.h","w")
fout1 = open("src/node_assignment.cxx","w")
fout1.write("//// File automatically produced by format_hiponodes.py do not make changes here!!\n")
fout1.write('#include "TIdentificatorCLAS12.h"\n')
fout1.write("int TIdentificatorCLAS12::InitNodes()\n")
fout1.write("{\n")
fout0.write("//// File automatically produced by format_hiponodes.py do not make changes here!!\n")
for line in fin:
if '=' not in line: continue
linearray = line.split("=")
fout0.write(linearray[0] + ";\n")
fout1.write(" " + linearray[0].split("*")[1] + " = " + linearray[1])
fout1.write("}\n")
# close the streams so the generated files are fully flushed to disk
fin.close()
fout0.close()
fout1.close()
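# Hedged illustration (not part of the original script): each input line is
# expected to look like "<type> *<name> = <initialiser>". The helper below mirrors
# the split performed above on one made-up line; the names are placeholders only.
def _split_example(line="some_type *example_node = example_initialiser()"):
    left, right = line.split("=", 1)
    declaration = left + ";"                        # analogous to a node_declaration.h line
    assignment = left.split("*")[1] + " =" + right  # analogous to a node_assignment.cxx entry
    return declaration, assignment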
|
orsosa/Clas12Ana
|
format_hiponodes.py
|
format_hiponodes.py
|
py
| 707 |
python
|
en
|
code
| 0 |
github-code
|
6
|