seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
---|---|---|---|---|---|---|---|---|---|---|---|---|
73816196028
|
import numpy as np
import matplotlib.pyplot as plt
import difuzija as di
import sys
sys.getdefaultencoding()
def rho(x):
if 2.0 <= x <= 5.0:
return 5.5
else:
return 0.0
j = [0, 100, 200, 300, 400]
t = [0.5*J for J in j]
P1 = [0.0, 20.0, 0.0, t[0]] # initial conditions
P2 = [0.0, 20.0, 0.0, t[1]]
P3 = [0.0, 20.0, 0.0, t[2]]
P4 = [0.0, 20.0, 0.0, t[3]]
P5 = [0.0, 20.0, 0.0, t[4]]
N = 100
D1 = di.D_exp(rho, P1, N, j[0]) # values of the diffusion function
D2 = di.D_exp(rho, P2, N, j[1])
D3 = di.D_exp(rho, P3, N, j[2])
D4 = di.D_exp(rho, P4, N, j[3])
D5 = di.D_exp(rho, P5, N, j[4])
X = [x/(20.0/N) for x in np.arange(0.0, 20.0 + 20.0/N, 20.0/N)]
fig = plt.figure(figsize=(9,6), dpi=120)
axes = fig.add_axes([0.15, 0.15, 0.75, 0.70])
plt.rcParams.update({'font.size': 8}) #type: ignore
axes.plot(X, D1, label='t = {}$\u0394$x'.format(j[0]), lw=0.8, color='lightblue')
#axes.plot(X, D2, label='t = {}$\u0394$x'.format(j[1]), lw=0.8, color='blue')
#axes.plot(X, D3, label='t = {}$\u0394$x'.format(j[2]), lw=0.8, color='cyan')
#axes.plot(X, D4, label='t = {}$\u0394$x'.format(j[3]), lw=0.8, color='green')
#axes.plot(X, D5, label='t = {}$\u0394$x'.format(j[4]), lw=0.8, color='orange')
axes.grid(lw=0.5)
axes.set_xlabel('x / $\u0394$x')
axes.set_ylabel('$\u03C1(x,t)$ / kgm$^{-1}$')
axes.legend(loc='best')
axes.set_title('Phase diagram of the mathematical pendulum')
plt.show()
|
FabjanJozic/MMF3
|
Predavanje12_PDJ/Zadatak1.py
|
Zadatak1.py
|
py
| 1,404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74927169786
|
'''
The photon Project
-------------------
File: read_conf.py
This file reads the configuration file
@author: R. THOMAS
@year: 2018
@place: ESO
@License: GPL v3.0 - see LICENCE.txt
'''
#### Python Libraries
import configparser
import os
class Conf:
"""
This Class defines the arguments to be called to use SPARTAN
For the help, you can use 'SPARTAN -h' or 'SPARTAN --help'
"""
def __init__(self,conf):
"""
Class constructor, defines the attributes of the class
and run the argument section
"""
if conf is None:
##if no configuration was passed, we take the default one
dir_path = os.path.dirname(os.path.realpath(__file__))
default_file = os.path.join(dir_path, 'properties.conf')
self.read_conf(default_file)
else:
self.read_conf(conf)
def read_conf(self, fileconf):
'''
Method that reads the configuration file passed to the code
'''
config = configparser.ConfigParser()
config.read(fileconf)
###background color
self.BACK = config.get('background', 'back_color')
##AXIS properties
AXIS = {'Color' : '', 'Label_color' : '', 'lw' : '', 'Labelsize' : '', 'Axis_label_font' : ''}
AXIS['Color'] = config.get('AXIS', 'Color')
AXIS['Label_color'] = config.get('AXIS', 'Label_Color')
AXIS['lw'] = config.getfloat('AXIS', 'linewidth')
AXIS['Labelsize'] = config.getfloat('AXIS', 'Labelsize')
AXIS['Axis_label_font'] = config.get('AXIS', 'Axis_label_font')
self.axis = AXIS
####Ticks properties
TICKS = {'Minor' : '', 'placement' : '', 'Major_size' : '', 'Minor_size' : '', \
'Major_width' : '', 'Minor_width' : '', 'Ticks_color' : '', 'Label_color' : '',\
'Ticks_label_font' : '', }
TICKS['Minor'] = config.get('TICKS', 'Minor')
TICKS['placement'] = config.get('TICKS', 'placement')
TICKS['Major_size'] = config.getfloat('TICKS', 'Major_size')
TICKS['Minor_size'] = config.getfloat('TICKS', 'Minor_size')
TICKS['Major_width'] = config.getfloat('TICKS', 'Major_width')
TICKS['Minor_width'] = config.getfloat('TICKS', 'Minor_width')
TICKS['Ticks_color'] = config.get('TICKS', 'Ticks_color')
TICKS['Label_color'] = config.get('TICKS', 'Label_color')
TICKS['Label_size'] = config.getfloat('TICKS', 'Label_size')
TICKS['Ticks_label_font'] = config.get('TICKS', 'Ticks_label_font')
self.ticks = TICKS
###legend
LEGEND = {'Frame' : '', 'font_size' : '', 'Legend_font' : '',\
'Label_font_color' : '', 'ncol' : '', 'location':''}
LEGEND['Frame'] = config.get('LEGEND', 'Frame')
LEGEND['font_size'] = config.getfloat('LEGEND', 'font_size')
LEGEND['Legend_font'] = config.get('LEGEND', 'Legend_font')
LEGEND['Label_font_color'] = config.get('LEGEND', 'Label_font_color')
LEGEND['location'] = config.get('LEGEND', 'location')
self.legend = LEGEND
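# Added illustrative usage (not part of the original file): a minimal sketch of
# how Conf might be used, assuming the bundled properties.conf defines the
# [background], [AXIS], [TICKS] and [LEGEND] sections read above.
if __name__ == '__main__':
conf = Conf(None) # falls back to properties.conf next to this module
print(conf.BACK) # background colour string
print(conf.axis['lw'], conf.ticks['Minor'], conf.legend['location'])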
|
astrom-tom/Photon
|
photon/read_conf.py
|
read_conf.py
|
py
| 3,134 |
python
|
en
|
code
| 3 |
github-code
|
6
|
29578754560
|
# -*- coding: utf-8 -*-
"""
https://note.nkmk.me/python-listdir-isfile-isdir/
Created on Wed Oct 31 11:45:21 2018
@author: Akitaka
"""
import os
path = "./testdir"
files = os.listdir(path)
print(type(files)) # <class 'list'>
print(files) # ['dir1', 'dir2', 'file1', 'file2.txt', 'file3.jpg']
#%%
files = os.listdir(path)
files_file = [f for f in files if os.path.isfile(os.path.join(path, f))]
print(files_file) # ['file1', 'file2.txt', 'file3.jpg']
#%%
files = os.listdir(path)
files_dir = [f for f in files if os.path.isdir(os.path.join(path, f))]
print(files_dir) # ['dir1', 'dir2']
|
nakanishi-akitaka/python2018_backup
|
1031/python_listdir_isfile_isdir.py
|
python_listdir_isfile_isdir.py
|
py
| 652 |
python
|
en
|
code
| 5 |
github-code
|
6
|
74182045309
|
"""feature for malware."""
import os.path
from abc import ABC, abstractmethod
import numpy as np
import filebrowser
import lief
from capstone import *
class Feature(ABC):
"""interface for all feature type."""
def __init__(self):
super().__init__()
self.dtype = np.float32
self.name = ''
@abstractmethod
def __call__(self):
"""call for feature extraction."""
def __repr__(self):
# not every Feature subclass defines self.dim, so fall back gracefully
return '{}({})'.format(self.name, getattr(self, 'dim', '?'))
class BaseFeature(Feature, ABC):
"""interface & base impl for all base feature type."""
def __init__(self, dim):
super(BaseFeature, self).__init__()
self.dim = dim
def empty(self):
return np.zeros((self.dim,), dtype=np.float32)
class RawBytesFeature(Feature):
"""raw bytes from whole exe."""
def __init__(self):
super(RawBytesFeature, self).__init__()
self.bytez = None
def __call__(self, binary):
builder = lief.PE.Builder(binary)
builder.build()
self.bytez = bytearray(builder.get_build())
return self.bytez
def image(self, width=256):
total_size = len(self.bytez)
rem = total_size % width
height = total_size // width
arr = np.frombuffer(self.bytez, dtype=np.uint8)
if rem != 0:
height += 1
arr = np.pad(arr, (0, width-rem), 'constant')
return arr.reshape((height, width))
class OpCodeFeature(Feature):
"""opcode sequence from binary."""
def __init__(self, only_text=False):
super(OpCodeFeature, self).__init__()
self.only_text = only_text
def __call__(self, binary):
opcode_seq = []
disasm_sections = []
for sec in binary.sections:
if lief.PE.SECTION_CHARACTERISTICS.MEM_EXECUTE in sec.characteristics_lists:
disasm_sections.append(sec.name)
if self.only_text:
disasm_sections = [".text"]
for name in disasm_sections:
section = binary.get_section(name)
try: # some sections may contain no content
bytes = section.content.tobytes()
except Exception:
continue
if binary.header.machine == lief.PE.MACHINE_TYPES.I386:
md = Cs(CS_ARCH_X86, CS_MODE_32)
else:
md = Cs(CS_ARCH_X86, CS_MODE_64)
for i in md.disasm(bytes, section.virtual_address):
opcode_seq.append(i.mnemonic)
return opcode_seq
if __name__ == "__main__":
fclient = filebrowser.FileBrowserClient().with_host(
host="10.112.108.112", port="8081", username="admin", password="daxiahyh")
download_list = [
"dagongren/DikeDataset-main/files/benign/0a8deb24eef193e13c691190758c349776eab1cd65fba7b5dae77c7ee9fcc906.exe",
]
opcode_set = set()
for file in download_list:
save_path = os.path.join("../download", file.split("/")[-1])
print(save_path)
fclient.download_auth_file(
file,
save_path)
binary = lief.PE.parse(save_path)
bytes = RawBytesFeature()
print(hex(len(bytes(binary))))
print(bytes.image())
opcodes = OpCodeFeature()
opcode_set.update(opcodes(binary))
print(len(opcode_set))
|
dagrons/try
|
feature/feature.py
|
feature.py
|
py
| 3,314 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9380859967
|
import numpy as np
import time, glob, cv2
from pymycobot import MyCobotSocket
from pymycobot import PI_PORT, PI_BAUD
from single_aruco_detection import marker_detecting
from forward_kinematics import F_K
from inverse_kinematics import I_K
import matplotlib.pyplot as plt
from scipy.linalg import orthogonal_procrustes
def std_deviation():
# load the T_cam2ee
T_cam2ee = np.load("scripts/Hand_eye_calibration/cam2gripper.npy")
# load the T_marker2cam
R_target2cam = np.load("scripts/Hand_eye_calibration/R_target2cam.npy")
t_target2cam = np.load("scripts/Hand_eye_calibration/t_target2cam.npy")
# number of images
n = len(R_target2cam)
T_mar2cam = []
for i in range(len(t_target2cam)):
T_eye = np.eye(4)
T_eye[0:3,0:3] = R_target2cam[i]
T_eye[0:3,3] = np.reshape(t_target2cam[i], (3,))
T_mar2cam.append(T_eye)
# load the T_ee2base
T_ee2base_file = sorted(glob.glob("scripts/Hand_eye_calibration"
+ "/T_gripper2base/*.npy"))
T_ee2base = [np.load(f) for f in T_ee2base_file]
# T_marker2base
T_mar2base = []
for i in range(len(T_ee2base)):
T_mar2base.append(transform2base(T_mar2cam[i], T_cam2ee, T_ee2base[i]))
np.save("scripts/Analysis/T_mar2base", T_mar2base)
# print(np.shape(T_mar2base))
# create the chessboard corners
# 9X6 chessboard, 0.016m for the edge of the square
chessboard_corners = []
# for line (y)
for i in range(0,6):
# for coloum (x)
for j in range(0,9):
corner_coord = [0,0,0,1]
corner_coord[0] = 0.016 * j
corner_coord[1] = 0.016 * i
chessboard_corners.append(corner_coord)
chessboard_corners = np.reshape(chessboard_corners, (54,4))
# print(np.shape(chessboard_corners)[0])
# print(chessboard_corners)
# transfer the chessboard corners to the base
y = []
# loop the T_mar2base
for i in range(0, np.shape(T_mar2base)[0]):
# loop the chessboard corners
y_i = []
for j in range(0,54):
coord = T_mar2base[i] @ np.reshape(chessboard_corners[j], (4,1))
y_i.append(coord)
y.append(y_i)
# print(np.shape(y[0]))
# sum the y_i
sum = np.squeeze(y[0])
for i in range(1,n):
sum = sum + np.squeeze(y[i])
# y bar
y_bar = (sum)/n
# y_i - y_bar
error = []
for i in range(0, np.shape(y)[0]):
error.append(np.squeeze(y[i])-y_bar)
# print(np.shape(error[0]))
# square each error, sum them, take the square root, then divide by 6*9 (the number of corners)
# error_each_image in m
# error of each corresponding corner in each image
error_each_image = []
for i in range(0, n):
error_each_image.append(np.sqrt(np.sum(np.square(error[i])))/(6*9))
# print(error_each_image)
##### draw the figure and save #####
# font1 = {'family':'times','size':14}
# font2 = {'family':'times','size':12}
# plt.figure(figsize=(12,5))
# plt.plot(np.arange(1,33), error_each_image)
# plt.xticks(range(1,33))
# plt.xlabel("No.i pose", fontdict=font2)
# plt.ylabel("Error / (m)", fontdict=font2)
# plt.title("Average error at No.i pose", fontdict=font1)
# plt.savefig("error.png",dpi=500)
### procrustes ###
R, _ = orthogonal_procrustes(y_bar, chessboard_corners)
np.save("scripts/Analysis/T_mar2base_procrustes", R)
print("the estimated transformation is:")
print(R)
def transform2base(T_marker2cam, T_cam2gripper, T_gripper2base):
"""
chain rule to get the transformation from marker frame to the robot base frame
"""
T_marker2base = T_gripper2base @ T_cam2gripper @ T_marker2cam
return T_marker2base
if __name__ == "__main__":
std_deviation()
|
zzZzzccHEnn/Visual_Tracking
|
evaluation.py
|
evaluation.py
|
py
| 3,833 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19425798857
|
# Token types
#
# EOF (end-of-file) token is used to indicate that
# there is no more input left for lexical analysis
# RESERVED WORDS
PROGRAM = 'PROGRAM'
BEGIN = 'BEGIN'
END = 'END'
VAR = 'VAR'
IO = 'IO'
WAYPOINT = 'WAYPOINT'
TRUE = 'TRUE'
FALSE = 'FALSE'
IF = 'IF'
THEN = 'THEN'
ELSE = 'ELSE'
ENDIF = 'ENDIF'
LOOP = 'LOOP'
UNTIL = 'UNTIL'
WAIT = 'WAIT'
MOVETO = 'MOVETO'
HOME = 'HOME'
#VAR TYPES
ID = 'ID'
INTEGER = 'INTEGER'
BOOL = 'BOOL'
REAL = 'REAL'
INTEGER_CONST = 'INTEGER_CONST'
BOOL_CONST = 'BOOL_CONST'
REAL_CONST = 'REAL_CONST'
PININ = 'PININ'
PINOUT = 'PINOUT'
WAYPOINT = 'WAYPOINT'
# OPERATORS
PLUS = 'PLUS'
MINUS = 'MINUS'
MUL = 'MUL'
INTEGER_DIV = 'INTEGER_DIV'
FLOAT_DIV = 'FLOAT_DIV'
LTE = 'LTE'
LT = 'LT'
GTE = 'GTE'
GT = 'GT'
LPAREN = 'LPAREN'
RPAREN = 'RPAREN'
ASSIGN = 'ASSIGN'
EQUAL = 'EQUAL'
NEQUAL = 'NEQUAL'
# SYNTAX SYMBOLS
SEMI = 'SEMI'
DOT = 'DOT'
COLON = 'COLON'
COMMA = 'COMMA'
EOF = 'EOF'
|
TimTrudeau/T3001
|
SRC/token_types.py
|
token_types.py
|
py
| 1,346 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18262895650
|
from __future__ import print_function
from __future__ import unicode_literals
import sys
import subprocess
def str_chunk(input_str, width):
"""divide string to chunks with fixed width/size.
"""
return (input_str[0+i:width+i] for i in range(0, len(input_str), width))
def str_is_hex(input_str):
"""check if given string is hex digits.
"""
try:
int(input_str, 16)
return True
except (ValueError, TypeError):
return False
class I2cDetectTool:
"""class to run i2cdetect and parse command output.
"""
_bus_num = None # i2c bus to be detected
# "_probed_devices" contains the addresses of devices which are
# successfully probed by "i2cdetect".
_probed_devices = []
# "_skipped_devices" contains the addresses of devices which are
# skipped by "i2cdetect" because the devices are in use by drivers.
# Per "i2cdetect" manual page, this strongly suggests that there is
# a chip/device at this address.
_skipped_devices = []
def _parse_i2cdetect_output(self, cmd_output):
"""parse "i2cdetect" command output.
"""
cmd_output = cmd_output.decode('utf-8')
lines = cmd_output.splitlines()
lines.pop(0) # skip first line
start_addr = 0
for line in lines:
entries = list(str_chunk(line, 3))
entries.pop(0) # skip first column
offset = 0
for entry in entries:
entry = entry.strip()
if entry == 'UU':
self._skipped_devices.append(start_addr + offset)
elif str_is_hex(entry):
self._probed_devices.append(start_addr + offset)
offset += 1
start_addr += 16
def __init__(self, bus_num, first_addr=None, last_addr=None):
"""Create an instance of I2cDetectTool.
"""
self._bus_num = bus_num
# use per-instance lists so results are not shared between instances
self._probed_devices = []
self._skipped_devices = []
cmd = 'i2cdetect -y ' + str(bus_num)
f = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
info, err = f.communicate()
if len(err) != 0:
raise Exception(err)
self._parse_i2cdetect_output(info)
def list_probed_devices(self):
"""Returns the list (addresses) of found i2c devices.
"""
return self._probed_devices
def list_skipped_devices(self):
"""Returns the list (addresses) of skipped i2c devices.
"""
return self._skipped_devices
def is_device_present(self, i2c_addr):
return (i2c_addr in self._probed_devices or
i2c_addr in self._skipped_devices)
if __name__ == "__main__":
"""unit test of classes/functions in the file.
"""
if len(sys.argv) != 3:
print('Error: invalid command line arguments!')
print('Usage: %s <i2c-bus-num> <dev-addr-hex>\n' % sys.argv[0])
sys.exit(1)
bus_num = int(sys.argv[1])
dev_addr = int(sys.argv[2], 16)
bus_info = I2cDetectTool(bus_num)
# Dump probed devices if any
probed = bus_info.list_probed_devices()
if len(probed) == 0:
print('no devices probed on i2c bus %d' % bus_num)
else:
print('%d devices probed on i2c bus %d:' % (len(probed), bus_num))
for item in probed:
print(' - 0x%x' % item)
# Dump skipped devices if any
skipped = bus_info.list_skipped_devices()
if len(skipped) == 0:
print('no devices skipped on i2c bus %d' % bus_num)
else:
print('%d devices skipped on i2c bus %d:' % (len(skipped), bus_num))
for item in skipped:
print(' - 0x%x' % item)
# Test if device is present at given address
if (bus_info.is_device_present(dev_addr)):
print('i2c bus %d, address 0x%x: device detected' %
(bus_num, dev_addr))
else:
print('i2c bus %d, address 0x%x: no device present' %
(bus_num, dev_addr))
|
WeilerWebServices/Facebook
|
openbmc/tests/common/i2cUtils.py
|
i2cUtils.py
|
py
| 4,008 |
python
|
en
|
code
| 3 |
github-code
|
6
|
31273368548
|
# Write a program that receives a number and
# prints the double, the triple and the square root of that number
n1 = int(input('Enter a number here '))
dobro = n1*2
triplo = n1*3
raiz = n1**(1/2)
print('The double is {} \n'
'The triple is {} \n'
'The square root is {} \n'
.format(dobro, triplo, raiz))
|
fcoxico/Python-Projects
|
DobroTriploQuadruplo.py
|
DobroTriploQuadruplo.py
|
py
| 296 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
36914067207
|
from django.urls import path
from rest_framework import routers
from user_accounts import views
router = routers.DefaultRouter()
# router.register('users', user_viewsets)
urlpatterns = [
path('create_user/', views.create_user.as_view(), name='create_user'),
path('login_user/', views.login_user.as_view(), name='login_user'),
path('logout_user/<str:email_address>', views.logout_user.as_view(), name='logout_user'),
path('GetUserInfoAPI/<str:email_address>', views.GetUserInfoAPI.as_view(), name='GetUserInfoAPI'),
# path('get_principalesID/<str:email_address>', views.get_principalesID.as_view(), name='get_principalesID'),
]
|
AmbeyiBrian/ELECTRONIC-SCHOOL-MANAGER-KENYA
|
elimu_backend/user_accounts/urls.py
|
urls.py
|
py
| 649 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18215167051
|
import BinarySearchTreeMap
def create_chain_bst(n):
chain_bst = BinarySearchTreeMap.BinarySearchTreeMap()
for i in range(1,n+1):
chain_bst.insert(i)
return chain_bst
def create_complete_bst(n):
bst = BinarySearchTreeMap.BinarySearchTreeMap()
add_items(bst, 1, n)
return bst
def add_items(bst, low, high):
if low > high:
# empty range: nothing to insert (handles uneven splits safely)
return
subtree_root = low + (high - low) // 2
bst.insert(subtree_root)
add_items(bst, low, subtree_root - 1)
add_items(bst, subtree_root + 1, high)
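# Added illustrative usage (not part of the original file): a minimal sketch,
# assuming BinarySearchTreeMap exposes insert() as used above.
if __name__ == '__main__':
chain = create_chain_bst(7) # keys 1..7 inserted in ascending order -> a right-leaning chain
complete = create_complete_bst(7) # insertion order 4, 2, 1, 3, 6, 5, 7 -> a balanced tree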
|
andrew-qu2000/Schoolwork
|
cs1134/aq447_hw8/aq447_hw8_q2.py
|
aq447_hw8_q2.py
|
py
| 563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36406902723
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
import os
import csv
import cloudpickle
import numpy as np
import pandas as pd
from scipy.integrate import quad
from scipy.stats import (
gaussian_kde,
ks_2samp,
t
)
from sklearn.feature_selection import SelectorMixin
from sklearn.base import TransformerMixin
from sklearn.utils import resample
from sklearn.metrics import (
f1_score,
precision_score,
recall_score,
average_precision_score,
)
from .logger import log
CSV_WRITE_FORMAT = {
'index': False,
'quoting': csv.QUOTE_ALL,
}
CSV_READ_FORMAT = {
'keep_default_na': False,
}
class Sample:
@staticmethod
def full_sample(data, n):
return data
@staticmethod
def random_sample(data, n, random_state=1):
rng = np.random.default_rng(random_state)
sample = rng.choice(data, n)
return sample
@staticmethod
def percentile_sample(data, n, lower=0, upper=100):
quantiles = np.linspace(lower, upper, n, endpoint=True)
sample = np.percentile(data, quantiles, interpolation='lower')
return sample
@staticmethod
def percentile_interpolation_sample(data, n, lower=0, upper=100):
quantiles = np.linspace(lower, upper, n, endpoint=True)
sample = np.percentile(data, quantiles, interpolation='linear')
return sample
Sample.MODES = {
'random': Sample.random_sample,
'percentile': Sample.percentile_sample,
'interpolate': Sample.percentile_interpolation_sample,
'full': Sample.full_sample,
}
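# Added note (illustrative, not part of the original file): Sample.MODES lets a
# caller pick a sampling strategy by name, e.g. Sample.MODES['percentile'](data, n)
# returns n values at evenly spaced quantiles, 'random' draws n values (with
# replacement) from a seeded generator, and 'full' returns the data unchanged.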
class Stats:
@staticmethod
def compute_integral_boundaries(f, retsize):
u = f.resample(retsize)
a = u.mean() - 8 * u.std()
b = u.mean() + 8 * u.std()
return a, b
@staticmethod
def discrete_hellinger_integral(p, q, a, b, retsize):
x, step = np.linspace(a, b, retsize, endpoint=True, retstep=True)
i = np.dot(np.sqrt(p(x)), np.sqrt(q(x))) * step
if i > 1:
return 0
else:
return i
@classmethod
def discrete_hellinger_distance(cls, p, q, retsize=100):
a1, b1 = cls.compute_integral_boundaries(p, retsize)
a2, b2 = cls.compute_integral_boundaries(q, retsize)
a1, b1, a2, b2 = sorted([a1, b1, a2, b2])
i1 = cls.discrete_hellinger_integral(p, q, a1, b1, retsize)
i2 = cls.discrete_hellinger_integral(p, q, b1, a2, retsize)
i3 = cls.discrete_hellinger_integral(p, q, a2, b2, retsize)
i = i1 + i2 + i3
if i > 1: # To prevent computing a negative root because of an approximation error during integration
return 0
else:
return np.sqrt(1 - i)
@staticmethod
def hellinger_integral(p, q, a=-np.inf, b=np.inf):
value, error = quad(
lambda x: np.sqrt(p(x)*q(x)),
a,
b
)
return value, error
@classmethod
def hellinger_distance(cls, p, q, a=-np.inf, b=np.inf, split_integral=True, retsize=100):
if split_integral:
a1, b1 = cls.compute_integral_boundaries(p, retsize)
a2, b2 = cls.compute_integral_boundaries(q, retsize)
a1, b1, a2, b2 = sorted([a1, b1, a2, b2])
i1, _ = cls.hellinger_integral(p, q, a1, b1)
i2, _ = cls.hellinger_integral(p, q, b1, a2)
i3, _ = cls.hellinger_integral(p, q, a2, b2)
value = i1 + i2 + i3
else:
value, error = cls.hellinger_integral(p.pdf, q.pdf, a, b)
if 1 <= value < 1.1: # To prevent computing a negative root because of an approximation error during integration
return 1
elif 1.1 <= value: # If value > 1.1 the approximation failed too much and the result should be rejected
return 0
else:
return np.sqrt(1 - value)
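# Added note (standard definition, for reference): the Hellinger distance between
# densities p and q is H(p, q) = sqrt(1 - BC(p, q)), where the Bhattacharyya
# coefficient BC(p, q) = integral of sqrt(p(x) * q(x)) dx lies in [0, 1].
# hellinger_integral() estimates BC numerically, and the clamping above guards
# against BC drifting slightly past 1 through quadrature error.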
@classmethod
def hellinger_distance_1samp(cls, sample, pdf, **params):
kde = gaussian_kde(sample, bw_method='silverman')
return cls.hellinger_distance(kde, pdf, split_integral=False)
@classmethod
def hellinger_distance_2samp(cls, samp1, samp2):
kde1 = gaussian_kde(samp1, bw_method='silverman')
kde2 = gaussian_kde(samp2, bw_method='silverman')
return cls.hellinger_distance(kde1, kde2)
class Accessor:
@staticmethod
def get_entity_kde(entity):
entityid, indexid = entity.split(':')
kdepath = os.path.join(indexid, 'kde', entityid + '.kde')
with open(kdepath, 'rb') as f:
kde = cloudpickle.load(f)
return kde
@staticmethod
def get_entity_data(entity):
entityid, indexid = entity.split(':')
datapath = os.path.join(indexid, 'entity', entityid + '.npy')
data = np.load(datapath)
return data
@staticmethod
def get_entity_metadata(entity):
entityid, indexid = entity.split(':')
path = os.path.join(indexid, 'terminology.csv')
terminology = pd.read_csv(path, **CSV_READ_FORMAT, dtype=str)
data = terminology[terminology.entityid == entityid].squeeze()
return data
@staticmethod
def get_entity_aggregate(entity):
columntypes = {
"entityid": str,
"size": int,
"mean": float,
"std": float,
"var": float,
"frequency": float
}
entityid, indexid = entity.split(':')
path = os.path.join(indexid, 'aggregate.csv')
terminology = pd.read_csv(path, **CSV_READ_FORMAT, dtype=columntypes)
data = terminology[terminology.entityid == entityid].squeeze()
return data
@classmethod
def hellinger_distance_2entity(cls, entity1, entity2, strategy='split_integral'):
kde1 = cls.get_entity_kde(entity1)
kde2 = cls.get_entity_kde(entity2)
strategies = ('full', 'split_integral', 'discrete')
if strategy not in strategies:
log.info(f"Hellinger distance strategy {strategy} must be in {strategies}, switching to 'split_integral'")
strategy = 'split_integral'
if strategy == 'full':
hd = Stats.hellinger_distance(kde1, kde2, split_integral=False)
elif strategy == 'split_integral':
hd = Stats.hellinger_distance(kde1, kde2, split_integral=True)
elif strategy == 'discrete':
hd = Stats.discrete_hellinger_distance(kde1, kde2)
return hd
@classmethod
def ks_test_2entity(cls, entity1, entity2):
data1 = cls.get_entity_data(entity1).flatten()
data2 = cls.get_entity_data(entity2).flatten()
return ks_2samp(data1, data2)
@classmethod
def kde_from_entity(cls, entity):
entityid, indexid = entity.split(':')
kdepath = os.path.join(indexid, 'kde', entityid + '.kde')
os.makedirs(os.path.dirname(kdepath), exist_ok=True)
data = cls.get_entity_data(entity)
kde = gaussian_kde(data, bw_method='silverman')
with open(kdepath, 'wb') as f:
cloudpickle.dump(kde, f)
return kde
class CachedAccessor:
KDECACHE = dict()
@classmethod
def get_entity_kde(cls, entity):
if entity in cls.KDECACHE:
kde = cls.KDECACHE.get(entity)
else:
kde = Accessor.get_entity_kde(entity)
cls.KDECACHE[entity] = kde
return kde
DATACACHE = dict()
@classmethod
def get_entity_data(cls, entity):
if entity in cls.DATACACHE:
data = cls.DATACACHE.get(entity)
else:
data = Accessor.get_entity_data(entity)
cls.DATACACHE[entity] = data
return data
METDATACACHE = dict()
@classmethod
def get_entity_metadata(cls, entity):
if entity in cls.METDATACACHE:
data = cls.METDATACACHE.get(entity)
else:
data = Accessor.get_entity_metadata(entity)
cls.METDATACACHE[entity] = data
return data
class Score:
@staticmethod
def tm_rank(groupby, ret=5):
return groupby.sort_values('y_proba', ascending=False).head(ret).Y.any()
@staticmethod
def tm_score(df, groupby_key, ret=5):
res = df.groupby(groupby_key).apply(Score.tm_rank, ret=ret)
n = len(df.index.get_level_values(groupby_key).unique())
return res.value_counts().get(True, 0) / n
@staticmethod
def tm_score_relaxed(df, groupby_key, ret=5):
res = df.groupby(groupby_key).apply(Score.tm_rank, ret=ret)
n = len(df[df.Y == True].index.get_level_values(groupby_key).unique())
return res.value_counts().get(True, 0) / n
@staticmethod
def compute_tm_score(model, df, groupby_key, ret=5):
df = df.copy()
df['y_proba'] = model.predict_proba(df.X)[:,1]
res = df.groupby(groupby_key).apply(Score.tm_rank, ret=ret)
n = len(df[df.Y == True].index.get_level_values(groupby_key).unique())
return res.value_counts().get(True, 0) / n
class NamedFeatureSelector(SelectorMixin, TransformerMixin):
_params_name = set(['columns', 'selected_columns'])
def __init__(self, columns=None, selected_columns=None):
self.columns = columns or []
self.selected_columns = set(selected_columns or [])
def set_params(self, **params):
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = dict()
for k, v in params.items():
if (k in self._params_name):
valid_params[k] = v
for k, v in valid_params.items():
setattr(self, k, v)
return self
def _get_support_mask(self):
mask = np.array(list(map(lambda x: x in self.selected_columns, self.columns)))
return mask
def fit(self, X, y=None):
return self
class Bootstrap:
@staticmethod
def sample(df, rate=1):
n = int(len(df.index) * rate)
return resample(df, n_samples=n)
@staticmethod
def evaluate(model, data):
X = data.drop('Y', axis=1)
Y = data['Y']
Y_pred = model.predict(X)
Y_proba = model.predict_proba(X)[:, 1]
stats = (
f1_score(Y, Y_pred, zero_division=0),
precision_score(Y, Y_pred, zero_division=0),
recall_score(Y, Y_pred, zero_division=0),
average_precision_score(Y, Y_proba)
)
return stats
@staticmethod
def score(model, df, rep=1000, rate=1, verbose=False):
statistics = []
for i in range(rep):
if verbose and (i % 50 == 0):
log.info(f"Bootstrap iteration {i} over {rep}")
test = Bootstrap.sample(df, rate)
stat = Bootstrap.evaluate(model, test)
statistics.append(stat)
statistics = np.array(statistics)
results = dict()
for name, stats in zip(['f1', 'precision', 'recall', 'PR-AUC'], statistics.T):
mu = stats.mean()
std = stats.std()
alpha = 0.05
if std > 0:
st = (mu - stats) / std
q1 = mu - np.quantile(st, 1-0.5*alpha)*std
q2 = mu - np.quantile(st, 0.5*alpha)*std
else:
q1 = q2 = mu
results[name] = {
'mean': mu,
'std': std,
'CI': 1-alpha,
'lower': q1,
'upper': q2,
}
return results
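# Added note (for reference, describing score() above): the reported interval is
# a bootstrap CI built from the studentized pivot st = (mu - stat) / std, i.e.
# [mu - quantile(st, 1 - alpha/2) * std, mu - quantile(st, alpha/2) * std],
# which with alpha = 0.05 gives the 95% bounds stored as 'lower' and 'upper'.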
|
mcrts/dmatch
|
dmatch/utils.py
|
utils.py
|
py
| 11,860 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23595304281
|
import os
import re
import time
from option import Option
import pandas as pd
import wget as wget
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import download
__author__ = 'Song Hui'  # author name
def get_options_from_command_line():
import argparse
# Initialize parser
parser = argparse.ArgumentParser()
# Adding optional argument
parser.add_argument("-o", "--output_dir", help="to set directory of all output files")
parser.add_argument("-r", "--result_file", help="to set result CSV file name")
parser.add_argument("-l", "--limit", help="to set max file count")
# Read arguments from command line
args = parser.parse_args()
if args:
print("parsing arguments: {}".format(args))
return Option(args.output_dir, args.result_file, int(args.limit) if args.limit is not None else None)
if __name__ == '__main__':
# parse the command-line arguments
option = get_options_from_command_line()
if not os.path.exists(option.output_dir):
os.mkdir(option.output_dir)
# regular expressions for extracting the file name from a URL
exp_img = re.compile(r'v/.+?/(.*?)\?')
exp_video = re.compile(r'v/.+?/(.*?)\?')
web_options = webdriver.ChromeOptions()
web_options.add_argument("--enable-javascript")
# web_options.add_argument('--always-authorize-plugins=true')
with webdriver.Chrome(options=web_options) as driver:
# with webdriver.Chrome(chrome_options=options) as driver:
wait = WebDriverWait(driver, 10)
# open the start page
driver.get(
"https://www.facebook.com/ads/library/?active_status=active&ad_type=all&country=ALL&q=clothing&sort_data[direction]=desc&sort_data[mode]=relevancy_monthly_grouped&start_date[min]=2021-11-25&start_date[max]=2021-11-26&search_type=keyword_unordered&media_type=video")
# collect all video description nodes
results = []
while len(results) < option.limit:
results = driver.find_elements(By.CSS_SELECTOR,
"div._99s5")
driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
time.sleep(5)
times = 0
# prepare the data table
columns = ['title', 'img_name', 'video_name', 'desc']
df = pd.DataFrame(columns=columns)
for ele in results:
ele_item = ele.find_element(By.CSS_SELECTOR, "div.iajz466s div._7jyg")
# get the title node
title_item = ele_item.find_element(By.CSS_SELECTOR,
"div._8nsi a.aa8h9o0m>span.a53abz89")
title = title_item.text
print(title)
# get the description node
desc_item = ele_item.find_element(By.CSS_SELECTOR,
"div._7jyr>span div._4ik4>div")
desc = desc_item.text
# get the video/poster image node
video_item = ele_item.find_element(By.CSS_SELECTOR,
"div._8o0a>video")
img_url = video_item.get_attribute('poster')
# print(img_url)
img_name = re.search(exp_img, img_url).group(1)
print(img_name)
video_url = video_item.get_attribute('src')
# print(video_url)
video_name = re.search(exp_video, video_url).group(1) + 'mp4'
# the site serves the video file without an extension; append one so it should play
print(video_name)
if os.path.exists(option.output_dir + video_name):
# skip this record if the corresponding video file already exists in the output directory
continue
# download the corresponding image and video files
try:
wget.download(img_url, option.output_dir + img_name)
except Exception as e:
print('Error downloading image "{}": {}'.format(img_url, e))
continue
try:
wget.download(video_url, option.output_dir + video_name)
except Exception as e:
print('Error downloading video "{}": {}'.format(video_url, e))
continue
# append one data record
df = df.append({'title': title, 'img_name': img_name, 'video_name': video_name, 'desc': desc},
ignore_index=True)
# check the record limit
times += 1
if times >= option.limit:
break
# # driver.get(ele.get_attribute('src'))
time.sleep(0.5)
# save the data file
with open(option.output_result, 'a', encoding="utf-8", newline='') as f:
# if the file already exists, append the data
df.to_csv(f, header=f.tell() == 0)
|
songofhawk/simplerpa
|
test/test_selenium/facebook_download.py
|
facebook_download.py
|
py
| 4,871 |
python
|
en
|
code
| 15 |
github-code
|
6
|
41707501948
|
import tensorflow as tf
from config import cfg
def detect_loss():
def get_box_highest_percentage(arr):
shape = tf.shape(arr)
reshaped = tf.reshape(arr, (shape[0], tf.reduce_prod(shape[1:-1]), -1))
# returns array containing the index of the highest percentage of each batch
# where 0 <= index <= height * width
max_prob_ind = tf.argmax(reshaped[..., -1], axis=-1, output_type=tf.int32)
# turn indices (batch, y * x) into (batch, y, x)
# returns (3, batch) tensor
unraveled = tf.unravel_index(max_prob_ind, shape[:-1])
# turn tensor into (batch, 3) and keep only (y, x)
unraveled = tf.transpose(unraveled)[:, 1:]
y, x = unraveled[..., 0], unraveled[..., 1]
# stack indices and create (batch, 5) tensor which
# contains height, width, offset_y, offset_x, percentage
indices = tf.stack([tf.range(shape[0]), y, x], axis=-1)
box = tf.gather_nd(arr, indices)
y, x = tf.cast(y, tf.float32), tf.cast(x, tf.float32)
# transform box to (y + offset_y, x + offset_x, GRID_SIZE * height, GRID_SIZE * width, obj)
# output is (batch, 5)
out = tf.stack([y + box[..., 2], x + box[..., 3],
cfg.NN.GRID_SIZE * box[..., 0], cfg.NN.GRID_SIZE * box[..., 1],
box[..., -1]], axis=-1)
return out
def loss(y_true, y_pred):
# get the box with the highest percentage in each image
true_box = get_box_highest_percentage(y_true)
pred_box = get_box_highest_percentage(y_pred)
# object loss
obj_loss = tf.keras.losses.binary_crossentropy(y_true[..., 4:5], y_pred[..., 4:5])
# mse with the boxes that have the highest percentage
box_loss = tf.reduce_sum(tf.math.squared_difference(true_box[..., :-1], pred_box[..., :-1]))
return tf.reduce_sum(obj_loss) + box_loss
return loss
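# Added illustrative usage (not part of the original file): detect_loss() returns
# a Keras-compatible loss closure, so a hypothetical tf.keras model whose output
# is a (batch, grid_h, grid_w, 5) tensor could be compiled with it, e.g.
#
# model.compile(optimizer='adam', loss=detect_loss())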
|
burnpiro/tiny-face-detection-tensorflow2
|
model/loss.py
|
loss.py
|
py
| 1,942 |
python
|
en
|
code
| 27 |
github-code
|
6
|
10966555857
|
import json
class Config(dict):
def __init__(self, path=None, section='default', *args, **kwargs):
super().__init__(*args, **kwargs)
if path is not None:
self.read(path, section)
def read(self, path, section='default'):
'''read config from config file.
will clear config before read'''
self.section = section
self.dirty = False
self.hasDefault = False
self.path = path
self.clear()
with open(path) as f:
self.conf = json.load(f)
if self.section not in self.conf:
raise KeyError('{} not a valid key'.format(self.section))
self.hasDefault = 'default' in self.conf
if self.hasDefault:
self.update(self.conf['default'])
self.update(self.conf[self.section])
def save(self):
'''save config.'''
dconf = {}
if self.hasDefault:
dconf = self.conf['default']
sconf = self.conf[self.section]
# delete keys
for key in set(sconf):
if key not in self:
self.dirty = True
del sconf[key]
# add / change key
for key in self:
if key in dconf and self[key] == dconf[key]:
continue
else:
self.dirty = True
sconf[key] = self[key]
if self.dirty:
with open(self.path, 'w') as f:
json.dump(self.conf, f, sort_keys=True, ensure_ascii=False,
indent=4, separators=(',', ': '))
self.dirty = False
def write(self, path):
'''write conf to file'''
self.path = path
self.dirty = True
self.save()
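# Added illustrative usage (not part of the original file): a minimal sketch,
# assuming a hypothetical settings.json shaped the way read() expects, i.e. a
# top-level object keyed by section name with an optional "default" section:
#
# {"default": {"lang": "en"}, "anime": {"dir": "/data/anime"}}
#
# cfg = Config('settings.json', section='anime')
# print(cfg['lang'], cfg['dir']) # "en" "/data/anime"
# cfg['dir'] = '/mnt/anime'
# cfg.save() # persists the modified section back to settings.json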
|
lycsjm/acgnmanager
|
src/lib/config.py
|
config.py
|
py
| 1,751 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13348518622
|
import argparse
import datetime
import pathlib
import subprocess
import sys
import time
import run_all_utils
command = '''
python ../modules/S3segmenter/large/S3segmenter.py
--imagePath "{}"
--stackProbPath "{}"
--outputPath "{}"
--probMapChan {probMapChan}
--area-max 50000
--expand-size {expand_size}
--maxima-footprint-size {maxima_footprint_size}
--mean-intensity-min {mean_intensity_min}
--pixelSize {pixelSize}
'''
MODULE_NAME = 's3seg'
ORION_DEFAULTS = [
('probMapChan', 1, 'int'),
('expand-size', 5, 'int'),
('maxima-footprint-size', 13, 'int'),
('mean-intensity-min', 128, 'float'),
('pixelSize', 0.325, 'float'),
]
def main(argv=sys.argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-c',
metavar='config-csv',
required=True
)
parser.add_argument(
'-m',
metavar='module-params',
required=False,
default=None
)
parsed_args = parser.parse_args(argv[1:])
CURR = pathlib.Path(__file__).resolve().parent
file_config, module_params, log_path = run_all_utils.init_run(
parsed_args, ORION_DEFAULTS, MODULE_NAME
)
for config in file_config[:]:
config = run_all_utils.set_config_defaults(config)
name = config['name']
out_dir = config['out_dir']
print('Processing', name)
nucleus_channel = module_params['probMapChan'] - 1
pmap_path = out_dir / name / 'unmicst2' / f'{name}_Probabilities_{nucleus_channel}.ome.tif'
command_run = [
'python',
CURR.parent / 'modules/S3segmenter/large/S3segmenter.py',
'--imagePath', config['path'],
'--stackProbPath', pmap_path,
'--outputPath', out_dir / name / 'segmentation',
'--area-max', str(50000)
]
for kk, vv in module_params.items():
command_run.extend([f"--{kk}", str(vv)])
start_time = int(time.perf_counter())
subprocess.run(command_run)
end_time = int(time.perf_counter())
print('elapsed', datetime.timedelta(seconds=end_time-start_time))
print()
run_all_utils.to_log(
log_path, config['path'], end_time-start_time, module_params
)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Yu-AnChen/orion-scripts
|
processing/command-s3seg.py
|
command-s3seg.py
|
py
| 2,454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18777503409
|
from flask import Flask
from flask import render_template
from pymongo import MongoClient
import json
from bson import json_util
from bson.json_util import dumps
import random
import numpy as np
import ast
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import pairwise_distances
import matplotlib.pylab as plt
from sklearn.preprocessing import StandardScaler
from scipy.spatial.distance import cdist
from scipy.spatial.distance import pdist,squareform
import collections
from collections import defaultdict
from sklearn.decomposition import PCA
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import pairwise_distances
import math
app = Flask(__name__)
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
DBS_NAME = 'crime'
COLLECTION_NAME = 'projects'
FIELDS = {'crmrte': True, 'prbarr': True, 'prbconv': True, 'prbpris': True,'avgsen': True,'density': True,'wcon': True,'wtuc': True,'wtrd': True,'wfir': True,'wser': True,'wmfg': True,'taxpc': True,'pctmin': True,'wfed': True,'wsta': True,'wloc': True,'mix': True,'pctymle': True,'_id': False}
@app.route("/")
def index():
return render_template("index.html")
@app.route("/crime/projects")
def crime_projects():
connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
collection = connection[DBS_NAME][COLLECTION_NAME]
projects = collection.find(projection=FIELDS)
json_projects = []
for project in projects:
json_projects.append(project)
json_projects = json.dumps(json_projects, default=json_util.default)
connection.close()
return json_projects
proj_details=crime_projects();
crime_data = pd.read_json(proj_details)
# testarray = ast.literal_eval(proj_details)
clusterObj= crime_data[['crmrte','prbarr','prbconv','prbpris','avgsen',
'density','wcon','wtuc','wtrd','wfir','wser','wmfg','taxpc','pctmin','wfed','wsta','wloc','mix','pctymle']]
clustervar=clusterObj.copy()
# clustervar['county']= preprocessing.scale(clustervar['county'].astype('float64'))
# clustervar['year']= preprocessing.scale(clustervar['year'].astype('float64'))
# standardize every clustering feature (zero mean, unit variance)
for col in clustervar.columns:
clustervar[col] = preprocessing.scale(clustervar[col].astype('float64'))
clus_train = clustervar
def findSuitableK():
clusters=range(1,9)
meandist=[]
for k in clusters:
model=KMeans(n_clusters=k)
model.fit(clus_train)
clusassign=model.predict(clus_train)
meandist.append(sum(np.min(cdist(clus_train, model.cluster_centers_, 'euclidean'), axis=1))
/ clus_train.shape[0])
plt.plot(clusters, meandist)
plt.xlabel('Number of clusters')
plt.ylabel('Average distance')
plt.title('Selecting k with the Elbow Method') # pick the fewest number of clusters that reduces the average distance
plt.show()
findSuitableK()
def createClusters():
model=KMeans(n_clusters=3)
model.fit(clus_train)
clusassign=model.predict(clus_train)
lables = model.labels_
return lables
lables=createClusters()
def groupClusters():
my_dict = {}
for (ind,elem) in enumerate(lables):
if elem in my_dict:
my_dict[elem].append(ind)
else:
my_dict.update({elem:[ind]})
return my_dict
cluster_dict=groupClusters()
def sampleClusters():
cluster_sample={}
df = pd.DataFrame()
# # df = pd.DataFrame(index=range(0,13),columns=['county','year','crmrte','prbarr','prbconv','prbpris','avgsen',
# 'density','wcon','wfir','wser','wmfg'], dtype='float64')
# df= pd.DataFrame([['county','year','crmrte','prbarr','prbconv','prbpris','avgsen',
# 'density','wcon','wfir','wser','wmfg']])
for i in range(0,3):
length = len(cluster_dict[i])
cluster_sample[i]=random.sample(cluster_dict[i],length//3)
for k in cluster_sample[i]:
df=df.append(clus_train.iloc[[k]],ignore_index=True)
# df.iloc[[count]] = clus_train.iloc[[k]]
return df
def randomSample():
newClusterTrain= clustervar.sample(n=len(clus_train)//3)
return newClusterTrain
randomSampledClusterFrame=randomSample()
sampled_dataFrame=sampleClusters()
pca = PCA(n_components=19)
pca.fit(sampled_dataFrame)
loadings=pca.components_
def pcaRandomSample():
r_pca = PCA(n_components=19)
r_pca.fit_transform(randomSampledClusterFrame)
r_loadings=r_pca.components_
return r_pca,r_loadings
random_pca,Random_loadings =pcaRandomSample()
def screeplot(pca, standardised_values):
y = pca.explained_variance_
x = np.arange(len(y)) + 1
plt.plot(x, y, "o-")
plt.xticks(x, ["PC"+str(i) for i in x], rotation=60)
plt.ylabel("Variance")
plt.show()
return np.array(y),np.array(x)
y,x =screeplot(pca, sampled_dataFrame)
y_random,x_random =screeplot(random_pca, Random_loadings)
@app.route("/crime/screeplot")
def showScreeplot():
return render_template("screeplot.html",y=y.tolist(),x=x.tolist())
@app.route("/crime/randomscreeplot")
def showScreeplot_random():
return render_template("randomScreePlot.html",y=y_random.tolist(),x=x_random.tolist())
def squaredLoadings():
w, h = 3, 19;
squaredLoadings = [0 for y in range(h)]
for i in range(len(loadings)):
sum=0
for j in range(3):
sum = sum + loadings[j][i] **2
squaredLoadings[i]=sum
return squaredLoadings
sumSquareLoadings=squaredLoadings()
@app.route("/crime/squaredLoadings")
def showSqureloadingsPlot():
sortedSumSquareLoadings=sorted(sumSquareLoadings,reverse=True)
length= len(sortedSumSquareLoadings)
columns=[0 for y in range(length)]
index=0
for i in sortedSumSquareLoadings:
columns[index]=clus_train.columns.values[sumSquareLoadings.index(i)]
index =index+1
return render_template("squaredloadings.html",y=sortedSumSquareLoadings,x=json.dumps(columns))
def randomSquaredLoadings():
w, h = 3, 19  # 19 features, matching the number of PCA loadings
squaredLoadings = [0 for y in range(h)]
for i in range(len(Random_loadings)):
sum=0
for j in range(3):
sum = sum + Random_loadings[j][i] ** 2
squaredLoadings[i]=sum
return squaredLoadings
randomsumSquareLoadings=randomSquaredLoadings()
@app.route("/crime/randomsquaredLoadings")
def showRandomSqureloadingsPlot():
sortedSumSquareLoadings=sorted(randomsumSquareLoadings,reverse=True)
length= len(sortedSumSquareLoadings)
columns=[0 for y in range(length)]
index=0
for i in sortedSumSquareLoadings:
columns[index]=clus_train.columns.values[randomsumSquareLoadings.index(i)]
index =index+1
return render_template("randomSquaredloadings.html",y=sortedSumSquareLoadings,x=json.dumps(columns))
@app.route("/crime/scatterMatrix")
def getColumnData():
sortedSumSquareLoadings=sorted(sumSquareLoadings,reverse=True)
columns=[0 for y in range(3)]
columnsVals={}
index=0
for i in sortedSumSquareLoadings:
columns[index]=clus_train.columns.values[sumSquareLoadings.index(i)]
index =index+1
if index==3:
break
for i in range(3):
columnsVals.update({columns[i]:sampled_dataFrame.loc[:,columns[i]].tolist()})
return render_template("scatterMatrix.html",dataVal=columnsVals,traits=columns)
@app.route("/crime/randomScatterMatrix")
def getRandomColumnData():
sortedSumSquareLoadings=sorted(randomsumSquareLoadings,reverse=True)
columns=[0 for y in range(3)]
# columnsVals=[0 for y in range(3)]
columnsVals={}
index=0
for i in sortedSumSquareLoadings:
columns[index]=clus_train.columns.values[randomsumSquareLoadings.index(i)]
index =index+1
if index==3:
break
for i in range(3):
columnsVals.update({columns[i]:randomSampledClusterFrame.loc[:,columns[i]].tolist()})
return render_template("scatterMatrix.html",dataVal=columnsVals,traits=columns)
def MDS_DimReduction():
mdsData = MDS(n_components=2,dissimilarity='euclidean')
mdsData.fit(sampled_dataFrame)
return mdsData.embedding_
def MDS_RandomDimReduction():
mdsData = MDS(n_components=2,dissimilarity='euclidean')
mdsData.fit(randomSampledClusterFrame)
return mdsData.embedding_
def PCA_TopComp():
pca = PCA(n_components=2)
return pca.fit_transform(sampled_dataFrame)
def PCA_RandomTopComp():
pca = PCA(n_components=2)
return pca.fit_transform(randomSampledClusterFrame)
top_PCAVal=PCA_TopComp()
top_RandomPCAVal=PCA_RandomTopComp()
@app.route("/crime/scatterPlot")
def PCA_ScatterPlot():
return render_template("scatterPlot.html",dataVal=top_PCAVal.tolist())
@app.route("/crime/randomscatterPlot")
def PCA_RandomScatterPlot():
return render_template("randomScatterPlot.html",dataVal=top_RandomPCAVal.tolist())
def MDS_DimReduction_Correlation():
mdsData = MDS(n_components=2,dissimilarity='precomputed',max_iter=10)
precompute=pairwise_distances(sampled_dataFrame.values,metric='correlation')
return mdsData.fit_transform(precompute)
def MDS_Random_DimReduction_Correlation():
mdsData = MDS(n_components=2,dissimilarity='precomputed',max_iter=10)
precompute=pairwise_distances(randomSampledClusterFrame.values,metric='correlation')
return mdsData.fit_transform(precompute)
@app.route("/crime/MDSscatterPlot")
def MDS_ScatterPlot():
return render_template("ScatterPlotMDS.html",dataVal=mds_embeddings.tolist())
@app.route("/crime/MDSCorrelationscatterPlot")
def MDS_ScatterPlot_Correlation():
return render_template("ScatterPlotMDS.html",dataVal=mds_embeddings_correlation.tolist())
@app.route("/crime/MDSRandomscatterPlot")
def MDS_RandomScatterPlot():
return render_template("RandomScatterPlotMDS.html",dataVal=mds_RandomEmbeddings.tolist())
@app.route("/crime/MDSRandomCorrelationscatterPlot")
def MDS_RandomScatterPlot_Correlation():
return render_template("RandomScatterPlotMDS.html",dataVal=mds_Random_embeddings_correlation.tolist())
mds_embeddings=MDS_DimReduction()
mds_embeddings_correlation=MDS_DimReduction_Correlation()
mds_RandomEmbeddings=MDS_RandomDimReduction()
mds_Random_embeddings_correlation=MDS_Random_DimReduction_Correlation()
if __name__ == "__main__":
app.run(host='0.0.0.0',port=5005,debug=True)
|
rbadri91/N.C.-Crime-Data-Visualization
|
app.py
|
app.py
|
py
| 11,214 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32749134758
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, classification_report
def main():
# the goal is to predict if the borrower will pay back the money or not
# reading loan_data.csv as a dataframe
loans = pd.read_csv('loan_data.csv')
# checking out the loans information
print(loans.info())
print(loans.head())
print(loans.describe())
# histogram of two FICO distributions on top of each other, one for each credit.policy outcome
plt.figure(figsize = (10, 6))
loans[loans['credit.policy'] == 1]['fico'].hist(alpha = 0.5, bins = 30, color = 'blue', label = 'Credit policy = 1')
loans[loans['credit.policy'] == 0]['fico'].hist(alpha = 0.5, bins = 30, color = 'red', label = 'Credit policy = 0')
plt.legend()
plt.xlabel('FICO')
# similar figure, except this time selected by the not.fully.paid column
plt.figure(figsize = (10, 6))
loans[loans['not.fully.paid'] == 1]['fico'].hist(alpha = 0.5, bins = 30, label = 'Not fully paid = 1', color = 'blue')
loans[loans['not.fully.paid'] == 0]['fico'].hist(alpha = 0.5, bins = 30, label = 'Not fully paid = 0', color = 'red')
plt.legend()
plt.xlabel('FICO')
# countplot showing the counts of loans by purpose, with the color hue defined by not.fully.paid
plt.figure(figsize = (15, 6))
sns.countplot(data = loans, hue = 'not.fully.paid', x = 'purpose')
# trend between FICO score and interest rate
sns.jointplot(data = loans, kind = 'scatter', x = 'fico', y = 'int.rate')
# lmplots to see if the trend differs between not.fully.paid and credit.policy
sns.lmplot(x = 'fico', y = 'int.rate', data = loans, hue = 'credit.policy', col = 'not.fully.paid')
# purpose column is categorical; transforming them using dummy variables
cat_feats = ['purpose']
final_data = pd.get_dummies(loans, columns = cat_feats, drop_first = True)
print(final_data.info())
# splitting data into a training set and a testing set
X = final_data.drop('not.fully.paid', axis = 1)
y = final_data['not.fully.paid']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 101)
# instance of DecisionTreeClassifier() and fitting it to the training data
dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train)
# predictions from the test set and a classification report and a confusion matrix.
predictions = dtree.predict(X_test)
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
# instance of the RandomForestClassifier class and fitting it to the training data
rf = RandomForestClassifier(n_estimators = 600)
rf.fit(X_train, y_train)
# predicting the class of not.fully.paid for the X_test data
rf_predictions = rf.predict(X_test)
# classification report and confusion matrix from the results
print(classification_report(y_test, rf_predictions))
print(confusion_matrix(y_test, rf_predictions))
plt.show()
if __name__ == '__main__':
main()
|
AleksandarPav/Decision-Tree-and-Random-Forest
|
main.py
|
main.py
|
py
| 3,272 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8632528934
|
from multiprocessing import cpu_count
from deepsecrets.core.utils.fs import path_exists
QUOTA_FILE = '/sys/fs/cgroup/cpu/cpu.cfs_quota_us'
PERIOD_FILE = '/sys/fs/cgroup/cpu/cpu.cfs_period_us'
CGROUP_2_MAX = '/sys/fs/cgroup/cpu.max'
class CpuHelper:
def get_limit(self) -> int:
multiproc_limit = self._by_multiproc()
cgroup = self._by_cgroup()
final = cgroup if cgroup != -1 else multiproc_limit
return final if final > 0 else 0
def _by_multiproc(self):
return cpu_count()
def _by_cgroup(self):
quota = 1
period = -1
# cgroup 2: https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
if path_exists(CGROUP_2_MAX):
try:
quota, period = self.__cgroup2()
return quota // period
except Exception:
pass
# cgroup 1: https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/index.html
if path_exists(QUOTA_FILE) and path_exists(PERIOD_FILE):
try:
quota, period = self.__cgroup1()
return quota // period
except Exception:
pass
return quota // period
def __cgroup1(self):
quota = 1
period = -1
with open(QUOTA_FILE) as f:
quota = int(f.read())
with open(PERIOD_FILE) as f:
period = int(f.read())
return quota, period
def __cgroup2(self):
quota = 1
period = -1
with open(CGROUP_2_MAX) as f:
str_quota_period = f.read().split(' ')
quota = int(str_quota_period[0])
period = int(str_quota_period[1])
return quota, period
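# Added note (assumption, based on the cgroup documentation linked above): cgroup
# v2 exposes a single cpu.max file containing "<quota> <period>", e.g.
# "200000 100000" -> 200000 // 100000 = 2 usable CPUs, while cgroup v1 splits the
# same values across cpu.cfs_quota_us and cpu.cfs_period_us. An unlimited quota
# ("max" in v2, -1 in v1) makes the parse or the division fall through, so
# get_limit() ends up using multiprocessing.cpu_count() instead.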
|
avito-tech/deepsecrets
|
deepsecrets/core/utils/cpu.py
|
cpu.py
|
py
| 1,763 |
python
|
en
|
code
| 174 |
github-code
|
6
|
30357845811
|
import unittest
from traits.api import Enum, HasTraits, Int, Str, Instance
from traitsui.api import HGroup, Item, Group, VGroup, View
from traitsui.menu import ToolBar, Action
from traitsui.testing.api import Index, IsVisible, MouseClick, UITester
from traitsui.tests._tools import (
create_ui,
requires_toolkit,
process_cascade_events,
ToolkitName,
)
class FooPanel(HasTraits):
my_int = Int(2)
my_str = Str("I am a panel/subpanel")
toolbar = Instance(ToolBar)
def default_traits_view(self):
view = View(
Item(name="my_int"),
Item(name="my_str"),
title="FooPanel",
buttons=["OK", "Cancel"],
toolbar=self.toolbar,
)
return view
def _toolbar_default(self):
return ToolBar(Action(name="Open file"))
class FooDialog(HasTraits):
panel1 = Instance(FooPanel)
panel2 = Instance(FooPanel)
view = View(
Group(Item("panel1"), Item("panel2"), layout="split", style="custom")
)
def _panel1_default(self):
return FooPanel()
def _panel2_default(self):
return FooPanel()
class ScrollableGroupExample(HasTraits):
my_int = Int(2)
my_str = Str("The group is scrollable")
scrollable_group_view = View(
Group(
Item(name="my_int"),
Item(name="my_str"),
scrollable=True,
),
title="FooPanel",
kind='subpanel',
)
non_scrollable_group_view = View(
Group(
Item(name="my_int"),
Item(name="my_str"),
scrollable=False,
),
title="FooPanel",
kind='subpanel',
)
scrollable_group_box_view = View(
Group(
Item(name="my_int"),
Item(name="my_str"),
scrollable=True,
label="Scrollable View",
show_border=True,
),
title="FooPanel",
kind='subpanel',
)
scrollable_labelled_group_view = View(
Group(
Item(name="my_int"),
Item(name="my_str"),
scrollable=True,
label="Scrollable View",
),
title="FooPanel",
kind='subpanel',
)
class ScrollableGroupVisibleWhen(HasTraits):
bar = Str("bar!")
baz = Str("Baz?")
enabled = Enum("Yes", "No")
def default_traits_view(self):
view = View(
Item("enabled"),
HGroup(
VGroup(
Item("bar"),
scrollable=True,
visible_when="enabled=='Yes'",
id='bar_group',
),
VGroup(
Item("baz"),
scrollable=True,
visible_when="enabled=='No'",
id='baz_group',
),
),
)
return view
@requires_toolkit([ToolkitName.qt])
class TestUIPanel(unittest.TestCase):
def setup_qt_dock_window(self):
from pyface.qt import QtGui
# set up the dock window for qt
main_window = QtGui.QMainWindow()
self.addCleanup(process_cascade_events)
self.addCleanup(main_window.close)
dock = QtGui.QDockWidget("testing", main_window)
dock.setWidget(QtGui.QMainWindow())
return main_window, dock
def test_panel_has_toolbar_buttons_qt(self):
from pyface.qt import QtGui
_, dock = self.setup_qt_dock_window()
# add panel
panel = FooPanel()
with create_ui(panel, dict(parent=dock.widget(), kind="panel")) as ui:
dock.widget().setCentralWidget(ui.control)
# There should be a toolbar for the panel
self.assertIsNotNone(dock.findChild(QtGui.QToolBar))
# There should be buttons too
# Not searching from dock because the dock panel has buttons for
# popping up and closing the panel
self.assertIsNotNone(ui.control.findChild(QtGui.QPushButton))
def test_subpanel_has_toolbar_no_buttons_qt(self):
from pyface.qt import QtGui
_, dock = self.setup_qt_dock_window()
# add panel
panel = FooPanel()
parent = dock.widget()
with create_ui(panel, dict(parent=parent, kind="subpanel")) as ui:
dock.widget().setCentralWidget(ui.control)
# There should be a toolbar for the subpanel
self.assertIsNotNone(dock.findChild(QtGui.QToolBar))
# Buttons should not be shown for subpanel
# Not searching from dock because the dock panel has buttons for
# popping up and closing the panel
self.assertIsNone(ui.control.findChild(QtGui.QPushButton))
def test_subpanel_no_toolbar_nor_button_in_widget(self):
from pyface.qt import QtGui
# FooDialog uses a QWidget to contain the panels
# No attempt should be made for adding the toolbars
foo_window = FooDialog()
with create_ui(foo_window) as ui:
# No toolbar for the dialog
self.assertIsNone(ui.control.findChild(QtGui.QToolBar))
# No button
self.assertIsNone(ui.control.findChild(QtGui.QPushButton))
# regression test for enthought/traitsui#1512
def test_scrollable_group_visible_when(self):
from pyface.qt import QtGui
obj = ScrollableGroupVisibleWhen()
tester = UITester()
with tester.create_ui(obj) as ui:
bar_group = tester.find_by_id(ui, 'bar_group')
baz_group = tester.find_by_id(ui, 'baz_group')
# for a scrollable group the GroupEditors control should be a
# QScrollArea not just the QWidget. We want the full area to be
# not visible, not just the text box widget.
self.assertIsInstance(bar_group._target.control, QtGui.QScrollArea)
self.assertIsInstance(baz_group._target.control, QtGui.QScrollArea)
self.assertTrue(bar_group.inspect(IsVisible()))
self.assertFalse(baz_group.inspect(IsVisible()))
enabled_box = tester.find_by_name(ui, 'enabled')
baz_item = enabled_box.locate(Index(1))
baz_item.perform(MouseClick())
self.assertTrue(baz_group.inspect(IsVisible()))
self.assertFalse(bar_group.inspect(IsVisible()))
@requires_toolkit([ToolkitName.qt])
class TestPanelLayout(unittest.TestCase):
def test_scrollable_group_typical(self):
from pyface.qt import QtGui
example = ScrollableGroupExample()
ui = example.edit_traits(view=scrollable_group_view)
try:
mainwindow = ui.control.layout().itemAt(0).widget()
scroll_area = mainwindow.centralWidget()
self.assertIsInstance(scroll_area, QtGui.QScrollArea)
content = scroll_area.widget()
self.assertEqual(type(content), QtGui.QWidget)
finally:
ui.dispose()
def test_scrollable_group_box(self):
from pyface.qt import QtGui
example = ScrollableGroupExample()
ui = example.edit_traits(view=scrollable_group_box_view)
try:
mainwindow = ui.control.layout().itemAt(0).widget()
scroll_area = mainwindow.centralWidget()
self.assertIsInstance(scroll_area, QtGui.QScrollArea)
group_box = scroll_area.widget()
self.assertIsInstance(group_box, QtGui.QGroupBox)
self.assertEqual(group_box.title(), "Scrollable View")
finally:
ui.dispose()
def test_scrollable_labelled_group(self):
from pyface.qt import QtGui
example = ScrollableGroupExample()
ui = example.edit_traits(view=scrollable_labelled_group_view)
try:
mainwindow = ui.control.layout().itemAt(0).widget()
scroll_area = mainwindow.centralWidget()
self.assertIsInstance(scroll_area, QtGui.QScrollArea)
content = scroll_area.widget()
self.assertEqual(type(content), QtGui.QWidget)
finally:
ui.dispose()
def test_non_scrollable_group_typical(self):
from pyface.qt import QtGui
example = ScrollableGroupExample(my_str="The group is not scrollable")
ui = example.edit_traits(view=non_scrollable_group_view)
try:
mainwindow = ui.control.layout().itemAt(0).widget()
content = mainwindow.centralWidget()
self.assertEqual(type(content), QtGui.QWidget)
finally:
ui.dispose()
|
enthought/traitsui
|
traitsui/qt/tests/test_ui_panel.py
|
test_ui_panel.py
|
py
| 8,436 |
python
|
en
|
code
| 290 |
github-code
|
6
|
40170866347
|
import os
import numpy as np
import cv2
import imutils
import sys
np.set_printoptions(threshold=sys.maxsize)
corner_shapes_map = {
3: "triangle",
4: "rectangle",
5: "pentagon",
6: "hexagon"
}
model_prediction_map = {
0: "triangle",
1: "rectangle",
2: "circle"
}
def get_corners_in_canvas(canvas):
detected_contours = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
final_contours = imutils.grab_contours(detected_contours)
peri = cv2.arcLength(final_contours[0], True)
approx = cv2.approxPolyDP(final_contours[0], 0.1 * peri, True)
return len(approx)
def get_shape_from_model(canvas):
from keras.models import load_model
model = load_model(os.path.join(os.getcwd(), "ML", "model.h5"))
m_input = cv2.resize(canvas, (60, 60))
m_input = m_input.astype('float32')
print(m_input.shape)
# m_input /= 255
m_input = m_input.reshape(np.prod([60, 60]))
pred_list = model.predict(m_input.reshape(1, np.prod([60, 60])))[0].tolist()
print(pred_list)
max_val = max(pred_list)
return model_prediction_map[pred_list.index(max_val)]
def get_shape(arr, use_ml=False):
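    # `arr` is a list of (timestamp, x, y) samples (see the commented-out example below):
    # points are sorted by timestamp, shifted in x, rasterised onto a 200x200 canvas and
    # then classified either by counting polygon corners or by the Keras model.
    # Note: dtype=np.uint8 below truncates fractional samples and wraps negative ones
    # before the +50 shift; np.int32 or float32 would preserve them.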
np_arr = np.array(arr, dtype=np.uint8)
# Sort by timestamps
np_arr = np_arr[np.argsort(np_arr[:, 0])]
contours = np_arr[:, 1:]
# Hardcoded: Offset x values by 50.
contours[:, 0] += 50
# Create a canvas and fill it up for a thresh img
# canvas = np.full((200, 200), 255, dtype=np.uint8)
# cv2.fillPoly(canvas, pts=np.int32([contours]), color=0)
canvas = np.full((200, 200), 0, dtype=np.uint8)
cv2.fillPoly(canvas, pts=np.int32([contours]), color=255)
if use_ml:
return get_shape_from_model(canvas)
else:
n_corners = get_corners_in_canvas(canvas)
if n_corners > 6:
return "circle"
if n_corners in corner_shapes_map:
return corner_shapes_map[n_corners]
return None
#
# if __name__ == "__main__":
# inp = [(0, 9.4, 83.4), (1, 10.4, 83.2), (2, 8.8, 83.2), (3, 9, 84), (4, 4.6, 81.6), (5, 1.6, 79.4), (6, -7.8, 75.2),
# (7, -12.6, 67.6), (8, -14.6, 59), (9, -10.6, 52.4), (10, -3.6, 49.6), (11, 12.4, 51.8), (12, 21.8, 56.4),
# (13, 28.8, 64.4), (14, 30.4, 73.4), (15, 27.2, 78.6), (16, 19.4, 79.2), (17, 15.4, 82.2), (18, 10.8, 82.4),
# (19, 10.4, 82), (20, 9.6, 81.6), (21, 10.2, 83.8)]
#
# print(get_shape(inp, True))
|
nirajsrimal/UWB_2FA
|
BackEnd/solver.py
|
solver.py
|
py
| 2,434 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41648714624
|
import unittest
from PIL import Image
import numpy as np
from texture.analysis import CoOccur
class MyTestCase(unittest.TestCase):
def test_offset_slices(self):
slices = CoOccur._offset_slices(4, 225)
self.assertEqual(slices, ([[None, -3], [3, None]], [[3, None], [None, -3]]))
pixels = np.array([[1, 2, 3, 1, 0],
[0, 7, 5, 8, 2],
[5, 4, 0, 2, 5],
[7, 1, 3, 4, 9]])
angle = 90 + 45 # ↖
slice_start, slice_end = CoOccur._offset_slices(1, angle)
start = pixels[slice_start[0][0]:slice_start[0][1], slice_start[1][0]:slice_start[1][1]]
end = pixels[slice_end[0][0]:slice_end[0][1], slice_end[1][0]:slice_end[1][1]]
self.assertEqual(start.tolist(), [[7, 5, 8, 2],
[4, 0, 2, 5],
[1, 3, 4, 9]])
self.assertEqual(end.tolist(), [[1, 2, 3, 1],
[0, 7, 5, 8],
[5, 4, 0, 2]])
def test_co_occur(self):
image = Image.open("textures/1.1.04.tiff")
co_occur = CoOccur(image, distances=[1, 2, 4, 8, 16], angles=[0, 90, 180, 270], levels=8)
self.assertEqual(co_occur.matrices.shape, (5, 4, 8, 8))
co_occur = CoOccur(image, distances=[1, 16], angles=[0, 120, 175.3, 240], levels=8)
self.assertEqual(co_occur.matrices.shape, (2, 4, 8, 8))
def test_inertia(self):
image = Image.open("textures/1.1.04.tiff")
l_b = np.arange(8)
l_a = l_b[:, np.newaxis]
coefficients = ((l_a - l_b) ** 2).reshape(1, 1, 8, 8)
co_occur = CoOccur(image, distances=[1, 4, 8, 16, 32], angles=[0, 120, 240])
self.assertAlmostEqual(np.sum(co_occur.matrices[2, 1] * coefficients).item(), co_occur.inertia[2, 1])
self.assertAlmostEqual(np.sum(co_occur.matrices[4, 2] * coefficients).item(), co_occur.inertia_of(32, 240))
def test_average(self):
image = Image.open("textures/1.1.05.tiff")
co_occur = CoOccur(image, distances=[1, 4, 8, 16, 32], angles=[0, 90, 240])
self.assertAlmostEqual(np.mean(co_occur.matrices[2, :, 1, 3]).item(), co_occur.average[2, 1, 3].item())
self.assertAlmostEqual(np.mean(co_occur.matrices[4, :, 2, 6]).item(), co_occur.average_of(32)[2, 6].item())
def test_spread(self):
image = Image.open("textures/1.1.10.tiff")
co_occur = CoOccur(image, distances=[2, 5, 14], angles=[0, 32, 128, 290])
spread_1 = np.max(co_occur.matrices[0, :, 7, 4]) - np.min(co_occur.matrices[0, :, 7, 4])
self.assertAlmostEqual(spread_1, co_occur.spread[0, 7, 4].item())
spread_2 = np.max(co_occur.matrices[2, :, 0, 5]) - np.min(co_occur.matrices[2, :, 0, 5])
self.assertAlmostEqual(spread_2, co_occur.spread_of(14)[0, 5].item())
if __name__ == '__main__':
unittest.main()
|
MatteoZanella/siv-texture-analysis
|
tests/test_com.py
|
test_com.py
|
py
| 2,951 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37682605519
|
"""
Challenge 24: Create a function that will merge
two arrays and return the result as
a new array
"""
def mergeArray(array1, array2):
"""empty array to hold the new values"""
new_array = []
for i in array1:
new_array.append(i)
for j in array2:
new_array.append(j)
return sorted(new_array)
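# Note: since the result is sorted anyway, sorted(array1 + array2) is equivalent here;
# for two inputs that are already sorted, heapq.merge(array1, array2) merges lazily.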
# Driver Method
arr1 = [1, 4, 5, 7, 3, 8]
arr2 = [34, 23, 344, 86]
print(mergeArray(arr1, arr2))
|
mofirojean/50-Coding-Challenge
|
50 Coding Challenge Part I/Python/Challenge24.py
|
Challenge24.py
|
py
| 430 |
python
|
en
|
code
| 2 |
github-code
|
6
|
171133983
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from rp_ui_harness import RequestPolicyTestCase
from marionette import SkipTest
URLS = {
1: ["http://www.maindomain.test/"],
2: ["http://www.otherdomain.test/"],
3: ["http://www.thirddomain.test/"],
"preferences": ["about:requestpolicy", "about:requestpolicy?basicprefs"],
"preferences_1": ["about:requestpolicy"],
"preferences_2": ["about:requestpolicy?basicprefs"],
"policies": ["about:requestpolicy?yourpolicy"]
}
class TestSettingsButtons(RequestPolicyTestCase):
TEST_URL = "http://www.maindomain.test/img_1.html"
def setUp(self):
super(TestSettingsButtons, self).setUp()
self.tabbar = self.browser.tabbar
def tearDown(self):
try:
self._close_all_tabs()
finally:
super(TestSettingsButtons, self).tearDown()
@property
def disabled(self):
return not self.menu.is_working
    def skip_if_disabled(self):
if self.disabled:
raise SkipTest("menu is defunct")
################
# Test Methods #
################
def _test__should_open(self, url_id, settings_id):
"""Exactly one settings tab is open, which is at the same
time the one to be opened."""
self._open_tabs([1, url_id, 3], 1)
self._open_settings(settings_id)
self._check_tabs([1, url_id, settings_id, 3], 2)
def test_no_settings_tab_open(self):
self.skip_if_disabled()
"""No settings tab is open."""
self._test__should_open(2, "preferences")
self._test__should_open(2, "policies")
def test_non_equivalent_settings_tab_open(self):
self.skip_if_disabled()
"""A non-equivalent settings tab is open."""
self._test__should_open("policies", "preferences")
self._test__should_open("preferences_1", "policies")
self._test__should_open("preferences_2", "policies")
def _test__basic(self, url_id, settings_id):
"""Exactly one settings tab is open, which is at the same
time the one to be opened."""
# Already on the correct tab
self._open_tabs([1, url_id, 3], 1)
self._open_settings(settings_id)
self._check_tabs([1, url_id, 3], 1)
# Switch to the correct tab
self._open_tabs([url_id, 2, 3], 1)
self._open_settings(settings_id)
self._check_tabs([url_id, 2, 3], 0)
def test_preferences__basic(self):
self.skip_if_disabled()
self._test__basic("preferences_1", "preferences")
self._test__basic("preferences_2", "preferences")
def test_policies__basic(self):
self.skip_if_disabled()
self._test__basic("policies", "policies")
def _test__multiple_equivalent_urls(self, url_id_1, url_id_2, settings_id):
"""Multiple settings tabs are open, but all are equivalent.
However, the URLs still could be different; for example,
"about:requestpolicy" and "about:requestpolicy?basicprefs"
are equivalent.
"""
# Already on the correct tab
self._open_tabs([url_id_1, url_id_2, 3], 1)
self._open_settings(settings_id)
self._check_tabs([url_id_1, url_id_2, 3], 1)
# Switch to the correct tab.
# The tab to the right of the current tab should be selected.
self._open_tabs([url_id_1, 2, url_id_2], 1)
self._open_settings(settings_id)
self._check_tabs([url_id_1, 2, url_id_2], 2)
def test_preferences__multiple(self):
self.skip_if_disabled()
self._test__multiple_equivalent_urls("preferences_1", "preferences_1",
"preferences")
self._test__multiple_equivalent_urls("preferences_1", "preferences_2",
"preferences")
self._test__multiple_equivalent_urls("preferences_2", "preferences_1",
"preferences")
def _test__multiple_non_equivalent_urls(
self, url_id, non_equivalent_url_id, settings_id
):
"""Multiple settings tabs are open, but they are _not_ equivalent."""
# Already on the correct tab
self._open_tabs([non_equivalent_url_id, url_id, 3], 1)
self._open_settings(settings_id)
self._check_tabs([non_equivalent_url_id, url_id, 3], 1)
# Switch to the correct tab (to the left of the current tab).
self._open_tabs([url_id, 2, non_equivalent_url_id], 1)
self._open_settings(settings_id)
self._check_tabs([url_id, 2, non_equivalent_url_id], 0)
# Switch to the correct tab (to the left of the current tab).
# The current tab is the non-equivalent tab.
self._open_tabs([url_id, non_equivalent_url_id, 3], 1)
self._open_settings(settings_id)
self._check_tabs([url_id, non_equivalent_url_id, 3], 0)
def test_preferences__with_other_settings_tabs(self):
self.skip_if_disabled()
self._test__multiple_non_equivalent_urls("preferences_1", "policies",
"preferences")
self._test__multiple_non_equivalent_urls("preferences_2", "policies",
"preferences")
##########################
# Private Helper Methods #
##########################
def _open_tabs(self, tabs, select_index=1):
self._close_all_tabs()
first_tab = True
for tab_id in tabs:
if not first_tab:
self.tabbar.open_tab().select()
url = URLS[tab_id][0]
with self.marionette.using_context("content"):
self.marionette.navigate(url)
if first_tab:
first_tab = False
self.tabbar.tabs[select_index].select()
self._check_tabs(tabs, select_index)
def _close_all_tabs(self):
self.tabbar.close_all_tabs(exceptions=[self.tabbar.tabs[0]])
def _check_tabs(self, tabs, expected_selected_index):
self.assertEqual(expected_selected_index, self.tabbar.selected_index)
expected_tab_urls = [URLS[tab_id] for tab_id in tabs]
tab_urls = [tab.location for tab in self.tabbar.tabs]
        self.assertEqual(len(expected_tab_urls), len(tab_urls))
for idx in range(len(expected_tab_urls)):
possible_tab_urls = expected_tab_urls[idx]
tab_url = tab_urls[idx]
self.assertIn(tab_url, possible_tab_urls)
self.tabbar.tabs[expected_selected_index].select()
self.assertEqual(expected_selected_index, self.tabbar.selected_index)
def _open_settings(self, button):
self.menu.open()
with self.menu.in_iframe():
if button == "preferences":
self.menu.preferences_button.click()
elif button == "policies":
self.menu.manage_policies_button.click()
else:
self.fail()
|
RequestPolicyContinued/requestpolicy
|
tests/marionette/tests/menu/test_settings_buttons.py
|
test_settings_buttons.py
|
py
| 7,155 |
python
|
en
|
code
| 253 |
github-code
|
6
|
22509106375
|
import numpy as np
import logging
from pathlib import Path
#Output folder setup
output = Path('./output/log').expanduser()
output.mkdir(parents=True, exist_ok=True)
en_log = logging.getLogger(__name__)
en_log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
file_handler = logging.FileHandler('./output/log/energy.log',mode='w')
file_handler.setFormatter(formatter)
en_log.addHandler(file_handler)
def energy_init(temp,m,n,dX):
for i in range(1,m-1):
for j in range (1,n-1):
temp[j, i] = 1 - j*dX
return(temp)
def energy_bound(temp,m,n,iter):
en_log.debug("Iteration No.: {}--------C".format(iter))
en_log.debug("Temperature at bouondary calculation entry: \n{}".format(temp))
for j in range(n):
temp[0,j] = 1
temp[m-1,j] = 0
for i in range(m):
temp[i,0] = (4/3)*( temp[i,1]- (temp[i, 2]/4) )
temp[i,n-1] = (4/3)*( temp[i,n-2] - (temp[i, n-3]/4) )
en_log.debug("Temperature at boundary calculation exit: \n{}".format(temp))
en_log.debug("____________________________________________________")
return(temp)
def energy_bound_ur(temp_o,temp_calc,m,n,r,iter):
temp_n = np.copy(temp_calc)
en_log.debug("Iteration No.: {}--------D".format(iter))
en_log.debug("Temperature at boundary UR calculation entry: \n{}".format(temp_calc))
for i in range(m):
temp_n[i,0] = temp_o[i,0] + r*(temp_calc[i,0] - temp_o[i,0])
temp_n[i,n-1] = temp_o[i,n-1] + r*(temp_calc[i,n-1] - temp_o[i,n-1])
for j in range(n):
temp_n[0,j] = temp_o[0,j] + r*(temp_calc[0,j] - temp_o[0,j])
temp_n[m-1,j] = temp_o[m-1,j] + r*(temp_calc[m-1,j] - temp_o[m-1,j])
en_log.debug("Temperature at boundary UR calculation exit: \n{}".format(temp_n))
en_log.debug("____________________________________________________")
return(temp_n)
def energy(temp_o,strm,m,n,dX,dY,div,iter):
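    # Point-by-point update of the discretised energy equation (Jacobi-style: new values
    # come from the previous iterate temp_o only). The `mul` term is the stream-function /
    # temperature cross-derivative (convection) contribution; the temp_*_sum terms are the
    # central-difference diffusion contributions, all divided by `div`.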
en_log.debug("Iteration No.: {}--------A".format(iter))
en_log.debug("Temperature at calculation entry: \n{}".format(temp_o))
temp_calc = np.copy(temp_o)
mul = (-1/(4*dX*dY))
for i in range(1,m-1):
for j in range (1,n-1):
strm_i_diff = (strm[i+1,j]-strm[i-1,j])
strm_j_diff = (strm[i,j+1]-strm[i,j-1])
temp_i_diff = (temp_o[i+1,j]-temp_o[i-1,j])
temp_j_diff = (temp_o[i,j+1]-temp_o[i,j-1])
temp_i_sum = (temp_o[i+1,j]+temp_o[i-1,j])/(dX*dX)
temp_j_sum = (temp_o[i,j+1]+temp_o[i,j-1])/(dY*dY)
temp_calc[i,j] = ( (mul*((strm_j_diff*temp_i_diff)-(strm_i_diff*temp_j_diff))) + temp_i_sum + temp_j_sum )/div
en_log.debug("Temperature at calculation exit: \n{}".format(temp_calc))
en_log.debug("____________________________________________________")
return temp_calc
def energy_ur(temp_o,temp_calc,m,n,r,iter):
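    # Under-relaxation of the interior nodes: temp_new = temp_old + r * (temp_calc - temp_old),
    # where r (typically in (0, 1]) damps the change between successive iterates.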
en_log.debug("Iteration No.: {}--------B".format(iter))
en_log.debug("Temperature at UR calculation entry: \n{}".format(temp_calc))
temp_n = np.copy(temp_calc)
for i in range(1,m-1):
for j in range (1,n-1):
temp_n[i,j] = temp_o[i,j] + r*(temp_calc[i,j] - temp_o[i,j])
en_log.debug("Temperature at UR calculation exit: \n{}".format(temp_n))
en_log.debug("____________________________________________________")
return(temp_n)
def converge(temp_o,strm,m,n,dX,dY,div,iter):
temp_residue = np.zeros((m,n))
mul = (-1/(4*dX*dY))
if iter<5:
temp_max = 1
else:
temp_max = int(np.amax(np.abs(temp_o)))
en_log.debug("Temperature max value: \n{}".format(temp_max))
for i in range(1,m-1):
for j in range (1,n-1):
strm_i_diff = (strm[i+1,j]-strm[i-1,j])
strm_j_diff = (strm[i,j+1]-strm[i,j-1])
temp_i_diff = (temp_o[i+1,j]-temp_o[i-1,j])
temp_j_diff = (temp_o[i,j+1]-temp_o[i,j-1])
temp_i_sum = (temp_o[i+1,j]+temp_o[i-1,j])/(dX*dX)
temp_j_sum = (temp_o[i,j+1]+temp_o[i,j-1])/(dY*dY)
temp_residue[i,j] = (( ( (mul*((strm_j_diff*temp_i_diff)-(strm_i_diff*temp_j_diff))) + temp_i_sum + temp_j_sum )/div )- temp_o[i,j])/temp_max
en_log.debug("Temperature residue domain: \n{}".format(temp_residue))
return np.std(temp_residue)
|
amuthankural/square_cavity_Natural_Convection
|
energy.py
|
energy.py
|
py
| 4,354 |
python
|
en
|
code
| 4 |
github-code
|
6
|
73016396668
|
# TODO save output in a set to remove duplicated warnings
import sys
import time
import subprocess
import linecache
rules = {
"false":[
"PyArg_ParseTuple"
],
"NULL":[
"Py_BuildValue",
"PyLong_FromLong",
#"PyBytes_FromStringAndSize",
#"PyBytes_AsString",
#"PyFloat_FromDouble",
#"PyObject_GetAttrString",
#"PyDict_New",
#"PyDict_GetItemString",
#"PyDict_GetItem",
#"PyList_GetItem",
#"PyList_GET_ITEM",
"PyList_New",
"malloc"
],
"-1":[
#"PyDict_SetItemString",
#"PyType_Ready",
#"PyLong_AsLong",
#"PyFloat_AsDouble", # -1.0
#"PyModule_AddIntConstant",
#"PyObject_SetAttrString",
"PyDict_SetItem",
#"PyList_Append",
#"PyList_Insert",
"PyList_SetItem",
"PyList_SET_ITEM",
"PyTuple_SET_ITEM"
]
}
append_rules = {
"false":[],
"NULL":[],
"-1":[]
}
# string -> string list
def exec_command(command):
output = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, encoding="UTF-8")
return output.stdout.readlines()
# string -> (string, int, string)
def grep_parser(grep_line):
pos_filename = grep_line.find(":")
filename = grep_line[0:pos_filename]
pos_lineno = grep_line[pos_filename + 1:].find(":") + pos_filename + 1
lineno = int(grep_line[pos_filename + 1:pos_lineno])
content = grep_line[pos_lineno + 1:-1]
return (filename, lineno, content)
# string, int -> string
def next_program_line(filename, lineno):
current_line = linecache.getline(filename, lineno).strip()
lineno += 1
if (not current_line.endswith(";")):
return next_program_line(filename, lineno)
next_line = linecache.getline(filename, lineno).strip()
if (next_line == "}" or next_line == ""):
return linecache.getline(filename, lineno + 1).strip()
else:
return next_line
# string -> string
def assignment_parser(assignment):
pos_left = assignment.find("=")
left = assignment[0:pos_left].strip()
if left.find(" ") != -1: # new declarations, remove type identifers
variable = left.split(" ")[1]
if variable.startswith("*"): # pointers
return variable[1:]
else:
return variable
else:
return left
# string, string, bool, string -> None
def check(api, errval, append, path):
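    # Grep the source tree for calls to `api`; for each hit in a .c/.h file, skip lines that
    # already return or test the value, collect #define aliases into append_rules when
    # append=True, and otherwise report the call if the next statement does not check the
    # variable the result was assigned to.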
command = "grep -rn \"" + api + "\" " + path
for line in exec_command(command):
(filename, lineno, content) = grep_parser(line)
if not filename.endswith(".c") and not filename.endswith(".h"):
continue
if content.strip().startswith("return"):
continue
elif content.strip().startswith("if") or content.strip().startswith("else if"):
continue
elif content.strip().startswith("//") or content.strip().startswith("/*") or content.strip().startswith("*"):
continue
elif content.strip().startswith("#define"):
if append == True:
print(content.strip())
append_rules[errval].append(content.split(" ")[1])
continue
else:
continue
else:
next_content = next_program_line(filename, lineno)
#print(next_content)
variable = assignment_parser(content)
#print(variable)
if next_content.startswith("if") and next_content.find(variable) != -1:
continue
elif next_content.startswith("return") and next_content.find(variable) != -1:
continue
else:
#print(filename)
#print(lineno)
#print(content)
#print(next_content)
print(line.strip())
if __name__ == "__main__":
start_time = time.time()
for (errval, apis) in rules.items():
for api in apis:
#print("===== " + api + " =====")
check(api, errval, True, sys.argv[1])
#print(append_rules)
for (errval, apis) in append_rules.items():
for api in apis:
#print("===== " + api + " =====")
check(api, errval, False, sys.argv[1])
end_time = time.time()
print("total time : {:.2f}s".format(end_time - start_time))
|
S4Plus/pyceac
|
checkers/2/checker.py
|
checker.py
|
py
| 4,376 |
python
|
en
|
code
| 3 |
github-code
|
6
|
5191593918
|
from utils import *
forest = read_day(3)
width = len(forest[0])
right = 3
down = 1
x = 0
y = 0
n_trees = 0
while y < len(forest):
line = forest[y]
cur = line[x%width]
n_trees += cur == "#"
x += right
y += down
print(n_trees)
def count_slope_trees(forest, right, down):
x = 0
y = 0
n_trees = 0
while y < len(forest):
line = forest[y]
cur = line[x%width]
n_trees += cur == "#"
x += right
y += down
return n_trees
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
n_trees_slope = [count_slope_trees(forest, *s) for s in slopes]
print(prod(n_trees_slope))
print("done")
|
nibrivia/aoc-2020
|
day-3.py
|
day-3.py
|
py
| 658 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1539518044
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 27 09:55:08 2018
@author: kartini
"""
import numpy
import io
from itertools import permutations
def read_dataset(fname):
sentences = []
tags = []
with open(fname) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
idx_line = 0
while idx_line < len(content):
sent = []
tag = []
print('idx_line =')
print(idx_line)
while not content[idx_line].startswith('</kalimat'):
if not content[idx_line].startswith('<kalimat'):
content_part = content[idx_line].split('\t')
sent.append(content_part[0])
tag.append(content_part[1])
idx_line = idx_line + 1
sentences.append(sent)
tags.append(tag)
idx_line = idx_line+2
return sentences, tags
def features(sentence, index):
""" sentence: [w1, w2, ...], index: the index of the word """
return {
'word': sentence[index],
'prefix-1': sentence[index][0],
'prefix-2': sentence[index][:2],
'prefix-3': sentence[index][:3],
'suffix-1': sentence[index][-1],
'suffix-2': sentence[index][-2:],
'suffix-3': sentence[index][-3:],
'prev_word': '' if index == 0 else sentence[index - 1],
'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],
}
def transform_to_dataset(sentences, tags):
X, y = [], []
for sentence_idx in range(len(sentences)):
for index in range(len(sentences[sentence_idx])):
X.append(features(sentences[sentence_idx], index))
y.append(tags[sentence_idx][index])
return X, y
sentences,tags = read_dataset('dataset_postagger.txt')
print(sentences[0])
print(tags[0])
def read_file_init_table(fname):
tag_count = {}
tag_count['<start>'] = 0
word_tag = {}
tag_trans = {}
with open(fname) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
idx_line = 0
is_first_word = 0
while idx_line < len(content):
prev_tag = '<start>'
while not content[idx_line].startswith('</kalimat'):
if not content[idx_line].startswith('<kalimat'):
content_part = content[idx_line].split('\t')
if content_part[1] in tag_count:
tag_count[content_part[1]] += 1
else:
tag_count[content_part[1]] = 1
current_word_tag = content_part[0]+','+content_part[1]
if current_word_tag in word_tag:
word_tag[current_word_tag] += 1
else:
word_tag[current_word_tag] = 1
if is_first_word == 1:
current_tag_trans = '<start>,'+content_part[1]
is_first_word = 0
else:
current_tag_trans = prev_tag+','+content_part[1]
if current_tag_trans in tag_trans:
tag_trans[current_tag_trans] += 1
else:
tag_trans[current_tag_trans] = 1
prev_tag = content_part[1]
else:
tag_count['<start>'] += 1
is_first_word = 1
idx_line = idx_line + 1
idx_line = idx_line+1
return tag_count, word_tag, tag_trans
tag_count, word_tag, tag_trans = read_file_init_table('dataset_postagger.txt')
print(tag_count)
print(word_tag)
print(tag_trans)
def create_trans_prob_table(tag_trans, tag_count):
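    # Transition probability table: P(tag2 | tag1) = count(tag1 -> tag2) / count(tag1),
    # filled only for tag pairs that were actually observed in the training data.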
print(tag_trans)
trans_prob = {}
for tag1 in tag_count.keys():
for tag2 in tag_count.keys():
#print('tag1 = ')
#print(tag1)
trans_idx = tag1+','+tag2
#print('trans_idx = ')
#print(trans_idx)
if trans_idx in tag_trans:
#print(trans_idx)
trans_prob[trans_idx] = tag_trans[trans_idx]/tag_count[tag1]
return trans_prob
trans_prob = create_trans_prob_table(tag_trans, tag_count)
print(trans_prob)
def create_emission_prob_table(word_tag, tag_count):
emission_prob = {}
for word_tag_entry in word_tag.keys():
print('---')
print(word_tag_entry)
word_tag_split = word_tag_entry.split(',')
current_word = word_tag_split[0]
current_tag = word_tag_split[1]
# print(current_word)
emission_key = current_word+','+current_tag
print(emission_key)
print(len(emission_key))
if (emission_key == ','):
emission_key = (word_tag_entry)
current_word = ','
current_tag = 'Z'
elif (len(word_tag_split) > 2):
x = word_tag_split[:-1]
current_word = ''.join(x)
current_tag = word_tag_split[-1]
print('ek:', emission_key)
print('ct:', current_tag)
print('cw: ',current_word)
emission_prob[emission_key] = word_tag[word_tag_entry]/tag_count[current_tag]
return emission_prob
emission_prob = create_emission_prob_table(word_tag, tag_count)
print(emission_prob)
def viterbi(trans_prob, emission_prob, tag_count, sentence):
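    # A simplified, greedy left-to-right decoder: at each position only the single best
    # (score, tag) pair is carried forward, with
    #   score = prev_score * P(word | tag) * P(tag | prev_tag).
    # The input sentence is expected to start with the '<start>' token (see calls below).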
#initialization
viterbi_mat = {}
tag_sequence = []
sentence_words = sentence.split()
currentTag = '<start>'
getScoreMax = 1
for i, currentWord in enumerate (sentence_words):
viterbi_mat[currentWord] = getScoreMax
allScore = []
if (i == len(sentence_words) - 1):
break
for j, nilaiEmission in enumerate(emission_prob.keys()):
# print (currentTag)
score = 0
next_word = nilaiEmission.split(',')[0]
next_tag = nilaiEmission.split(',')[1]
if (next_word == sentence_words[i+1]):
print(currentWord, ' ', next_word, ' ', currentTag, ' ', next_tag)
try:
print('Transition Prob: ', trans_prob[currentTag + ',' + next_tag])
score = getScoreMax * emission_prob[nilaiEmission] * trans_prob[currentTag + ',' + next_tag]
except:
print('Transition Prob: ', 0)
score = getScoreMax * emission_prob[nilaiEmission] * 0
allScore.append({'score':score, 'current_tag': currentTag, 'tag': next_tag})
getScore = [x['score'] for x in allScore]
print(getScore)
getIndexMax = getScore.index(max(getScore))
getScoreMax = max(getScore)
currentTag = allScore[getIndexMax]['tag']
tag_sequence.append(currentTag)
return viterbi_mat, tag_sequence
sentence = "<start> kera untuk amankan"
getViterbi = viterbi(trans_prob, emission_prob, tag_count, sentence)
print(getViterbi)
def baseline(word_tag, sentence):
new_tag_word = []
wordsSplit = sentence.split()
for i, word in enumerate(wordsSplit):
tagWord = []
for j, current_word_tag in enumerate(word_tag.keys()):
if (word == current_word_tag.split(',')[0].lower()):
tagWord.append({'word': word, 'tag': current_word_tag.split(',')[1], 'count': word_tag[current_word_tag] })
print('tw', tagWord)
getCount = [x['count'] for x in tagWord]
try:
getIndex = getCount.index(max(getCount))
getMaxCount = max(getCount)
currentTag = tagWord[getIndex]['tag']
new_tag_word.append({'tag': currentTag, 'word': word, 'count': getMaxCount})
except:
new_tag_word.append({'tag': 'NN', 'word': word, 'count': 0})
return new_tag_word
sentence = "kera untuk amankan"
base = baseline(word_tag, sentence)
print(base)
sentences,tags = read_dataset('data_uji_postagger.txt')
print(sentences[0])
print(tags[0])
tag_count, word_tag, tag_trans = read_file_init_table('data_uji_postagger.txt')
print(tag_count)
print(word_tag)
print(tag_trans)
sentence = "<start> kera untuk amankan"
getViterbi = viterbi(trans_prob, emission_prob, tag_count, sentence)
print(getViterbi)
sentence = "kera untuk amankan"
base = baseline(word_tag, sentence)
print(base)
|
kartininurfalah/NLP
|
1301154577_postagger.py
|
1301154577_postagger.py
|
py
| 8,547 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4637946235
|
"""Connects to Fiscal DB and provides functions for database utility:
Retrieving, updating values"""
import os
import mysql.connector
import sys
from src.vault_actions import FISCAL_VAULT
fiscal_dict = FISCAL_VAULT.dict_all('secret')
class MySQLConnectionError(Exception):
"""Custom exception for errors encountered during MySQL DB connection"""
def __init__(self, message):
self.message = message
super().__init__(self.message)
def __str__(self):
return f"MySQL Connection Error: {self.message}"
if os.environ.get('DOCKER_ENV') == 'true':
database_host = fiscal_dict['AWS_DB_HOST']
database_user = fiscal_dict['AWS_DB_USER']
database_port = '3306'
else:
# database_host = 'localhost' # (Node IP)
# database_user = 'root'
# database_port = '30007' # (Node Port)
database_host = fiscal_dict['AWS_DB_HOST']
database_user = fiscal_dict['AWS_DB_USER']
database_port = '3306'
FISCALDB = mysql.connector.connect(
host=database_host,
port=database_port,
user=database_user,
password=fiscal_dict['DB_PASS'],
database=fiscal_dict['DB_NAME']
)
def check_mysql_connection(connection):
"""Confirms database link on connection, displays any errors"""
try:
connection.ping(reconnect=True)
print("MySQL connection successful")
except MySQLConnectionError as error:
print("Error connecting to MySQL database:", str(error))
def retrieve_database(connection: mysql.connector.MySQLConnection):
"""Gets database table values"""
cursor = connection.cursor()
query = 'SELECT * FROM pie_data'
try:
cursor.execute(query)
result = cursor.fetchall()
except mysql.connector.errors.ProgrammingError as error:
result = ("Error retrieving data from MySQL database:", str(error))
return result
def update_database(keys: list,
values: list,
connection: mysql.connector.MySQLConnection):
"""Updates database table with provided key/values"""
try:
cursor = connection.cursor()
placeholders = ','.join(['%s'] * len(keys))
escaped_keys = [connection._cmysql.escape_string(key) for key in keys]
clean_keys = [key.decode('utf-8') for key in escaped_keys]
query = f"INSERT INTO pie_data ({','.join(clean_keys)}) VALUES ({placeholders})"
cursor.execute(query, values)
connection.commit()
cursor.close()
except ValueError as updatedb_error:
print(f"Error: {updatedb_error}")
|
danlhennessy/fiscal-dash
|
src/database.py
|
database.py
|
py
| 2,555 |
python
|
en
|
code
| 0 |
github-code
|
6
|
571836273
|
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
first_ten = alphabet[0:10]
vowel = ['a','e','i','o','u']
# list.remove() deletes a single matching element (and returns None), so it cannot strip
# out a whole list of vowels; build the consonant list with a comprehension instead.
consonant = [letter for letter in alphabet if letter not in vowel]
print(consonant)
print(vowel)
print(first_ten)
last_ten = alphabet[-10:]
print(last_ten)
|
olamide16/ATS_Training
|
week 1/last_alphabet.py
|
last_alphabet.py
|
py
| 329 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22704968084
|
"""
INSERT
1. The insert method takes a single argument, val, which is the value to be inserted into the binary search tree.
2. The method first checks whether the root node of the tree is None. If the root node is None, then the method creates a
new node with the value val and sets it as the root of the tree.
3. If the root node is not None, then the method calls the _insert_helper method with the root node and the value to be
inserted as its arguments.
INSERT_HELPER
1. The _insert_helper method takes two arguments: node and val. node represents the current node being examined in the
insertion process, and val is the value to be inserted into the binary search tree.
2. The method first checks whether val is less than the value of node. If val is less than the value of node, then the
method checks whether the left child of node is None.
3. If the left child of node is None, then the method creates a new node with the value val and sets it as the left child
of node.
4. If the left child of node is not None, then the method calls _insert_helper recursively with the left child of node and
val as its arguments.
5. If val is greater than or equal to the value of node, then the method checks whether the right child of node is None.
6. If the right child of node is None, then the method creates a new node with the value val and sets it as the right
child of node.
7. If the right child of node is not None, then the method calls _insert_helper recursively with the right child of node
and val as its arguments.
8. Steps 2-7 are repeated until the value has been inserted into the tree.
"""
"""
REMOVE
1. The remove method takes a single argument, val, which is the value to be removed from the binary search tree.
2. The method first checks whether the root node of the tree is None. If the root node is None, then the method returns
None as the tree is empty and there is nothing to remove.
3. If the root node is not None, then the method calls the _remove_helper method with the root node and the value to be
removed as its arguments.
REMOVE_HELPER
1. The _remove_helper method takes two arguments: val and node. val represents the value to be removed from the binary
search tree, and node represents the current node being examined in the removal process.
2. The method first checks whether node is None. If node is None, then the method returns None as the value to be
removed was not found in the tree.
3. If val is less than the value of node, then the method calls _remove_helper recursively with val and the left child
of node as its arguments.
4. If val is greater than the value of node, then the method calls _remove_helper recursively with val and the right
child of node as its arguments.
5. If val is equal to the value of node, then the method checks whether node is a leaf node (i.e., it has no children).
6. If node is a leaf node, then the method simply removes it by returning None.
7. If node has only one child (either a left child or a right child), then the method returns that child as the
replacement node for node.
8. If node has two children, then the method finds the successor of node, which is the smallest value in the right
subtree of node.
9. The method replaces the value of node with the value of its successor.
10. The method calls _remove_helper recursively with the value of the successor and the right child of node as its
arguments to remove the successor from the tree.
11. Steps 2-10 are repeated until the value has been removed from the tree.
"""
class Node:
def __init__(self, val):
self.val = val
self.right = None
self.left = None
class BinarySearchTree:
def __init__(self):
self.root = None
def insert_helper(self, val, node):
if val < node.val:
if node.left is None:
node.left = Node(val)
else:
self.insert_helper(val, node.left)
elif val > node.val:
if node.right is None:
node.right = Node(val)
else:
self.insert_helper(val, node.right)
else:
print("Node already exists")
def insert(self, val):
if self.root is None:
self.root = Node(val)
else:
self.insert_helper(val, self.root)
    def remove_helper(self, val, node):
        if node is None:
            return node
        if val < node.val:
            node.left = self.remove_helper(val, node.left)
        elif val > node.val:
            node.right = self.remove_helper(val, node.right)
        else:
            # node with at most one child: splice it out by returning the other side
            if node.left is None:
                temp = node.right
                node = None
                return temp
            elif node.right is None:
                temp = node.left
                node = None
                return temp
            # node with two children: copy the in-order successor's value here,
            # then delete the successor from the right subtree
            temp = self.get_min_value_node(node.right)
            node.val = temp.val
            node.right = self.remove_helper(temp.val, node.right)
        return node
def remove(self, val):
self.root = self.remove_helper(val, self.root)
def get_min_value_node(self, node):
current = node
while current.left is not None:
current = current.left
return current
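

# Minimal usage sketch (arbitrary values) exercising insert and remove; deleting a node
# with two children should promote its in-order successor.
if __name__ == "__main__":
    bst = BinarySearchTree()
    for v in (8, 3, 10, 1, 6):
        bst.insert(v)
    bst.remove(3)
    print(bst.root.left.val)  # 6: the in-order successor of the removed node 3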
|
AvishekC-st/pythonDSA-basics
|
python algo examples/Trees-BST_insert_and_remove.py
|
Trees-BST_insert_and_remove.py
|
py
| 5,338 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73503551228
|
from .space.space import Space
from .vqspace.space import Space as VQSpace
from .qrspace.space import Space as QRSpace
# from .vqspace.space import Space as VQSpace
__all__ = ['get_model']
def get_model(cfg):
"""
Also handles loading checkpoints, data parallel and so on
:param cfg:
:return:
"""
model = None
if cfg.model == 'SPACE':
model = Space(cfg.arch)
elif cfg.model == 'VQSPACE':
model = VQSpace(cfg.vqarch)
elif cfg.model == 'QRSPACE':
model = QRSpace(cfg.arch)
print('+++++++++++++++++++++++++++++++')
print(f'Using Model {model.__class__}.')
return model
|
albertcity/OCARL
|
space/model/__init__.py
|
__init__.py
|
py
| 645 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2962541494
|
# TomoPy recon on Cyclone: compare different algorithms
import tomopy
import dxchange
import numpy as np
import os
import logging
from time import time
def touint8(data, quantiles=None):
# scale data to uint8
# if quantiles is empty data is scaled based on its min and max values
if quantiles == None:
data_min = np.min(data)
data_max = np.max(data)
data_max = data_max - data_min
data = 255 * ((data - data_min) / data_max)
return np.uint8(data)
else:
[q0, q1] = np.quantile(np.ravel(data), quantiles)
q1 = q1 - q0
data = 255 * ((data - q0) / q1)
return np.uint8(data)
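# Note: when explicit quantiles are given the data is not clipped to [0, 255] before the
# uint8 cast, so values outside [q0, q1] can overflow; np.clip(data, 0, 255) beforehand
# would be safer. Hypothetical call: touint8(recon, quantiles=[0.01, 0.99]).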
def writemidplanesDxchange(data, filename_out):
if data.ndim == 3:
filename, ext = os.path.splitext(filename_out)
dxchange.writer.write_tiff(touint8(data[int(data.shape[0] / 2), :, :]), fname=filename+'_XY.tiff', dtype='uint8')
dxchange.writer.write_tiff(touint8(data[:, int(data.shape[1] / 2), :]), fname=filename + '_XZ.tiff', dtype='uint8')
dxchange.writer.write_tiff(touint8(data[:, :, int(data.shape[2] / 2)]), fname=filename + '_YZ.tiff', dtype='uint8')
h5file = "/tmp/tomoData/8671_8_B_01_/8671_8_B_01_.h5"
path_recon = "/scratch/recon/algorithm_test/"
# path_recon = "/nvme/h/jo21gi1/data_p029/test_00_/recon_phase/"
logging.basicConfig(filename=path_recon+'recon_algorithm_test.log', level=logging.DEBUG)
CPU_algorithms = ['gridrec', 'fbp', 'mlem', 'sirt', 'art']
# read projections, darks, flats and angles
projs, flats, darks, theta = dxchange.read_aps_32id(h5file, exchange_rank=0)
# If the angular information is not available from the raw data you need to set the data collection angles.
# In this case, theta is set as equally spaced between 0-180 degrees.
if theta is None:
theta = tomopy.angles(projs.shape[0])
# flat-field correction
logging.info("Flat-field correct.")
projs = tomopy.normalize(projs, flats, darks)
# - log transform
logging.info("- log transform.")
projs = tomopy.minus_log(projs)
# COR was found with Vo method + manual inspection
COR = 1303
for alg in CPU_algorithms:
time_start = time()
# CPU recon
recon = tomopy.recon(projs, theta, center=COR, algorithm=alg, sinogram_order=False)
time_end = time()
execution_time = time_end - time_start
logging.info("{} reconstructed in {} s".format(alg, str(execution_time)))
# apply circular mask
recon = tomopy.circ_mask(recon, axis=0, ratio=0.95)
# rescale GV range to uint8 from MIN and MAX of 3D data
recon_uint8Range = touint8(recon)
# apply again circ mask
recon_uint8Range = tomopy.circ_mask(recon_uint8Range, axis=0, ratio=0.95)
# write output stack of TIFFs as uint8
fileout = path_recon+alg
# dxchange.writer.write_tiff_stack(recon_uint8Range, fname=fileout, dtype='uint8', axis=0, digit=5, start=0, overwrite=True)
writemidplanesDxchange(recon_uint8Range, fileout)
del recon
del recon_uint8Range
|
SESAME-Synchrotron/BEATS_recon
|
tests/Cyclone/tomopy_testCyclone_recon_algorithms_comparison.py
|
tomopy_testCyclone_recon_algorithms_comparison.py
|
py
| 2,975 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24990905758
|
from tkinter import *
from PIL import ImageTk, Image
import mysql.connector
root = Tk()
root.geometry("400x500")
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
passwd = "password123",
database = "codemy",
)
# print(mydb)
my_cursor = mydb.cursor()
# create the database the first time this runs, then comment these lines out....
# my_cursor.execute("CREATE DATABASE codemy")
# my_cursor.execute("SHOW DATABASES")
# for db in my_cursor:
# print(db)
# my_cursor.execute("DROP TABLE customers")
# my_cursor.execute("CREATE TABLE customers (first_name VARCHAR(255), last_name VARCHAR(255), zipcode INT(10), price_paid DECIMAL(10, 2), user_id INT AUTO_INCREMENT PRIMARY KEY) ")
my_cursor.execute("CREATE TABLE IF NOT EXISTS customers (first_name VARCHAR(255), \
last_name VARCHAR(255), \
zipcode INT(10), \
price_paid DECIMAL(10, 2), \
user_id INT AUTO_INCREMENT PRIMARY KEY \
) ")
#alter table
'''
my_cursor.execute("ALTER TABLE customers ADD (\
email VARCHAR(255),\
address_1 VARCHAR(255),\
address_2 VARCHAR(255),\
city VARCHAR(255),\
state VARCHAR(255),\
country VARCHAR(255),\
phone VARCHAR(255),\
payment_method VARCHAR(50),\
discount_code VARCHAR(255))")
'''
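# Note: add_customer() below inserts into the extra columns (email, address_1, ...) that
# only exist once the ALTER TABLE above has been applied to the database.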
# my_cursor.execute("SELECT * FROM customers")
# print(my_cursor.description)
# for thing in my_cursor.description:
# print(thing)
# functions
def add_customer():
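    # The INSERT uses %s placeholders with a values tuple, so mysql.connector escapes the
    # user-supplied values itself (no manual string formatting / SQL injection risk here).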
sql_command = "INSERT INTO customers (first_name,last_name,zipcode,price_paid,email,address_1,address_2,city,state,country,phone,payment_method,discount_code) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) "
values = (first_name_box.get(),last_name_box.get(),zipcode_box.get(),price_paid_box.get(),email_box.get(),address1_box.get(),address2_box.get(),city_box.get(),state_box.get(),country_box.get(),phone_box.get(),payment_method_box.get(),discount_code_box.get())
my_cursor.execute(sql_command, values)
#commit
mydb.commit()
    # then clear the input fields
clear_fields()
def clear_fields():
first_name_box.delete(0, END)
last_name_box.delete(0, END)
address1_box.delete(0, END)
address2_box.delete(0, END)
city_box.delete(0, END)
state_box.delete(0, END)
zipcode_box.delete(0, END)
country_box.delete(0, END)
phone_box.delete(0, END)
email_box.delete(0, END)
payment_method_box.delete(0, END)
discount_code_box.delete(0, END)
price_paid_box.delete(0, END)
def list_customers():
list_customer_query = Tk()
list_customer_query.title("liste des clients")
list_customer_query.geometry("800x600")
my_cursor.execute("SELECT * FROM customers")
result = my_cursor.fetchall()
# for x in result:
# lookup_label = Label(list_customer_query, text="nom: " + x[0] + ' prenom : ' + x[1])
# lookup_label.pack()
for index, x in enumerate(result):
num = 0
for y in x:
lookup_label = Label(list_customer_query, text=y)
lookup_label.grid(row=index, column= num )
num +=1
# create a label
title_label = Label(root, text="Codemy Customers Database", font=("Helvetica",16))
title_label.grid(row=0, column=0, columnspan=2, pady="10")
#main form
first_name_label = Label(root, text="First name").grid(row=1, column=0, sticky=W, padx=10)
last_name_label = Label(root, text="last name").grid(row=2, column=0, sticky=W, padx=10)
address1_label = Label(root, text="Address 1").grid(row=3, column=0, sticky=W, padx=10)
address2_label = Label(root, text="Address 2").grid(row=4, column=0, sticky=W, padx=10)
city_label = Label(root, text="City").grid(row=5, column=0, sticky=W, padx=10)
state_label = Label(root, text="State").grid(row=6, column=0, sticky=W, padx=10)
zipcode_label = Label(root, text="Zipcode").grid(row=7, column=0, sticky=W, padx=10)
country_label = Label(root, text="Country").grid(row=8, column=0, sticky=W, padx=10)
phone_label = Label(root, text="Phone Number").grid(row=9, column=0, sticky=W, padx=10)
email_label = Label(root, text="Email Address").grid(row=10, column=0, sticky=W, padx=10)
# username_label = Label(root, text="Username").grid(row=11, column=0, sticky=W, padx=10)
payment_method_label = Label(root, text="Payment Method").grid(row=11, column=0, sticky=W, padx=10)
discount_code_label = Label(root, text="Discount Code").grid(row=12, column=0, sticky=W, padx=10)
price_paid_label = Label(root, text="Price Paid").grid(row=13, column=0, sticky=W, padx=10)
#box input
first_name_box = Entry(root)
first_name_box.grid(row=1, column=1)
last_name_box = Entry(root)
last_name_box.grid(row=2, column=1, pady=5)
address1_box = Entry(root)
address1_box.grid(row=3, column=1, pady=5)
address2_box = Entry(root)
address2_box.grid(row=4, column=1, pady=5)
city_box = Entry(root)
city_box.grid(row=5, column=1, pady=5)
state_box = Entry(root)
state_box.grid(row=6, column=1, pady=5)
zipcode_box = Entry(root)
zipcode_box.grid(row=7, column=1, pady=5)
country_box = Entry(root)
country_box.grid(row=8, column=1, pady=5)
phone_box = Entry(root)
phone_box.grid(row=9, column=1, pady=5)
email_box = Entry(root)
email_box.grid(row=10, column=1, pady=5)
# username_box = Entry(root)
# username_box.grid(row=11, column=1, pady=5)
payment_method_box = Entry(root)
payment_method_box.grid(row=11, column=1, pady=5)
discount_code_box = Entry(root)
discount_code_box.grid(row=12, column=1, pady=5)
price_paid_box = Entry(root)
price_paid_box.grid(row=13, column=1, pady=5)
# buttons
add_customer_button = Button(root, text="Ajout de Client", command=add_customer)
add_customer_button.grid(row=14, column=0, padx=10, pady=10)
clear_fields_button = Button(root, text="Clear Fields", command=clear_fields)
clear_fields_button.grid(row=14, column=1)
list_customers_button = Button(root, text="list customers", command=list_customers)
list_customers_button.grid(row=15, column=0, sticky=W ,padx=10)
my_cursor.execute("SELECT * FROM customers")
result = my_cursor.fetchall()
for x in result:
print(x)
root.mainloop()
|
miraceti/tkinter
|
gui_28tk_CRm_Db_tools.py
|
gui_28tk_CRm_Db_tools.py
|
py
| 6,059 |
python
|
en
|
code
| 2 |
github-code
|
6
|
18390234941
|
from django import template
from photos.models import GalleryCustom
register = template.Library()
@register.filter
def stripgallery(title):
"""
Remove gallery prefix from photo titles of the form gallery__mytitle.
"""
idx = title.find("__")
if idx < 0:
return title
return title[idx+2:]
@register.inclusion_tag('photologue/tags/galleries.html')
def get_public_photo_galleries():
"""
Return all public galleries as an HTML ul element.
"""
galleries = GalleryCustom.objects.filter(gallery__is_public=True) \
.order_by('gallery__date_added')
return {'galleries': galleries,
'private_galleries_list': False}
def get_private_photo_galleries_array(user):
galleries_private = GalleryCustom.objects.filter(gallery__is_public=False) \
.order_by('gallery__date_added')
if user.is_superuser:
return galleries_private
else:
return [gal for gal in galleries_private if user in gal.allowed_users.all()]
@register.simple_tag(takes_context=True)
def get_private_photo_galleries_num(context):
"""
Return the number of private galleries accessible to the user.
"""
return len(get_private_photo_galleries_array(context['user']))
@register.inclusion_tag('photologue/tags/galleries.html', takes_context=True)
def get_private_photo_galleries(context):
"""
Return all private galleries accessible to the user as an HTML ul element.
"""
return {'galleries': get_private_photo_galleries_array(context['user']),
'private_galleries_list': True,
'user': context['user']}
|
ria4/tln
|
photos/templatetags/photos_extras.py
|
photos_extras.py
|
py
| 1,682 |
python
|
en
|
code
| 3 |
github-code
|
6
|
25607545289
|
def palindrom(p, case_sensitive=True):
"""Tells if param is palindrome
Parameters
----------
p : str or int or list
input parameter
case_sensitive : bool
case sensitive or not
Raises
------
TypeError
if type is not str, int or list
>>> palindrom(['ABC', 12, True, 12, 'abc'])
False
>>> palindrom(['ABC', 12, True, 12, 'abc'], case_sensitive=False)
True
>>> palindrom('Cojoc')
False
>>> palindrom('Cojoc', case_sensitive=False)
True
>>> palindrom(12345)
False
>>> palindrom(1234554321)
    True
"""
if type(p) is int:
p = str(p)
elif type(p) is str:
if case_sensitive is False:
p = p.lower()
elif type(p) is list:
p = list(p)
if case_sensitive is False:
for i in range(len(p)):
if type(p[i]) is str:
p[i] = p[i].lower()
else:
raise TypeError('Only int, str, list allowed')
return p[::-1] == p
if __name__ == '__main__':
from doctest import testmod
testmod()
# print(palindrom('Cojoc'))
# print(palindrom('Cojoc', case_sensitive=False))
# print(palindrom(12345))
# print(palindrom(1234554321))
|
ciprianstoica/python-samples
|
palindrome2.py
|
palindrome2.py
|
py
| 1,343 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30755556995
|
class AlarmClock :
def __init__(self) :
self.current_time = 0
self.alarm_is_on = True
self.alarm_time = 0
self.toggle_result = ''
def set_current_time(self):
self.current_time = input('What is the current time?')
print(self.current_time)
    def set_alarm_on_or_off(self):  # suggestion: use 'not' to toggle the flag
toggle = input('Toggle alarm on or off?')
if toggle == 'on':
self.alarm_is_on = True
self.toggle_result = 'Alarm is ON'
elif toggle == 'off':
self.alarm_is_on = False
self.toggle_result = 'Alarm is OFF'
else:
self.alarm_is_on = False
self.toggle_result = 'on or off was not entered. Alarm is OFF'
def set_alarm_time(self):
self.alarm_time = input('What time would you like to set your alarm to?')
print(self.alarm_time)
# return self.alarm_time
|
StevenSisavath/ClassesAndObjects
|
alarm_clock.py
|
alarm_clock.py
|
py
| 935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16090817554
|
from django.urls import path
from . import api_endpoints as views
from .api_endpoints.Contacts.ContactsList.views import ContactListAPIView
app_name = 'about'
urlpatterns = [
path('addresses/', views.AddressListAPIView.as_view(), name='address_list'),
path('company_stats/', views.CompanyStatListAPIView.as_view(), name='company_stat'),
path('company_histories/', views.CompanyHistoryListAPIView.as_view(), name='company_history'),
path('emails/', views.EmailListAPIView.as_view(), name='email_list'),
path('phone_numbers/', views.PhoneNumberListView.as_view(), name='phone_number_list'),
path('social_medias/', views.SocialMediaListAPIView.as_view(), name='social_media_list'),
path('contacts/', ContactListAPIView.as_view(), name='contact_list'),
path('showroom/', views.ShowroomListAPIView.as_view(), name='showroom_list')
]
|
bilolsolih/decormax
|
apps/about/urls.py
|
urls.py
|
py
| 863 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39607752443
|
import logging
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from core.management.commands import configure_logging
from core.models import Batch, OcrDump
configure_logging("dump_ocr_logging.config", "dump_ocr.log")
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "looks for batches that need to have ocr dump files created"
def handle(self, *args, **options):
if not os.path.isdir(settings.OCR_DUMP_STORAGE):
os.makedirs(settings.OCR_DUMP_STORAGE)
for batch in Batch.objects.filter(ocr_dump__isnull=True):
_logger.info("starting to dump ocr for %s", batch)
try:
if batch.ocr_dump:
_logger.info("Ocr is already generated for %s", batch)
continue
except OcrDump.DoesNotExist:
pass
dump = OcrDump.new_from_batch(batch)
_logger.info("created ocr dump %s for %s", dump, batch)
|
open-oni/open-oni
|
core/management/commands/dump_ocr.py
|
dump_ocr.py
|
py
| 1,024 |
python
|
en
|
code
| 43 |
github-code
|
6
|
15399778008
|
from BayCab4BEM.downSampler import DownSampler
from Util.io import getFileDir, getFileName
import numpy as np
import os
import csv
simDataFile = './iwCabData/config_16/dataFromSim/raw/DEBUG_D_sim_org.csv'
fieldDataFile = './iwCabData/config_16/dataFromSim/raw/DEBUG_D_field_org.csv'
bins = 30;
qualityThres = 0.90;
outputPath = getFileDir(simDataFile, 2) + os.sep + 'down' + os.sep + 'b%d_t%d'%(bins, qualityThres * 100);
try:
os.makedirs(outputPath);
except Exception as e:
print (e)
dirichlet_prior = 0.5;
# Read file header
d_sim_head = None;
with open(simDataFile, 'r') as f:
reader = csv.reader(f);
d_sim_head = ','.join(next(reader));
d_field_head = None;
with open(fieldDataFile, 'r') as f:
reader = csv.reader(f);
d_field_head = ','.join(next(reader));
# Read data from file
d_sim = np.genfromtxt(simDataFile, delimiter = ',', skip_header = 1)
d_field = np.genfromtxt(fieldDataFile, delimiter = ',', skip_header = 1)
# Down sample
downSampler_dSim = DownSampler(d_sim, bins = bins, dirichlet_prior = dirichlet_prior);
(d_sim_down, d_sim_sp_hist) = downSampler_dSim.sample(stSampleSize = 50, increRatio = 1.05, qualityThres = qualityThres);
downSampler_dField = DownSampler(d_field, bins = bins, dirichlet_prior = dirichlet_prior);
(d_field_down, d_field_sp_hist) = downSampler_dField.sample(stSampleSize = 50, increRatio = 1.05, qualityThres = qualityThres);
# Save down sampled data to file
np.savetxt(outputPath + os.sep + getFileName(simDataFile, False) + '_down.csv', d_sim_down, delimiter=",", header = d_sim_head);
np.savetxt(outputPath + os.sep + getFileName(fieldDataFile, False) + '_down.csv', d_field_down, delimiter=',', header = d_field_head);
|
zhangzhizza/BayCab4BEM
|
src/downSampleData.py
|
downSampleData.py
|
py
| 1,671 |
python
|
en
|
code
| 3 |
github-code
|
6
|
30793138915
|
from tkinter import *
window = Tk()
window.title("Entry in tkinter")
window.minsize(width=500,height=300)
# Label
my_label = Label(text="Type below to change text", font=("Arial", 24, "bold"))
# places the label on to the screen and automatically centers it
my_label.pack()
# Entry (basically just input)
input = Entry(width=10)
input.pack()
input.get() # 'get()' method returns the input given by the user as a string
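# Note: naming the Entry widget `input` shadows Python's built-in input(); a name such as
# `entry` or `text_input` avoids confusion in larger scripts.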
# Button
def button_clicked():
new_text = input.get()
my_label["text"] = new_text
button = Button(text="Change text", command=button_clicked)  # 'command' takes a reference to the function, not a call, so no '()' after the name
button.pack()
# keeps the window on the screen
window.mainloop()
|
shrijanlakhey/100-days-of-Python
|
027/entry_in_tkiner.py
|
entry_in_tkiner.py
|
py
| 747 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17642557437
|
import os
from flask import Flask, request, jsonify
from flask_pymongo import PyMongo
from bson import ObjectId
import bcrypt
import jwt
import ssl
import datetime
from functools import wraps
from dotenv import load_dotenv
load_dotenv('.env')
app = Flask(__name__)
app.config['MONGO_URI'] = os.environ.get('MONGO_URI')
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
mongo = PyMongo(app, ssl_cert_reqs=ssl.CERT_NONE)
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = request.headers.get('Authorization')
if not token:
return jsonify({'message': 'Token is missing'}), 401
try:
jwt.decode(token.split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])
except jwt.ExpiredSignatureError:
return jsonify({'message': 'Token has expired'}), 401
except jwt.InvalidTokenError:
return jsonify({'message': 'Token is invalid'}), 401
return f(*args, **kwargs)
return decorated
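# The decorator above expects an 'Authorization: <scheme> <token>' header (e.g. a Bearer
# token); token.split()[1] takes the part after the scheme, so a bare token without a
# scheme would raise IndexError before the JWT is decoded.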
class HomePage:
@staticmethod
@app.route("/")
def index():
return "Homepage<br> Use /register to register user <br> Use /login to login user<br> Use " \
"/template to get template<br> Use /template/<template_id> to do 'GET', 'PUT', 'DELETE' methods"
class UserManagement:
@staticmethod
@app.route('/register', methods=['POST'])
def register():
data = request.get_json()
hashed_pw = bcrypt.hashpw(data['password'].encode('utf-8'), bcrypt.gensalt())
user = mongo.db.users.find_one({'email': data['email']})
if user:
return jsonify({'message': 'User already registered'}), 409
user_id = str(mongo.db.users.insert_one({
'first_name': data['first_name'],
'last_name': data['last_name'],
'email': data['email'],
'password': hashed_pw}).inserted_id)
return jsonify({'message': 'User registered successfully!', 'user_id': user_id}), 201
@staticmethod
@app.route('/login', methods=['POST'])
def login():
auth = request.get_json()
user = mongo.db.users.find_one({'email': auth['email']})
if user and bcrypt.checkpw(auth['password'].encode('utf-8'), user['password']):
token = jwt.encode({
'user_id': str(user['_id']),
'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=1)
}, app.config['SECRET_KEY'], algorithm='HS256')
return jsonify({'token': token})
return jsonify({'message': 'Invalid credentials'}), 401
class TemplateManagement:
@staticmethod
@app.route('/template', methods=['POST'])
@token_required
def create_template():
user_id = jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])['user_id']
data = request.get_json()
data['user_id'] = user_id
inserted_template = mongo.db.templates.insert_one(data)
inserted_id = str(inserted_template.inserted_id)
return jsonify({'template_id': inserted_id, 'message': 'Template created successfully'}), 201
@staticmethod
@app.route('/template', methods=['GET'])
@token_required
def get_all_templates():
user_id = \
jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])[
'user_id']
templates = list(mongo.db.templates.find({'user_id': user_id}, {'_id': 1}))
formatted_templates = [{'_id': str(template['_id'])} for template in templates]
result = []
for template in formatted_templates:
template_id = template['_id']
template_data = mongo.db.templates.find_one({'_id': ObjectId(template_id), 'user_id': user_id}, {'_id': 0})
if template_data:
template_data['_id'] = template_id
result.append(template_data)
return jsonify(result), 200
@staticmethod
@app.route('/template/<template_id>', methods=['GET'])
@token_required
def get_template(template_id):
user_id = \
jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])[
'user_id']
template = mongo.db.templates.find_one({'_id': ObjectId(template_id), 'user_id': user_id}, {'_id': 0})
if template:
return jsonify({'template_id': template_id, "template": template}), 200
else:
return jsonify({'template_id': template_id, 'message': 'Template not found'}), 404
@staticmethod
@app.route('/template/<template_id>', methods=['PUT'])
@token_required
def update_template(template_id):
user_id = \
jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])[
'user_id']
data = request.get_json()
result = mongo.db.templates.update_one({'_id': ObjectId(template_id), 'user_id': user_id}, {'$set': data})
print(result)
if result.modified_count > 0:
return jsonify({'template_id': template_id, 'message': 'Template updated successfully'}), 200
else:
return jsonify({'template_id': template_id, 'message': 'Template not found'}), 404
@staticmethod
@app.route('/template/<template_id>', methods=['DELETE'])
@token_required
def delete_template(template_id):
user_id = \
jwt.decode(request.headers.get('Authorization').split()[1], app.config['SECRET_KEY'], algorithms=['HS256'])[
'user_id']
result = mongo.db.templates.delete_one({'_id': ObjectId(template_id), 'user_id': user_id})
if result.deleted_count > 0:
return jsonify({'template_id': template_id, 'message': 'Template deleted successfully'}), 200
else:
return jsonify({'template_id': template_id, 'message': 'Template not found'}), 404
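# A hedged client-side sketch (not part of the original service): one way the endpoints
# above could be exercised with the 'requests' library once the app is running locally.
# The base URL, e-mail address and password are placeholder assumptions; the field names
# and the 'Bearer <token>' header format mirror the handlers defined above.
def _demo_client(base_url='http://127.0.0.1:5000'):
    import requests
    requests.post(base_url + '/register', json={
        'first_name': 'Ada', 'last_name': 'Lovelace',
        'email': '[email protected]', 'password': 'secret'})
    token = requests.post(base_url + '/login', json={
        'email': '[email protected]', 'password': 'secret'}).json()['token']
    headers = {'Authorization': 'Bearer ' + token}
    created = requests.post(base_url + '/template', json={'name': 'welcome'},
                            headers=headers).json()
    print(requests.get(base_url + '/template/' + created['template_id'],
                       headers=headers).json())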
if __name__ == '__main__':
user_manager = UserManagement()
template_manager = TemplateManagement()
app.add_url_rule('/register', methods=['POST'], view_func=user_manager.register)
app.add_url_rule('/login', methods=['POST'], view_func=user_manager.login)
app.add_url_rule('/template', methods=['POST'], view_func=template_manager.create_template)
app.add_url_rule('/template', methods=['GET'], view_func=template_manager.get_all_templates)
app.add_url_rule('/template/<template_id>', methods=['GET'], view_func=template_manager.get_template)
app.add_url_rule('/template/<template_id>', methods=['PUT'], view_func=template_manager.update_template)
app.add_url_rule('/template/<template_id>', methods=['DELETE'], view_func=template_manager.delete_template)
app.run(debug=True)
|
abhi1083/simple_crud_ops
|
main.py
|
main.py
|
py
| 6,831 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22555751097
|
from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
class Product:
db = "my_solo"
def __init__(self, data):
self.id = data['id']
self.wood = data['wood']
self.thickness = data['thickness']
self.description = data['description']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
self.customers_id = data['customers_id']
@classmethod
def create(cls, data):
query = "INSERT INTO products (wood, thickness, description, customers_id) VALUES (%(wood)s, %(thickness)s, %(description)s, %(customers_id)s)"
results = connectToMySQL(
cls.db).query_db(query, data)
return results
@classmethod
def get_by_cust_id(cls, data):
query = "SELECT * FROM products WHERE customers_id = %(id)s"
results = connectToMySQL(cls.db).query_db(query, data)
list = []
for row in results:
list.append(cls(row))
return list
@classmethod
def get_one(cls, data):
query = "SELECT * FROM products WHERE id = %(id)s;"
results = connectToMySQL(
cls.db).query_db(query, data)
this_product = cls(results[0])
return this_product
@classmethod
def cancel(cls, data):
query = "DELETE FROM products WHERE id=%(id)s"
results = connectToMySQL(cls.db).query_db(query, data)
return results
@classmethod
def update(cls, data):
query = "UPDATE products SET wood= %(wood)s, thickness= %(thickness)s, description= %(description)s WHERE id = %(id)s"
results = connectToMySQL(cls.db).query_db(query, data)
return results
    @staticmethod
def validate_product(product):
is_valid = True
if len(product['wood']) < 1:
flash("Please choose wood type!")
is_valid = False
if len(product['thickness']) < 1:
flash("Thickness amount needed!")
is_valid = False
if len(product['description']) < 3:
flash("Input at least 3 letters!")
is_valid = False
if len(product['description']) >= 4:
flash("Input at only 3 letters!")
is_valid = False
return is_valid
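# A hedged usage sketch (not in the original model): the dict shape that create() and
# validate_product() expect. The field values are placeholders; create() additionally
# needs a reachable MySQL database, and the flash() calls require a Flask request context.
if __name__ == '__main__':
    sample = {'wood': 'oak', 'thickness': '2', 'description': 'box', 'customers_id': 1}
    print(Product.validate_product(sample))  # True: every check above passes for this sample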
|
tsu112/solo_project
|
flask_app/models/product.py
|
product.py
|
py
| 2,288 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8519707697
|
import bs4 as bs
import requests
import regex as re
import pandas as pd
from src.config import *
def get_page_body(url: str):
try:
response = requests.get(url, timeout=10)
if response.status_code == 200:
page = bs.BeautifulSoup(response.text)
return page.body
except requests.exceptions.HTTPError as errh:
print("http error:", errh)
except requests.exceptions.ConnectionError as errc:
print("connection error:", errc)
except requests.exceptions.Timeout as errt:
print("timeout error:", errt)
except requests.exceptions.RequestException as err:
print("other error:", err)
else:
return None
def clean_text(s: str):
s = re.sub(r'[\n\t]', ' ', s)
s = s.strip()
s = ' '.join(s.split())
return s
def get_separate_book_urls(url: str):
page_body = get_page_body(url)
urls = []
if page_body:
urls = [URL_SOURCE + section['href'] for section in page_body.find_all("a", {"class": "bookTitle"})]
return urls
def get_category_books_urls(
input_url: str = URL_START,
book_categories=BOOK_CATEGORIES,
top_n: int = NUMBER_OF_CATEGORY_PAGES_TO_SCRAPE) -> dict:
category_urls = {}
for category in book_categories:
page_body = get_page_body(input_url + category)
if not page_body:
continue
category_link = page_body.find("div", {"class": "listImgs"}).find("a")["href"]
top_pages_links = [f"{URL_SOURCE}{category_link}?page={i}" for i in range(1, top_n + 1)]
category_urls[category] = [book_url for page_url in top_pages_links for book_url in get_separate_book_urls(page_url)]
return category_urls
def get_text(x):
return clean_text(getattr(x, "text", ""))
def get_single_book_info(url: str, book_category: str):
page_body = get_page_body(url)
book_info = {}
if page_body:
book_info["category"] = book_category
book_info["title"] = get_text(page_body.find("h1", id="bookTitle"))
book_info["author"] = get_text(page_body.find("span", itemprop="name"))
book_info["description"] = get_text(page_body.find("div", id="description"))
book_info["rating"] = get_text(page_body.find("span", itemprop="ratingValue"))
book_info["number_of_pages"] = get_text(page_body.find("span", itemprop="numberOfPages"))
book_info["url"] = url
return book_info
def get_books_data(category_urls: dict):
books_data = []
for category in category_urls.keys():
book_urls = category_urls[category]
if not book_urls:
continue
for book_url in book_urls:
book_info = get_single_book_info(book_url, category)
if book_info:
                books_data.append(book_info)
return books_data
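# A minimal usage sketch; it assumes the URL_* and BOOK_CATEGORIES constants imported from
# src.config point at a Goodreads-style listing and that network access is available.
# pandas is imported at the top of this module, so the scraped records fit a DataFrame.
if __name__ == '__main__':
    category_urls = get_category_books_urls()
    books = get_books_data(category_urls)
    print(pd.DataFrame(books).head())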
|
bakalstats/py_project
|
src/scraping_utils.py
|
scraping_utils.py
|
py
| 2,826 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15409655756
|
"""
一个网站域名,如"discuss.leetcode.com",包含了多个子域名。作为顶级域名,常用的有"com",下一级则有"leetcode.com",最低的一级为"discuss.leetcode.com"。当我们访问域名"discuss.leetcode.com"时,也同时访问了其父域名"leetcode.com"以及顶级域名 "com"。
给定一个带访问次数和域名的组合,要求分别计算每个域名被访问的次数。其格式为访问次数+空格+地址,例如:"9001 discuss.leetcode.com"。
接下来会给出一组访问次数和域名组合的列表cpdomains 。要求解析出所有域名的访问次数,输出格式和输入格式相同,不限定先后顺序。
示例 2
输入:
["900 google.mail.com", "50 yahoo.com", "1 intel.mail.com", "5 wiki.org"]
输出:
["901 mail.com","50 yahoo.com","900 google.mail.com","5 wiki.org","5 org","1 intel.mail.com","951 com"]
说明:
按照假设,会访问"google.mail.com" 900次,"yahoo.com" 50次,"intel.mail.com" 1次,"wiki.org" 5次。
而对于父域名,会访问"mail.com" 900+1 = 901次,"com" 900 + 50 + 1 = 951次,和 "org" 5 次。
"""
import collections
class Solution:
def subdomainVisits(self, cpdomains):
"""
:type cpdomains: List[str]
:rtype: List[str]
"""
ans = collections.Counter()
for el in cpdomains:
count = int(el.split(' ')[0])
domain = el.split(' ')[1]
frags = domain.split('.')
for i in range(len(frags)):
ans[".".join(frags[i:])] += count
return ["{} {}".format(ct, dom) for dom, ct in ans.items()]
if __name__ == '__main__':
A = ["900 google.mail.com", "50 yahoo.com", "1 intel.mail.com", "5 wiki.org"]
S = Solution()
res = S.subdomainVisits(A)
print(res)
|
Octoberr/letcode
|
easy/811subdomainvisitcount.py
|
811subdomainvisitcount.py
|
py
| 1,796 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
6448028292
|
import datetime
import time
import MySQLdb
import cv2, os
cascadePath = ("haarcascade_frontalface_default.xml")
faceCascade = cv2.CascadeClassifier(cascadePath)
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('dataTrain/train.yml')
now = datetime.datetime.now()
def getProfile(id):
db = MySQLdb.connect("localhost", "root", "", "presensi")
curs = db.cursor()
cmd = "select *from facebase where npm="+str(id)
curs.execute(cmd)
profile = None
rows = curs.fetchall()
for row in rows:
profile = row
curs.close()
return profile
def getFace_info():
cam = cv2.VideoCapture(0)
while (True):
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.2, 5)
for (x, y, w, h) in faces:
id, conf = recognizer.predict(gray[y:y + h, x:x + w])
profile = getProfile(id)
print(str(id) + str(conf))
if (conf < 40):
if (profile != None):
cv2.imwrite("absensi/" + profile[1] + "/" + now.strftime("%Y-%m-%d %H-%M") + "[1]" + ".jpg", img)
cv2.imwrite("absensi/" + profile[2] + "/" + now.strftime("%Y-%m-%d &H-&M") + "[2]" + ".jpg", img)
time.sleep(3)
return profile[1], profile[2]
break
else:
cam.release()
cv2.destroyAllWindows()
cv2.imshow('img', img)
if cv2.waitKey(10) & 0xff == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
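# A hedged usage sketch: getFace_info() blocks on the webcam until a match with a low
# enough distance is found and returns two columns of the matched 'facebase' row.
# Treating those columns as the student's name and group is an assumption about the table.
if __name__ == '__main__':
    name, group = getFace_info()
    print('Attendance recorded for:', name, group)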
|
Kuroboy/Presensi-Face
|
faceRec.py
|
faceRec.py
|
py
| 1,634 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45333966266
|
from markdown import markdown
from unittest import TestCase
from markdown_vimwiki.extension import VimwikiExtension
class TestExtension(TestCase):
def test_default_config(self):
source = """
Hello World
===========
* [-] rejected
* [ ] done0
* [.] done1
* [o] done2
* [O] done3
* [X] done4
:lorem:ipsum:
""".strip()
expected = """
<h1>Hello World</h1>
<ul>
<li class="rejected"> rejected</li>
<li class="done0"> done0<ul>
<li class="done1"> done1</li>
<li class="done2"> done2</li>
<li class="done3"> done3</li>
<li class="done4"> done4</li>
</ul>
</li>
</ul>
<p><span class="tag">lorem</span> <span class="tag">ipsum</span></p>
""".strip()
html = markdown(source, extensions=[VimwikiExtension()])
self.assertEqual(html, expected)
html = markdown(source, extensions=['markdown_vimwiki'])
self.assertEqual(html, expected)
def test_custom_config(self):
source = """
Hello World
===========
* [i] yip
* [a] yap
* [o] yop
:lorem:ipsum:
""".strip()
expected = """
<h1>Hello World</h1>
<ul>
<li class="yip"> yip</li>
<li class="yap"> yap</li>
<li class="yop"> yop</li>
</ul>
<p><span class="bark">lorem</span> <span class="bark">ipsum</span></p>
""".strip()
html = markdown(source, extensions=[VimwikiExtension(
list_levels='iao',
list_classes=['yip', 'yap', 'yop'],
tag_class='bark')])
self.assertEqual(html, expected)
|
makyo/markdown-vimwiki
|
markdown_vimwiki/tests/test_extension.py
|
test_extension.py
|
py
| 1,499 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5896021463
|
import torch
import torch.utils.data as data
import numpy as np
from collections import defaultdict
from tqdm import tqdm
from copy import deepcopy
import config_data as conf
import random
infor_train_data_path = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/infor_train.npy', allow_pickle = True).tolist()
eva_infor_test_data_path = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/infor_test.npy', allow_pickle = True).tolist()
social_ratings = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/soc_ratings.npy', allow_pickle = True).tolist()
social_links = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/social_links.npy', allow_pickle = True).tolist()
soc_test = np.load('/content/drive/MyDrive/DASR-WGAN/data/dianping/version3/soc_test_1000.npy', allow_pickle = True).tolist()
infor_fake = np.load('/content/drive/MyDrive/DASR-WGAN/src/dianping/Final_Model_augmentation_edge/data/edge_modification/infor_fake.npy', allow_pickle = True).tolist()
social_fake = np.load('/content/drive/MyDrive/DASR-WGAN/src/dianping/Final_Model_augmentation_edge/data/edge_modification/fake_links_2.npy', allow_pickle = True).tolist()
def load_all():
max_user, max_item, max_user_soc = 0, 0, 0
##################################################################
'''
information domain
'''
##################################################################
infor_hash_data = set()
infor_rating_train_data = []
infor_item_dict = defaultdict(set)
infor_user_dict = infor_train_data_path
all_user = set()
infor_user = set()
for user, items in tqdm(infor_train_data_path.items()):
all_user.add(user)
infor_user.add(user)
for item in items:
infor_hash_data.add((user, item))
infor_rating_train_data.append([user, item])
infor_item_dict[item].add(user)
eva_infor_rating_test_data = []
for user, items in tqdm(eva_infor_test_data_path.items()):
for item in items:
eva_infor_rating_test_data.append([user, item])
eva_common_rating_test_data = []
common_rating_dict = defaultdict(set)
for user, friends in social_links.items():
if user in eva_infor_test_data_path:
for i in eva_infor_test_data_path[user]:
eva_common_rating_test_data.append([user, i])
common_rating_dict[user].add(i)
social_link_dict = social_links
link_hash_data = set()
link_train_data = []
soc_user = set()
for user, friends in social_link_dict.items():
all_user.add(user)
soc_user.add(user)
for friend in friends:
all_user.add(friend)
soc_user.add(friend)
link_hash_data.add((user, friend))
link_train_data.append([user, friend])
eva_social_rating_test_data = []
for user, friends in tqdm(social_ratings.items()):
if user not in common_rating_dict:
for i in social_ratings[user]:
if i <= 8626:
eva_social_rating_test_data.append([user, i])
# infor_fake_dict = infor_fake
# social_fake_dict = social_fake
# infor_fake_item_dict = defaultdict(set)
# for user,items in infor_fake.items():
# for i in items:
# infor_fake_item_dict[i].add(user)
eva_soc_test = soc_test
eva_soc_test_ground_truth = social_ratings
print('all user count:', len(all_user))
print('max user id:', max(all_user))
print('infor user count:', len(infor_user))
print('max infor user id:', max(infor_user))
print('common user count:', len(common_rating_dict))
print('soc user count:', len(soc_user))
print('max soc user id:', max(soc_user))
#import sys;sys.exit(0)
return infor_hash_data, infor_rating_train_data, eva_infor_rating_test_data,\
infor_user_dict, infor_item_dict, eva_common_rating_test_data, social_link_dict, eva_social_rating_test_data,\
link_train_data, link_hash_data, eva_soc_test, eva_soc_test_ground_truth, common_rating_dict
####################edge modification################
def load_corrupt_edge():
infor_train_data_path_cp = infor_train_data_path
social_links_cp = social_links
for _ in range(1000):
u = random.randint(0, 10181)
if len(infor_train_data_path_cp[u]) > 0:
u_value = list(infor_train_data_path_cp[u])
u_value_len = len(u_value)
i = random.randint(0,u_value_len-1)
try:
infor_train_data_path_cp[u].remove(u_value[i])
except:
import pdb;pdb.set_trace()
for _ in range(1000):
u = random.randint(0, 10181)
i = random.randint(0, 8626)
if i not in infor_train_data_path_cp[u]:
infor_train_data_path_cp[u].add(i)
########add edges in social domain
for _ in range(500):
u = random.randint(8486, 14174)
if len(social_links_cp[u]) > 0:
u_value = list(social_links_cp[u])
u_value_len = len(u_value)
i = random.randint(0,u_value_len-1)
try:
social_links_cp[u].remove(u_value[i])
except:
                import pdb; pdb.set_trace()
for _ in range(500):
u1 = random.randint(8486, 14174)
u2 = random.randint(8486, 14174)
if u2 not in social_links_cp[u1]:
social_links_cp[u1].add(u2)
infor_fake_item_dict_cp = defaultdict(set)
for user,items in infor_train_data_path_cp.items():
for i in items:
infor_fake_item_dict_cp[i].add(user)
return infor_train_data_path_cp, social_links_cp, infor_fake_item_dict_cp
################node mask################
def load_corrupt_node_mask():
infor_train_data_path_cp = infor_train_data_path
social_links_cp = social_links
for _ in range(1000):
u = random.randint(0, 10181)
if len(infor_train_data_path_cp[u]) > 0:
u_value = list(infor_train_data_path_cp[u])
u_value_len = len(u_value)
i = random.randint(0,u_value_len-1)
try:
infor_train_data_path_cp[u].remove(u_value[i])
except:
import pdb;pdb.set_trace()
for _ in range(1000):
u = random.randint(0, 10181)
i = random.randint(0, 8626)
if i not in infor_train_data_path_cp[u]:
infor_train_data_path_cp[u].add(i)
########add edges in social domain
for _ in range(300):
u = random.randint(8486, 14174)
if len(social_links_cp[u]) > 0:
u_value = list(social_links_cp[u])
u_value_len = len(u_value)
i = random.randint(0,u_value_len-1)
try:
social_links_cp[u].remove(u_value[i])
except:
                import pdb; pdb.set_trace()
for _ in range(300):
u1 = random.randint(8486, 14174)
u2 = random.randint(8486, 14174)
if u2 not in social_links_cp[u1]:
social_links_cp[u1].add(u2)
infor_fake_item_dict_cp = defaultdict(set)
for user,items in infor_train_data_path_cp.items():
for i in items:
infor_fake_item_dict_cp[i].add(user)
return infor_train_data_path_cp, social_links_cp, infor_fake_item_dict_cp
# construct original local graph#
def construct_infor_mat(soc_dict, user_dict, item_dict, is_user):
if is_user == True:
infor_index, infor_value = [], []
#common user
#'''
for user in soc_dict.keys():
friends_list = soc_dict[user]
if user not in user_dict:
for f in friends_list:
fri_friend = soc_dict[f]
infor_index.append([user, f])
#infor_value.append(1.0/(np.sqrt(len(friends_list)*len(fri_friend))))
infor_value.append(1.0/len(friends_list))
#'''
for user in user_dict.keys():
item_list = user_dict[user]
if user not in soc_dict:
for i in item_list:
user_list = item_dict[i]
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/len(item_list))
for user in user_dict.keys():
if user in soc_dict.keys():
friends_list = soc_dict[user]
item_list = user_dict[user]
#'''
for f in friends_list:
fri_friend = soc_dict[f]
infor_index.append([user, f])
#infor_value.append(1.0/(np.sqrt(len(friends_list)*len(fri_friend))))
infor_value.append(1.0/(len(friends_list)+len(item_list)))
for i in item_list:
user_list = item_dict[i]
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/(len(item_list)+len(friends_list)))
#'''
for item in item_dict.keys():
user_list = item_dict[item]
for u in user_list:
item_list = user_dict[u]
infor_index.append([item + conf.num_all_user_id, u])
#infor_value.append(1.0/(np.sqrt(len(user_list)*len(item_list))))
infor_value.append(1.0/len(user_list))
length = conf.num_all_user_id + conf.num_items
user_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return user_agg_mat
############construct corrupted local graph###############
def construct_corrupted_graph(infor_fake_dict, infor_fake_item_dict, social_fake_dict):
infor_index, infor_value = [], []
#common user
#'''
for user in infor_fake_dict.keys():
item_list = infor_fake_dict[user]
for i in item_list:
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/len(item_list))
for item in infor_fake_item_dict.keys():
user_list = infor_fake_item_dict[item]
for u in user_list:
infor_index.append([item+conf.num_all_user_id, u])
infor_value.append(1.0/len(user_list))
for user in social_fake_dict.keys():
friend_list = social_fake_dict[user]
for f in friend_list:
infor_index.append([user, f])
infor_value.append(1.0/len(friend_list))
length = conf.num_all_user_id + conf.num_items
fake_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return fake_agg_mat
##############3construct global in social domian################
def construct_global_social(soc_dict):
infor_index, infor_value = [], []
for user in soc_dict.keys():
friends_list = soc_dict[user]
for f in friends_list:
fri_friend = soc_dict[f]
infor_index.append([user, f])
#infor_value.append(1.0/(np.sqrt(len(friends_list)*len(fri_friend))))
infor_value.append(1.0/len(friends_list))
length = conf.num_all_user_id + conf.num_items
user_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return user_agg_mat
#construct global in information domain#
def construct_global_infor(user_dict, item_dict):
infor_index, infor_value = [], []
for user in user_dict.keys():
item_list = user_dict[user]
for i in item_list:
user_list = item_dict[i]
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/len(item_list))
for item in item_dict.keys():
user_list = item_dict[item]
for u in user_list:
item_list = user_dict[u]
infor_index.append([item + conf.num_all_user_id, u])
#infor_value.append(1.0/(np.sqrt(len(user_list)*len(item_list))))
infor_value.append(1.0/len(user_list))
length = conf.num_all_user_id + conf.num_items
user_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return user_agg_mat
#
def construct_infor_fake_graph(infor_fake_dict, infor_fake_item_dict, soc_dict, is_true):
infor_index, infor_value = [], []
#common user
#'''
for user in infor_fake_dict.keys():
item_list = infor_fake_dict[user]
for i in item_list:
infor_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
infor_value.append(1.0/len(item_list))
for item in infor_fake_item_dict.keys():
user_list = infor_fake_item_dict[item]
for u in user_list:
infor_index.append([item+conf.num_all_user_id, u])
infor_value.append(1.0/len(user_list))
if is_true == True:
for user in soc_dict.keys():
friend_list = soc_dict[user]
for f in friend_list:
infor_index.append([user, f])
infor_value.append(1.0/len(friend_list))
length = conf.num_all_user_id + conf.num_items
fake_infor_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return fake_infor_agg_mat
##########construct fake graph in social domain#############
def construct_social_fake_graph(social_fake_dict, user_dict, item_dict, is_true):
social_index,social_value = [],[]
for user in social_fake_dict.keys():
friend_list = social_fake_dict[user]
for f in friend_list:
social_index.append([user, f])
social_value.append(1.0/len(friend_list))
if is_true == True:
for user in user_dict.keys():
item_list = user_dict[user]
for i in item_list:
social_index.append([user, i+conf.num_all_user_id])
#infor_value.append(1.0/(np.sqrt(len(item_list)*len(user_list))))
social_value.append(1.0/len(item_list))
for item in item_dict.keys():
user_list = item_dict[item]
for u in user_list:
item_list = user_dict[u]
social_index.append([item + conf.num_all_user_id, u])
#infor_value.append(1.0/(np.sqrt(len(user_list)*len(item_list))))
social_value.append(1.0/len(user_list))
length = conf.num_all_user_id + conf.num_items
fake_social_agg_mat = torch.sparse.FloatTensor(torch.LongTensor(social_index).t().cuda(), \
torch.FloatTensor(social_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return fake_social_agg_mat
###########construct infor h-g link ##############
def construct_infor_link(soc_dict, user_dict, item_dict):
infor_index, infor_value = [], []
for user in user_dict.keys():
if user in soc_dict.keys():
items = user_dict[user]
for i in items:
infor_index.append([user, i])
infor_value.append(1.0)
length = conf.num_all_user_id + conf.num_items
agg_mat = torch.sparse.FloatTensor(torch.LongTensor(infor_index).t().cuda(), \
torch.FloatTensor(infor_value).cuda(), torch.Size([length, length]))#.to_dense()
#import pdb;pdb.set_trace()
return agg_mat
# TrainData is used to train the model
class TrainData():
def __init__(self, infor_rating_train_data, infor_hash_data, social_link_dict, infor_user_dict, link_hash_data):
self.features_ps = infor_rating_train_data
self.train_mat = infor_hash_data
self.social_link = social_link_dict
self.infor_user = infor_user_dict
self.social_hash = link_hash_data
def ng_sample(self):
features_fill = []
for x in self.features_ps:
u, i = x[0], x[1]
for t in range(conf.num_train_neg):
j = np.random.randint(conf.num_items)
while (u, j) in self.train_mat:
j = np.random.randint(conf.num_items)
features_fill.append([u, i, j])
self.features_fill = features_fill
self.link_ng_sample()
self.infor_bridge_sample()
#'''
def link_ng_sample(self):
link_features_fill = []
for user,friends in self.social_link.items():
if user in self.infor_user:
for f in friends:
j = np.random.randint(conf.num_bri_user_start, conf.num_all_user_id)
while (user,j) in self.social_hash:
j = np.random.randint(conf.num_bri_user_start, conf.num_all_user_id)
link_features_fill.append([user, f, j])
self.link_features_fill = link_features_fill
#'''
def infor_bridge_sample(self):
infor_bridge_fill = []
for user,items in self.infor_user.items():
if user in self.social_link:
for i in items:
j = np.random.randint(conf.num_items)
while (user, j) in self.train_mat:
j = np.random.randint(conf.num_items)
infor_bridge_fill.append([user, i, j])
self.infor_bridge_fill = infor_bridge_fill
def __len__(self):
return len(self.features_ps) * (conf.num_train_neg)
def __getitem__(self, idx):
features = self.features_fill
user = features[idx][0]
pos = features[idx][1]
neg = features[idx][2]
link_features_fill = self.link_features_fill
idx = np.random.randint(len(link_features_fill))
s_bri = link_features_fill[idx][0]
s_bri_pos = link_features_fill[idx][1]
s_bri_neg = link_features_fill[idx][2]
infor_bridge_fill = self.infor_bridge_fill
idx_2 = np.random.randint(len(infor_bridge_fill))
i_bri = infor_bridge_fill[idx_2][0]
i_bri_pos = infor_bridge_fill[idx_2][1]
i_bri_neg = infor_bridge_fill[idx_2][2]
return user, pos, neg, s_bri, s_bri_pos, s_bri_neg, i_bri, i_bri_pos, i_bri_neg
class EvaData():
def __init__(self, eva_data):
self.eva_data = eva_data
self.length = len(eva_data.keys())
def get_batch(self, batch_idx_list):
user_list, item_list = [], []
for idx in batch_idx_list:
user_list.extend([self.eva_data[idx][0]]*(len(self.eva_data[idx])-1))
item_list.extend(self.eva_data[idx][1:])
return torch.LongTensor(user_list).cuda(), \
torch.LongTensor(item_list).cuda()
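# A hedged wiring sketch (not part of the original module): one way the loaders and the
# graph builders above could be combined. The batch size is an assumption, and CUDA plus
# the conf.* constants and data paths are required exactly as in the functions above.
def _demo_build_graphs_and_loader(batch_size=1024):
    (infor_hash, infor_train, _, infor_user_dict, infor_item_dict, _,
     social_link_dict, _, _, link_hash, _, _, _) = load_all()
    agg_mat = construct_infor_mat(social_link_dict, infor_user_dict, infor_item_dict, is_user=True)
    train_set = TrainData(infor_train, infor_hash, social_link_dict, infor_user_dict, link_hash)
    train_set.ng_sample()  # negative sampling must run before batches are drawn
    loader = data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    return agg_mat, loader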
|
PeiJieSun/AAAI-submission
|
DataModule_domain_infor.py
|
DataModule_domain_infor.py
|
py
| 19,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12215224198
|
# -*- coding: utf-8 -*-
# vim: set ts=2 sw=2 sts=2 tw=80 et:
# pylint: disable=missing-docstring
import tensorflow as tf
def mlp(inputs, mode="train", batch_norm=True, dropout=True, weight_decay=0.0,
layer_sizes=None, activations=None, trainables=None, model_prefix=''):
"""
"""
layer_sizes = layer_sizes if layer_sizes is not None else []
activations = activations if activations is not None else []
trainables = trainables if trainables is not None else []
is_training = bool(mode == 'train')
he_initializer = tf.contrib.layers.variance_scaling_initializer(
factor=2.0, mode='FAN_AVG', uniform=True)
weights_regularizer = None
if weight_decay != 0:
weights_regularizer = tf.contrib.layers.l2_regularizer(
weight_decay)
this_layer = inputs
for idx, next_layer_size in enumerate(layer_sizes):
this_layer_trainable = trainables[idx]
with tf.variable_scope("%s_mlp_%d" % (model_prefix, idx), reuse=tf.AUTO_REUSE) as scope:
this_layer = tf.contrib.layers.fully_connected(this_layer, next_layer_size,
activation_fn=None,
trainable=this_layer_trainable,
weights_initializer=he_initializer,
weights_regularizer=weights_regularizer,
scope=scope)
if batch_norm:
this_layer = tf.contrib.layers.batch_norm(this_layer,
center=True,
scale=True,
is_training=is_training,
trainable=this_layer_trainable,
scope=scope)
this_layer = activations[idx](this_layer)
if dropout:
if is_training:
this_layer = tf.nn.dropout(this_layer, 0.9, name='dropout')
else:
pass
return this_layer
def network(inputs, embedding_size, activation="relu", mode="train",
enable_dropout=True, weight_decay=0.0):
"""
Args:
embedding_size: inner embedding dim size
"""
with tf.variable_scope("title"):
layer_sizes = [8, 6, embedding_size]
trainables = [True, True, True]
if activation == "relu":
activations = [tf.nn.relu, tf.nn.relu, tf.nn.relu]
elif activation == "tanh":
activations = [tf.nn.tanh, tf.nn.tanh, tf.nn.tanh]
else:
raise ValueError('unknown activation: %s' % activation)
output = mlp(inputs, mode=mode,
batch_norm=True,
dropout=enable_dropout,
weight_decay=weight_decay,
layer_sizes=layer_sizes,
activations=activations,
trainables=trainables)
return output
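# A hedged usage sketch (assumes TensorFlow 1.x, where tf.placeholder and tf.contrib are
# still available): builds the "title" tower on a batch of 128-dimensional feature vectors.
# The feature size and embedding size are placeholder values.
def _demo_build_network():
  inputs = tf.placeholder(tf.float32, shape=[None, 128], name='title_features')
  return network(inputs, embedding_size=4, activation='relu', mode='train')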
|
siwendy/interestGraph
|
models/mlp.py
|
mlp.py
|
py
| 3,215 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29792580081
|
#!/usr/bin/env python3
import sys
import rospy
from demo_interface import DemoInterface
from geometry_msgs.msg import Point
DEBUG = True
if __name__ == "__main__":
d = DemoInterface()
if sys.argv[0] == 'rosrun' and len(sys.argv) > 2:
point_topics = sys.argv[3:]
rospy.loginfo(f"Showing points for topics {point_topics}")
elif len(sys.argv) > 1:
point_topics = sys.argv[1:]
rospy.loginfo(f"Showing points for topics {point_topics}")
else:
rospy.loginfo("No point topics specified. Defaulting to /cal_ee_position")
point_topics = ['/cal_ee_position']
rospy.sleep(0.5)
while not rospy.is_shutdown():
rospy.sleep(0.1)
i = 0
points = []
for topic in point_topics:
            point = rospy.wait_for_message(topic, Point)
            points.append(point)
            d.publish_object(f"show_point{i}", point, 0.015, primitive='sphere')
i += 1
|
dwya222/end_effector_control
|
scripts/test_scripts/show_at_point.py
|
show_at_point.py
|
py
| 932 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74992084668
|
import cv2
import numpy as np
import os
import sys
import json
import math
import time
import argparse
from enum import Enum
import platform
class Config:
@classmethod
def init(cls):
if platform.system() == "Windows":
cls.QUIT_KEY = ord("q")
cls.CONTINUE_KEY = 2555904 #right arrow
cls.BACK_KEY = 2424832 #left arrow
cls.REWIND_KEY = ord("r")
cls.PLAYPAUSE_KEY = 32 #spacebar
else:
cls.QUIT_KEY = ord("q")
cls.CONTINUE_KEY = 65363 #right arrow
cls.BACK_KEY = 65361 #left arrow
cls.REWIND_KEY = ord("r")
cls.PLAYPAUSE_KEY = 32 #spacebar
if os.path.exists(os.path.join(os.getcwd(), "./manim-presentation.json")):
json_config = json.load(open(os.path.join(os.getcwd(), "./manim-presentation.json"), "r"))
for key, value in json_config.items():
setattr(cls, key, value)
class State(Enum):
PLAYING = 0
PAUSED = 1
WAIT = 2
END = 3
def __str__(self):
if self.value == 0: return "Playing"
if self.value == 1: return "Paused"
if self.value == 2: return "Wait"
if self.value == 3: return "End"
return "..."
def now():
return round(time.time() * 1000)
def fix_time(x):
return x if x > 0 else 1
class Presentation:
def __init__(self, config, last_frame_next=False):
self.last_frame_next = last_frame_next
self.slides = config["slides"]
self.files = config["files"]
self.lastframe = []
self.caps = [None for _ in self.files]
self.reset()
self.add_last_slide()
def add_last_slide(self):
last_slide_end = self.slides[-1]["end_animation"]
last_animation = len(self.files)
self.slides.append(dict(
start_animation = last_slide_end,
end_animation = last_animation,
type = "last",
number = len(self.slides) + 1,
terminated = False
))
def reset(self):
self.current_animation = 0
self.load_this_cap(0)
self.current_slide_i = 0
self.slides[-1]["terminated"] = False
def next(self):
if self.current_slide["type"] == "last":
self.current_slide["terminated"] = True
else:
self.current_slide_i = min(len(self.slides) - 1, self.current_slide_i + 1)
self.rewind_slide()
def prev(self):
self.current_slide_i = max(0, self.current_slide_i - 1)
self.rewind_slide()
def rewind_slide(self):
self.current_animation = self.current_slide["start_animation"]
self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
def load_this_cap(self,cap_number):
if self.caps[cap_number] == None:
# unload other caps
for i in range(len(self.caps)):
if self.caps[i] != None:
self.caps[i].release()
self.caps[i] = None
# load this cap
self.caps[cap_number] = cv2.VideoCapture(self.files[cap_number])
@property
def current_slide(self):
return self.slides[self.current_slide_i]
@property
def current_cap(self):
self.load_this_cap(self.current_animation)
return self.caps[self.current_animation]
@property
def fps(self):
return self.current_cap.get(cv2.CAP_PROP_FPS)
# This function updates the state given the previous state.
# It does this by reading the video information and checking if the state is still correct.
# It returns the frame to show (lastframe) and the new state.
def update_state(self, state):
if state == State.PAUSED:
if len(self.lastframe) == 0:
_, self.lastframe = self.current_cap.read()
return self.lastframe, state
still_playing, frame = self.current_cap.read()
if still_playing:
self.lastframe = frame
elif state in [state.WAIT, state.PAUSED]:
return self.lastframe, state
elif self.current_slide["type"] == "last" and self.current_slide["terminated"]:
return self.lastframe, State.END
if not still_playing:
if self.current_slide["end_animation"] == self.current_animation + 1:
if self.current_slide["type"] == "slide":
# To fix "it always ends one frame before the animation", uncomment this.
# But then clears on the next slide will clear the stationary after this slide.
                    if self.last_frame_next:
                        self.load_this_cap(self.current_animation + 1)
                        self.next_cap = self.caps[self.current_animation + 1]
                        self.next_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        _, self.lastframe = self.next_cap.read()
state = State.WAIT
elif self.current_slide["type"] == "loop":
self.current_animation = self.current_slide["start_animation"]
state = State.PLAYING
self.rewind_slide()
elif self.current_slide["type"] == "last":
self.current_slide["terminated"] = True
elif self.current_slide["type"] == "last" and self.current_slide["end_animation"] == self.current_animation:
state = State.WAIT
else:
# Play next video!
self.current_animation += 1
self.load_this_cap(self.current_animation)
# Reset video to position zero if it has been played before
self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
return self.lastframe, state
class Display:
def __init__(self, presentations, start_paused=False, fullscreen=False):
self.presentations = presentations
self.start_paused = start_paused
self.state = State.PLAYING
self.lastframe = None
self.current_presentation_i = 0
self.lag = 0
self.last_time = now()
if fullscreen:
cv2.namedWindow("Video", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Video", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
@property
def current_presentation(self):
return self.presentations[self.current_presentation_i]
def run(self):
while True:
self.lastframe, self.state = self.current_presentation.update_state(self.state)
if self.state == State.PLAYING or self.state == State.PAUSED:
if self.start_paused:
self.state = State.PAUSED
self.start_paused = False
if self.state == State.END:
if self.current_presentation_i == len(self.presentations) - 1:
self.quit()
else:
self.current_presentation_i += 1
self.state = State.PLAYING
self.handle_key()
self.show_video()
self.show_info()
def show_video(self):
self.lag = now() - self.last_time
self.last_time = now()
cv2.imshow("Video", self.lastframe)
def show_info(self):
info = np.zeros((130, 420), np.uint8)
font_args = (cv2.FONT_HERSHEY_SIMPLEX, 0.7, 255)
grid_x = [30, 230]
grid_y = [30, 70, 110]
cv2.putText(
info,
f"Animation: {self.current_presentation.current_animation}",
(grid_x[0], grid_y[0]),
*font_args
)
cv2.putText(
info,
f"State: {self.state}",
(grid_x[1], grid_y[0]),
*font_args
)
cv2.putText(
info,
f"Slide {self.current_presentation.current_slide['number']}/{len(self.current_presentation.slides)}",
(grid_x[0], grid_y[1]),
*font_args
)
cv2.putText(
info,
f"Slide Type: {self.current_presentation.current_slide['type']}",
(grid_x[1], grid_y[1]),
*font_args
)
cv2.putText(
info,
f"Scene {self.current_presentation_i + 1}/{len(self.presentations)}",
((grid_x[0]+grid_x[1])//2, grid_y[2]),
*font_args
)
cv2.imshow("Info", info)
def handle_key(self):
sleep_time = math.ceil(1000/self.current_presentation.fps)
key = cv2.waitKeyEx(fix_time(sleep_time - self.lag))
if key == Config.QUIT_KEY:
self.quit()
elif self.state == State.PLAYING and key == Config.PLAYPAUSE_KEY:
self.state = State.PAUSED
elif self.state == State.PAUSED and key == Config.PLAYPAUSE_KEY:
self.state = State.PLAYING
elif self.state == State.WAIT and (key == Config.CONTINUE_KEY or key == Config.PLAYPAUSE_KEY):
self.current_presentation.next()
self.state = State.PLAYING
elif self.state == State.PLAYING and key == Config.CONTINUE_KEY:
self.current_presentation.next()
elif key == Config.BACK_KEY:
if self.current_presentation.current_slide_i == 0:
self.current_presentation_i = max(0, self.current_presentation_i - 1)
self.current_presentation.reset()
self.state = State.PLAYING
else:
self.current_presentation.prev()
self.state = State.PLAYING
elif key == Config.REWIND_KEY:
self.current_presentation.rewind_slide()
self.state = State.PLAYING
def quit(self):
cv2.destroyAllWindows()
sys.exit()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("scenes", metavar="scenes", type=str, nargs="+", help="Scenes to present")
parser.add_argument("--folder", type=str, default="./presentation", help="Presentation files folder")
parser.add_argument("--start-paused", action="store_true", help="Start paused")
parser.add_argument("--fullscreen", action="store_true", help="Fullscreen")
parser.add_argument("--last-frame-next", action="store_true", help="Show the next animation first frame as last frame (hack)")
args = parser.parse_args()
args.folder = os.path.normcase(args.folder)
Config.init()
presentations = list()
for scene in args.scenes:
config_file = os.path.join(args.folder, f"{scene}.json")
if not os.path.exists(config_file):
raise Exception(f"File {config_file} does not exist, check the scene name and make sure to use Slide as your scene base class")
config = json.load(open(config_file))
presentations.append(Presentation(config, last_frame_next=args.last_frame_next))
display = Display(presentations, start_paused=args.start_paused, fullscreen=args.fullscreen)
display.run()
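# A hedged helper (not part of the original script): writes a minimal ./manim-presentation.json
# that Config.init() above will pick up. The key code used here (27, i.e. Escape, for quitting)
# is an assumption; any of the Config attributes could be overridden the same way.
def write_demo_config(path="./manim-presentation.json"):
    with open(path, "w") as f:
        json.dump({"QUIT_KEY": 27}, f)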
if __name__ == "__main__":
main()
|
galatolofederico/manim-presentation
|
manim_presentation/present.py
|
present.py
|
py
| 11,126 |
python
|
en
|
code
| 153 |
github-code
|
6
|
74531208506
|
"""
Solutions to exam tasks for module 1 of the exam 2021-10-25
"""
import random
import time
import math
# Task A1
def length_longest(lst):
# Variant 1: Iteratively in the 'width' direction
if type(lst) != list:
return 0
result = len(lst)
for x in lst:
result = max(result, length_longest(x))
return result
def length_longest(lst):
# Variant 2: Recursively in both directions
if type(lst) != list or lst==[]:
return 0
else:
return max(len(lst), length_longest(lst[0]), length_longest(lst[1:]))
def bubbelsort(a):
for i in range(len(a)-1):
for j in range(len(a)-1):
if a[j] > a[j+1]:
a[j], a[j+1] = a[j+1], a[j]
"""
"""
def random_list(n):
res = []
for i in range(n):
res.append(random.random())
return res
def foo(n):
result = 1
for k in range(3):
for i in range(n*n):
result += k*n
return result
def main():
print('Testing length_longest')
print(length_longest(1), '\t Should be 0')
l1 = [1,2,[[1,2,3,[1,2,3,4,5,6,7,8],5,6],2,3,4,5,6,7,8]]
print(length_longest(l1), '\t Should be 8')
l1 = [1,2,[[1,2,3,[1,2,3,4,5,6,7],5,6],2,3]]
print(length_longest(l1), '\t Should be 7')
print('\nTiming of bubbelsort')
measured = []
for n in [1000, 2000, 4000, 8000]:
rl = random_list(n)
# rl = [x for x in range(n)] # Try this if you want to see that it is Theta(n^2) even in best case
tstart = time.perf_counter()
bubbelsort(rl)
print(n, end =' \t')
dt= time.perf_counter()-tstart
measured.append(dt)
print(f"{dt:5.1f}")
print('\nTime growth when doubling input' )
for i in range(1,len(measured)):
print(f'{measured[i]/measured[i-1]:5.1f}')
print('\nTiming of foo')
measured = []
for n in [1000, 2000, 4000, 8000]:
tstart = time.perf_counter()
print(n, end ='\t')
foo(n)
dt= time.perf_counter()-tstart
measured.append(dt)
print(f" {dt:5.1f}")
print('\nTime growth when doubling input' )
for i in range(1,len(measured)):
print(f'{measured[i]/measured[i-1]:5.1f}')
print('\nEstimation of time for foo(1000000)')
n = 10000
tstart = time.perf_counter()
foo(n)
dt= time.perf_counter()-tstart
print(f"Time for {n}: {dt:5.1f}")
if __name__ == "__main__":
main()
"""
Solution to A2 (Time complexity for bubbelsort):
Complexity Theta(n^2).
For each of the n rounds in the outer loop, the inner loop also goes through n rounds.
This is demonstrated in the test run: each time n is doubled, the time is expected
to be multiplied by 4.
Note that if you want to see an n^2 behaviour you should have at least three measurements
where you at least double n. Changing n by a factor of 10 is also convenient, in which
case the time should increase by a factor of 10^2 = 100.
Several students claimed that the time was Theta(n) if the list was already sorted,
but that is not true. Even if the code doesn't do any swaps, n^2 comparisons will be done.
Bubbelsort CAN be implemented to have a Theta(n) best case, but the given implementation
does not.
Solution to B1 (Time complexity for function foo):
Complexity Theta(n^2)
The time to compute foo(1000000) can be estimated as the time for foo(10000) multiplied
by 100^2, i.e. roughly 20*10000 = 200000 seconds, or about 55 hours.
"""
|
bupa8694/programming2
|
Exams/2021-11-25/Exam_1TD722_20211025_solutions/Exam_1TD722_20211025_solutions/m1_sol.py
|
m1_sol.py
|
py
| 3,519 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70075663868
|
#!/usr/bin/env python3
""" Funtion that lists all documents in a collection"""
def list_all(mongo_collection):
"""
return an empty list if no documento in the collection
"""
documents = mongo_collection.find()
documents_list = [doc for doc in documents]
if documents_list.count == 0:
return []
return documents_list
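if __name__ == "__main__":
    # A minimal usage sketch; the MongoDB URI and the 'my_db.school' collection are
    # placeholder assumptions and a running MongoDB server is required.
    from pymongo import MongoClient
    client = MongoClient('mongodb://127.0.0.1:27017')
    print(list_all(client.my_db.school))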
|
lemejiamo/holbertonschool-backend-storage
|
0x01-NoSQL/8-all.py
|
8-all.py
|
py
| 354 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3453773218
|
fileName = "word.txt"
f = open(fileName,'r')
wordcount={}
for str in f.read().split():
if str not in wordcount:
wordcount[str] = 1
else:
wordcount[str] += 1
print("Output written in file")
print(wordcount, file=open("count.txt", "w"))
|
sravan9393/SummerSemester_Python
|
ICP2_Python/wordcount_file.py
|
wordcount_file.py
|
py
| 269 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5005373290
|
from typing import Any, Iterable, MutableMapping
from typing_extensions import TypeAlias
from .. import etree
from .._types import Unused, _AnyStr, _ElemClsLookupArg, _FileReadSource
from ._element import HtmlElement
_HtmlElemParser: TypeAlias = etree._parser._DefEtreeParsers[HtmlElement]
#
# Parser
#
# Stub version before March 2023 used to omit 'target' parameter, which
# would nullify default HTML element lookup behavior, degenerating html
# submodule parsers into etree ones. Since it is decided to not support
# custom target parser for now, we just add back 'target' parameter for
# coherence. Same for XHTMLParser below.
class HTMLParser(etree.HTMLParser[HtmlElement]):
"""An HTML parser configured to return ``lxml.html`` Element
objects.
Notes
-----
This subclass is not specialized, unlike the ``etree`` counterpart.
They are designed to always handle ``HtmlElement``;
for generating other kinds of ``_Elements``, one should use
etree parsers with ``set_element_class_lookup()`` method instead.
In that case, see ``_FeedParser.set_element_class_lookup()`` for more info.
"""
def __init__(
self,
*,
encoding: _AnyStr | None = ...,
remove_blank_text: bool = ...,
remove_comments: bool = ...,
remove_pis: bool = ...,
strip_cdata: bool = ...,
no_network: bool = ...,
target: etree.ParserTarget[Any] | None = ...,
schema: etree.XMLSchema | None = ...,
recover: bool = ...,
compact: bool = ...,
default_doctype: bool = ...,
collect_ids: bool = ...,
huge_tree: bool = ...,
) -> None: ...
@property
def target(self) -> None: ...
class XHTMLParser(etree.XMLParser[HtmlElement]):
"""An XML parser configured to return ``lxml.html`` Element
objects.
Notes
-----
This subclass is not specialized, unlike the ``etree`` counterpart.
They are designed to always handle ``HtmlElement``;
for generating other kinds of ``_Elements``, one should use
etree parsers with ``set_element_class_lookup()`` method instead.
In that case, see ``_FeedParser.set_element_class_lookup()`` for more info.
Original doc
------------
Note that this parser is not really XHTML aware unless you let it
load a DTD that declares the HTML entities. To do this, make sure
you have the XHTML DTDs installed in your catalogs, and create the
parser like this::
>>> parser = XHTMLParser(load_dtd=True)
If you additionally want to validate the document, use this::
>>> parser = XHTMLParser(dtd_validation=True)
For catalog support, see http://www.xmlsoft.org/catalog.html.
"""
def __init__(
self,
*,
encoding: _AnyStr | None = ...,
attribute_defaults: bool = ...,
dtd_validation: bool = ...,
load_dtd: bool = ...,
no_network: bool = ...,
target: etree.ParserTarget[Any] | None = ...,
ns_clean: bool = ...,
recover: bool = ...,
schema: etree.XMLSchema | None = ...,
huge_tree: bool = ...,
remove_blank_text: bool = ...,
resolve_entities: bool = ...,
remove_comments: bool = ...,
remove_pis: bool = ...,
strip_cdata: bool = ...,
collect_ids: bool = ...,
compact: bool = ...,
) -> None: ...
@property
def target(self) -> None: ...
html_parser: HTMLParser
xhtml_parser: XHTMLParser
#
# Parsing funcs
#
# Calls etree.fromstring(html, parser, **kw) which has signature
# fromstring(text, parser, *, base_url)
def document_fromstring(
html: _AnyStr,
parser: _HtmlElemParser | None = ...,
ensure_head_body: bool = ...,
*,
base_url: str | None = ...,
) -> HtmlElement: ...
def fragments_fromstring(
html: _AnyStr,
no_leading_text: bool = ...,
base_url: str | None = ...,
parser: _HtmlElemParser | None = ...,
**kw: Unused,
) -> list[HtmlElement]: ...
def fragment_fromstring(
html: _AnyStr,
create_parent: bool = ...,
base_url: str | None = ...,
parser: _HtmlElemParser | None = ...,
**kw: Unused,
) -> HtmlElement: ...
def fromstring(
html: _AnyStr,
base_url: str | None = ...,
parser: _HtmlElemParser | None = ...,
**kw: Unused,
) -> HtmlElement: ...
def parse(
filename_or_url: _FileReadSource,
parser: _HtmlElemParser | None = ...,
base_url: str | None = ...,
**kw: Unused,
) -> etree._ElementTree[HtmlElement]: ...
#
# Element Lookup
#
class HtmlElementClassLookup(etree.CustomElementClassLookup):
def __init__(
self,
# Should have been something like Mapping[str, type[HtmlElement]],
# but unfortunately classes mapping is required to be mutable
classes: MutableMapping[str, Any] | None = ...,
# docstring says mixins is mapping, but implementation says otherwise
mixins: Iterable[tuple[str, type[HtmlElement]]] = ...,
) -> None: ...
def lookup(
self,
node_type: _ElemClsLookupArg | None,
document: Unused,
namespace: Unused,
name: str, # type: ignore[override]
) -> type[HtmlElement] | None: ...
|
abelcheung/types-lxml
|
lxml-stubs/html/_parse.pyi
|
_parse.pyi
|
pyi
| 5,211 |
python
|
en
|
code
| 23 |
github-code
|
6
|
13350264318
|
#!/usr/bin/python3
"""https://www.hackerrank.com/challenges/find-the-median/problem?isFullScreen=true"""
def partition(arr, low, high):
pivot = arr[high]
i = low - 1
for j in range(low, high):
if arr[j] <= pivot:
i += 1
arr[i], arr[j] = arr[j], arr[i]
arr[i + 1], arr[high] = arr[high], arr[i + 1]
return i + 1
def quickSort(arr, low, high):
if low < high:
pivot = partition(arr, low, high)
quickSort(arr, low, pivot - 1)
quickSort(arr, pivot + 1, high)
def findMedian(arr):
low = 0
high = len(arr) - 1
quickSort(arr, low, high)
print(arr)
return arr[(low + high) // 2]
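if __name__ == "__main__":
    # Quick sanity check: for an odd-length list the median is the middle element of the
    # sorted array, so this call prints the sorted list followed by 3.
    print(findMedian([0, 1, 2, 4, 6, 5, 3]))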
|
Velin-Todorov/HackerRank-Leetcode
|
Find_the_Median.py
|
Find_the_Median.py
|
py
| 747 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21569825740
|
#!/usr/bin/env python
from prl_tsid.commander import PathFollower
import rospy
rospy.init_node("TSID_example", anonymous=True)
# Plan a trajectory using HPP
from prl_hpp.ur5 import planner, robot, commander_left_arm, commander_right_arm
pi = 3.1415926
planner.lock_grippers()
planner.lock_right_arm()
planner.set_velocity_limit(0.25)
planner.set_acceleration_limit(0.25)
pose_1 = [[-0.50, 0, 0.1], [pi, 0, 0]]
pose_2 = [[-0.40, 0, 0.1], [pi, 0, 0]]
path = planner.make_gripper_approach(robot.left_gripper_name, pose_1, approach_distance = 0.2)
# Start the commanders
commander_left_arm.start_fwd()
commander_right_arm.start_fwd()
# Execute the trajectories using TSID
pf = PathFollower(robot)
pf.set_velocity_limit(0.5)
pf.set_acceleration_limit(0.5)
path.targetFrames.append("right_gripper_grasp_frame")
input("Press enter to execute path")
pf.execute_path(path, [commander_left_arm, commander_right_arm], 0.02, True)
|
inria-paris-robotic-lab/prl_hpp_tsid
|
prl_tsid/scripts/example_ur5_hpp.py
|
example_ur5_hpp.py
|
py
| 929 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20426981888
|
"""
All rights reserved.
--Yang Song ([email protected])
--2021/1/7
"""
import os
import pickle
import random
from abc import abstractmethod
from lifelines import CoxPHFitter, AalenAdditiveFitter
from lifelines.utils.printer import Printer
from lifelines import utils
from SA.Utility import mylog
from SA.DataContainer import DataContainer
class BaseFitter(object):
def __init__(self, fitter=None, name=None):
self.fitter = fitter
self.name = name
def Fit(self, dc: DataContainer):
self.fitter.fit(dc.df, duration_col=dc.duration_name, event_col=dc.event_name)
def Save(self, store_folder):
with open(os.path.join(store_folder, 'model.pkl'), 'wb') as f:
pickle.dump(self.fitter, f)
def Load(self, store_folder):
with open(os.path.join(store_folder, 'model.pkl'), 'rb') as f:
self.fitter = pickle.load(f)
def Plot(self):
self.fitter.plot()
def Summary(self):
import io
from contextlib import redirect_stdout
f = io.StringIO()
with redirect_stdout(f):
self.fitter.print_summary()
out = f.getvalue()
return out
class CoxPH(BaseFitter):
def __init__(self):
random.seed(0)
super(CoxPH, self).__init__(CoxPHFitter(), self.__class__.__name__)
def Fit(self, dc: DataContainer):
self.fitter.fit(dc.df, duration_col=dc.duration_name, event_col=dc.event_name)
class AalenAdditive(BaseFitter):
def __init__(self):
super(AalenAdditive, self).__init__(AalenAdditiveFitter(), self.__class__.__name__)
#
# class Weibull(BaseFitter):
# def __init__(self):
# super(Weibull, self).__init__(WeibullAFTFitter(), self.__class__.__name__)
if __name__ == '__main__':
import numpy as np
model = CoxPH()
print(model.name)
# model = AalenAdditive()
# print(model.name)
train_dc = DataContainer()
train_dc.Load(r'..\..\Demo\train.csv', event_name='status', duration_name='time')
model.Fit(train_dc)
result = model.Summary()
print(result)
# model.Save(r'..\..\Demo')
#
# model_new = AalenAdditive()
# model_new.Load(r'..\..\Demo')
# model_new.Summary()
|
salan668/FAE
|
SA/Fitter.py
|
Fitter.py
|
py
| 2,221 |
python
|
en
|
code
| 121 |
github-code
|
6
|
25818064734
|
#!/usr/bin/env python3
# =============================================================================
# Author: Julen Bohoyo Bengoetxea
# Email: [email protected]
# =============================================================================
""" Description: A set of tools for semantic image segmentations """
# =============================================================================
import os
import glob
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
from patchify import patchify
##########################################################
######### GENERAL TOOLS #########
##########################################################
def get_class_weights(path, preprocess_function, img_size=256):
"""
Get the class weights of the masks generated from the .png images of the specified directory
Parameters
----------
path : string
        Path to the directory.
    preprocess_function : function
        Function to preprocess data in order to get weights in the correct order:
def preprocess_data(img, mask): return(img, mask).
img_size : int, optional
image reading size. (default is 256).
Returns
-------
class_weights : list
List containing the weights of each class.
"""
from sklearn.utils import class_weight
#Capture mask/label info as a list
masks = []
for directory_path in glob.glob(path):
paths = sorted(glob.glob(os.path.join(directory_path, "*.png")))
for mask_path in paths:
mask = cv2.imread(mask_path, 0)
mask = cv2.resize(mask, (img_size, img_size), interpolation = cv2.INTER_NEAREST) #Otherwise ground truth changes due to interpolation
masks.append(mask)
# Convert list to array for machine learning processing
imgs = np.zeros(shape=(1,1))
masks = np.array(masks)
# Preprocess masks same way as in training in order to get the weight in the correct order
imgs, masks = preprocess_function(imgs, masks)
masks = np.argmax(masks, axis=3) # preprocess_function hot encodes the masks so must be reverted
masks = masks.reshape(-1) # Masks must be array of shape (num_samples,)
class_weights = class_weight.compute_class_weight('balanced', np.unique(masks), masks)
return class_weights
def drawProgressBar(percent, barLen = 20):
"""
Prints a progress bar
Parameters
----------
percent : float
Completed percentage (0-1).
barLen : int, optional
Size of the bar. (default is 20).
Returns
-------
None.
"""
import sys
sys.stdout.write("\r")
sys.stdout.write("[{:<{}}] {:.0f}%".format("=" * int(barLen * percent), barLen, percent * 100))
sys.stdout.flush()
##########################################################
######### PLOTTING TOOLS #########
##########################################################
def plot_legend(classes, cmap='viridis', size=2):
"""
Plots legend of the colors using matplotlib.pyplot
Parameters
----------
classes : Dict
Dict contaning the number and name of each class.
cmap : string, optional
Color map to use in masks. (default is viridis).
size : int, optional
Plotting size. (default is 2).
Returns
-------
None.
"""
x = []
my_xticks = []
for i in range(len(classes)):
x.append(i)
my_xticks.append(classes[i])
f = plt.figure(figsize = (size, size))
f.add_subplot(1,1,1)
plt.yticks(x, my_xticks)
plt.xticks([], [])
x = np.reshape(x,(1,len(classes))).T
plt.imshow(x, cmap=cmap)
def plot_mask(images, masks, num_classes, num_plots=1, cmap='viridis', size=10):
"""
Plots images and masks from lists using matplotlib.pyplot
Parameters
----------
images : list
List with the original images (3 channel).
masks : list
List with the original masks (1 channel).
num_classes : int
Number of classes to plot
num_plots : int, optional
Ammount of images to plot. (default is 1).
cmap : string, optional
Color map to use in masks. (default is viridis).
size : int, optional
Plotting size. (default is 10).
Returns
-------
None.
"""
# Place all pixel values for colour coherence
print('Masks modified for plotting', num_classes, 'classes')
for i in range(num_plots):
mask=masks[i]
for j in range(num_classes):
mask[0,j]=j
masks[i]=mask
for i in range(num_plots):
        f = plt.figure(figsize=(size, size))
        f.add_subplot(1,3,1)
        plt.axis('off')
        plt.title('Original image')
        plt.imshow(images[i])
        f.add_subplot(1,3,2)
        plt.axis('off')
        plt.title('Ground truth mask')
        plt.imshow(masks[i], cmap=cmap)
        plt.show(block=True)
def plot_prediction(images, masks, predictions, num_classes, num_plots=1, cmap='viridis', size=10, alpha=0.7):
"""
Plots images, original masks, predicted masks and overlays from lists using matplotlib.pyplot
Parameters
----------
images : list
List with the original images (3 channel).
masks : list
List with the original masks (1 channel).
predictions : list
List with the predicted masks (1 channel).
num_classes : int
Number of classes to plot
num_plots : int, optional
Ammount of images to plot. (default is 1).
cmap : string, optional
Color map to use in masks. (default is viridis).
size : int, optional
Plotting size. (default is 10).
alpha : float, optional
Transparency for the prediction over image. (default is 0.7).
Returns
-------
None.
"""
# Place all pixel values for colour coherence
print('Masks modified for plotting', num_classes, 'classes')
for i in range(num_plots):
mask=masks[i]
prediction=predictions[i]
for j in range(num_classes):
mask[0,j]=j
prediction[0,j]=j
masks[i]=mask
predictions[i]=prediction
for i in range(num_plots):
f = plt.figure(figsize = (size, size))
f.add_subplot(1,3,1)
plt.axis('off')
        plt.title('Original image')
plt.imshow(images[i])
f.add_subplot(1,3,2)
plt.axis('off')
        plt.title('Ground truth mask')
plt.imshow(masks[i], cmap=cmap)
f.add_subplot(1,3,3)
plt.axis('off')
        plt.title('Predicted mask')
plt.imshow(predictions[i], cmap=cmap)
f = plt.figure(figsize = (size, size))
f.add_subplot(1,1,1)
plt.axis('off')
        plt.title('Predicted mask over image')
plt.imshow(images[i])
no_background_predictions = np.ma.masked_where(predictions == 0, predictions) # remove background(0) from prediction
plt.imshow(no_background_predictions[i], cmap=cmap, alpha=alpha)
plt.show(block=True)
    plt.show()
##########################################################
######### READING IMAGES TO LISTS #########
##########################################################
def get_image_list(path, size=256):
"""
Returns a list containing all the .jpg images of the specified directory resized to size*size.
Parameters
----------
path : string
Path to the directory containing the images.
size : int, optional
Size to load the images. (default is 256).
Returns
-------
image_list : list.
A list containing the images as np.arrays.
"""
image_list = []
for directory_path in glob.glob(path):
paths = sorted(glob.glob(os.path.join(directory_path, "*.jpg")))
for img_path in paths:
            img = cv2.imread(img_path, 1) #1 for reading 3 channel (BGR)
img = cv2.resize(img, (size, size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
image_list.append(img)
#Convert list to array for machine learning processing
image_list = np.array(image_list)
return(image_list)
def get_mask_list(path, size=256):
"""
Returns a list containing all the masks generated from the .png images of the specified directory resized to size*size.
Parameters
----------
path : string
Path to the directory containing the masks.
size : int, optional
Size to load the masks. (default is 256).
Returns
-------
mask_list : list.
A list containing the masks as np.arrays.
"""
#Capture mask/label info as a list
mask_list = []
for directory_path in glob.glob(path):
paths = sorted(glob.glob(os.path.join(directory_path, "*.png")))
for mask_path in paths:
            mask = cv2.imread(mask_path, 0) #0 for reading 1 channel (greyscale)
mask = cv2.resize(mask, (size, size), interpolation = cv2.INTER_NEAREST) #Otherwise ground truth changes due to interpolation
mask_list.append(mask)
#Convert list to array for machine learning processing
mask_list = np.array(mask_list)
#detect number of classes in the masks
num_classes = len(np.unique(mask_list))
return(mask_list, num_classes)
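# Hedged usage sketch (added example, not part of the original file): the two readers are
# typically called on matching image/mask directories; the paths below are hypothetical.
#
#     images = get_image_list('dataset/train/images/', size=256)
#     masks, num_classes = get_mask_list('dataset/train/masks/', size=256)
#     print(images.shape, masks.shape, num_classes)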
#NOT FINISHED, must check augmentation and mode
def get_generator_from_list(images, masks, mode, preprocess_function, augmentation=True,
val_split=0.2, batch_size=32, seed=123):
"""
Returns a generator for both input images and masks preprocessed.
Parameters
----------
images : list
List containing the images(not preprocessed).
masks : list
List containing the masks (not preprocessed).
    mode : string
        Specify whether this is the training or validation split.
    preprocess_function : function
        Function to preprocess data: def preprocess_data(img, mask): return(img, mask).
    augmentation : boolean, optional
        Boolean for performing data augmentation. (default is True).
    val_split : float, optional
        Percentage of the images for validation split. (default is 0.2).
    batch_size : int, optional
        Size of the loaded batches on each call to the generator. (default is 32).
    seed : int, optional
        Seed for the random transformations. (default is 123).
Yields
------
img :
Preprocessed image.
mask :
Preprocessed mask.
"""
if(augmentation):
data_gen_args = dict(validation_split=val_split,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect', #'constant','nearest','reflect','wrap'
)
else: data_gen_args = dict(validation_split=val_split,
)
image_data_generator = ImageDataGenerator(**data_gen_args)
image_data_generator.fit(images, augment=True, seed=seed)
    image_generator = image_data_generator.flow(images, batch_size=batch_size, subset=mode, seed=seed)
mask_data_generator = ImageDataGenerator(**data_gen_args)
mask_data_generator.fit(masks, augment=True, seed=seed)
    mask_generator = mask_data_generator.flow(masks, batch_size=batch_size, subset=mode, seed=seed)
generator = zip(image_generator, mask_generator)
for (img, mask) in generator:
img, mask = preprocess_function(img, mask)
yield (img, mask)
##########################################################
######### FLOW FROM DIRECTORY #########
##########################################################
def get_generator_from_directory(img_path, mask_path, size, mode, preprocess_function, augmentation=True,
val_split=0.2, batch_size=32, seed=123):
"""
Returns a generator for both input images and masks(hot encoded).
dataset must be structured in "images" and "masks" directories.
Parameters
----------
img_path : string
Path to the target dir containing images.
mask_path : string
Path to the target dir containing masks.
size : int
Image loading size.
    mode : string
        Specify whether this is the training or validation split.
    preprocess_function : function
        Function to preprocess data: def preprocess_data(img, mask): return(img, mask).
    augmentation : boolean, optional
        Boolean for performing data augmentation. (default is True).
    val_split : float, optional
        Percentage of the images for validation split. (default is 0.2).
    batch_size : int, optional
        Size of the loaded batches on each call to the generator. (default is 32).
    seed : int, optional
        Seed for the random transformations. (default is 123).
Yields
------
img :
Preprocessed image.
mask :
Preprocessed mask.
"""
if(augmentation):
data_gen_args = dict(validation_split=val_split,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect', #'constant','nearest','reflect','wrap'
)
else: data_gen_args = dict(validation_split=val_split,
)
# same arguments in order to transform images and masks equaly
image_datagen = ImageDataGenerator(**data_gen_args)
image_generator = image_datagen.flow_from_directory(img_path,
target_size=(size, size),
subset=mode, # train or validation
batch_size=batch_size,
shuffle=True,
class_mode=None,
seed=seed)
mask_generator = image_datagen.flow_from_directory(mask_path,
target_size=(size, size),
subset=mode, # train or validation
batch_size=batch_size,
color_mode='grayscale',
shuffle=True,
class_mode=None,
seed=seed)
generator = zip(image_generator, mask_generator)
for (img, mask) in generator:
img, mask = preprocess_function(img, mask)
yield (img, mask)
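# Hedged usage sketch (added example, not part of the original file): a minimal
# preprocess_function that scales images and one-hot encodes masks, plus the training and
# validation generators that model.fit expects. Paths, sizes and the class count are
# hypothetical assumptions.
#
#     from tensorflow.keras.utils import to_categorical
#
#     def preprocess_data(img, mask, num_classes=4):
#         img = img / 255.0
#         mask = to_categorical(mask, num_classes)
#         mask = mask.reshape((mask.shape[0], mask.shape[1], mask.shape[2], num_classes))
#         return img, mask
#
#     train_gen = get_generator_from_directory('data/images', 'data/masks', size=256,
#                                              mode='training', preprocess_function=preprocess_data)
#     val_gen = get_generator_from_directory('data/images', 'data/masks', size=256,
#                                            mode='validation', preprocess_function=preprocess_data)
#     # model.fit(train_gen, validation_data=val_gen, steps_per_epoch=..., epochs=...)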
##########################################################
######### TILE GENERATING #########
##########################################################
def get_image_tiles(path, tile_size, step=None, print_resize=False, dest_path=None):
"""
Generates image tiles from the masks on a given directory.
Parameters
----------
path : string
Path to the original images dir.
tile_size : int
Size of the resulting tiles.
step : int, optional
Step pixel from tile to tile. (default is tile_size).
print_resize : boolean, optional
Option to print the cropped size of the image. (default is False).
dest_path : string, optional
Path to the destination dir for the tiles, not saved if None. (default is None).
Returns
-------
    image_array:
        Array of tiled images
"""
print('Reading images:')
if(not step): step=tile_size
image_list = []
for directory_path in glob.glob(path):
paths = sorted(glob.glob(os.path.join(directory_path, "*.jpg")))
for img_path in paths:
#update progress var
percentage = 1/(len(paths)/(paths.index(img_path)+1))
drawProgressBar(percentage, barLen = 50)
img = cv2.imread(img_path, 1) #1 for reading image as BGR (3 channel)
# Cut each image to a size divisible by tile_size
original_width=img.shape[1] # useful for crop locations
original_height=img.shape[0] # useful for crop locations
width = (img.shape[1]//tile_size)*tile_size # get nearest width divisible by tile_size
height = (img.shape[0]//tile_size)*tile_size # get nearest height divisible by tile_size
img = Image.fromarray(img)
#img = img.crop((0 ,0, width, height)) #Crop from top left corner ((left, top, right, bottom))
img = img.crop((original_width-width ,0, original_width, height)) #Crop from top right corner ((left, top, right, bottom))
img = np.array(img)
if (print_resize): print('Cropped image size:', img.shape)
# Extract patches from each image
patches_img = patchify(img, (tile_size, tile_size, 3), step=step) #Step=256 for 256 patches means no overlap
for i in range(patches_img.shape[0]):
for j in range(patches_img.shape[1]):
single_patch_img = patches_img[i,j,:,:]
                    single_patch_img = single_patch_img[0] #Drop the extra unnecessary dimension that patchify adds.
image_list.append(single_patch_img)
# Saving the image
if dest_path is not None:
filename = img_path.rsplit( ".", 1 )[ 0 ] #remove extension
filename = filename.rsplit( "/")[ -1 ] #remove original path
filename = filename+' '+str(i)+'-'+str(j)+'.jpg' # add tile indexes
cv2.imwrite(dest_path+filename, single_patch_img)
image_array = np.array(image_list)
print('\nGot an image array of shape', image_array.shape, image_array.dtype)
return(image_array)
def get_mask_tiles(path, tile_size, step=None, print_resize=False, dest_path=None):
"""
Generates mask tiles from the masks on a given directory.
Parameters
----------
path : string
Path to the original masks dir.
tile_size : int
Size of the resulting tiles.
step : int, optional
Step pixel from tile to tile. (default is tile_size).
print_resize : boolean, optional
Option to print the cropped size of the mask. (default is False).
dest_path : string, optional
Path to the destination dir for the tiles, not saved if None. (default is None).
Returns
-------
mask_array:
Array of tiled masks
"""
print('Reading masks:')
if(not step): step=tile_size
mask_list = []
for directory_path in glob.glob(path):
paths = sorted(glob.glob(os.path.join(directory_path, "*.png")))
for mask_path in paths:
#update progress var
percentage = 1/(len(paths)/(paths.index(mask_path)+1))
drawProgressBar(percentage, barLen = 50)
mask = cv2.imread(mask_path, 0) #0 for reading image as greyscale (1 channel)
# Cut each image to a size divisible by tile_size
original_width=mask.shape[1] # useful for crop locations
original_height=mask.shape[0] # useful for crop locations
width = (mask.shape[1]//tile_size)*tile_size # get nearest width divisible by tile_size
height = (mask.shape[0]//tile_size)*tile_size # get nearest height divisible by tile_size
mask = Image.fromarray(mask)
#mask = mask.crop((0 ,0, width, height)) #Crop from top left corner ((left, top, right, bottom))
mask = mask.crop((original_width-width ,0, original_width, height)) #Crop from top right corner ((left, top, right, bottom))
mask = np.array(mask)
if (print_resize): print('Cropped mask size:', mask.shape)
# Extract patches from each mask
patches_mask = patchify(mask, (tile_size, tile_size), step=step) #Step=256 for 256 patches means no overlap
for i in range(patches_mask.shape[0]):
for j in range(patches_mask.shape[1]):
single_patch_mask = patches_mask[i,j,:,:]
mask_list.append(single_patch_mask)
# Saving the mask
if dest_path is not None:
filename = mask_path.rsplit( ".", 1 )[ 0 ] #remove extension
filename = filename.rsplit( "/")[ -1 ] #remove original path
filename = filename+' '+str(i)+'-'+str(j)+'.png' # add tile indexes
cv2.imwrite(dest_path+filename, single_patch_mask)
mask_array = np.array(mask_list)
print('\nGot a mask array of shape', mask_array.shape, mask_array.dtype, 'with values', np.unique(mask_array))
return(mask_array)
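# Hedged usage sketch (added example, not part of the original file): tiling a directory of
# large images and their masks into 256x256 patches; the paths below are hypothetical.
#
#     img_tiles = get_image_tiles('raw/images/', tile_size=256, dest_path='tiles/images/')
#     mask_tiles = get_mask_tiles('raw/masks/', tile_size=256, dest_path='tiles/masks/')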
def get_useful_images(IMG_DIR, MASK_DIR, USEFUL_IMG_DIR, USEFUL_MASK_DIR, PERCENTAGE=0.05):
"""
Read the image tiles from a given directory an saves in the new directory
only the ones with more than a percentage not labelled as 0(background).
Parameters
----------
IMG_DIR : string
Path of the original image tiles directory.
MASK_DIR : string
Path of the original mask tiles directory.
USEFUL_IMG_DIR : string
Destination path of the filtered image tiles directory.
USEFUL_MASK_DIR : string
Destination path of the filtered mask tiles directory.
PERCENTAGE : float
The minimum percentage to accept an image. (default is 0.05)
Returns
-------
None.
"""
# needs to be sorted as linux doesn't list sorted
img_list = sorted(os.listdir(IMG_DIR))
msk_list = sorted(os.listdir(MASK_DIR))
useless=0 #Useless image counter
for img in range(len(img_list)):
percentage = 1/(len(img_list)/(img+1))
drawProgressBar(percentage, barLen = 50)
img_name=img_list[img]
mask_name = msk_list[img]
#print("Now preparing image and masks number: ", img)
temp_image=cv2.imread(IMG_DIR+img_list[img], 1)
temp_mask=cv2.imread(MASK_DIR+msk_list[img], 0)
val, counts = np.unique(temp_mask, return_counts=True)
if (1 - (counts[0]/counts.sum())) > PERCENTAGE: #At least 5% useful area with labels that are not 0
cv2.imwrite(USEFUL_IMG_DIR+img_name, temp_image)
cv2.imwrite(USEFUL_MASK_DIR+mask_name, temp_mask); #print("Save Me")
else: useless +=1; #print("I am useless")
print("\nTotal useful images are: ", len(img_list)-useless)
|
julenbhy/biomedical_segmentation
|
tools/segmentation_utils.py
|
segmentation_utils.py
|
py
| 23,075 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30353773151
|
from mayavi.filters.filter_base import FilterBase
from mayavi.components.common import convert_to_poly_data
######################################################################
# `PolyDataFilterBase` class.
######################################################################
class PolyDataFilterBase(FilterBase):
""" Base class for a filter requiring polydata input. Converts the
source to polydata.
"""
######################################################################
# `Filter` interface.
######################################################################
def update_pipeline(self):
# Do nothing if there is no input.
inputs = self.inputs
if len(inputs) == 0:
return
# By default we set the input to the first output of the first
# input.
fil = self.filter
self.configure_input(fil, convert_to_poly_data(inputs[0].outputs[0]))
fil.update()
self._set_outputs([fil])
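# Hedged sketch (added example, not part of the original module): a concrete filter would
# normally declare ``filter`` as a trait holding a tvtk polydata algorithm; the use of
# tvtk.TriangleFilter below is only an illustrative assumption.
#
#     from traits.api import Instance
#     from tvtk.api import tvtk
#
#     class TriangulateFilter(PolyDataFilterBase):
#         """Triangulates its polydata input."""
#         filter = Instance(tvtk.TriangleFilter, args=(), allow_none=False)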
|
enthought/mayavi
|
mayavi/filters/poly_data_filter_base.py
|
poly_data_filter_base.py
|
py
| 1,003 |
python
|
de
|
code
| 1,177 |
github-code
|
6
|
40194290799
|
import tweepy
import time
print('Starting bot....')
CONSUMER_KEY = "Cqw4pXPk4lz2EEUieSDKjKuQT"
CONSUMER_SECRET = "AhQZvxkBNS2bmXdUOX8tu5SoZi9vYdNimwmTuzkE9ZJJuzTEk5"
ACCES_KEY = "1323551878483865600-LVgJ1466OXyOnZqKNt4H3k0hBBQlmO"
ACCES_SECRET = "yWdPUmakm5Cn4eMURajaZkNkbeaXgLhzvD7msCsB5Ipxw"
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCES_KEY, ACCES_SECRET)
api = tweepy.API(auth)
FILE_NAME = 'last_seen.txt'
def retrieve_last_seen_id(file_name):
f_read = open(file_name, 'r')
last_seen = int(f_read.read().strip())
f_read.close()
return last_seen
def store_last_seen_id(last_seen, file_name):
f_write = open(file_name, 'w')
f_write.write(str(last_seen))
f_write.close()
return
def reply_back():
print('retrieving and reply to tweets....')
last_seen = retrieve_last_seen_id(FILE_NAME)
mentions = api.mentions_timeline(last_seen, tweet_mode = 'extended')
for mention in reversed(mentions):
print(str(mention.id) + '--' + mention.full_text)
last_seen = mention.id
store_last_seen_id(last_seen, FILE_NAME)
if '#hello' in mention.full_text.lower():
print('found #hello!')
print('Responding back...')
            api.update_status('@' + mention.user.screen_name + ' #hello back to you!', mention.id)
while True:
reply_back()
time.sleep(15)
|
byte-exe/bot-reply-back_tweets
|
twt_bot.py
|
twt_bot.py
|
py
| 1,355 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39432945596
|
import sys
import os.path
current_dir = os.path.dirname(os.path.relpath(__file__))
analysis_dir = current_dir[0:-7] + "Analysis"
sys.path.append(analysis_dir)
import check
def printSummary(data):
print("Summary:")
unique_list = check.findUnique(data)
for i in unique_list:
print(f"{i} (1)")
for item in data:
if item not in unique_list:
print(f"{item} ({data.count(item)})")
unique_list.append(item)
if __name__ == "__main__":
test_data = [2, 2, -1, -71, 0, 0, 2, 2, 0, 0, 345, 345, 678]
printSummary(test_data)
|
svrohith9/100-days-python
|
Bonus/Ass4/Summary/dataOutput.py
|
dataOutput.py
|
py
| 581 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30366690531
|
"""
Example of how to use a DataView and bare renderers to create plots
"""
from numpy import linspace, sin, cos
# Enthought library imports.
from chaco.api import (
DataView,
ArrayDataSource,
ScatterPlot,
LinePlot,
LinearMapper,
)
from chaco.tools.api import PanTool, ZoomTool
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import UItem, View
class PlotExample(HasTraits):
plot = Instance(Component)
traits_view = View(
UItem("plot", editor=ComponentEditor()),
width=700,
height=600,
resizable=True,
title="Dataview + renderer example",
)
def _plot_default(self):
x = linspace(-5, 10, 500)
y = sin(x)
y2 = 0.5 * cos(2 * x)
view = DataView(border_visible=True)
scatter = ScatterPlot(
index=ArrayDataSource(x),
value=ArrayDataSource(y),
marker="square",
color="red",
outline_color="transparent",
index_mapper=LinearMapper(range=view.index_range),
value_mapper=LinearMapper(range=view.value_range),
)
line = LinePlot(
index=scatter.index,
value=ArrayDataSource(y2),
color="blue",
index_mapper=LinearMapper(range=view.index_range),
value_mapper=LinearMapper(range=view.value_range),
)
# Add the plot's index and value datasources to the dataview's
# ranges so that it can auto-scale and fit appropriately
view.index_range.sources.append(scatter.index)
view.value_range.sources.append(scatter.value)
view.value_range.sources.append(line.value)
# Add the renderers to the dataview. The z-order is determined
# by the order in which renderers are added.
view.add(scatter)
view.add(line)
view.tools.append(PanTool(view))
view.overlays.append(ZoomTool(view))
return view
demo = PlotExample()
if __name__ == "__main__":
demo.configure_traits()
|
enthought/chaco
|
chaco/examples/demo/data_view.py
|
data_view.py
|
py
| 2,098 |
python
|
en
|
code
| 286 |
github-code
|
6
|
43078051498
|
import time
from datadog import initialize
from datadog import api as dogapi
from datadog.dogstatsd.base import DogStatsd
from datadog.dogstatsd.context import TimedContextManagerDecorator
from flask import g, request
class TimerWrapper(TimedContextManagerDecorator):
def __init__(self, statsd, *args, **kwargs):
super(TimerWrapper, self).__init__(statsd, *args, **kwargs)
def start(self):
self.__enter__()
def stop(self):
self.__exit__(None, None, None)
class StatsD(object):
def __init__(self, app=None, config=None):
"""
Constructor for `flask.ext.datadog.StatsD`
>>> from flask.ext.datadog import StatsD
>>> app = Flask(__name__)
>>> statsd = StatsD(app=app)
:param app: Flask app to configure this client for, if `app` is `None`, then do not
configure yet (call `init_app` manually instead)
:type app: flask.Flask or None
:param config: Configuration for this client to use instead of `app.config`
:type config: dict or None
"""
self.config = config
self.statsd = None
# If an app was provided, then call `init_app` for them
if app is not None:
self.init_app(app)
else:
self.app = None
def init_app(self, app, config=None):
"""
Initialize Datadog DogStatsd client from Flask app
>>> from flask.ext.datadog import StatsD
>>> app = Flask(__name__)
>>> statsd = StatsD()
>>> statsd.init_app(app=app)
Available DogStatsd config settings:
STATSD_HOST - statsd host to send metrics to (default: 'localhost')
STATSD_MAX_BUFFER_SIZE - max number of metrics to buffer before sending, only used when batching (default: 50)
STATSD_NAMESPACE - metric name prefix to use, e.g. 'app_name' (default: None)
STATSD_PORT - statsd port to send metrics to (default: 8125)
STATSD_TAGS - list of tags to include by default, e.g. ['env:prod'] (default: None)
STATSD_USEMS - whether or not to report timing in milliseconds (default: False)
Available Flask-Datadog config settings:
DATADOG_CONFIGURE_MIDDLEWARE - whether or not to setup response timing middleware (default: True)
DATADOG_RESPONSE_METRIC_NAME - the name of the response time metric (default: 'flask.response.time')
            DATADOG_RESPONSE_SIZE_METRIC_NAME - the name of the response size metric (default: 'flask.response.size')
DATADOG_RESPONSE_SAMPLE_RATE - the sample rate to use for response timing middleware (default: 1)
DATADOG_RESPONSE_AUTO_TAG - whether to auto-add request/response tags to response metrics (default: True)
DATADOG_RESPONSE_ENDPOINT_TAG_NAME - tag name to use for request endpoint tag name (default: 'endpoint')
DATADOG_RESPONSE_METHOD_TAG_NAME - tag name to use for the request method tag name (default: 'method')
:param app: Flask app to configure this client for
:type app: flask.Flask
:param config: optional, dictionary of config values (defaults to `app.config`)
:type config: dict
"""
# Used passed in config if provided, otherwise use the config from `app`
if config is not None:
self.config = config
elif self.config is None:
self.config = app.config
# Set default values for expected config properties
self.config.setdefault('STATSD_HOST', 'localhost')
self.config.setdefault('STATSD_MAX_BUFFER_SIZE', 50)
self.config.setdefault('STATSD_NAMESPACE', None)
self.config.setdefault('STATSD_PORT', 8125)
self.config.setdefault('STATSD_TAGS', None)
self.config.setdefault('STATSD_USEMS', False)
self.app = app
# Configure DogStatsd client
# https://github.com/DataDog/datadogpy/blob/v0.11.0/datadog/dogstatsd/base.py
self.statsd = DogStatsd(host=self.config['STATSD_HOST'],
port=self.config['STATSD_PORT'],
max_buffer_size=self.config['STATSD_MAX_BUFFER_SIZE'],
namespace=self.config['STATSD_NAMESPACE'],
constant_tags=self.config['STATSD_TAGS'],
use_ms=self.config['STATSD_USEMS'])
# Configure any of our middleware
self.setup_middleware()
def timer(self, *args, **kwargs):
"""Helper to get a `flask_datadog.TimerWrapper` for this `DogStatsd` client"""
return TimerWrapper(self.statsd, *args, **kwargs)
def incr(self, *args, **kwargs):
"""Helper to expose `self.statsd.increment` under a shorter name"""
return self.statsd.increment(*args, **kwargs)
def decr(self, *args, **kwargs):
"""Helper to expose `self.statsd.decrement` under a shorter name"""
return self.statsd.decrement(*args, **kwargs)
def setup_middleware(self):
"""Helper to configure/setup any Flask-Datadog middleware"""
# Configure response time middleware (if desired)
self.config.setdefault('DATADOG_CONFIGURE_MIDDLEWARE', True)
self.config.setdefault('DATADOG_RESPONSE_SIZE_METRIC_NAME', 'flask.response.size')
self.config.setdefault('DATADOG_RESPONSE_METRIC_NAME', 'flask.response.time')
self.config.setdefault('DATADOG_RESPONSE_SAMPLE_RATE', 1)
self.config.setdefault('DATADOG_RESPONSE_AUTO_TAG', True)
self.config.setdefault('DATADOG_RESPONSE_ENDPOINT_TAG_NAME', 'endpoint')
self.config.setdefault('DATADOG_RESPONSE_METHOD_TAG_NAME', 'method')
if self.config['DATADOG_CONFIGURE_MIDDLEWARE']:
self.app.before_request(self.before_request)
self.app.after_request(self.after_request)
def before_request(self):
"""
Flask-Datadog middleware handle for before each request
"""
# Set the request start time
g.flask_datadog_start_time = time.time()
g.flask_datadog_request_tags = []
# Add some default request tags
if self.config['DATADOG_RESPONSE_AUTO_TAG']:
self.add_request_tags([
# Endpoint tag
'{tag_name}:{endpoint}'.format(tag_name=self.config['DATADOG_RESPONSE_ENDPOINT_TAG_NAME'],
endpoint=str(request.endpoint).lower()),
# Method tag
'{tag_name}:{method}'.format(tag_name=self.config['DATADOG_RESPONSE_METHOD_TAG_NAME'],
method=request.method.lower()),
])
def after_request(self, response):
"""
Flask-Datadog middleware handler for after each request
:param response: the response to be sent to the client
:type response: ``flask.Response``
:rtype: ``flask.Response``
"""
# Return early if we don't have the start time
if not hasattr(g, 'flask_datadog_start_time'):
return response
# Get the response time for this request
elapsed = time.time() - g.flask_datadog_start_time
# Convert the elapsed time to milliseconds if they want them
if self.use_ms:
elapsed = int(round(1000 * elapsed))
# Add some additional response tags
if self.config['DATADOG_RESPONSE_AUTO_TAG']:
self.add_request_tags(['status_code:%s' % (response.status_code, )])
tags = self.get_request_tags()
sample_rate = self.config['DATADOG_RESPONSE_SAMPLE_RATE']
# Emit timing metric
self.statsd.timing(self.config['DATADOG_RESPONSE_METRIC_NAME'],
elapsed,
tags,
sample_rate)
# Emit response size metric
if 'content-length' in response.headers:
size = int(response.headers['content-length'])
self.statsd.histogram(self.config['DATADOG_RESPONSE_SIZE_METRIC_NAME'],
size,
tags,
sample_rate)
# We ALWAYS have to return the original response
return response
def get_request_tags(self):
"""
Get the current list of tags set for this request
:rtype: list
"""
return getattr(g, 'flask_datadog_request_tags', [])
def add_request_tags(self, tags):
"""
Add the provided list of tags to the tags stored for this request
:param tags: tags to add to this requests tags
:type tags: list
:rtype: list
"""
# Get the current list of tags to append to
# DEV: We use this method since ``self.get_request_tags`` will ensure that we get a list back
current_tags = self.get_request_tags()
# Append our new tags, and return the new full list of tags for this request
g.flask_datadog_request_tags = current_tags + tags
return g.flask_datadog_request_tags
def __getattr__(self, name):
"""
Magic method for fetching any underlying attributes from `self.statsd`
We utilize `__getattr__` to ensure that we are always compatible with
the `DogStatsd` client.
"""
# If `self.statsd` has the attribute then return that attribute
if self.statsd and hasattr(self.statsd, name):
return getattr(self.statsd, name)
        raise AttributeError('\'StatsD\' has no attribute \'{name}\''.format(name=name))
def __enter__(self):
"""
Helper to expose the underlying `DogStatsd` client for context managing
>>> statsd = StatsD(app=app)
>>> # Batch any metrics within the `with` block
>>> with statsd:
>>> statsd.increment('metric')
"""
return self.statsd.__enter__()
def __exit__(self, *args, **kwargs):
"""Helper to expose the underlying `DogStatsd` client for context managing"""
return self.statsd.__exit__(*args, **kwargs)
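# Hedged usage sketch (added example, not part of the original module): wiring the StatsD
# client into a Flask app; the config values and metric name are hypothetical.
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     app.config['STATSD_HOST'] = 'localhost'
#     app.config['STATSD_PORT'] = 8125
#     statsd = StatsD(app=app)
#
#     @app.route('/')
#     def index():
#         statsd.incr('index.requests')
#         return 'ok'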
class API(object):
def __init__(self, app=None, config=None):
"""
Constructor for `flask.ext.datadog.API`
>>> from flask.ext.datadog import API
>>> app = Flask(__name__)
>>> dogapi = API(app=app)
:param app: Flask app to configure this client for, if `app` is `None`, then do not
configure yet (call `init_app` manually instead)
:type app: flask.Flask or None
:param config: Configuration for this client to use instead of `app.config`
:type config: dict or None
"""
self.config = config
# If an app was provided, then call `init_app` for them
if app is not None:
self.init_app(app)
else:
self.app = None
def init_app(self, app, config=None):
"""
Initialize Datadog API client from Flask app
>>> from flask.ext.datadog import API
>>> app = Flask(__name__)
>>> dogapi = API()
>>> dogapi.init_app(app=app)
Available config settings:
DATADOG_API_KEY - Datadog API key from https://app.datadoghq.com/account/settings#api
DATADOG_APP_KEY - Datadog APP key from https://app.datadoghq.com/account/settings#api
:param app: Flask app to configure this client for
:type app: flask.Flask
:param config: optional, dictionary of config values (defaults to `app.config`)
:type config: dict
"""
# Used passed in config if provided, otherwise use the config from `app`
if config is not None:
self.config = config
elif self.config is None:
self.config = app.config
# Set default values for expected config properties
self.config.setdefault('DATADOG_API_KEY', None)
self.config.setdefault('DATADOG_APP_KEY', None)
self.app = app
# Initialize datadog client
# DEV: Datadog client uses module level variables for storing API keys rather than initializing a
# class to manage a connection/and keys
# https://github.com/DataDog/datadogpy/blob/v0.11.0/datadog/__init__.py
# https://github.com/DataDog/datadogpy/blob/v0.11.0/datadog/api/__init__.py#L4-L9
options = {
'api_key': self.config['DATADOG_API_KEY'],
'app_key': self.config['DATADOG_APP_KEY'],
}
initialize(**options)
def __getattr__(self, name):
"""
Magic method for fetching attributes from `datadog.api`
We utilize `__getattr__` to ensure that we are always compatible with
the `datadog.api` module.
"""
# If `self.statsd` has the attribute then return that attribute
if dogapi and hasattr(dogapi, name):
return getattr(dogapi, name)
        raise AttributeError('\'API\' has no attribute \'{name}\''.format(name=name))
|
sky107/python-lab-project-sky
|
pyprojectbackend/lib/python3.9/site-packages/flask_datadog.py
|
flask_datadog.py
|
py
| 13,019 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21141343602
|
import asyncio
import math
import sys
from collections import Counter, defaultdict
from pprint import pprint
import aiohttp
import async_timeout
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pyecharts import Bar as Line
from pyecharts import Overlap
from lucky.commands import util
URL = '/k/min_max_counter/{code}?resample={resample}&window_size={window_size}'
async def process(resample='1w', window_size=7*52, min_timetomarket=None, test=False, where='ALL'):
rsts = await util.fetch(URL, resample=resample, window_size=window_size,
min_timetomarket=min_timetomarket, test=test, where=where)
c = defaultdict(Counter)
for rst in rsts:
for key, value in rst['result'].items():
c[key].update(**value)
df = None
for key, value in c.items():
tmp = pd.DataFrame.from_dict(value, 'index', columns=[key])
if df is None:
df = tmp
else:
df = pd.concat([df, tmp], axis=1, sort=True)
df.index = pd.DatetimeIndex(df.index)
df = df.sort_index()
ds = pd.date_range(min(df.index), max(df.index), freq=resample)
df = df.reindex(ds,
copy=False, fill_value=0)
# print(df)
# x = df.plot()
# plt.show()
df = df.fillna(value=0)
line1 = Line()
line1.add('is_rolling_max', df.index, df['is_rolling_max'])
line2 = Line()
line2.add('is_rolling_min', df.index, df['is_rolling_min'])
overlap = Overlap(
)
overlap.add(line1)
overlap.add(line2) # , yaxis_index=1, is_add_yaxis=True
util.render(overlap, path="render.html",)
line1 = Line()
line1.add('ismax', df.index, df['ismax'])
line2 = Line()
line2.add('ismin', df.index, df['ismin'])
overlap = Overlap(
)
overlap.add(line1)
overlap.add(line2)
util.render(overlap, path="render2.html",)
# overlap.render(path="render2.html",)
for c in df.columns:
df[c] = pd.to_numeric(df[c])
df = df.resample('1m').sum()
market_size = await util.get_marketsize(where=where)
market_size = pd.DataFrame.from_dict(market_size)
market_size.index = pd.DatetimeIndex(market_size.index)
df['marketsize'] = market_size
df['ismin'] = df['ismin'] / df['marketsize']
df['ismax'] = df['ismax'] / df['marketsize']
line1 = Line()
line1.add('ismax', df.index, df['ismax'])
line2 = Line()
line2.add('ismin', df.index, df['ismin'])
overlap = Overlap(
)
overlap.add(line1)
overlap.add(line2)
util.render(overlap, path="render3.html",)
return df
@click.command()
@click.option('--resample', default='1d', help='Used to reduce the result set, similar to weekly or monthly bars')
@click.option('--window_size', default=7*52, help='Window size')
@click.option('--min_timetomarket', default=20180101, help='Used to exclude recently listed stocks')
@click.option('--where', default='ALL', help='Market')
@click.option('--test', default=False, help='Whether to enable test mode and only process a few codes')
def main(resample, window_size, min_timetomarket, where, test):
    """
    Number of stocks breaking new highs and new lows
    """
    print('='*50)
    print('Number of stocks breaking new highs and new lows')
import time
b = time.time()
loop = asyncio.get_event_loop()
loop.run_until_complete(
        process(resample, window_size, min_timetomarket, test=test, where=where)
)
e = time.time()
print(e-b)
if __name__ == '__main__':
main()
|
onecans/my
|
mystockservice/lucky/commands/min_max_counter.py
|
min_max_counter.py
|
py
| 3,486 |
python
|
en
|
code
| 2 |
github-code
|
6
|
35817144385
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automatic electric field computation
------------------------------------
:download:`examples/auto_efield.py` demonstrates how
drift can be added self-consistently by calculating the
electric field generated from the concentration profile
of charged species.
::
$ python auto_efield.py --help
.. exec::
echo "::\\n\\n"
python examples/examples/auto_efield.py --help | sed "s/^/ /"
Here is an example generated by:
::
$ python auto_efield.py --plot --savefig auto_efield.png
.. image:: ../_generated/auto_efield.png
"""
from __future__ import print_function, division, absolute_import
from math import log, erf, exp
import argh
import numpy as np
from chempy.einstein_smoluchowski import electrical_mobility_from_D
from chemreac import ReactionDiffusion
from chemreac.integrate import run
from chemreac.util.plotting import save_and_or_show_plot
def sigm(x, lim=150., n=8):
# Algebraic sigmoid to avoid overflow/underflow of 'double exp(double)'
return x/((x/lim)**n+1)**(1./n)
sq2 = 2**0.5
pi = np.pi
sqpi = pi**0.5
def _gaussian(x, mu, sigma, logy, logx, geom, use_log2=False):
    # Formula for normalization derived from the following Mathematica code:
# $Assumptions = {(sigma | mu) \[Element] Reals, sigma > 0}
# 1/Integrate[E^(-1/2*((x - mu)/sigma)^2), {x, -Infinity, Infinity}]
# 1/Integrate[2*pi*x*E^(-1/2*((x - mu)/sigma)^2), {x, 0, Infinity}]
# 1/Integrate[4*pi*x^2*E^(-1/2*((x - mu)/sigma)^2), {x, 0, Infinity}]
if geom == 'f':
a = 1/sigma/(2*np.pi)**0.5
elif geom == 'c':
a = 1/pi/sigma/(2*exp(-mu**2/2/sigma**2)*sigma +
mu*sq2*sqpi*(1 + erf(mu/(sq2*sigma))))
elif geom == 's':
a = 1/2/pi/sigma/(2*exp(-mu**2/2/sigma**2)*mu*sigma +
sq2*sqpi*(mu**2 + sigma**2)*(1 + erf(mu/sq2/sigma)))
else:
raise NotImplementedError("Unkown geomtry: %s" % geom)
b = -0.5*((x-mu)/sigma)**2
logb = (lambda arg: log(arg)/log(2)) if use_log2 else log
if logy:
return logb(a) + b*logb(np.e)
else:
return a*np.exp(b)
def pair_of_gaussians(x, offsets, sigma, logy, logx, geom, use_log2=False):
try:
sigma0, sigma1 = sigma[0], sigma[1]
except:
sigma0 = sigma1 = sigma
expb = (lambda arg: 2**arg) if use_log2 else np.exp
x = expb(x) if logx else x
xspan = (x[-1] - x[0])
xl = x[0] + offsets[0]*xspan # lower
xu = x[0] + offsets[1]*xspan # upper
return (
_gaussian(x, xl, sigma0, logy, logx, geom, use_log2),
_gaussian(x, xu, sigma1, logy, logx, geom, use_log2)
)
def integrate_rd(D=-3e-1, t0=0.0, tend=7., x0=0.1, xend=1.0, N=1024,
base=0.5, offset=0.25, nt=25, geom='f',
logt=False, logy=False, logx=False, random=False,
nstencil=3, lrefl=False, rrefl=False,
num_jacobian=False, method='bdf', plot=False,
savefig='None', atol=1e-6, rtol=1e-6, random_seed=42,
surf_chg=(0.0, 0.0), sigma_q=101, sigma_skew=0.5,
verbose=False, eps_rel=80.10, use_log2=False):
"""
A negative D (diffusion coefficent) denotes:
mobility := -D
D := 0
A positive D calculates mobility from Einstein-Smoluchowski relation
"""
assert 0 <= base and base <= 1
assert 0 <= offset and offset <= 1
if random_seed:
np.random.seed(random_seed)
n = 2
if D < 0:
mobility = -D
D = 0
else:
mobility = electrical_mobility_from_D(D, 1, 298.15)
print(D, mobility)
# Setup the grid
logb = (lambda arg: log(arg)/log(2)) if use_log2 else log
_x0 = logb(x0) if logx else x0
_xend = logb(xend) if logx else xend
x = np.linspace(_x0, _xend, N+1)
if random:
x += (np.random.random(N+1)-0.5)*(_xend-_x0)/(N+2)
# Setup the system
stoich_active = []
stoich_prod = []
k = []
rd = ReactionDiffusion(
n, stoich_active, stoich_prod, k, N,
D=[D, D],
z_chg=[1, -1],
mobility=[mobility, -mobility],
x=x,
geom=geom,
logy=logy,
logt=logt,
logx=logx,
nstencil=nstencil,
lrefl=lrefl,
rrefl=rrefl,
auto_efield=True,
surf_chg=surf_chg,
eps_rel=eps_rel, # water at 20 deg C
faraday_const=1,
vacuum_permittivity=1,
use_log2=use_log2
)
# Initial conditions
sigma = (xend-x0)/sigma_q
sigma = [(1-sigma_skew)*sigma, sigma_skew*sigma]
y0 = np.vstack(pair_of_gaussians(
rd.xcenters, [base+offset, base-offset], sigma, logy, logx, geom, use_log2)).transpose()
if logy:
y0 = sigm(y0)
if plot:
# Plot initial E-field
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 10))
rd.calc_efield((rd.expb(y0) if logy else y0).flatten())
plt.subplot(4, 1, 3)
plt.plot(rd.xcenters, rd.efield, label="E at t=t0")
plt.plot(rd.xcenters, rd.xcenters*0, label="0")
# Run the integration
tout = np.linspace(t0, tend, nt)
integr = run(rd, y0, tout,
atol=atol, rtol=rtol, sigm_damp=True,
C0_is_log=logy,
with_jacobian=(not num_jacobian), method=method)
Cout = integr.Cout
if verbose:
print(integr.info)
# Plot results
if plot:
def _plot(y, ttl=None, **kwargs):
plt.plot(rd.xcenters, y, **kwargs)
plt.xlabel((('log_%s({})' % ('2' if use_log2 else 'e')) if logx else '{}').format('x / m'))
plt.ylabel('C / M')
if ttl:
plt.title(ttl)
for i in range(nt):
plt.subplot(4, 1, 1)
c = 1-tout[i]/tend
c = (1.0-c, .5-c/2, .5-c/2)
_plot(Cout[i, :, 0], 'Simulation (N={})'.format(rd.N),
c=c, label='$z_A=1$' if i == nt-1 else None)
_plot(Cout[i, :, 1], c=c[::-1],
label='$z_B=-1$' if i == nt-1 else None)
plt.legend()
plt.subplot(4, 1, 2)
delta_y = Cout[i, :, 0] - Cout[i, :, 1]
_plot(delta_y, 'Diff',
c=[c[2], c[0], c[1]],
label='A-B (positive excess)' if i == nt-1 else None)
plt.legend(loc='best')
plt.xlabel("$x~/~m$")
plt.ylabel(r'Concentration / M')
ylim = plt.gca().get_ylim()
if N < 100:
plt.vlines(rd.x, ylim[0], ylim[1],
linewidth=1.0, alpha=0.2, colors='gray')
plt.subplot(4, 1, 3)
plt.plot(rd.xcenters, rd.efield, label="E at t=tend")
plt.xlabel("$x~/~m$")
plt.ylabel(r"$E~/~V\cdot m^{-1}$")
plt.legend()
for i in range(3):
plt.subplot(4, 1, i+1)
ylim = plt.gca().get_ylim()
for d in (-1, 1):
center_loc = [x0+(base+d*offset)*(xend-x0)]*2
plt.plot(rd.logb(center_loc) if logx else center_loc,
ylim, '--k')
plt.subplot(4, 1, 4)
for i in range(n):
amount = [rd.integrated_conc(Cout[j, :, i]) for j in range(nt)]
plt.plot(tout, amount, c=c[::(1, -1)[i]], label=chr(ord('A')+i))
plt.xlabel('Time / s')
plt.ylabel('Amount / mol')
plt.legend(loc='best')
plt.tight_layout()
save_and_or_show_plot(savefig=savefig)
return tout, Cout, integr.info, rd
if __name__ == '__main__':
argh.dispatch_command(integrate_rd, output_file=None)
|
chemreac/chemreac
|
examples/auto_efield.py
|
auto_efield.py
|
py
| 7,622 |
python
|
en
|
code
| 14 |
github-code
|
6
|
16930544030
|
from __future__ import absolute_import
from .dataset_iter import default_collate, DatasetIter
from .samplers import RandomSampler, SequentialSampler
import torch
import os
import os.path
import warnings
import fnmatch
import math
import numpy as np
try:
import nibabel
except:
warnings.warn('Cant import nibabel.. Cant load brain images')
try:
from PIL import Image
except:
warnings.warn('Cant import PIL.. Cant load PIL images')
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.nii.gz', '.npy'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def find_classes(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def pil_loader(path):
return Image.open(path).convert('RGB')
def npy_loader(path):
return torch.from_numpy(np.load(path).astype('float32'))
def nifti_loader(path):
return nibabel.load(path)
def make_dataset(directory, class_mode, class_to_idx=None,
input_regex=None, target_regex=None, ):
"""Map a dataset from a root folder"""
if class_mode == 'image':
if not input_regex and not target_regex:
raise ValueError('must give input_regex and target_regex if'+
' class_mode==image')
inputs = []
targets = []
for subdir in sorted(os.listdir(directory)):
d = os.path.join(directory, subdir)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in fnames:
if fnmatch.fnmatch(fname, input_regex):
path = os.path.join(root, fname)
inputs.append(path)
if class_mode == 'label':
targets.append(class_to_idx[subdir])
if class_mode == 'image' and fnmatch.fnmatch(fname, target_regex):
path = os.path.join(root, fname)
targets.append(path)
if class_mode is None:
return inputs
else:
return inputs, targets
class Dataset(object):
"""An abstract class representing a Dataset.
All other datasets should subclass it. All subclasses should override
``__len__``, that provides the size of the dataset, and ``__getitem__``,
supporting integer indexing in range from 0 to len(self) exclusive.
"""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def one_epoch(self):
"""Return an iterator that will loop through all the samples one time"""
return DatasetIter(self)
def __iter__(self):
"""Return an iterator that will loop through all the samples one time"""
return DatasetIter(self)
def __next__(self):
"""Return the next batch in the data. If this batch is the last
batch in the data, the iterator will be reset -- allowing you
to loop through the data ad infinitum
"""
new_batch = next(self._iter)
self.batches_seen += 1
if self.batches_seen % self.nb_batches == 0:
#print('Last Batch of Current Epoch')
self._iter = DatasetIter(self)
return new_batch
next = __next__
class FolderDataset(Dataset):
def __init__(self,
root,
class_mode='label',
input_regex='*',
target_regex=None,
transform=None,
target_transform=None,
co_transform=None,
loader='npy',
batch_size=1,
shuffle=False,
sampler=None,
num_workers=0,
collate_fn=default_collate,
pin_memory=False):
"""Dataset class for loading out-of-memory data.
Arguments
---------
root : string
path to main directory
class_mode : string in `{'label', 'image'}`
type of target sample to look for and return
`label` = return class folder as target
`image` = return another image as target as found by 'target_regex'
NOTE: if class_mode == 'image', you must give an
input and target regex and the input/target images should
be in a folder together with no other images in that folder
input_regex : string (default is any valid image file)
regular expression to find input images
e.g. if all your inputs have the word 'input',
you'd enter something like input_regex='*input*'
target_regex : string (default is Nothing)
regular expression to find target images if class_mode == 'image'
e.g. if all your targets have the word 'segment',
you'd enter somthing like target_regex='*segment*'
transform : torch transform
transform to apply to input sample individually
target_transform : torch transform
transform to apply to target sample individually
loader : string in `{'npy', 'pil', 'nifti'} or function
defines how to load samples from file
if a function is provided, it should take in a file path
as input and return the loaded sample.
Examples
--------
For loading input images and target images (e.g. image and its segmentation):
>>> data = FolderDataset(root=/path/to/main/dir,
class_mode='image', input_regex='*input*',
target_regex='*segment*', loader='pil')
For loading input images with sub-directory as class label:
>>> data = FolderDataset(root=/path/to/main/dir,
class_mode='label', loader='pil')
"""
if loader == 'npy':
loader = npy_loader
elif loader == 'pil':
loader = pil_loader
elif loader == 'nifti':
loader = nifti_loader
root = os.path.expanduser(root)
classes, class_to_idx = find_classes(root)
inputs, targets = make_dataset(root, class_mode,
class_to_idx, input_regex, target_regex)
if len(inputs) == 0:
raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = os.path.expanduser(root)
self.inputs = inputs
self.targets = targets
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.co_transform = co_transform
self.loader = loader
self.class_mode = class_mode
self.batch_size = batch_size
self.num_workers = num_workers
self.collate_fn = collate_fn
self.pin_memory = pin_memory
if sampler is not None:
self.sampler = sampler
elif shuffle:
self.sampler = RandomSampler(nb_samples=len(self.inputs))
elif not shuffle:
self.sampler = SequentialSampler(nb_samples=len(self.inputs))
if class_mode == 'image':
print('Found %i input images and %i target images' %
(len(self.inputs), len(self.targets)))
elif class_mode == 'label':
print('Found %i input images across %i classes' %
(len(self.inputs), len(self.classes)))
self.batches_seen = 0
self.nb_batches = int(math.ceil(len(self.sampler) / float(self.batch_size)))
self._iter = DatasetIter(self)
def __getitem__(self, index):
# get paths
input_sample = self.inputs[index]
target_sample = self.targets[index]
# load samples into memory
input_sample = self.loader(os.path.join(self.root, input_sample))
if self.class_mode == 'image':
target_sample = self.loader(os.path.join(self.root, target_sample))
# apply transforms
if self.transform is not None:
input_sample = self.transform(input_sample)
if self.target_transform is not None:
target_sample = self.target_transform(target_sample)
if self.co_transform is not None:
input_sample, target_sample = self.co_transform(input_sample, target_sample)
return input_sample, target_sample
def __len__(self):
return len(self.inputs)
class TensorDataset(Dataset):
def __init__(self,
input_tensor,
target_tensor=None,
transform=None,
target_transform=None,
co_transform=None,
batch_size=1,
shuffle=False,
sampler=None,
num_workers=0,
collate_fn=default_collate,
pin_memory=False):
"""Dataset class for loading in-memory data.
Arguments
---------
input_tensor : torch tensor
target_tensor : torch tensor
transform : torch transform
transform to apply to input sample individually
target_transform : torch transform
transform to apply to target sample individually
        co_transform : torch transform
            transform to apply to both input and target samples together
        Examples
        --------
        For loading an in-memory input tensor with an in-memory target tensor:
            >>> data = TensorDataset(input_tensor=x, target_tensor=y,
                    batch_size=32, shuffle=True)
        For loading an in-memory input tensor without targets:
            >>> data = TensorDataset(input_tensor=x, batch_size=32)
"""
self.inputs = input_tensor
self.targets = target_tensor
if target_tensor is None:
self.has_target = False
else:
self.has_target = True
self.transform = transform
self.target_transform = target_transform
self.co_transform = co_transform
self.batch_size = batch_size
self.num_workers = num_workers
self.collate_fn = collate_fn
self.pin_memory = pin_memory
if sampler is not None:
self.sampler = sampler
else:
if shuffle:
self.sampler = RandomSampler(nb_samples=len(self.inputs))
elif not shuffle:
self.sampler = SequentialSampler(nb_samples=len(self.inputs))
self.batches_seen = 0
self.nb_batches = int(math.ceil(len(self.sampler) / float(self.batch_size)))
self._iter = DatasetIter(self)
def __getitem__(self, index):
"""Return a (transformed) input and target sample from an integer index"""
# get paths
input_sample = self.inputs[index]
if self.has_target:
target_sample = self.targets[index]
# apply transforms
if self.transform is not None:
input_sample = self.transform(input_sample)
if self.has_target and self.target_transform is not None:
target_sample = self.target_transform(target_sample)
if self.has_target and self.co_transform is not None:
input_sample, target_sample = self.co_transform(input_sample, target_sample)
if self.has_target:
return input_sample, target_sample
else:
return input_sample
def __len__(self):
"""Number of samples"""
return self.inputs.size(0)
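# Hedged usage sketch (added example, not part of the original module): iterating batches
# from an in-memory TensorDataset; the tensor shapes below are hypothetical.
#
#     import torch
#
#     x = torch.randn(100, 3, 32, 32)
#     y = torch.randint(0, 10, (100,))
#     data = TensorDataset(x, y, batch_size=16, shuffle=True)
#     for xb, yb in data.one_epoch():
#         pass  # xb/yb are collated batches of up to 16 samples each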
|
huiyi1990/torchsample
|
torchsample/datasets.py
|
datasets.py
|
py
| 12,057 |
python
|
en
|
code
| null |
github-code
|
6
|
26552249009
|
#!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
# handle x64 python clipboard, ref https://forums.autodesk.com/t5/maya-programming/ctypes-bug-cannot-copy-data-to-clipboard-via-python/m-p/9197068/highlight/true#M10992
import ctypes
from ctypes import wintypes
CF_UNICODETEXT = 13
user32 = ctypes.WinDLL('user32')
kernel32 = ctypes.WinDLL('kernel32')
OpenClipboard = user32.OpenClipboard
OpenClipboard.argtypes = wintypes.HWND,
OpenClipboard.restype = wintypes.BOOL
CloseClipboard = user32.CloseClipboard
CloseClipboard.restype = wintypes.BOOL
EmptyClipboard = user32.EmptyClipboard
EmptyClipboard.restype = wintypes.BOOL
GetClipboardData = user32.GetClipboardData
GetClipboardData.argtypes = wintypes.UINT,
GetClipboardData.restype = wintypes.HANDLE
SetClipboardData = user32.SetClipboardData
SetClipboardData.argtypes = (wintypes.UINT, wintypes.HANDLE)
SetClipboardData.restype = wintypes.HANDLE
GlobalLock = kernel32.GlobalLock
GlobalLock.argtypes = wintypes.HGLOBAL,
GlobalLock.restype = wintypes.LPVOID
GlobalUnlock = kernel32.GlobalUnlock
GlobalUnlock.argtypes = wintypes.HGLOBAL,
GlobalUnlock.restype = wintypes.BOOL
GlobalAlloc = kernel32.GlobalAlloc
GlobalAlloc.argtypes = (wintypes.UINT, ctypes.c_size_t)
GlobalAlloc.restype = wintypes.HGLOBAL
GlobalSize = kernel32.GlobalSize
GlobalSize.argtypes = wintypes.HGLOBAL,
GlobalSize.restype = ctypes.c_size_t
GMEM_MOVEABLE = 0x0002
GMEM_ZEROINIT = 0x0040
def Paste( data ):
data = data.encode('utf-16le')
OpenClipboard(None)
EmptyClipboard()
handle = GlobalAlloc(GMEM_MOVEABLE | GMEM_ZEROINIT, len(data) + 2)
pcontents = GlobalLock(handle)
ctypes.memmove(pcontents, data, len(data))
GlobalUnlock(handle)
SetClipboardData(CF_UNICODETEXT, handle)
CloseClipboard()
def getFunctions(filepath):
selfmodule = (re.search(r'addons[\W]*([_a-zA-Z0-9]*)', filepath)).group(1)
# print("Checking {0} from {1}".format(filepath,selfmodule))
if (selfmodule.startswith("compat")): return []
with open(filepath, 'r') as file:
content = file.read()
srch = re.compile(r'[^E]FUNC\(([_a-zA-Z0-9]*)\)')
modfuncs = srch.findall(content)
modfuncs = sorted(set(modfuncs))
srch = re.compile(r'EFUNC\(([_a-zA-Z0-9]*),([_a-zA-Z0-9]*)\)')
exfuncs = srch.findall(content)
exfuncs = sorted(set(exfuncs))
fileFuncs = []
for func in modfuncs:
fileFuncs.append("ace_{0}_fnc_{1}".format(selfmodule,func))
for exModule,func in exfuncs:
fileFuncs.append("ace_{0}_fnc_{1}".format(exModule, func))
return fileFuncs
def getStrings(filepath):
selfmodule = (re.search(r'addons[\W]*([_a-zA-Z0-9]*)', filepath)).group(1)
# print("Checking {0} from {1}".format(filepath,selfmodule))
if (selfmodule.startswith("compat")): return []
with open(filepath, 'r') as file:
content = file.read()
srch = re.compile(r'[^E][CL]STRING\(([_a-zA-Z0-9]*)\)')
modStrings = srch.findall(content)
modStrings = sorted(set(modStrings))
srch = re.compile(r'E[CL]STRING\(([_a-zA-Z0-9]*),([_a-zA-Z0-9]*)\)')
exStrings = srch.findall(content)
exStrings = sorted(set(exStrings))
fileStrings = []
for localString in modStrings:
fileStrings.append("STR_ACE_{0}_{1}".format(selfmodule, localString))
for (exModule, exString) in exStrings:
fileStrings.append("STR_ACE_{0}_{1}".format(exModule, exString))
return fileStrings
def main():
print("#########################")
print("# All Functions #")
print("#########################")
sqf_list = []
allFunctions = []
allStrings = []
parser = argparse.ArgumentParser()
parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default=".")
args = parser.parse_args()
addon_base_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
for root, dirnames, filenames in os.walk(addon_base_path +"/" + 'addons' + '/' + args.module):
for filename in fnmatch.filter(filenames, '*.sqf'):
sqf_list.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.cpp'):
sqf_list.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.hpp'):
sqf_list.append(os.path.join(root, filename))
for filename in sqf_list:
allFunctions = allFunctions + getFunctions(filename)
for filename in sqf_list:
allStrings = allStrings + getStrings(filename)
codeHeader = "diag_log text '*********** Scaning for nil functions [funcs {0} / strings {1}]';".format(len(set(allFunctions)), len(set(allStrings)))
codeFuncCheck = "{ if (isNil _x) then {systemChat format ['%1 is nil', _x]; diag_log text format ['%1 is nil', _x];}} forEach allFunctions;"
codeStringCheck = "{ if (!isLocalized _x) then {systemChat format ['%1 is not in stringtable', _x]; diag_log text format ['%1 is not in stringtable', _x];}} forEach allStrings;"
outputCode = "{0} allFunctions = {1}; allStrings = {2}; {3} {4}".format(codeHeader, list(set(allFunctions)), list(set(allStrings)), codeFuncCheck, codeStringCheck)
print(outputCode)
Paste(outputCode)
print ("")
print ("Copied to clipboard, [funcs {0} / strings {1}]'".format(len(set(allFunctions)), len(set(allStrings))))
if __name__ == "__main__":
main()
|
acemod/ACE3
|
tools/search_undefinedFunctions.py
|
search_undefinedFunctions.py
|
py
| 5,461 |
python
|
en
|
code
| 966 |
github-code
|
6
|
41310847925
|
import os
from abc import ABC
from keras import Model, layers
from keras.layers import Conv2D, BatchNormalization, Add, MaxPool2D, GlobalAveragePooling2D, Flatten, Dense, Rescaling
import tensorflow as tf
class ResnetBlock(Model, ABC):
"""
A standard resnet block.
"""
def __init__(self, channels: int, down_sample=False):
"""
channels: same as number of convolution kernels
"""
super().__init__()
self.__channels = channels
self.__down_sample = down_sample
self.__strides = [2, 1] if down_sample else [1, 1]
KERNEL_SIZE = (3, 3)
# use He initialization, instead of Xavier (a.k.a 'glorot_uniform' in Keras), as suggested in [2]
INIT_SCHEME = "he_normal"
self.conv_1 = Conv2D(self.__channels, strides=self.__strides[0],
kernel_size=KERNEL_SIZE, padding="same", kernel_initializer=INIT_SCHEME)
self.bn_1 = BatchNormalization()
self.conv_2 = Conv2D(self.__channels, strides=self.__strides[1],
kernel_size=KERNEL_SIZE, padding="same", kernel_initializer=INIT_SCHEME)
self.bn_2 = BatchNormalization()
self.merge = Add()
if self.__down_sample:
# perform down sampling using stride of 2, according to [1].
self.res_conv = Conv2D(
self.__channels, strides=2, kernel_size=(1, 1), kernel_initializer=INIT_SCHEME, padding="same")
self.res_bn = BatchNormalization()
def call(self, inputs, training=None, mask=None):
res = inputs
x = self.conv_1(inputs)
x = self.bn_1(x)
x = tf.nn.relu(x)
x = self.conv_2(x)
x = self.bn_2(x)
if self.__down_sample:
res = self.res_conv(res)
res = self.res_bn(res)
# if not perform down sample, then add a shortcut directly
x = self.merge([x, res])
out = tf.nn.relu(x)
return out
class ResNet18(Model):
def __init__(self, num_classes, **kwargs):
"""
num_classes: number of classes in specific classification task.
"""
super().__init__(**kwargs)
self.conv_1 = Conv2D(64, (7, 7), strides=2,
padding="same", kernel_initializer="he_normal")
self.init_bn = BatchNormalization()
self.pool_2 = MaxPool2D(pool_size=(2, 2), strides=2, padding="same")
self.res_1_1 = ResnetBlock(64)
self.res_1_2 = ResnetBlock(64)
self.res_2_1 = ResnetBlock(128, down_sample=True)
self.res_2_2 = ResnetBlock(128)
self.res_3_1 = ResnetBlock(256, down_sample=True)
self.res_3_2 = ResnetBlock(256)
self.res_4_1 = ResnetBlock(512, down_sample=True)
self.res_4_2 = ResnetBlock(512)
self.avg_pool = GlobalAveragePooling2D()
self.flat = Flatten()
self.fc = Dense(num_classes, activation="softmax")
self.data_augmentation = tf.keras.Sequential(
[
layers.RandomFlip("horizontal",
input_shape=(int(os.getenv("img_height")),
int(os.getenv("img_width")),
3)),
layers.RandomRotation(0.1),
layers.RandomZoom(0.1),
]
)
def call(self, inputs, training=None, mask=None):
out = self.data_augmentation(inputs)
out = Rescaling(scale=1.0/255)(out)
out = self.conv_1(out)
out = self.init_bn(out)
out = tf.nn.relu(out)
out = self.pool_2(out)
for res_block in [self.res_1_1, self.res_1_2, self.res_2_1, self.res_2_2, self.res_3_1, self.res_3_2,
self.res_4_1, self.res_4_2]:
out = res_block(out)
out = self.avg_pool(out)
out = self.flat(out)
out = self.fc(out)
return out
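# Hedged usage sketch (added example, not part of the original module): building and
# compiling the model. The class count, image size and optimizer are hypothetical, and the
# img_height/img_width environment variables must be set before the model is constructed
# because the augmentation pipeline reads them in __init__.
#
#     import os
#     os.environ["img_height"] = "224"
#     os.environ["img_width"] = "224"
#
#     model = ResNet18(num_classes=5)
#     model.build(input_shape=(None, 224, 224, 3))
#     model.compile(optimizer="adam",
#                   loss="sparse_categorical_crossentropy",
#                   metrics=["accuracy"])
#     model.summary()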
|
beishangongzi/graduation_internship
|
utils/Resnet.py
|
Resnet.py
|
py
| 3,958 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21202048829
|
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from app.main import ShowcaseScreen
from app.widgets.heartfelt_hellos_button import HeartfeltHellosButton
from app.widgets.heartfelt_hellos_step_progression_button import HeartfeltHellosStepProgressionButton
from app.data.data_types.idea import Idea
class IdeaCreationScreen(ShowcaseScreen):
scroll_view = None
grid_layout = None
def __init__(self, **kwargs):
super(IdeaCreationScreen, self).__init__(**kwargs)
        # NOTE: replace tags and ideas with a global list so that it can be edited in this class
self.tags=["books", "movies", "sports"]
self.ideas=[]
#self.progress_grid = GridLayout(spacing='10dp', padding='10dp', cols=5, size_hint_y=None)
self.grid_layout = GridLayout(spacing='10dp', padding='10dp', cols=1, size_hint_y=None)
self.grid_layout.bind(minimum_height=self.grid_layout.setter("height"))
self.scroll_view = ScrollView(do_scroll_y=True)
self.add_widget(self.scroll_view)
self.scroll_view.add_widget(self.grid_layout)
def on_pre_enter(self, *args):
self.stepOne()
def stepOne(self):
self.grid_layout.clear_widgets()
self.grid_layout.add_widget(Label(text="What is your conversation idea?", font_size=24, color=(255,255,255)))
self.grid_layout.add_widget(Label())
# text box
textinput = TextInput(hint_text="Are you still into sports?", font_size=24, size_hint_y=None, multiline=False)
textinput.bind(text=lambda x, y: print("Hi"))
self.grid_layout.add_widget(textinput)
#self.name = textinput.text
# next and back button rendering
next_button = HeartfeltHellosStepProgressionButton(text="next",on_press=lambda x: self.stepTwo(textinput.text))
progress_grid=GridLayout(spacing='10dp', padding='10dp', cols=3, size_hint_y=None)
progress_grid.add_widget(Label())
progress_grid.add_widget(Label())
progress_grid.add_widget(next_button)
self.grid_layout.add_widget(progress_grid)
def stepTwo(self, prompt):
self.grid_layout.clear_widgets()
self.prompt=prompt
self.grid_layout.add_widget(Label(text="Search and select the tag(s)\nthat matches with your idea!", height=50, color=(255,255,255), size_hint_y=None))
# text box
#NOTE TO SELF: add filtering for search bar
textinput = TextInput(hint_text="Search Tag here", height=50, font_size=24, size_hint_y=None)
#textinput.bind(on_text_validate=on_enter(textinput.text))
self.grid_layout.add_widget(textinput)
# NOTE TO SELF: add scroll bar to tags section
#tag_grid=GridLayout(spacing='10dp', padding='10dp', cols=1, size_hint_y=None)
for tag in self.getTags():
tag_button = HeartfeltHellosButton(text=tag, height=50, on_press=lambda x: self.pressTag(x.text), size_hint_y=None)
self.grid_layout.add_widget(tag_button)
#self.grid_layout.add_widget(tag_grid)
# next and back button rendering
create_person_button = HeartfeltHellosStepProgressionButton(text="Create\nIdea", on_press=lambda x: self.createIdea())
back_button = HeartfeltHellosStepProgressionButton(text="back", on_press=lambda x: self.stepOne())
progress_grid=GridLayout(spacing='10dp', padding='10dp', cols=3, size_hint_y=None)
progress_grid.add_widget(back_button)
progress_grid.add_widget(Label())
progress_grid.add_widget(create_person_button)
self.grid_layout.add_widget(progress_grid)
def pressTag(self, name: str):
print("pressed " + str)
if name not in self.tags:
self.tags.append(name)
else:
self.tags.remove(name)
def createIdea(self):
print("pressed create idea")
# NOTE: return to idea screen + add self.idea to global list of ideas as input somehow
self.ideas.append(Idea(self.prompt, self.tags)) #place holder (something like that maybe?)
def on_leave(self, *args):
self.grid_layout.clear_widgets()
def getTags(self) -> list:
return ["sports", "books", "movies"]
|
JelindoGames/HeartfeltHellos
|
app/data/screen_types/deprecated/idea_creation_screen.py
|
idea_creation_screen.py
|
py
| 4,316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72789313789
|
import numpy as np
import time
from scipy import ndimage
from .toolkit import vectools
from .toolkit.colors import Colors as _C
import matplotlib.pyplot as plt
import matplotlib
import math
import cv2
import sys
import os
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
class Image:
def __init__(self, fn, calibration, lock=None):
"""
Image processing class
Parameters
----------
fn: string
filename of the image to be processed
"""
# t0 = time.time()
self.fn = fn
self.fn_npy = self.fn.split('.')[0] + '.npy'
self.id = int(self.fn.split('cpt')[-1].split('.')[0])
self.calibration = calibration
# calibration_path = __location__ + '/../data/calibration.npy'
# calibration = np.load(calibration_path)
self.midpoint = calibration[self.id - 1][:-1]
# self.midpoint = calibration[0][:-1]
print(_C.YEL + 'Processing image ' + _C.BOLD + fn + _C.ENDC)
if lock is not None:
with lock:
self.image = np.load(self.fn_npy)
# self.image = cv2.imread(self.fn, cv2.IMREAD_GRAYSCALE)
else:
# self.image = cv2.imread(self.fn, cv2.IMREAD_GRAYSCALE)
self.image = np.load(self.fn_npy)
# self.image = np.rot90(self.image)
# print('Image loaded in', str(round(time.time() - t0, 2)), 's')
self.dimensions = np.shape(self.image)
self.dimy, self.dimx = self.dimensions
def transformRadial(self, env=None, midpoint=None, plot=False):
"""
Creates a transformed image where a sector is mapped to r/phi coordinates
Parameters
----------
midpoint: 2-tuple of floats, optional
            Origin of the polar coordinate system. If None is given, the calibration data from the current class instantiation is taken
plot: bool, optional
Plot the transformed image. Default is False
Cannot be used if multiprocessing is active
Returns
-------
transformed: 2D array
Coordinate transformed image
angles: 1D array of floats
angles between which the image is fully covered
radii: 1D array of float
distance scaling of rmax in the transformed image
"""
r = self.dimx
if midpoint is None:
midpoint = self.midpoint
# t0 = time.time()
dr = midpoint[0] - self.dimx
rmax = r + dr
hplus = midpoint[1]
hminus = self.dimy - midpoint[1]
thetaPlus = -math.asin(hplus / rmax)
thetaMinus = math.asin(hminus / rmax)
# thetaPlus, thetaMinus = -thetaMinus, -thetaPlus
thetaPlus_idx = int((thetaPlus + np.pi) / (2 * np.pi) * self.dimy)
thetaMinus_idx = int((thetaMinus + np.pi) / (2 * np.pi) * self.dimy)
# c = tuple(midpoint)
cx, cy = midpoint
c = (cx, cy)
transformed = cv2.linearPolar(self.image, c, rmax, cv2.WARP_FILL_OUTLIERS)
# Destroy the image object to free memory
del self.image
angles = np.linspace(thetaPlus, thetaMinus, thetaMinus_idx - thetaPlus_idx, endpoint=True)
radii = np.linspace(0, rmax, self.dimx)
self.dimensions = np.shape(transformed)
self.dimy, self.dimx = self.dimensions
absoluteZero = (self.dimy / 2 - thetaPlus_idx) - 1
transformed = transformed[thetaPlus_idx:thetaMinus_idx]
# Pad the transformed image with the boundary value
"""
start_idx = np.argmax(transformed > 0, axis=1)
start_idx = np.ones((len(transformed)), np.uint8) * 1000
transformed[:, :999] = 0
for i in range(len(transformed)):
transformed[i][transformed[i] == 0] = transformed[i, start_idx[i]]"""
# Remove Calibration features
calib_size_px = np.mean(np.array([x[2] for x in self.calibration]))
calib_size_mm = env.calib_size_mm # Outer radius of calibration piece
tolerance = 1.1
calib_width_mm = env.calib_width_mm * tolerance # Width of the calibration piece
# pitch_mm = self.env.pitch_mm # Nominal electrode pitch
scale = calib_size_mm / calib_size_px
self.calibrationCutoff = (calib_size_mm - calib_width_mm) / scale * r / rmax
# pitch = pitch_mm / scale
transformed[:, int(self.calibrationCutoff):] = 0
for i in range(len(transformed)):
transformed[i][transformed[i] == 0] = transformed[i, int(self.calibrationCutoff) - 1]
# plot = True
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(transformed)
ax.axhline(y=absoluteZero)
ax.set_aspect('auto')
fig.savefig(__location__ + '/../img/out/cv2transform.png', dpi=300)
# print('Coordinate transformation completed in ', str(round(time.time() - t0, 2)), 's')
return transformed, angles, radii
def detectFeatures(self, matrix, thresh_std=.5, plot=False):
"""
Distinguish band from background in binary matrix
Parameters
----------
matrix: 2D array
8Bit single channel source matrix to be processed
plot: bool, optional
Plot the transformed image. Default is False
Cannot be used if multiprocessing is active
Returns
-------
proc: 2D array
Processed binary image
"""
# t0 = time.time()
start_range = 2000
# end_range = np.shape(matrix)[1] - start_range
# Initializing Empty array in Memory
proc = np.empty(np.shape(matrix))
start_search = np.empty(np.shape(matrix))[:, :start_range]
end_search = np.empty(np.shape(matrix))[:, start_range:]
matrix = matrix.astype(np.float64, copy=False)
# print('Blurring')
# Gaussian Blur to remove fast features
cv2.GaussianBlur(src=matrix, ksize=(15, 3), dst=proc, sigmaX=1.5, sigmaY=5)
# ndimage.maximum_filter(proc, size=(5, 5), output=proc)
# cv2.GaussianBlur(src=matrix[:, :start_range], ksize=(3, 0), dst=start_search, sigmaX=0, sigmaY=3)
# cv2.GaussianBlur(src=matrix[:, start_range:], ksize=(31, 11), dst=end_search, sigmaX=0, sigmaY=0.1)
start_search = matrix[:, :start_range]
end_search = matrix[:, start_range:int(self.calibrationCutoff)]
# print('Convolving')
# Convolving with Prewitt kernel in x-direction
prewitt_kernel_x = np.tile([-1, 0, 1], (15, 1))
# prewitt_kernel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
# print(prewitt_kernel_x)
# prewitt_kernel_x = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
kernel_y_width = 15
prewitt_kernel_y = np.array([[1] * kernel_y_width, [0] *
kernel_y_width, [-1] * kernel_y_width])
# prewitt_kernel_y_element = np.tile(np.ones(kernel_y_width), (15, 1))
# prewitt_kernel_y_end = np.concatenate((prewitt_kernel_y_element, np.zeros(15), -1 * prewitt_kernel_y_element))
# print(prewitt_kernel_y_end)
# print(prewitt_kernel_y_end)
cv2.threshold(src=start_search, dst=start_search, thresh=20, maxval=255, type=cv2.THRESH_TOZERO)
cv2.GaussianBlur(src=start_search, ksize=(21, 21), dst=start_search, sigmaX=50, sigmaY=0)
cv2.filter2D(src=start_search, kernel=prewitt_kernel_y, dst=start_search, ddepth=-1)
np.abs(start_search, out=start_search)
cv2.threshold(src=start_search, dst=start_search, thresh=80, maxval=1, type=cv2.THRESH_BINARY)
start_search = start_search.astype(np.uint8, copy=False)
n_labels, labels, l_stats, l_centroids = cv2.connectedComponentsWithStats(image=start_search, connectivity=4)
sizes = [s[-1] for s in l_stats]
sizes_original = sizes[:]
sizes.remove(max(sizes))
if len(sizes) == 0:
start_amp = 0
start_centroid = (0, 0)
else:
start_amp = max(sizes)
start_centroid_idx = sizes_original.index(start_amp)
start_centroid = (int(l_centroids[start_centroid_idx][1]), int(l_centroids[start_centroid_idx][0]))
cv2.threshold(src=end_search, dst=end_search, thresh=20, maxval=255, type=cv2.THRESH_TOZERO)
cv2.GaussianBlur(src=end_search, ksize=(21, 21), dst=end_search, sigmaX=50, sigmaY=0)
cv2.filter2D(src=end_search, kernel=prewitt_kernel_y, dst=end_search, ddepth=-1)
np.abs(end_search, out=end_search)
cv2.threshold(src=end_search, dst=end_search, thresh=80, maxval=1, type=cv2.THRESH_BINARY)
end_search = end_search.astype(np.uint8, copy=False)
n_labels, labels, l_stats, l_centroids = cv2.connectedComponentsWithStats(image=end_search, connectivity=4)
sizes = [s[-1] for s in l_stats]
sizes_original = sizes[:]
sizes.remove(max(sizes))
if len(sizes) == 0:
end_amp = 0
end_centroid = (0, 0)
else:
end_amp = max(sizes)
end_centroid_idx = sizes_original.index(end_amp)
end_centroid = (int(l_centroids[end_centroid_idx][1]), int(l_centroids[end_centroid_idx][0]) + start_range)
cv2.filter2D(src=proc, kernel=prewitt_kernel_x, dst=proc, ddepth=-1)
# ndimage.maximum_filter(proc, size=(5, 5), output=proc)
# cv2.GaussianBlur(src=proc, ksize=(11, 3), dst=proc, sigmaX=0, sigmaY=5)
np.abs(proc, out=proc)
#start_amp = start_search.max()
#start_idx = np.unravel_index(start_search.argmax(), start_search.shape)
start = (start_centroid, start_amp)
# end_amp = end_search.max()
# end_idx = np.unravel_index(end_search.argmax(), end_search.shape)
#ex, ey = end_idx
# if ey != 0:
# ey += start_range
#end_idx = (ex, ey)
end = (end_centroid, end_amp)
print(self.id)
print(start)
print(end)
print()
# del start_search
del end_search
# print('Thresholding')
# proc_mean = np.mean(proc)
# proc_std = np.std(proc)
# thresh = proc_mean + thresh_std * proc_std
thresh = 50.0
# thresh = proc_mean
# thresh = 0.1
#proc = proc * 255 / np.max(proc)
cv2.threshold(src=proc, dst=proc, thresh=thresh, maxval=1, type=cv2.THRESH_BINARY)
# cv2.adaptiveThreshold(src=proc,
# dst=proc,
# maxValue=1,
# thresholdType=cv2.THRESH_BINARY,
# adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,
# blockSize=1001,
# C=0)
proc = proc.astype(np.uint8, copy=False)
Morphkernel = np.ones((11, 11), np.uint8)
cv2.dilate(proc, Morphkernel, proc)
cv2.erode(proc, Morphkernel, proc)
# print('Connecting')
# Label the complement regions of the binary image
proc_inv = 1 - proc
n_labels, labels, l_stats, l_centroids = cv2.connectedComponentsWithStats(image=proc_inv, connectivity=4)
# The maximum number of pixels in a noise field
# Everything larger is considered to be background
fieldsize = 2e4
# Label background fields
gaps = []
for i, stat in enumerate(l_stats):
if stat[-1] > fieldsize:
gaps.append(i)
# Set background fields to zero
for gap in gaps:
labels[labels == gap] = 0
# Set all forground fields to one
labels[labels != 0] = 1
labels = labels.astype(np.uint8, copy=False)
# Combine foreground noise with with thresholded image
cv2.bitwise_or(src1=proc, src2=labels, dst=proc)
filtered = np.copy(proc)
#Morphkernel = np.ones((11, 11), np.uint8)
#cv2.dilate(proc, Morphkernel, proc)
#cv2.erode(proc, Morphkernel, proc)
# plot = True
if plot:
print('Plotting')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(filtered)
# ax.plot(filtered[0], lw=0.2)
# ax.imshow(end_search)
# ax.plot(filtered[0])
ax.set_aspect('auto')
ax.set_xlabel('Radius [px]')
ax.set_ylabel('Angle [idx]')
fig.savefig(__location__ + '/../img/out/filter' + str(self.id) + '.png', dpi=300, interpolation='none')
# print('Features detected in', str(round(time.time() - t0, 2)), 's')
return proc, (start, end)
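
# Sketch of the intended call sequence (the file names, calibration array and
# environment object below are placeholders, not values from this repository):
#
#   calibration = np.load('calibration.npy')
#   img = Image('scan_cpt1.jpg', calibration)
#   transformed, angles, radii = img.transformRadial(env=env)
#   binary, (start, end) = img.detectFeatures(transformed)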
|
lspgl/csat
|
sectorImage/core/image.py
|
image.py
|
py
| 12,778 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19239438392
|
# Sort strings as you like (문자열 내 마음대로 정렬하기)
# 12915
def solution(strings, n):
words = []
answer = []
strings.sort()
for i in range(len(strings)):
words.append((strings[i][n], i))
words.sort()
for w in words:
answer.append(strings[w[1]])
return answer
# strings n return
# ["sun", "bed", "car"] 1 ["car", "bed", "sun"]
# ["abce", "abcd", "cdx"] 2 ["abcd", "abce", "cdx"]
|
sdh98429/dj2_alg_study
|
PROGRAMMERS/level1/문자열_내_마음대로_정렬하기.py
|
문자열_내_마음대로_정렬하기.py
|
py
| 415 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15873196557
|
from datetime import datetime
def unix_to_dt(time):
return datetime.utcfromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')
class event():
def __init__(self, events):
self.type = events['type']
self.empty = False
if self.type == 'None':
self.empty = True
elif self.type == 'membership.nickname_changed':
self.nickname_changed(events['data'])
elif self.type == 'membership.announce.joined':
self.user_joined(events['data'])
elif self.type == 'membership.notifications.exited':
self.user_exited(events['data'])
elif self.type == 'membership.notifications.removed':
self.user_removed(events['data'])
elif self.type == 'membership.notifications.autokicked':
self.user_autokicked(events['data'])
elif self.type == 'group.avatar_change':
self.group_avatar_change(events['data'])
elif self.type == 'group.like_icon_set':
self.like_icon_change(events['data'])
elif self.type == 'group.name_change':
self.group_name_change(events['data'])
elif self.type == 'poll.created':
self.poll_created(events['data'])
elif self.type == 'poll.reminder':
self.poll_reminder(events['data'])
elif self.type == 'poll.finished':
self.poll_finished(events['data'])
elif self.type == 'message.deleted':
self.message_deleted(events['data'])
else:
print('Unknown event type: ' + self.type)
def nickname_changed(self, event_data):
self.user = event_data['user']
self.new_name = event_data['name']
def user_joined(self, event_data):
self.user = event_data['user']
def user_added(self, event_data):
self.user = event_data['adder_user']
self.added_users = event_data['added_users']
def user_exited(self, event_data):
self.placeholder = 'X'
self.charmap = event_data['charmap']
def user_removed(self, event_data):
self.user = event_data['remover_user']
self.removed_user = event_data['removed_user']
def user_autokicked(self, event_data):
self.user = event_data['user']
def group_avatar_change(self, event_data):
self.user = event_data['user']
self.url = event_data['avatar_url']
def like_icon_change(self, event_data):
self.user = event_data['user']
self.like_icon = [event_data['pack_id'], event_data['icon_index']]
# like_icon->type is ignored
def group_name_change(self, event_data):
self.user = event_data['user']
self.new_name = event_data['name']
'''POLL EVENTS'''
def poll_created(self, event_data):
self.user = event_data['user']
self.conversation = event_data['conversation']
self.poll = event_data['poll']
def poll_reminder(self, event_data):
self.conversation = event_data['conversation']
self.poll = event_data['poll']
self.expiration = unix_to_dt(event_data['expiration'])
def poll_finished(self, event_data):
self.conversation = event_data['conversation']
self.raw_options = event_data['options']
self.options = []
for opt in self.raw_options:
temp_option = {
"id": None,
"title": None,
"votes": None,
"voter_ids": None
}
temp_option["id"] = opt["id"]
temp_option["title"] = opt["title"]
try:
temp_option["votes"] = opt["votes"]
temp_option["voter_ids"] = opt["voter_ids"]
except:
temp_option["votes"] = 0
temp_option["voter_ids"] = []
self.options.append(temp_option)
'''MISC EVENTS'''
def message_deleted(self, event_data):
self.message_id = event_data['message_id']
self.deleted_at = unix_to_dt(event_data['deleted_at'])
self.deleted_at_ts = event_data['deleted_at']
self.deletion_actor = event_data['deletion_actor']
self.deleter_id = event_data['deleter_id']
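
# Illustrative usage (the payload below is a made-up example, not real GroupMe
# export data):
if __name__ == "__main__":
    sample = {
        "type": "membership.nickname_changed",
        "data": {"user": {"id": "123"}, "name": "NewNick"},
    }
    parsed = event(sample)
    print(parsed.type, parsed.new_name)  # membership.nickname_changed NewNick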
|
theTrueEnder/GroupMe-Export-Parser
|
Python_Scripts/events.py
|
events.py
|
py
| 4,193 |
python
|
en
|
code
| 2 |
github-code
|
6
|
33626128629
|
import jsonlines
import os
from pathlib import Path
from xml_handler import XmlHandler
from google_cloud_storage_client import GoogleCloudStorageClient
def main(event, context):
# Retrieve file from GCS
input_filename = event.get("name")
input_bucket_name = event.get("bucket")
output_bucket_name = os.environ.get("OUTPUT_BUCKET_NAME")
gcs_client = GoogleCloudStorageClient()
local_filename = gcs_client.download_file_from_gcs(bucket_name=input_bucket_name, source_blob=input_filename)
# Read file and parse to List[dict]
xml_content_dict = XmlHandler.read_xml_file(file_path=local_filename)
parsed_row_list = XmlHandler.parse_harvest_xml_to_json(content_dict=xml_content_dict)
# Write file to jsonlines
output_filename = Path(input_filename).stem + '.jsonl'
local_output_file = Path("/tmp") / output_filename
with jsonlines.open(local_output_file, mode="w") as writer:
writer.write_all(parsed_row_list)
# Upload file to GCS
gcs_client.upload_file_to_gcs(bucket_name=output_bucket_name, destination_blob=Path(output_filename).name,
source_filename=local_output_file, remove_local_file=True)
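
# Hypothetical local invocation (bucket and file names are placeholders; running
# this for real requires GCP credentials, access to the buckets and the
# OUTPUT_BUCKET_NAME environment variable):
if __name__ == "__main__":
    fake_event = {"name": "harvest.xml", "bucket": "my-input-bucket"}
    main(fake_event, context=None)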
|
MeneerBunt/MarjolandHarvestData
|
src/convert_xml_to_json/main.py
|
main.py
|
py
| 1,202 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39627862945
|
""" Text formatting script.
Alice in Wonderland, obtained from:
https://www.gutenberg.org/ebooks/28885
"""
def format_book(file_in, file_out):
with open(file_in) as f:
text = f.read()
text = text.replace('\r', ' ').replace(
'\n', ' ').replace('\t', ' ').replace('-', ' ')
    text = ' '.join(text.split())
ok_chars = [' ', '.', ',']
    text = ''.join(char for char in text if char.isalpha() or char in ok_chars)
text = text.lower()
text = text.replace(',', '').replace('.', '')
with open(file_out, 'w') as f:
f.write(text)
if __name__ == '__main__':
format_book('alice_in_wonderland_RAW.txt',
'alice_in_wonderland_FORMATTED.txt')
|
jmmanso/deepseries
|
examples/text_analytics/format_text.py
|
format_text.py
|
py
| 737 |
python
|
en
|
code
| 4 |
github-code
|
6
|
30367024091
|
from bisect import bisect
from math import ceil, floor, log10
from numpy import abs, argmin, array, isnan, linspace
# Local imports
from .formatters import BasicFormatter
__all__ = [
"AbstractScale",
"DefaultScale",
"FixedScale",
"Pow10Scale",
"LogScale",
"ScaleSystem",
"heckbert_interval",
"frange",
]
def frange(min, max, delta):
""" Floating point range. """
count = int(round((max - min) / delta)) + 1
return [min + i * delta for i in range(count)]
class AbstractScale(object):
""" Defines the general interface for scales. """
DEFAULT_NUM_TICKS = 8
def ticks(self, start, end, desired_ticks=None):
"""Returns the set of "nice" positions on this scale that enclose and
fall inside the interval (*start*,*end*).
Parameters
----------
start : number
The beginning of the scale interval.
end : number
The end of the scale interval.
desired_ticks : integer
Number of ticks that the caller would like to get
"""
raise NotImplementedError
def num_ticks(self, start, end, desired_ticks=None):
"""Returns an approximate number of ticks that this scale
produces for the given interval.
This method is used by the scale system to determine whether this is
the appropriate scale to use for an interval; the returned number of
ticks does not have to be exactly the same as what ticks() returns.
Parameters
----------
start : number
The beginning of the scale interval.
end : number
The end of the scale interval.
desired_ticks : integer
Number of ticks that the caller would like to get
Returns
-------
A float or an integer.
"""
raise NotImplementedError
def labels(self, start, end, numlabels=None, char_width=None):
"""Returns a series of ticks and corresponding strings for labels
that fall inside the interval (*start*,*end*).
Parameters
----------
start : number
The beginning of the scale interval.
end : number
The end of the scale interval.
numlabels : number
The ideal number of labels to generate on the interval.
char_width : number
The total character width available for labelling the interval.
One of *numlabels* or *char_width* must be provided. If both are
provided, then both are considered when picking label density and format.
"""
ticks = self.ticks(start, end, numlabels)
labels = self.formatter.format(ticks, numlabels, char_width)
return list(zip(ticks, labels))
def label_width(self, start, end, numlabels=None, char_width=None):
"""Returns an estimate of the total number of characters used by the
the labels that this scale produces for the given set of
inputs, as well as the number of labels.
Parameters
----------
start : number
The beginning of the scale interval.
end : number
The end of the scale interval.
numlabels : number
The ideal number of labels to generate on the interval.
char_width : number
The total character width available for labelling the interval.
Returns
-------
(numlabels, total label width)
"""
return self.formatter.estimate_width(
start, end, numlabels, char_width, ticker=self
)
class FixedScale(AbstractScale):
"""A scale with fixed resolution, and "nice" points that line up at
multiples of the resolution. An optional zero value can be defined
that offsets the "nice" points to (N*resolution+zero).
"""
def __init__(self, resolution, zero=0.0, formatter=None):
self.resolution = resolution
self.zero = zero
if formatter is None:
formatter = BasicFormatter()
self.formatter = formatter
def ticks(self, start, end, desired_ticks=None):
"""For FixedScale, *desired_ticks* is ignored.
Overrides AbstractScale.
"""
if start == end or isnan(start) or isnan(end):
return []
res = self.resolution
start -= self.zero
end -= self.zero
start_tick = int(ceil(start / res))
end_tick = int(floor(end / res))
ticks = [i * res for i in range(start_tick, end_tick + 1)]
return ticks
def num_ticks(self, start, end, desired_ticks=None):
"""For FixedScale, *desired_ticks* is ignored.
Overrides AbstractScale.
"""
if self.resolution is None or self.resolution == 0.0:
return 0
else:
return (end - start) / self.resolution
def _nice(x, round=False):
"""Returns a bracketing interval around interval *x*, whose endpoints fall
on "nice" values. If *round* is False, then it uses ceil(range)
This function is adapted from the original in Graphics Gems; the boundaries
have been changed to use (1, 2.5, 5, 10) as the nice values instead of
(1, 2, 5, 10).
"""
if x <= 0:
import warnings
warnings.warn(
"Invalid (negative) range passed to tick interval calculation"
)
x = abs(x)
expv = floor(log10(x))
f = x / pow(10, expv)
if round:
if f < 1.75:
nf = 1.0
elif f < 3.75:
nf = 2.5
elif f < 7.0:
nf = 5.0
else:
nf = 10.0
else:
if f <= 1.0:
nf = 1.0
elif f <= 2.5:
nf = 2.5
elif f <= 5.0:
nf = 5.0
else:
nf = 10.0
return nf * pow(10, expv)
def heckbert_interval(
data_low, data_high, numticks=8, nicefunc=_nice, enclose=False
):
"""Returns a "nice" range and resolution for an interval and a preferred
number of ticks, using Paul Heckbert's algorithm in Graphics Gems.
If *enclose* is True, then the function returns a min and a max that fall
inside *data_low* and *data_high*; if *enclose* is False, the nice interval
can be larger than the input interval.
"""
if data_high == data_low:
return data_high, data_low, 0
if numticks == 0:
numticks = 1
range = nicefunc(data_high - data_low)
if numticks > 1:
numticks -= 1
d = nicefunc(range / numticks, round=True)
if enclose:
graphmin = ceil(data_low / d) * d
graphmax = floor(data_high / d) * d
else:
graphmin = floor(data_low / d) * d
graphmax = ceil(data_high / d) * d
return graphmin, graphmax, d
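
# Worked example (follows from the definitions above): for the interval [0, 73]
# with 8 desired ticks, _nice(73) rounds the span up to 100, the step becomes
# _nice(100 / 7, round=True) == 10, and with enclose=False the bounds are
# floor/ceil'ed to multiples of 10:
#
#   heckbert_interval(0, 73, 8)   # -> (0.0, 80.0, 10.0)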
class DefaultScale(AbstractScale):
"""A dynamic scale that tries to place ticks at nice numbers (1, 2, 5, 10)
so that ticks don't "pop" as the resolution changes.
"""
def __init__(self, formatter=None):
if formatter is None:
formatter = BasicFormatter()
self.formatter = formatter
def ticks(self, start, end, desired_ticks=8):
"""Returns the set of "nice" positions on this scale that enclose and
fall inside the interval (*start*,*end*).
Implements AbstractScale.
"""
if start == end or isnan(start) or isnan(end):
return [start]
min, max, delta = heckbert_interval(
start, end, desired_ticks, enclose=True
)
return frange(min, max, delta)
def num_ticks(self, start, end, desired_ticks=8):
"""Returns an approximate number of ticks that this scale
produces for the given interval.
Implements AbstractScale.
"""
return len(self.ticks(start, end, desired_ticks))
class Pow10Scale(AbstractScale):
"""A dynamic scale that shows only whole multiples of powers of 10
(including powers < 1).
"""
def __init__(self, formatter=None):
if formatter is None:
formatter = BasicFormatter()
self.formatter = formatter
def ticks(self, start, end, desired_ticks=8):
"""Returns the set of "nice" positions on this scale that enclose and
fall inside the interval (*start*,*end*).
Implements AbstractScale.
"""
if start == end or isnan(start) or isnan(end):
return [start]
min, max, delta = heckbert_interval(
start, end, desired_ticks, nicefunc=self._nice_pow10, enclose=True
)
return frange(min, max, delta)
def num_ticks(self, start, end, desired_ticks=8):
"""Returns an approximate number of ticks that this scale
produces for the given interval.
Implements AbstractScale.
"""
return len(self.ticks(start, end, desired_ticks))
def _nice_pow10(self, x, round=False):
return pow(10, floor(log10(x)))
class LogScale(AbstractScale):
"""A dynamic scale that only produces ticks and labels that work well when
plotting data on a logarithmic scale.
"""
def __init__(self, formatter=None):
if formatter is None:
formatter = BasicFormatter()
self.formatter = formatter
# In the following utility functions, "irep" stands for "integer representation".
# For a given base interval size i (i.e. "magic number"), there is a one-to-one
# mapping between the nice tick values and the integers.
def _irep_to_value(self, n, i):
"""For a given "magic number" i (i.e. spacing of the evenly spaced ticks
in the decade [1,10]), compute the tick value of the given integer
representation."""
if i == 1:
j, k = divmod(n, 9)
v = (k + 1) * 10 ** j
return v
else:
j, k = divmod(n, int(10.0 / i))
if k == 0:
v = 10 ** j
else:
v = i * k * 10 ** j
return v
def _power_and_interval(self, x, i):
# j is the power of 10 of the decade in which x lies
j = int(ceil(log10(x))) - 1
# b is the interval size of the evenly spaced ticks in the decade
b = i * 10 ** j
return (j, b)
def _power_and_index_to_irep(self, j, k, i):
if i == 1:
n = j * 9 + (k - 1)
else:
n = j * int(10.0 / i) + k
return n
def _logtickceil_as_irep(self, x, i):
"""For a given "magic number" i (i.e. spacing of the evenly spaced ticks
in the decade [1,10]), compute the integer representation of the smallest
tick not less than x."""
j, b = self._power_and_interval(x, i)
k = int(ceil(float(x) / b))
n = self._power_and_index_to_irep(j, k, i)
return n
def _logtickfloor_as_irep(self, x, i):
"""For a given "magic number" i (i.e. spacing of the evenly spaced ticks
in the decade [1,10]), compute the integer representation of the largest
tick not greater than x."""
j, b = self._power_and_interval(x, i)
k = int(floor(float(x) / b))
n = self._power_and_index_to_irep(j, k, i)
return n
def ticks(self, start, end, desired_ticks=8):
""" Compute a "nice" set of ticks for a log scale."""
if start > end:
start, end = end, start
if start == 0.0:
# Whoever calls us with a value of 0.0 puts themselves at our mercy
log_start = 1e-9
else:
log_start = log10(start)
if end == 0.0:
log_end = 1e-9
else:
log_end = log10(end)
log_interval = log_end - log_start
if log_interval < 1.0:
# If the data is spaced by less than a factor of 10, then use
# regular/linear ticking
min, max, delta = heckbert_interval(
start, end, desired_ticks, enclose=True
)
return frange(min, max, delta)
elif log_interval < desired_ticks:
magic_numbers = [1, 2, 5]
for interval in magic_numbers:
n1 = self._logtickceil_as_irep(start, interval)
n2 = self._logtickfloor_as_irep(end, interval)
ticks = [
self._irep_to_value(n, interval) for n in range(n1, n2 + 1)
]
if len(ticks) < desired_ticks * 1.5:
return ticks
return ticks
else:
# Put lines at every power of ten
startlog = ceil(log_start)
endlog = floor(log_end)
expticks = linspace(startlog, endlog, endlog - startlog + 1)
return 10 ** expticks
def num_ticks(self, start, end, desired_ticks=8):
"""Returns an approximate number of ticks that this scale
produces for the given interval.
Implements AbstractScale.
"""
return len(self.ticks(start, end, desired_ticks))
##############################################################################
#
# ScaleSystem
#
##############################################################################
class ScaleSystem(object):
"""Represents a collection of scales over some range of resolutions.
This class has settings for a default scale that is used when ticking an
interval that is smaller than the finest resolution scale or larger than
the coarsest resolution scale.
"""
def __init__(self, *scales, **kw):
"""Creates a ScaleSystem
Usage::
ScaleSystem(scale1, .., scaleN, default_scale = DefaultScale())
If *default_scale* is not specified, then an instance of DefaultScale()
is created. If no *default_scale* is needed, then set it to None.
"""
self.scales = scales
self.default_scale = kw.get("default_scale", DefaultScale())
# Heuristics for picking labels
# The ratio of total label character count to the available character width
self.fill_ratio = 0.3
self.default_numticks = 8
def ticks(self, start, end, numticks=None):
"""Computes nice locations for tick marks.
Parameters
==========
start, end : number
The start and end values of the data.
numticks : number
The desired number of ticks to produce.
scales : a list of tuples of (min_interval, Scale)
Scales to use, in order from fine resolution to coarse.
If the end-start interval is less than a particular scale's
*min_interval*, then the previous scale is used.
Returns
=======
A list of positions where the ticks are to be placed.
"""
if numticks == 0:
return []
elif start == end or isnan(start) or isnan(end):
return []
elif numticks is None:
numticks = self.default_numticks
scale = self._get_scale(start, end, numticks)
ticks = scale.ticks(start, end, numticks)
return ticks
def labels(self, start, end, numlabels=None, char_width=None):
"""Computes position and labels for an interval
Parameters
----------
start : number
The beginning of the scale interval.
end : number
The end of the scale interval.
numlabels : number
The ideal number of labels to generate on the interval.
char_width : number
The total character width available for labelling the interval.
One of *numlabels* or *char_width* must be provided. If both are
provided, then both are considered when picking label density and format.
Returns
-------
A list of (tick position, string) tuples.
"""
# Check for insufficient arguments.
if numlabels is None and char_width is None:
raise ValueError(
"Either numlabels or char_width (or both) must be given."
)
if numlabels == 0 or char_width == 0 or isnan(start) or isnan(end):
return []
# There are three cases:
# 1. we are given numlabels but not char_width
# 2. we are given char_width and not numlabels
# 3. we are given both
#
# Case 1: Use numlabels to find the closest scale purely on tick count.
# Case 2: Query all scales for their approximate label_width, pick the
# closest one to char_width * self.fill_ratio
# Case 3: Use numlabels to find the closest scale based on tick count.
if numlabels and not char_width:
# numlabels was given, but not char_width.
scale = self._get_scale(start, end, numlabels)
labels = scale.labels(start, end, numlabels)
else:
# char_width was given.
if numlabels:
# Both numlabels and char_width were given.
scale = self._get_scale(start, end, numlabels)
try:
ndx = list(self.scales).index(scale)
low = max(0, ndx - 1)
high = min(len(self.scales), ndx + 1)
scales = self.scales[low:high]
except ValueError:
scales = [scale]
else:
# Only char_width was given.
if len(self.scales) == 0:
scales = [self.default_scale]
else:
scales = self.scales
counts, widths = zip(
*[
s.label_width(start, end, char_width=char_width)
for s in scales
]
)
widths = array(widths)
closest = argmin(abs(widths - char_width * self.fill_ratio))
if numlabels is None:
numlabels = scales[closest].num_ticks(
start, end, counts[closest]
)
labels = scales[closest].labels(
start, end, numlabels, char_width=char_width
)
return labels
def _get_scale(self, start, end, numticks):
if len(self.scales) == 0:
closest_scale = self.default_scale
else:
closest_scale = self._get_scale_np(start, end, numticks)
if self.default_scale is not None:
# Handle the edge cases and see if there is a major discrepancy between
# what the scales offer and the desired number of ticks; if so, revert
# to using the default scale
approx_ticks = closest_scale.num_ticks(start, end, numticks)
if (
(approx_ticks == 0)
or (numticks == 0)
or (abs(approx_ticks - numticks) / numticks > 1.2)
or (abs(numticks - approx_ticks) / approx_ticks > 1.2)
):
closest_scale = self.default_scale
return closest_scale
def _get_scale_bisect(self, start, end, numticks):
scale_intervals = [
s.num_ticks(start, end, numticks) for s in self.scales
]
sorted_scales = sorted(zip(scale_intervals, self.scales))
ndx = bisect(sorted_scales, numticks, lo=0, hi=len(self.scales))
if ndx == len(self.scales):
ndx -= 1
return sorted_scales[ndx][1]
def _get_scale_np(self, start, end, numticks):
# Extract the intervals from the scales we were given
scale_intervals = array(
[s.num_ticks(start, end, numticks) for s in self.scales]
)
closest = argmin(abs(scale_intervals - numticks))
return self.scales[closest]
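
# Example of intended use (illustrative; this module is normally imported as
# part of the chaco.scales package, so the snippet is shown as a comment):
#
#   system = ScaleSystem(FixedScale(resolution=10.0))
#   system.ticks(0.0, 73.0, numticks=8)   # -> [0.0, 10.0, ..., 70.0]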
|
enthought/chaco
|
chaco/scales/scales.py
|
scales.py
|
py
| 19,821 |
python
|
en
|
code
| 286 |
github-code
|
6
|
3727746961
|
### This file is meant to be run from a PC, *not* from the server. It extracts
# the data from the data file, posts it to the database and finally calls the
# add_days command to add the day range to the data.
import math
import pandas as pd
import requests
day_of_month = 6
def upload_data(fname):
data = extract_data(fname)
data_dict_list = []
url = create_url()
for _, row in data.iterrows():
aaa = row[5]
if math.isnan(aaa):
aaa = 'NULL'
dict = {"SubmitDateTime": str(row[0]),
"UserId": row[1]+1000,
"ExerciseId": row[2],
"LearningObjectiveId": 8025,
"Correct": min(row[4], 1),
"AbilityAfterAnswer": aaa}
data_dict_list.append(dict)
print(dict, ",")
# r = requests.post(url=url + "insert/", json=data_dict_list[0],
# auth=("Group2", "Group2-1234"))
# print(r.status_code, r.reason, url + "insert/")
r = requests.get(url + "add_days/start=2018-06-04&end=2018-06-0{}".format( str(day_of_month)), auth=("Group2", "Group2-1234"))
print(r.status_code, r.reason,
url + "add_days/start=2018-06-04&end=2018-06-0{}".format(
day_of_month))
def extract_data(fname):
data = pd.read_excel(fname)
return data
def create_url():
url = "http://applab.ai.ru.nl:5000/"
return url
if __name__ == "__main__":
day_of_month = 6
upload_data("C:/Users/Rick "
"Dijkstra/Documents/Study/Applab/SnappetDataAnoniem/"
"resultaten-radboud_anoniem 4-6-18.xlsx")
|
simgeekiz/ApplabAPI
|
group2api/utils/UploadData.py
|
UploadData.py
|
py
| 1,592 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72035219069
|
#!/usr/bin/env python3
from bcc import BPF
from http.server import HTTPServer, BaseHTTPRequestHandler
import sys
import threading
clone_ebpf = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/fs.h>
#define ARGSIZE 128
BPF_PERF_OUTPUT(events);
struct data_t {
u32 pid; // PID as in the userspace term (i.e. task->tgid in kernel)
u32 ppid; // Parent PID as in the userspace term (i.e task->real_parent->tgid in kernel)
u32 uid;
char comm[TASK_COMM_LEN];
};
int clone_ebpf(struct pt_regs *ctx) {
struct data_t data = {};
struct task_struct *task;
data.uid = bpf_get_current_uid_gid() & 0xffffffff;
data.pid = bpf_get_current_pid_tgid() >> 32;
task = (struct task_struct *)bpf_get_current_task();
data.ppid = task->real_parent->tgid;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
    events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
"""
pid = ""
ppid = ""
uid = ""
command = ""
def clone_ebpf_thread():
    # keep the BPF handle global so the perf-buffer callback can reach it
    global b
    b = BPF(text=clone_ebpf)
clone_fn_name = b.get_syscall_fnname("clone")
b.attach_kprobe(event=clone_fn_name, fn_name="clone_ebpf")
b["events"].open_perf_buffer(collect_events)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
def collect_events(cpu, data, size):
    # open_perf_buffer invokes this callback as callback(cpu, data, size)
    global pid, ppid, uid, command
    event = b["events"].event(data)
    pid = str(event.pid)
    ppid = str(event.ppid)
    uid = str(event.uid)
    command = event.comm.decode("utf-8", errors="replace")
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
print("received a get message", sys.stdout)
self.send_response(200)
self.end_headers()
response = "pid: " + pid + " ppid: " + ppid + " uid: " + uid + " command: " + command
self.wfile.write(response)
x = threading.Thread(target=clone_ebpf_thread)
x.start()
httpd = HTTPServer(('0.0.0.0', 8000), SimpleHTTPRequestHandler)
httpd.serve_forever()
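
# Illustrative check (assumes the script is running as root with BCC installed):
#   curl http://localhost:8000/
# should return the PID/PPID/UID/command of the most recent clone() call seen
# by the kprobe.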
|
madhusudanas/ebpf-mac-python
|
misc/hello_world1.py
|
hello_world1.py
|
py
| 1,927 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39120803262
|
def busca_profundidade(grafo, inicio, destino, visitados=None):
if visitados is None:
visitados = [inicio]
if inicio == destino:
return visitados
for proximo in grafo[inicio]:
if proximo not in visitados and destino not in visitados:
visitados = busca_profundidade(grafo, proximo, destino, visitados + [proximo])
return visitados
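
# Minimal illustrative run (the graph below is an assumed example, not data from
# this repository):
if __name__ == "__main__":
    grafo = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
    print(busca_profundidade(grafo, 'a', 'd'))  # -> ['a', 'b', 'd']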
|
AlbertoFelix/AgentesDeBusca
|
buscaProfundidade.py
|
buscaProfundidade.py
|
py
| 387 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
13395399902
|
from PIL import Image
def brighten_Image(pixelList):
pix_len = len(pixelList)
    for i in range(pix_len): # unpack each pixel tuple into its channels,
        current_pixel = pixelList[i] # then brighten each channel by 50
red = current_pixel[0]
green = current_pixel[1]
blue = current_pixel[2]
        # this is not the best way; the channels could be read directly
        # as pixelList[i][0], pixelList[i][1], etc.
        # clamp at 255 so already-bright channels do not overflow the 8-bit range
        newPixel = (min(red + 50, 255), min(green + 50, 255), min(blue + 50, 255))
        pixelList[i] = newPixel # replace the current pixel with one whose
        # R, G, B values are 50 higher (capped at 255)
return pixelList
def main():
myFile = "/home/h702546919/Desktop/jimmy.jpg"
my_img_obj = Image.open(myFile)
#my_img_obj.show()
#grabs each pixel's tuple
pixelList = list(my_img_obj.getdata())
#sends list of tuples to be brightend, gives back newPic
newPic = brighten_Image(pixelList)
#pushes new brighter pixels
my_img_obj.putdata(newPic)
#displays new pic and old one
#(learn how to make a canvas and print side by side ? )
my_img_obj.show()
return
#---#
main()
|
tommulvey/CSC15_python
|
10_3/brightenImage.py
|
brightenImage.py
|
py
| 1,001 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12789805626
|
from errno import EIO, ENOSPC, EROFS
import sys
import os
import traceback
import glob
from decimal import Decimal, getcontext
getcontext().prec = 6
assert sys.platform == 'linux', 'This script must be run only on Linux'
assert sys.version_info.major >= 3 and sys.version_info.minor >= 5, 'This script requires Python 3.5+'
assert os.geteuid() == 0, 'This script must be run as root'
try:
import sh
import time
import tempfile
import re
import logzero
import numpy
    import threading
import hashlib
import ctypes
from collections import OrderedDict
from io import StringIO
from typing import Optional, List, Dict
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
from errno import ENOENT
from stat import S_IFDIR, S_IFREG
from pynput.keyboard import Key, Listener
from utils import (
mute_system_sound,
unmute_system_sound,
enable_power_led,
disable_power_led,
init_simple_mixer_control,
save_replace_file,
file_read_bytes,
file_write_bytes,
file_read_bytes_direct
)
except ImportError as xie:
traceback.print_exc()
sys.exit(1)
APP_UNIXNAME = 'amiga_disk_devices'
APP_VERSION = '0.1'
TMP_PATH_PREFIX = os.path.join(tempfile.gettempdir(), APP_UNIXNAME)
LOG_PATHNAME = os.path.join(TMP_PATH_PREFIX, 'amiga_disk_devices.log')
ENABLE_LOGGER = False
ENABLE_REINIT_HANDLE_AFTER_SECS = 0
ENABLE_FLOPPY_DRIVE_READ_A_HEAD = True
ENABLE_SET_CACHE_PRESSURE = False
ENABLE_ADF_CACHING = True
DISABLE_SWAP = False
DEFAULT_READ_A_HEAD_SECTORS = 24 # 256 system default, 44 seems ok, 24 seems best
SYNC_DISKS_SECS = 60 * 3
AMIGA_DISK_DEVICE_TYPE_ADF = 1
AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB = 8
AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE = 2
AMIGA_DISK_DEVICE_TYPE_HDF = 5
AMIGA_DISK_DEVICE_TYPE_ISO = 10
FLOPPY_DEVICE_SIZE = 1474560
FLOPPY_ADF_SIZE = 901120
FLOPPY_DEVICE_LAST_SECTOR = 1474048
FLOPPY_ADF_EXTENSION = '.adf'
HD_HDF_EXTENSION = '.hdf'
CD_ISO_EXTENSION = '.iso'
ADF_BOOTBLOCK = numpy.dtype([
('DiskType', numpy.byte, (4, ) ),
('Chksum', numpy.uint32 ),
('Rootblock', numpy.uint32 )
])
SYSTEM_INTERNAL_SD_CARD_NAME = 'mmcblk0'
PHYSICAL_SECTOR_SIZE = 512
PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS = 100
STATUS_FILE_NAME = 'status.log'
CACHE_DATA_BETWEEN_SECS = 3
CACHED_ADFS_MAX_DIR_SIZE = 1073741824 # 1GB
CACHED_ADFS_DIR = os.path.realpath('./cached_adfs')
CACHED_ADF_SIGN = 'AMIPI400'
CACHED_ADF_HEADER_TYPE = 'CachedADFHeader'
CACHED_ADF_STR_ENCODING = 'ascii'
SHA512_LENGTH = 128
MAIN_LOOP_MAX_COUNTER = 0
fs_instance = None
key_cmd_pressed = False
key_delete_pressed = False
key_shift_pressed = False
os_read_write_mutex = threading.Lock()
devices_read_a_head_sectors = {}
def os_read(handle, offset, size):
with os_read_write_mutex:
os.lseek(handle, offset, os.SEEK_SET)
return os.read(handle, size)
def os_write(handle, offset, data):
with os_read_write_mutex:
os.lseek(handle, offset, os.SEEK_SET)
return os.write(handle, data)
class CachedADFHeader(ctypes.Structure):
_fields_ = [
('sign', ctypes.c_char * 32),
('header_type', ctypes.c_char * 32),
('sha512', ctypes.c_char * 129),
('mtime', ctypes.c_int64)
]
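
# Illustrative only: how a CachedADFHeader maps to raw bytes via ctypes (the
# actual header written to the floppy is produced by build_CachedADFHeader,
# referenced further down in this module; the field values here are placeholders):
#
#   hdr = CachedADFHeader()
#   hdr.sign = CACHED_ADF_SIGN.encode(CACHED_ADF_STR_ENCODING)
#   hdr.header_type = CACHED_ADF_HEADER_TYPE.encode(CACHED_ADF_STR_ENCODING)
#   hdr.mtime = 0
#   bytes(hdr)   # fixed-size struct, ready to be written at FLOPPY_DEVICE_LAST_SECTOR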
class AsyncFileOps(threading.Thread):
def __init__(self):
self._running = False
self._pathname_direct_readings = []
self._pathname_writings = []
self._pathname_deferred_writings = {}
threading.Thread.__init__(self)
def _direct_readings_by_pathname(self):
processed = 0
handles = {}
while self._pathname_direct_readings:
reading_data = self._pathname_direct_readings.pop(0)
try:
processed += 1
use_fd = None
use_fo = None
use_m = None
if reading_data['pathname'] in handles:
use_fd = handles[reading_data['pathname']][0]
use_fo = handles[reading_data['pathname']][1]
use_m = handles[reading_data['pathname']][2]
handles[reading_data['pathname']] = file_read_bytes_direct(
reading_data['pathname'],
reading_data['offset'],
reading_data['size'],
0,
use_fd,
use_fo,
use_m
)
if reading_data['read_handler_func']:
read_handler_func = reading_data['read_handler_func']
read_handler_func(
reading_data['pathname'],
reading_data['offset'],
reading_data['size']
)
except Exception as x:
traceback.print_exc()
print_log('_process_direct_readings_by_pathname', x)
print_log()
for pathname, handle_tuples in handles.items():
os.close(handle_tuples[0])
# handle_tuples[1].close()
handle_tuples[2].close()
return processed
# def _direct_readings_by_pathname(self):
# processed = 0
# while self._pathname_direct_readings:
# reading_data = self._pathname_direct_readings.pop(0)
# try:
# processed += 1
# file_read_bytes_direct(
# reading_data['pathname'],
# reading_data['offset'],
# reading_data['size']
# )
# if reading_data['read_handler_func']:
# read_handler_func = reading_data['read_handler_func']
# read_handler_func(
# reading_data['pathname'],
# reading_data['offset'],
# reading_data['size']
# )
# except Exception as x:
# print_log('_process_direct_readings_by_pathname', x)
# return processed
def _writings_by_pathname(self):
handles = {}
processed = 0
while self._pathname_writings:
disable_power_led()
write_data = self._pathname_writings.pop(0)
if write_data['pathname'] not in handles:
handles[write_data['pathname']] = os.open(write_data['pathname'], os.O_WRONLY)
fd = handles[write_data['pathname']]
try:
processed += 1
disable_power_led()
file_write_bytes(
write_data['pathname'],
write_data['offset'],
write_data['data'],
use_fd=fd
)
except Exception as x:
print_log('_process_writings_by_pathname', x)
disable_power_led()
for pathname, fd in handles.items():
os.close(fd)
return processed
def _deferred_one_time_writings_by_pathname(self, idle_total_secs):
handles = {}
for pathname, write_data in self._pathname_deferred_writings.copy().items():
if not write_data:
continue
if write_data['idle_min_secs'] < idle_total_secs:
continue
# print('write_data', write_data)
disable_power_led()
if write_data['pathname'] not in handles:
handles[write_data['pathname']] = os.open(write_data['pathname'], os.O_WRONLY)
fd = handles[write_data['pathname']]
try:
disable_power_led()
file_write_bytes(
write_data['pathname'],
write_data['offset'],
write_data['data'],
use_fd=fd
)
except Exception as x:
print_log('_process_writings_by_pathname', x)
disable_power_led()
if write_data['done_handler']:
write_data['done_handler'](
write_data,
write_data['done_handler_args']
)
self._pathname_deferred_writings[pathname] = None
for pathname, fd in handles.items():
os.close(fd)
def run(self):
idle_start_ts = 0
while self._running:
processed = 0
processed += self._direct_readings_by_pathname()
processed += self._writings_by_pathname()
if not processed:
# idle, process deferred one-time writings
if not idle_start_ts:
idle_start_ts = time.time()
idle_total_secs = time.time() - idle_start_ts
self._deferred_one_time_writings_by_pathname(idle_total_secs)
else:
idle_start_ts = 0
time.sleep(10 / 1000)
time.sleep(0)
def read_direct_by_pathname(self, pathname: str, offset, size, read_handler_func=None, max_at_a_time=None):
if max_at_a_time is not None:
if len(self._pathname_direct_readings) >= max_at_a_time:
return
self._pathname_direct_readings.append({
'pathname': pathname,
'offset': offset,
'size': size,
'read_handler_func': read_handler_func
})
def write_by_pathname(self, pathname: str, offset, data):
self._pathname_writings.append({
'pathname': pathname,
'offset': offset,
'data': data
})
def deferred_one_time_write_by_pathname(
self,
pathname,
offset,
data,
idle_min_secs,
done_handler=None,
done_handler_args=None
):
self._pathname_deferred_writings[pathname] = {
'pathname': pathname,
'offset': offset,
'data': data,
'idle_min_secs': idle_min_secs,
'done_handler': done_handler,
'done_handler_args': done_handler_args
}
def start(self):
self._running = True
return super().start()
def stop(self):
self._running = False
class AmigaDiskDevicesFS(LoggingMixIn, Operations):
_handles: Dict[str, int]
_access_times: Dict[str, float]
_modification_times: Dict[str, float]
def __init__(self, disk_devices: dict, async_file_ops: AsyncFileOps):
self._instance_time = time.time()
self._disk_devices = disk_devices
self._static_files = {
'/': dict(
st_mode=(S_IFDIR | 0o444),
st_ctime=self._instance_time,
st_mtime=self._instance_time,
st_atime=self._instance_time,
st_nlink=2,
st_size=4096
),
'/' + STATUS_FILE_NAME: dict(
st_mode=(S_IFREG | 0o444),
st_ctime=self._instance_time,
st_mtime=self._instance_time,
st_atime=self._instance_time,
st_nlink=1
)
}
self._handles = {}
self._mutex = threading.Lock()
self._access_times = {}
self._modification_times = {}
self._last_write_ts = 0
self._async_file_ops = async_file_ops
self._status_log_content = None
access = None
flush = None
getxattr = None
listxattr = None
open = None
opendir = None
release = None
releasedir = None
statfs = None
def _add_defaults(self, ipart_data):
if 'fully_cached' not in ipart_data:
ipart_data['fully_cached'] = False
if 'last_caching_ts' not in ipart_data:
ipart_data['last_caching_ts'] = 0
if 'enable_spinning' not in ipart_data:
ipart_data['enable_spinning'] = True
if 'cached_adf_pathname' not in ipart_data:
ipart_data['cached_adf_pathname'] = ''
def set_disk_devices(self, disk_devices: dict):
with self._mutex:
for ipart_dev, ipart_data in disk_devices.items():
self._add_defaults(ipart_data)
self._disk_devices = disk_devices
self._status_log_content = None
self._flush_handles()
def _flush_handles(self):
for device_pathname in list(self._handles.keys()):
if device_pathname not in self._disk_devices:
self._close_handle(device_pathname)
def _close_handles(self):
for device_pathname in list(self._handles.keys()):
self._close_handle(device_pathname)
def _close_handle(self, device_pathname: str):
handle = None
try:
handle = self._handles[device_pathname]
os.close(handle)
except:
pass
try:
del self._handles[device_pathname]
except:
pass
try:
del self._access_times[device_pathname]
except:
pass
try:
del self._modification_times[device_pathname]
except:
pass
return handle
def _open_handle(self, ipart_data: dict) -> Optional[int]:
device_pathname = ipart_data['device']
if device_pathname in self._handles:
return self._handles[device_pathname]
self._set_fully_cached(ipart_data, False)
is_readable = ipart_data['is_readable']
is_writable = ipart_data['is_writable']
mode = os.O_SYNC | os.O_RSYNC
if is_readable and is_writable:
mode |= os.O_RDWR
else:
mode |= os.O_RDONLY
try:
self._handles[device_pathname] = os.open(device_pathname, mode)
except:
return None
return self._handles[device_pathname]
def _find_file(self, public_name: str) -> Optional[dict]:
for ipart_dev, ipart_data in self._disk_devices.items():
if ipart_data['public_name'] == public_name:
return ipart_data
return None
def _save_file_access_time(self, device_pathname: str, _time: float = None) -> float:
if _time is None:
_time = time.time()
self._access_times[device_pathname] = _time
return _time
def _save_file_modification_time(self, device_pathname: str) -> float:
current_time = time.time()
self._modification_times[device_pathname] = current_time
self._last_write_ts = current_time
return current_time
def _get_file_access_time(self, device: str) -> float:
try:
return self._access_times[device]
except:
return self._save_file_access_time(device)
def _get_file_modification_time(self, device: str) -> float:
try:
return self._modification_times[device]
except:
return self._save_file_modification_time(device)
def _clear_pathname(self, pathname: str) -> str:
if pathname.startswith(os.path.sep):
pathname = pathname[1:]
return pathname
def _genrate_perm_int_mask(self,
user_can_read: bool,
user_can_write: bool,
user_can_execute: bool,
group_can_read: bool,
group_can_write: bool,
group_can_execute: bool,
other_can_read: bool,
other_can_write: bool,
other_can_execute: bool
) -> int:
bin_string = ''
bin_string += str(int(user_can_read))
bin_string += str(int(user_can_write))
bin_string += str(int(user_can_execute))
bin_string += str(int(group_can_read))
bin_string += str(int(group_can_write))
bin_string += str(int(group_can_execute))
bin_string += str(int(other_can_read))
bin_string += str(int(other_can_write))
bin_string += str(int(other_can_execute))
return int(bin_string, 2)
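    # For example, read+write (no execute) for user, group and other maps to
    # _genrate_perm_int_mask(True, True, False, True, True, False, True, True,
    # False) == int('110110110', 2) == 0o666, i.e. the familiar rw-rw-rw- mask.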
def getattr(self, path, fh=None):
with self._mutex:
self._flush_handles()
if path in self._static_files:
return self._static_files[path]
name = self._clear_pathname(path)
ipart_data = self._find_file(name)
if not ipart_data:
raise FuseOSError(ENOENT)
access_time = self._get_file_access_time(ipart_data['device'])
modification_time = self._get_file_modification_time(ipart_data['device'])
is_readable = ipart_data['is_readable']
is_writable = ipart_data['is_writable']
perm_int_mask = self._genrate_perm_int_mask(
is_readable, is_writable, False,
is_readable, is_writable, False,
is_readable, is_writable, False
)
return dict(st_mode=(S_IFREG | perm_int_mask),
st_nlink=1,
st_size=ipart_data['size'],
st_ctime=self._instance_time,
st_atime=access_time,
st_mtime=modification_time
)
def _partial_read(
self,
handle,
offset,
size,
max_read_size = None,
min_total_read_time_ms = None,
pre_read_callback = None,
post_read_callback = None,
callback_user_data = None
):
ex = None
to_read_size = size
all_data = bytes()
dynamic_offset = offset
read_time_ms = 0
total_read_time_ms = 0
count_real_read_sectors = 0
total_len_data = 0
while True:
try:
if pre_read_callback:
pre_read_callback(
read_time_ms,
total_read_time_ms,
callback_user_data
)
start_time = time.time()
data = os_read(handle, dynamic_offset, PHYSICAL_SECTOR_SIZE)
len_data = len(data)
dynamic_offset += len_data
total_len_data += len_data
read_time_ms = int((time.time() - start_time) * 1000)
total_read_time_ms += read_time_ms
if post_read_callback:
post_read_callback(
read_time_ms,
total_read_time_ms,
callback_user_data
)
if read_time_ms > PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS:
count_real_read_sectors += 1
all_data += data
to_read_size -= len_data
if len_data < PHYSICAL_SECTOR_SIZE:
break
if max_read_size is not None:
if total_len_data >= max_read_size:
break
if to_read_size <= 0:
if min_total_read_time_ms is not None:
if total_read_time_ms < min_total_read_time_ms:
continue
break
except Exception as x:
print_log('_partial_read', x)
ex = x
break
all_data = all_data[:size]
return {
'all_data': all_data,
'ex': ex,
'total_read_time_ms': total_read_time_ms,
'count_real_read_sectors': count_real_read_sectors
}
def _set_fully_cached(self, ipart_data, fully_cached_status):
if ipart_data['fully_cached'] != fully_cached_status:
ipart_data['fully_cached'] = fully_cached_status
self._status_log_content = None
def _pre_read_callback(self, read_time_ms, total_read_time_ms, callback_user_data):
ipart_data = callback_user_data
if not ipart_data['fully_cached']:
mute_system_sound(4)
self._save_file_access_time(ipart_data['device'])
def _floppy_read(self, handle, offset, size, ipart_data):
current_time = time.time()
if not ipart_data['last_caching_ts']:
ipart_data['last_caching_ts'] = current_time
if not ipart_data['fully_cached']:
mute_system_sound(4)
read_result = self._partial_read(
handle,
offset,
size,
None,
None,
self._pre_read_callback,
None,
ipart_data
)
if read_result['total_read_time_ms'] > PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS:
self._set_fully_cached(ipart_data, False)
# set_numlock_state(ipart_data['fully_cached'])
if ipart_data['fully_cached']:
if ipart_data['enable_spinning']:
self._async_file_ops.read_direct_by_pathname(
ipart_data['device'],
offset,
size,
None,
1
)
if read_result['ex'] is not None:
raise read_result['ex']
return read_result['all_data']
if read_result['total_read_time_ms'] < PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS \
and not ipart_data['fully_cached']:
if not ipart_data['fully_cached']:
if current_time - ipart_data['last_caching_ts'] >= CACHE_DATA_BETWEEN_SECS:
read_result2 = self._partial_read(
handle,
0,
PHYSICAL_SECTOR_SIZE,
FLOPPY_ADF_SIZE,
PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS,
self._pre_read_callback,
None,
ipart_data
)
ipart_data['last_caching_ts'] = current_time
if read_result2['total_read_time_ms'] < PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS:
self._set_fully_cached(ipart_data, True)
self._floppy_cache_adf(handle, ipart_data)
self._save_file_access_time(ipart_data['device'])
if read_result['ex'] is not None:
raise read_result['ex']
return read_result['all_data']
def _floppy_cache_adf(self, handle, ipart_data):
# should be called only once when saving cached ADF
# since read() and write() will not call
# _floppy_read()
if not ENABLE_ADF_CACHING:
return
# read whole ADF
read_result3 = self._partial_read(
handle,
0,
FLOPPY_ADF_SIZE,
FLOPPY_ADF_SIZE,
PHYSICAL_FLOPPY_SECTOR_READ_TIME_MS,
self._pre_read_callback,
None,
ipart_data
)
if ipart_data['cached_adf_sha512']:
# use existing sha512 ID
sha512_id = ipart_data['cached_adf_sha512']
print_log('Using existing SHA512 ID={sha512_id} for {filename} '.format(
filename=ipart_data['device'],
sha512_id=sha512_id
))
else:
# calculate the SHA512 hash from the ADF data just read
adf_hash = hashlib.sha512()
adf_hash.update(read_result3['all_data'])
sha512_id = adf_hash.hexdigest()
print_log('Calculated SHA512 ID={sha512_id} for {filename} '.format(
filename=ipart_data['device'],
sha512_id=sha512_id
))
cached_adf_pathname = os.path.join(
CACHED_ADFS_DIR,
build_cached_adf_filename(
sha512_id,
FLOPPY_ADF_EXTENSION
)
)
if not os.path.exists(cached_adf_pathname) or os.path.getsize(cached_adf_pathname) != FLOPPY_ADF_SIZE:
# save a copy of the ADF file in the cache dir
# sha512 + '.adf'
save_replace_file(
cached_adf_pathname,
read_result3['all_data'],
CACHED_ADFS_MAX_DIR_SIZE
)
os.sync()
# next call to read() or write() will be redirected to
# _floppy_read_cached() or _floppy_write_cached()
ipart_data['cached_adf_pathname'] = cached_adf_pathname
# # close the handle, it would not be needed anymore
# self._close_handle(ipart_data['device'])
print_log('{filename} saved cached ADF as {cached_adf_pathname}'.format(
filename=ipart_data['device'],
cached_adf_pathname=cached_adf_pathname
))
header = build_CachedADFHeader(sha512_id, int(os.path.getmtime(cached_adf_pathname)))
os_write(handle, FLOPPY_DEVICE_LAST_SECTOR, header)
# close the handle, it would not be needed anymore
self._close_handle(ipart_data['device'])
def _generate_status_log(self):
if self._status_log_content:
return self._status_log_content
content = ''
for ipart_dev, ipart_data in self._disk_devices.items():
content += 'device:' + ipart_dev + ', '
content += 'public_name:' + ipart_data['public_name'] + ', '
content += 'fully_cached:' + str(int(ipart_data['fully_cached']))
content += '\n'
self._status_log_content = content
return content
def _status_log_read(self, offset, size):
content = self._generate_status_log()
return bytes(
content[offset : offset + size],
'utf-8'
)
def _generic_read(self, handle, offset, size, ipart_data):
self._save_file_access_time(ipart_data['device'])
if ipart_data['is_disk_drive']:
disable_power_led()
return os_read(handle, offset, size)
def _open_cached_adf_handle(self, ipart_data: dict) -> Optional[int]:
pathname = ipart_data['cached_adf_pathname']
if pathname in self._handles:
return self._handles[pathname]
mode = os.O_SYNC | os.O_RSYNC | os.O_RDWR
try:
self._handles[pathname] = os.open(pathname, mode)
except:
return None
return self._handles[pathname]
def _floppy_read_cached(self, offset, size, ipart_data):
self._save_file_access_time(ipart_data['device'])
self._set_fully_cached(ipart_data, True)
if ipart_data['enable_spinning']:
self._async_file_ops.read_direct_by_pathname(
ipart_data['device'],
offset,
size,
None,
2
)
fd = self._open_cached_adf_handle(ipart_data)
# TODO use use_fd
return file_read_bytes(
ipart_data['cached_adf_pathname'],
offset,
size,
use_fd=fd
)
def _floppy_write_cached(self, offset, data, ipart_data):
self._save_file_modification_time(ipart_data['device'])
self._set_fully_cached(ipart_data, True)
self._async_file_ops.write_by_pathname(
ipart_data['device'],
offset,
data
)
def write_done_handler(write_data, done_handler_args):
# return
print(time.time(), 'data', locals())
fd = self._open_cached_adf_handle(ipart_data)
# TODO use use_fd
write_result = file_write_bytes(
ipart_data['cached_adf_pathname'],
offset,
data,
0,
use_fd=fd
)
header = build_CachedADFHeader(
ipart_data['cached_adf_sha512'],
int(os.path.getmtime(ipart_data['cached_adf_pathname']))
)
self._async_file_ops.deferred_one_time_write_by_pathname(
ipart_data['device'],
FLOPPY_DEVICE_LAST_SECTOR,
header,
1,
done_handler=write_done_handler,
done_handler_args=(ipart_data,)
)
return write_result
def read(self, path, size, offset, fh):
with self._mutex:
self._flush_handles()
name = self._clear_pathname(path)
if name == STATUS_FILE_NAME:
return self._status_log_read(offset, size)
ipart_data = self._find_file(name)
if not ipart_data:
raise FuseOSError(ENOENT)
file_size = ipart_data['size']
if offset + size > file_size:
size = file_size - offset
if offset >= file_size or size <= 0:
self._save_file_access_time(ipart_data['device'])
return b''
if ENABLE_ADF_CACHING:
if ipart_data['is_floppy_drive'] and ipart_data['cached_adf_pathname']:
return self._floppy_read_cached(offset, size, ipart_data)
handle = self._open_handle(ipart_data)
if handle is None:
self._save_file_access_time(ipart_data['device'])
raise FuseOSError(EIO)
if ipart_data['is_floppy_drive']:
return self._floppy_read(
handle,
offset,
size,
ipart_data
)
return self._generic_read(
handle,
offset,
size,
ipart_data
)
def truncate(self, path, length, fh=None):
# block devices cannot be truncated, so just return
return
def write(self, path, data, offset, fh):
with self._mutex:
self._flush_handles()
name = self._clear_pathname(path)
ipart_data = self._find_file(name)
if not ipart_data:
raise FuseOSError(ENOENT)
if not ipart_data['is_writable']:
raise FuseOSError(EROFS)
self._set_fully_cached(ipart_data, False)
self._save_file_modification_time(ipart_data['device'])
max_file_size = ipart_data['size']
len_data = len(data)
if offset + len_data > max_file_size or offset >= max_file_size:
self._save_file_modification_time(ipart_data['device'])
raise FuseOSError(ENOSPC)
if len_data == 0:
self._save_file_modification_time(ipart_data['device'])
return b''
if ENABLE_ADF_CACHING:
if ipart_data['is_floppy_drive'] and ipart_data['cached_adf_pathname']:
return self._floppy_write_cached(offset, data, ipart_data)
handle = self._open_handle(ipart_data)
if handle is None:
self._save_file_modification_time(ipart_data['device'])
raise FuseOSError(EIO)
if ipart_data['is_floppy_drive']:
mute_system_sound(4)
if ipart_data['is_disk_drive']:
disable_power_led()
ex = None
try:
result = os_write(handle, offset, data)
self._save_file_modification_time(ipart_data['device'])
if ipart_data['is_floppy_drive']:
mute_system_sound(4)
except Exception as x:
print_log('write', x)
ex = x
self._save_file_modification_time(ipart_data['device'])
if ex is not None:
raise ex
return result
def readdir(self, path, fh):
with self._mutex:
self._flush_handles()
entries = [
'.',
'..',
STATUS_FILE_NAME
]
if path != '/':
return entries
for ipart_dev, ipart_data in self._disk_devices.items():
entries.append(
ipart_data['public_name']
)
return entries
def destroy(self, path):
with self._mutex:
self._close_handles()
def print_log(*args):
if ENABLE_LOGGER:
if args:
logzero.logger.info(*args)
else:
print(*args)
def init_logger():
if not ENABLE_LOGGER:
return
print('Logging to ' + LOG_PATHNAME)
logzero.logfile(LOG_PATHNAME, maxBytes=1e6, backupCount=3, disableStderrLogger=True)
def print_app_version():
print('{name} v{version}'. format(
name=APP_UNIXNAME.upper(),
version=APP_VERSION
))
def check_pre_requirements():
check_system_binaries()
def configure_system():
print_log('Configuring system')
disable_swap()
set_cache_pressure()
def disable_swap():
if not DISABLE_SWAP:
return
print_log('Disable swap')
os.system('swapoff -a')
def set_cache_pressure():
if not ENABLE_SET_CACHE_PRESSURE:
return
print_log('Set cache pressure')
os.system('sysctl -q vm.vfs_cache_pressure=200')
def check_system_binaries():
print_log('Checking system binaries')
bins = [
'lsblk',
'sysctl',
'swapoff',
'blockdev',
'umount',
'hwinfo'
]
for ibin in bins:
if not sh.which(ibin):
print_log(ibin + ': command not found')
sys.exit(1)
def is_device_physical_floppy(
device_pathname: str,
device_data: dict,
physical_floppy_drives: dict
) -> bool:
return (
device_pathname in physical_floppy_drives
) and \
device_data['type'] == 'disk' and \
device_data['size'] == FLOPPY_DEVICE_SIZE
def is_device_physical_cdrom(
device_pathname: str,
device_data: dict,
physical_cdrom_drives: dict
) -> bool:
return (
device_pathname in physical_cdrom_drives
) and device_data['type'] == 'rom'
def is_device_physical_disk(device_data: dict) -> bool:
return (
not device_data['is_floppy_drive'] and
not device_data['is_cdrom_drive']
) and device_data['type'] == 'disk'
def get_partitions2(physical_cdrom_drives, physical_floppy_drives) -> 'OrderedDict[str, dict]':
lsblk_buf = StringIO()
pattern = r'NAME="(\w*)" SIZE="(\d*)" TYPE="(\w*)" MOUNTPOINT="(.*)" LABEL="(.*)" PATH="(.*)" FSTYPE="(.*)" PTTYPE="(.*)" RO="(.*)"'
ret: OrderedDict[str, dict] = OrderedDict()
try:
# lsblk -P -o name,size,type,mountpoint,label,path,fstype,pttype,ro -n -b
sh.lsblk('-P', '-o', 'name,size,type,mountpoint,label,path,fstype,pttype,ro', '-n', '-b', _out=lsblk_buf)
except Exception as x:
print_log('get_partitions2 lsblk', x)
return None
for line in lsblk_buf.getvalue().splitlines():
line = line.strip()
if not line:
continue
search_result = re.search(pattern, line)
if not search_result:
continue
found = search_result.groups()
full_path = found[5]
device_basename = os.path.basename(full_path)
if device_basename.startswith(SYSTEM_INTERNAL_SD_CARD_NAME):
continue
device_data = {
'mountpoint': found[3],
'label': found[4],
'config': None,
'device': full_path,
'device_basename': device_basename,
'is_floppy_drive': False,
'is_cdrom_drive': False,
'is_disk_drive': False,
'size': int(found[1]) if found[1] else 0,
'type': found[2],
'fstype': found[6],
'pttype': found[7],
'is_readable': True,  # in Linux a device is readable by default
'is_writable': not bool(int(found[8]))
}
device_data['is_floppy_drive'] = is_device_physical_floppy(
full_path,
device_data,
physical_floppy_drives
)
device_data['is_cdrom_drive'] = is_device_physical_cdrom(
full_path,
device_data,
physical_cdrom_drives
)
device_data['is_disk_drive'] = is_device_physical_disk(
device_data
)
if device_data['is_cdrom_drive']:
device_data['is_writable'] = False
if is_unknown_disk(device_data):
# do not add unknown cd/dvd
continue
ret[full_path] = device_data
return ret
def print_partitions(partitions: dict):
if not partitions:
return
print_log('Known partitions:')
for key, value in partitions.items():
print_log(key)
print_log(' mountpoint: ' + str(value['mountpoint']))
print_log(' label: ' + str(value['label']))
print_log(' is_floppy_drive: ' + str(value['is_floppy_drive']))
print_log(' is_cdrom_drive: ' + str(value['is_cdrom_drive']))
print_log(' is_disk_drive: ' + str(value['is_disk_drive']))
print_log(' size: ' + str(value['size']))
print_log(' type: ' + str(value['type']))
print_log(' pttype: ' + str(value['pttype']))
print_log(' fstype: ' + str(value['fstype']))
print_log()
def device_get_public_name(ipart_data: dict):
pathname = ipart_data['device'].replace(os.path.sep, '__')
if ipart_data['amiga_device_type'] == AMIGA_DISK_DEVICE_TYPE_ADF:
pathname += FLOPPY_ADF_EXTENSION
elif ipart_data['amiga_device_type'] == AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE or \
ipart_data['amiga_device_type'] == AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB:
pathname += HD_HDF_EXTENSION
if ipart_data['amiga_device_type'] == AMIGA_DISK_DEVICE_TYPE_ISO:
pathname += CD_ISO_EXTENSION
return pathname
def get_hdf_type(pathname: str) -> int:
# TODO test me
file_stat = os.stat(pathname)
data = file_read_bytes(pathname, 0, PHYSICAL_SECTOR_SIZE)
if len(data) < 4:
return None
char_0 = chr(data[0])
char_1 = chr(data[1])
char_2 = chr(data[2])
char_3 = chr(data[3])
first_4_chars = ''.join([char_0, char_1, char_2, char_3])
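# 'RDSK' marks an Amiga Rigid Disk Block, while 'DOS*' marks an AmigaDOS volume/disk image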
if first_4_chars == 'RDSK':
return AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB
elif first_4_chars.startswith('DOS'):
if file_stat.st_size < 4 * 1024 * 1024:
return AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE
else:
return AMIGA_DISK_DEVICE_TYPE_HDF
return None
def hdf_type_to_str(hdf_type: int):
if hdf_type == AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB:
return 'RDSK'
elif hdf_type == AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE:
return 'DISKIMAGE'
elif hdf_type == AMIGA_DISK_DEVICE_TYPE_HDF:
return 'HDF'
return None
def remove_known_disk_devices(partitions: dict, disk_devices: dict):
count_removed = 0
for device_pathname, device_data in disk_devices.copy().items():
if device_pathname not in partitions:
continue
ipart_data = partitions[device_pathname]
remove = not is_unknown_disk(ipart_data) and \
not ipart_data['is_cdrom_drive'] and \
not device_data['force_add']
if remove:
print_log(device_pathname, 'removing incorrectly added device')
del disk_devices[device_pathname]
count_removed += 1
return count_removed
def cleanup_disk_devices(partitions: dict, disk_devices: dict):
for ipart_dev in list(disk_devices.keys()):
if ipart_dev not in partitions:
del disk_devices[ipart_dev]
print_log(ipart_dev, 'ejected')
def add_adf_disk_device(
ipart_dev: str,
ipart_data: dict,
disk_devices: dict,
force_add: bool = False
):
print_log('{filename} using as ADF'.format(
filename=ipart_dev
))
if ENABLE_FLOPPY_DRIVE_READ_A_HEAD:
set_device_read_a_head_sectors(ipart_dev, DEFAULT_READ_A_HEAD_SECTORS)
else:
set_device_read_a_head_sectors(ipart_dev, 0)
disk_devices[ipart_dev] = ipart_data.copy()
disk_devices[ipart_dev]['amiga_device_type'] = AMIGA_DISK_DEVICE_TYPE_ADF
disk_devices[ipart_dev]['public_name'] = device_get_public_name(disk_devices[ipart_dev])
disk_devices[ipart_dev]['size'] = FLOPPY_ADF_SIZE
disk_devices[ipart_dev]['force_add'] = force_add
disk_devices[ipart_dev]['cached_adf_pathname'] = ''
disk_devices[ipart_dev]['cached_adf_sha512'] = ''
update_cached_adf_data(ipart_dev, disk_devices[ipart_dev])
def build_CachedADFHeader(sha512_id, mtime):
header = CachedADFHeader()
header.sign = bytes(CACHED_ADF_SIGN, CACHED_ADF_STR_ENCODING)
header.header_type = bytes(CACHED_ADF_HEADER_TYPE, CACHED_ADF_STR_ENCODING)
header.sha512 = bytes(sha512_id, CACHED_ADF_STR_ENCODING)
header.mtime = mtime
return bytes(header)
def build_cached_adf_filename(sha512_id, ext):
return sha512_id + ext
def update_cached_adf_data(ipart_dev: str, ipart_data: dict):
if not ENABLE_ADF_CACHING:
return
last_sector_data = file_read_bytes(ipart_dev, FLOPPY_DEVICE_LAST_SECTOR, PHYSICAL_SECTOR_SIZE)
adf_header = CachedADFHeader.from_buffer_copy(last_sector_data)
decoded_sign = ''
decoded_header_type = ''
decoded_sha512 = ''
try:
decoded_sign = str(adf_header.sign, CACHED_ADF_STR_ENCODING)
decoded_header_type = str(adf_header.header_type, CACHED_ADF_STR_ENCODING)
decoded_sha512 = str(adf_header.sha512, CACHED_ADF_STR_ENCODING)
except UnicodeDecodeError:
pass
if adf_header.mtime < 0:
adf_header.mtime = 0
if decoded_sign != CACHED_ADF_SIGN or \
decoded_header_type != CACHED_ADF_HEADER_TYPE or \
not decoded_sha512 or \
len(decoded_sha512) < SHA512_LENGTH:
# ADF not cached
return
ipart_data['cached_adf_sha512'] = decoded_sha512
cached_adf_pattern = os.path.join(
CACHED_ADFS_DIR,
build_cached_adf_filename(
decoded_sha512,
FLOPPY_ADF_EXTENSION
)
)
print_log('{filename} looking for {cached_adf_pattern}'.format(
filename=ipart_dev,
cached_adf_pattern=cached_adf_pattern
))
found_cached_adfs = list(glob.glob(cached_adf_pattern))
if not found_cached_adfs or \
not os.path.exists(found_cached_adfs[0]):
print_log('{filename} is cached ADF (ID={sha512_id}, mtime={mtime}, cached file does not exist, existing ID will be used)'.format(
filename=ipart_dev,
sha512_id=decoded_sha512,
mtime=adf_header.mtime
))
return
if os.path.getsize(found_cached_adfs[0]) != FLOPPY_ADF_SIZE:
print_log('{filename} is cached ADF (ID={sha512_id}, mtime={mtime}, cached file has incorrect size, removing, existing ID will be used)'.format(
filename=ipart_dev,
sha512_id=decoded_sha512,
mtime=adf_header.mtime
))
os.remove(found_cached_adfs[0])
return
# if Decimal(os.path.getmtime(found_cached_adfs[0])) < Decimal(adf_header.mtime):
if int(os.path.getmtime(found_cached_adfs[0])) < adf_header.mtime:
print_log('{filename} is cached ADF (ID={sha512_id}, mtime={mtime}, cached file has incorrect mtime, removing, existing ID will be used)'.format(
filename=ipart_dev,
sha512_id=decoded_sha512,
mtime=adf_header.mtime
))
os.remove(found_cached_adfs[0])
return
ipart_data['cached_adf_pathname'] = found_cached_adfs[0]
print_log('{filename} is cached ADF (ID={sha512_id}, as {cached_adf_pathname})'.format(
filename=ipart_dev,
sha512_id=decoded_sha512,
cached_adf_pathname=found_cached_adfs[0]
))
def add_hdf_disk_device(
ipart_dev: str,
ipart_data: dict,
disk_devices: dict,
_type: int,
force_add: bool = False
):
print_log('{filename} using as HDF'.format(
filename=ipart_dev
))
disk_devices[ipart_dev] = ipart_data.copy()
disk_devices[ipart_dev]['amiga_device_type'] = _type
disk_devices[ipart_dev]['public_name'] = device_get_public_name(disk_devices[ipart_dev])
disk_devices[ipart_dev]['force_add'] = force_add
def add_bigger_disk_device(
ipart_dev: str,
ipart_data: dict,
disk_devices: dict,
force_add: bool = False
):
hdf_type = get_hdf_type(ipart_dev)
if not hdf_type:
# could be iso
print_log('{filename} cannot determine disk device type, using DISKIMAGE by default'.format(
filename=ipart_dev
))
hdf_type = AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE
if hdf_type != AMIGA_DISK_DEVICE_TYPE_HDF_DISKIMAGE and \
hdf_type != AMIGA_DISK_DEVICE_TYPE_HDF_HDFRDB and \
hdf_type != AMIGA_DISK_DEVICE_TYPE_HDF:
print_log('{filename} {_type} is not supported'.format(
filename=ipart_dev,
_type=hdf_type_to_str(hdf_type)
))
return
add_hdf_disk_device(
ipart_dev,
ipart_data,
disk_devices,
hdf_type,
force_add
)
def add_iso_disk_device(ipart_dev: str, ipart_data: dict, disk_devices: dict):
print_log('{filename} using as ISO'.format(
filename=ipart_dev
))
disk_devices[ipart_dev] = ipart_data.copy()
disk_devices[ipart_dev]['amiga_device_type'] = AMIGA_DISK_DEVICE_TYPE_ISO
disk_devices[ipart_dev]['public_name'] = device_get_public_name(disk_devices[ipart_dev])
disk_devices[ipart_dev]['force_add'] = False
def is_unknown_disk(ipart_data: dict) -> bool:
return ipart_data['fstype'] == '' and ipart_data['pttype'] == ''
def add_disk_devices2(partitions: dict, disk_devices: dict):
force_add = is_cmd_shift_pressed()
clear_pressed_keys()
for ipart_dev, ipart_data in partitions.items():
if ipart_dev in disk_devices:
continue
unknown = is_unknown_disk(ipart_data)
if ipart_data['is_floppy_drive']:
if not unknown and not force_add:
continue
add_adf_disk_device(
ipart_dev,
ipart_data,
disk_devices,
force_add
)
if not disk_devices[ipart_dev]['cached_adf_pathname']:
# ADF is not cached, need to mute the system sound
mute_system_sound(6)
elif ipart_data['is_disk_drive']:
if not unknown and not force_add:
continue
add_bigger_disk_device(
ipart_dev,
ipart_data,
disk_devices,
force_add
)
elif ipart_data['is_cdrom_drive']:
add_iso_disk_device(
ipart_dev,
ipart_data,
disk_devices
)
def is_adf_header(header: bytes) -> bool:
# TODO provide better method to detect ADF header
parsed_header = numpy.frombuffer(header, ADF_BOOTBLOCK, 1)[0]
disk_type = parsed_header['DiskType'].tobytes().decode('ascii', 'ignore').rstrip('\0')
if disk_type != 'DOS':
return False
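# only bits 0-2 of the fourth DiskType byte carry filesystem flag bits; any other bit set
# means this is not a regular ADF bootblock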
disk_type_other_bits = clear_bits(
parsed_header['DiskType'][3],
[0, 1, 2]
)
if disk_type_other_bits != 0:
return False
return True
def clear_bits(i: int, bits: list) -> int:
for ibit in bits:
i = i & ~(1<<ibit)
return i
def update_disk_devices(partitions: dict, disk_devices: dict):
cleanup_disk_devices(partitions, disk_devices)
add_disk_devices2(partitions, disk_devices)
def run_fuse(disk_devices: dict, async_file_ops: AsyncFileOps):
global fs_instance
fs_instance = AmigaDiskDevicesFS(disk_devices, async_file_ops)
FUSE(
fs_instance,
TMP_PATH_PREFIX,
foreground=True,
allow_other=True,
direct_io=True
)
def init_fuse(disk_devices: dict, async_file_ops: AsyncFileOps):
print_log('Init FUSE')
fuse_instance_thread = threading.Thread(target=run_fuse, args=(disk_devices, async_file_ops,))
fuse_instance_thread.start()
return fuse_instance_thread
def unmount_fuse_mountpoint():
print_log('Unmounting FUSE mountpoint')
os.system('umount {dir}'.format(
dir=TMP_PATH_PREFIX
))
def mkdir_fuse_mountpoint():
os.makedirs(TMP_PATH_PREFIX, exist_ok=True)
def affect_fs_disk_devices(disk_devices: dict):
global fs_instance
if not fs_instance:
return
fs_instance.set_disk_devices(disk_devices.copy())
def set_device_read_a_head_sectors(device: str, sectors: int):
global devices_read_a_head_sectors
if device not in devices_read_a_head_sectors:
devices_read_a_head_sectors[device] = None
if devices_read_a_head_sectors[device] == sectors:
return
devices_read_a_head_sectors[device] = sectors
os.system('blockdev --setra {sectors} {device}'.format(
sectors=sectors,
device=device
))
def find_new_devices(partitions: dict, old_partitions: dict) -> List[str]:
new_devices = []
for ipart_dev, ipart_data in partitions.items():
if not old_partitions or ipart_dev not in old_partitions:
new_devices.append(ipart_dev)
return new_devices
def quick_format_single_device(device: str):
blank_dos = bytearray(1024)
blank_dos[0] = ord('D')
blank_dos[1] = ord('O')
blank_dos[2] = ord('S')
try:
file_write_bytes(device, 0, blank_dos, os.O_SYNC | os.O_CREAT)
except OSError as ex:
print_log(str(ex))
return False
return True
def rescan_device(device_basename: str):
os.system('echo 1 > /sys/class/block/{device_basename}/device/rescan'.format(
device_basename=device_basename
))
def format_devices(partitions: dict, old_partitions: dict, loop_counter: int):
if not is_cmd_delete_pressed():
return
clear_pressed_keys()
if not loop_counter:
# do not format on first iteration
return
new_devices = find_new_devices(partitions, old_partitions)
if not new_devices:
return
to_format = []
for ipart_dev in new_devices:
ipart_data = partitions[ipart_dev]
if ipart_data['type'] != 'disk':
continue
if not ipart_data['is_writable']:
continue
print_log(ipart_dev, 'new')
print_log(ipart_dev, 'quick-formatting device')
to_format.append(ipart_dev)
# only one disk device at a time
break
if not to_format:
return
ipart_dev = to_format[0]
if quick_format_single_device(ipart_dev):
print_log(ipart_dev, 'scanning')
rescan_device(ipart_data['device_basename'])
del partitions[ipart_dev]
def is_cmd_delete_pressed() -> bool:
return key_cmd_pressed and key_delete_pressed
def is_cmd_shift_pressed() -> bool:
return key_cmd_pressed and key_shift_pressed
def clear_pressed_keys():
global key_cmd_pressed
global key_delete_pressed
global key_shift_pressed
key_cmd_pressed = False
key_delete_pressed = False
key_shift_pressed = False
def on_key_press(key):
global key_cmd_pressed
global key_delete_pressed
global key_shift_pressed
if key == Key.cmd:
key_cmd_pressed = True
if key == Key.delete:
key_delete_pressed = True
if key == Key.shift:
key_shift_pressed = True
def on_key_release(key):
global key_cmd_pressed
global key_delete_pressed
global key_shift_pressed
if key == Key.cmd:
key_cmd_pressed = False
if key == Key.delete:
key_delete_pressed = False
if key == Key.shift:
key_shift_pressed = False
def init_keyboard_listener():
keyboard_listener = Listener(
on_press=on_key_press,
on_release=on_key_release
)
keyboard_listener.start()
def init_async_file_ops():
print_log('Init AsyncFileOps')
async_file_ops = AsyncFileOps()
async_file_ops.start()
return async_file_ops
def find_physical_cdrom_drives():
hwinfo_buf = StringIO()
cdrom_data_started = False
ret = []
# hwinfo --cdrom --short
sh.hwinfo('--cdrom', '--short', _out=hwinfo_buf)
for line in hwinfo_buf.getvalue().splitlines():
line = line.strip()
if not line:
continue
if line == 'cdrom:':
cdrom_data_started = True
continue
if not cdrom_data_started:
continue
if not line.startswith('/dev/'):
continue
parts = line.split(maxsplit=1)
if len(parts) != 2:
continue
device = parts[0]
if not os.path.exists(device) or not os.path.isfile(device):
ret.append(device)
return ret
def update_physical_cdrom_drives(physical_cdrom_drives):
print_log('Getting information about physical cd-rom drives')
index = 0
for device in sorted(find_physical_cdrom_drives()):
physical_cdrom_drives[device] = {
'index': index,
'device': device
}
index += 1
def print_physical_cdrom_drives(physical_cdrom_drives):
print_log('Physical cd-rom drives:')
for key, drive_data in physical_cdrom_drives.items():
print_log(key)
print_log(' index: ' + str(drive_data['index']))
print_log(' device: ' + drive_data['device'])
print_log()
def find_physical_floppy_drives():
ufiformat_buf = StringIO()
ret = []
# ufiformat --inquire --quiet
sh.ufiformat('--inquire', '--quiet', _out=ufiformat_buf)
for line in ufiformat_buf.getvalue().splitlines():
line = line.strip()
if not line:
continue
parts = line.split()
if len(parts) != 2:
continue
device = parts[0]
if not os.path.exists(device) or not os.path.isfile(device):
ret.append(device)
return ret
def update_physical_floppy_drives(physical_floppy_drives):
print_log('Getting information about physical floppy drives')
index = 0
for device in sorted(find_physical_floppy_drives()):
physical_floppy_drives[device] = {
'index': index,
'device': device
}
index += 1
def print_physical_floppy_drives(physical_floppy_drives):
print_log('Physical floppy drives:')
for key, drive_data in physical_floppy_drives.items():
print_log(key)
print_log(' index: ' + str(drive_data['index']))
print_log(' device: ' + drive_data['device'])
print_log()
def main():
partitions = None
old_partitions = None
disk_devices = {}
loop_counter = 0
physical_floppy_drives = OrderedDict()
physical_cdrom_drives = OrderedDict()
print_app_version()
check_pre_requirements()
init_logger()
unmount_fuse_mountpoint()
mkdir_fuse_mountpoint()
# # uncomment this to enable FUSE logging
# logging.basicConfig(level=logging.DEBUG)
configure_system()
init_simple_mixer_control()
async_file_ops = init_async_file_ops()
init_fuse(disk_devices, async_file_ops)
update_physical_floppy_drives(physical_floppy_drives)
print_physical_floppy_drives(physical_floppy_drives)
update_physical_cdrom_drives(physical_cdrom_drives)
print_physical_cdrom_drives(physical_cdrom_drives)
init_keyboard_listener()
os.makedirs(CACHED_ADFS_DIR, exist_ok=True)
try:
while True:
if not MAIN_LOOP_MAX_COUNTER or loop_counter < MAIN_LOOP_MAX_COUNTER:
partitions = get_partitions2(
physical_cdrom_drives,
physical_floppy_drives
)
if partitions is not None:
if partitions != old_partitions:
# something changed
print_partitions(partitions)
format_devices(partitions, old_partitions, loop_counter)
update_disk_devices(partitions, disk_devices)
affect_fs_disk_devices(disk_devices)
if remove_known_disk_devices(partitions, disk_devices):
affect_fs_disk_devices(disk_devices)
old_partitions = partitions
loop_counter += 1
unmute_system_sound()
enable_power_led()
time.sleep(100 / 1000)
time.sleep(0)
except KeyboardInterrupt as ex:
print_log('KeyboardInterrupt')
unmute_system_sound()
enable_power_led()
unmount_fuse_mountpoint()
async_file_ops.stop()
sys.exit()
if __name__ == '__main__':
main()
|
skazanyNaGlany/amipi400
|
amiga_disk_devices.py
|
amiga_disk_devices.py
|
py
| 56,855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24282938220
|
from application import app, db
from flask import redirect, render_template, request, url_for, flash
from application.child.models import Child
from application.quotes.models import Quote
from application.likes.models import Likes
from application.child.forms import ChildForm, MakeSureForm
from datetime import datetime, date
from flask_login import login_required, current_user
from wtforms import ValidationError
@app.route("/child", methods=["GET"])
def child_index():
return render_template("child/listchild.html", quotes = Child.query.all())
# Query the current user's own children
@app.route("/child/userlist/", methods=["GET"])
@login_required
def child_userchildren():
return render_template("child/ownchildren.html", find_users_children = Child.find_users_children())
@app.route("/child/newchild/")
@login_required
def child_form():
return render_template("child/newchild.html", form = ChildForm())
@app.route("/child/", methods=["GET","POST"])
@login_required
def child_create():
form = ChildForm(request.form)
if not form.validate():
return render_template("child/newchild.html", form = form)
# Check that the user does not already have a child with the same name
alreadyExistsChild = Child.query.filter_by(name=form.name.data, account_id=current_user.id).first()
if alreadyExistsChild:
form.name.errors.append("Sinulla on jo tämänniminen lapsi olemassa.")
return render_template("child/newchild.html", form = form)
c = Child(name = form.name.data, birthday = form.birthday.data)
c.account_id = current_user.id
db.session.add(c)
db.session().commit()
return redirect(url_for("child_userchildren"))
@app.route("/child/modifychild/<child_id>/", methods=["GET", "POST"])
@login_required
def child_modifychild(child_id):
# Pre-populate the form with the existing data
form=ChildForm()
child = Child.query.get(child_id)
form.name.data = child.name
form.birthday.data = child.birthday
return render_template("child/modifyChild.html", form = form, child_id = child_id)
@app.route("/child/<child_id>/", methods=["POST"])
@login_required
def child_update(child_id):
child = Child.query.get(child_id)
form = ChildForm(request.form)
if not form.validate():
return render_template("child/modifyChild.html", form = form, child_id=child_id)
# Check that the user does not already have a child with the same name
alreadyExistsChild = Child.query.filter_by(name=form.name.data, account_id=current_user.id).first()
if alreadyExistsChild and child != alreadyExistsChild:
form.name.errors.append("Sinulla on jo tämänniminen lapsi olemassa.")
return render_template("child/modifyChild.html", form = form, child_id=child_id)
child.name = form.name.data
child.birthday =form.birthday.data
db.session().commit()
return redirect(url_for("child_userchildren"))
@app.route("/child/<child_id>/delete", methods=["POST","GET"])
@login_required
def child_delete(child_id):
# Confirmation form so that a child cannot be deleted too easily
form = MakeSureForm()
return render_template("child/deletechild.html", form = form, child_id=child_id)
@app.route("/child/<child_id>/del", methods=["POST"])
@login_required
def child_deleteConfirm(child_id):
form = MakeSureForm(request.form)
ok = form.name.data
# If the confirmation form contains the expected value that confirms the deletion
if ok == "x":
c = Child.query.get(child_id)
# Find the child's quotes and delete them together with their likes
q = Quote.query.filter(Quote.child_id == child_id)
for quote in q:
likes = Likes.query.filter(Likes.quote_id==quote.id)
for like in likes:
db.session.delete(like)
db.session().commit()
db.session.delete(quote)
db.session().commit()
# Delete the child
db.session().delete(c)
db.session().commit()
flash("Lapsi poistettu onnistuneesti", category="success")
return redirect(url_for("child_userchildren"))
flash("Lasta ei poistettu", category="warning")
return redirect(url_for("child_userchildren"))
# Display a single child's details
@app.route("/child/showchild/<child_id>")
@login_required
def child_showOne(child_id):
child = Child.query.get(child_id)
return render_template("child/showchild.html", child_id=child_id, child=child)
|
millalin/Kids-Say-the-Darndest-Things
|
application/child/views.py
|
views.py
|
py
| 4,587 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73814836346
|
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if(len(strs) == 1):
return strs[0]
infoDict = dict()
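# count every prefix of every word; a prefix shared by all words ends up with a
# count of len(strs)-1 (the first occurrence initialises it to 0)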
for words in strs:
key = ""
for letters in words:
key += letters
if(key in infoDict):
infoDict[key] = infoDict[key] + 1
else:
infoDict[key] = 0
key = ""
maxVal = 0
maxLen = 0
result = ""
for key,val in infoDict.items():
if(len(key) > maxLen and val >= maxVal and val != 0 and val == len(strs)-1):
maxVal = val
maxLen = len(key)
result = key
return result
|
davidyip50/WallBreakers
|
stringManipulation/longestCommonPrefix.py
|
longestCommonPrefix.py
|
py
| 846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33560481799
|
"""Function to get time of execution of a function."""
# import time
# a = input("Ente a value:")
# def multiply_by_2(a):
# a = a * 2
# result = multiply_by_2(a)
from time import process_time # importing time
def main_function(decorator_function): # decorator: wraps the given function and times its execution
def sub_function():
start_time = process_time()
print("before the function")
deco = decorator_function()
print("after running function")
end_time = process_time()
print("Time took for function: ", end_time-start_time)
return deco
return sub_function
def sending_function(): # the function to be wrapped and timed by the decorator
print("on function")
return "test_sucess"
send_function = main_function(sending_function)
start_time = process_time()
print(send_function()) # calling the assigned main function
end_time = process_time()
print("Time took for function: ", end_time-start_time)
|
alenantony/Alokin-Task
|
Day4/decorator.py
|
decorator.py
|
py
| 933 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4635541375
|
import numpy as np
import tensorflow as tf
import json
from util import Dataset, load_data
from random import randint
import os
eid = 'e100-b50-h512-adam'
n_epochs = 100
batch_size = 50
show_steps = 1
input_dim = 784
latent_dim = 100
hidden_dim = 512
statistics = {
'architechture': '3 layers, {}-{}-{}/{}'.format(hidden_dim, hidden_dim, latent_dim, input_dim),
'reconstruction_loss': [],
'generator_loss': [],
'discriminator_loss': [],
'encoded_feature_vector': [],
'original_images': [],
'encoded_images': [],
'reconstruct_images': [],
'reconstruct_from_random': [],
}
statistics_file = 'statistics/'+eid
print('id: ', eid)
print('number of epochs = {:d}'.format(n_epochs))
print('batch_size = {:d}'.format(batch_size))
# Load data
X_train = load_data('../data/data.npy') # (2000, 784)
label_train = load_data('../data/label.npy') # (2000,)
train_dataset = Dataset(X_train, label_train, batch_size)
n_train_samples = X_train.shape[0]
n_iters = int(n_epochs * n_train_samples / batch_size)
print('number of iterations = {:d}'.format(n_iters))
def weight_variable(shape):
initial = tf.random_normal(shape, stddev=0.01)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.fill(shape, 0.1)
return tf.Variable(initial)
weight = {
'enc_1': weight_variable([input_dim, hidden_dim]),
'enc_2': weight_variable([hidden_dim, hidden_dim]),
'enc_3': weight_variable([hidden_dim, latent_dim]),
'dec_1': weight_variable([latent_dim, hidden_dim]),
'dec_2': weight_variable([hidden_dim, hidden_dim]),
'dec_3': weight_variable([hidden_dim, input_dim]),
'dis_1': weight_variable([latent_dim, hidden_dim]),
'dis_2': weight_variable([hidden_dim, hidden_dim]),
'dis_3': weight_variable([hidden_dim, 1]),
}
bias = {
'enc_1': bias_variable([hidden_dim]),
'enc_2': bias_variable([hidden_dim]),
'enc_3': bias_variable([latent_dim]),
'dec_1': bias_variable([hidden_dim]),
'dec_2': bias_variable([hidden_dim]),
'dec_3': bias_variable([input_dim]),
'dis_1': bias_variable([hidden_dim]),
'dis_2': bias_variable([hidden_dim]),
'dis_3': bias_variable([1]),
}
def dense(x, W, b, activation):
out = tf.add(tf.matmul(x, W), b)
out = tf.layers.batch_normalization(out)
if activation == 'relu':
out = tf.nn.relu(out)
elif activation == 'lrelu':
out = tf.maximum(out, 0.2*out)  # leaky ReLU with slope 0.2
elif activation == 'sigmoid':
out = tf.nn.sigmoid(out)
return out
def encoder(x):
#input_layer = tf.nn.dropout(x, 0.8)
h = dense(x, weight['enc_1'], bias['enc_1'], 'relu')
h = dense(h, weight['enc_2'], bias['enc_2'], 'relu')
h = dense(h, weight['enc_3'], bias['enc_3'], 'relu')
return h
def decoder(x):
h = dense(x, weight['dec_1'], bias['dec_1'], 'relu')
h = dense(h, weight['dec_2'], bias['dec_2'], 'relu')
h = dense(h, weight['dec_3'], bias['dec_3'], 'sigmoid')
return h
def discriminator(x):
h = dense(x, weight['dis_1'], bias['dis_1'], 'relu')
h = dense(h, weight['dis_2'], bias['dis_2'], 'relu')
h = dense(h, weight['dis_3'], bias['dis_3'], 'sigmoid')
return h
def draw_gaussian(dimension, n_samples):
return np.random.standard_normal((n_samples, dimension)).astype('float32')
def draw_multivariate_gaussian(dimension, n_samples):
means = np.zeros(dimension)
cov_matrix = np.identity(dimension)
return np.random.multivariate_normal(means, cov_matrix, n_samples).astype('float32')
# Network to train
x = tf.placeholder('float', [None, 784])
prior_samples = tf.placeholder('float', [batch_size, latent_dim])
code = encoder(x)
reconstruct = decoder(code)
discrim_prior = discriminator(prior_samples)
discrim_code = discriminator(code)
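# adversarial autoencoder losses: the discriminator separates prior samples from encoded
# codes, the encoder (generator) tries to fool it, and reconstruction is an L1 penalty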
loss_discriminator = tf.negative(tf.reduce_mean(tf.log(discrim_prior+1e-9)) + tf.reduce_mean(tf.log(1.0-discrim_code+1e-9)))
loss_encoder = tf.reduce_mean(tf.log(1.0-discrim_code+1e-9))
loss_reconstruct = tf.reduce_sum(tf.abs(x - reconstruct))
decay_step = int(100 * n_train_samples / batch_size)
discriminator_learning_rate = tf.train.piecewise_constant(0, [decay_step], [0.001, 0.0001])
generator_learning_rate = tf.train.piecewise_constant(0, [decay_step], [0.001, 0.0001])
reconstruct_learning_rate = tf.train.piecewise_constant(0, [decay_step], [0.001, 0.0001])
train_discriminator = tf.train.AdamOptimizer(0.001).minimize(loss_discriminator)
train_generator = tf.train.AdamOptimizer(0.001).minimize(loss_encoder)
train_reconstruct = tf.train.AdamOptimizer(0.001).minimize(loss_reconstruct)
#train_generator_and_reconstruct = tf.train.AdamOptimizer(0.001).minimize(loss_encoder+loss_reconstruct)
# Reconstruct from random distribution with trained weights
specified_code = tf.placeholder('float', [None, 100])
reconstruct_specified_code = decoder(specified_code)
def record_loss(sess, X):
iterations = int(X.shape[0] / batch_size)
reconstruction_loss, generator_loss, discriminator_loss = 0.0, 0.0, 0.0
p = 0
for i in range(iterations):
reconstruction_loss += sess.run(loss_reconstruct, feed_dict={x: X[p:p+batch_size]}).tolist()
generator_loss += sess.run(loss_encoder, feed_dict={x: X[p:p+batch_size]}).tolist()
draw_prior_samples = draw_multivariate_gaussian(latent_dim, batch_size)
discriminator_loss += sess.run(loss_discriminator, feed_dict={x: X[p:p+batch_size], prior_samples: draw_prior_samples}).tolist()
p += batch_size
# Average
reconstruction_loss /= X.shape[0]
generator_loss /= X.shape[0]
discriminator_loss /= X.shape[0]
# Record
statistics['reconstruction_loss'].append(reconstruction_loss)
statistics['generator_loss'].append(generator_loss)
statistics['discriminator_loss'].append(discriminator_loss)
print('Loss: reconstruction = {:.8f}, generator = {:.24f}, discriminator = {:.20f}'.format(reconstruction_loss, generator_loss, discriminator_loss))
def extract_encoded_data(sess):
iterations = int(train_dataset.X.shape[0] / batch_size)
p = 0
for i in range(iterations):
if i == 0:
encoded_feature_vector = sess.run(code, feed_dict={x: train_dataset.X[p:p+batch_size]})
print(encoded_feature_vector.shape)
else:
encoded_feature_vector = np.append(encoded_feature_vector, sess.run(code, feed_dict={x: train_dataset.X[p:p+batch_size]}), axis=0)
print(encoded_feature_vector.shape)
p += batch_size
label = train_dataset.Y
statistics['encoded_feature_vector'] = encoded_feature_vector
def extract_image(sess):
num_images = 10
ori_images = train_dataset.X[0:num_images+1]
encoded_images = sess.run(code, feed_dict={x: ori_images})
reconstruct_images = sess.run(reconstruct, feed_dict={x: ori_images})
statistics['original_images'] = ori_images
statistics['encoded_images'] = encoded_images
statistics['reconstruct_images'] = reconstruct_images
saver = tf.train.Saver()
# Train the network
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
#saver.restore(sess, 'models/%s/%s.ckpt' % (eid, 533))
record_loss(sess, train_dataset.X)
k = 1
new_iters = int(n_iters/k)
for it in range(new_iters):
# Shuffle data once for each epoch
if it % int(new_iters/n_epochs) == 0:
train_dataset.shuffle()
# Train
for i in range(k):
next_x, _ = train_dataset.next_batch()
draw_prior_samples = draw_multivariate_gaussian(latent_dim, batch_size)
sess.run(train_discriminator, feed_dict={x: next_x, prior_samples: draw_prior_samples})
sess.run(train_generator, feed_dict={x: next_x})
sess.run(train_reconstruct, feed_dict={x: next_x})
# Show loss
if it % show_steps == 0:
print('Iterations %5d: ' %(it+1) , end='')
record_loss(sess, train_dataset.X)
extract_encoded_data(sess)
extract_image(sess)
# Save the model
if not os.path.exists('models/'+eid):
os.makedirs('models/'+eid)
save_path = saver.save(sess, 'models/%s/%s.ckpt' % (eid, n_iters))
print('Model saved in file: %s' % save_path)
# Reconstruct with the same weights
with tf.Session() as sess:
print('Model restore from: %s' % save_path)
saver.restore(sess, save_path)#'models/%s/%s.ckpt' % (eid, 533))
draw_prior_samples = draw_multivariate_gaussian(latent_dim, batch_size)
statistics['reconstruct_from_random'] = sess.run(reconstruct_specified_code, feed_dict={specified_code: draw_prior_samples})
np.savez(statistics_file,
reconstruction_loss=statistics['reconstruction_loss'], generator_loss=statistics['generator_loss'],
discriminator_loss=statistics['discriminator_loss'],
original_images=statistics['original_images'], encoded_images=statistics['encoded_images'], reconstruct_images=statistics['reconstruct_images'],
encoded_feature_vector=statistics['encoded_feature_vector'], label = train_dataset.Y,
random_reconstruct_img=statistics['reconstruct_from_random'],
architechture=statistics['architechture'])
print('statistics file saved in: {}'.format(statistics_file))
|
ChiWeiHsiao/Deep-Learning-Assignment
|
6-AAE/aae.py
|
aae.py
|
py
| 9,201 |
python
|
en
|
code
| 3 |
github-code
|
6
|
14837490064
|
from django.urls import path
from . import views
urlpatterns = [
path('products/', views.product_list, name='product_list'),
path('product/<int:product_pk>/', views.product_detail, name='product_detail'),
path('basket/', views.product_basket, name='product_basket'),
path('product/<int:product_pk>/add_to_basket', views.add_to_basket, name='add_to_basket'),
path('product_delete/<int:product_pk>', views.remove_from_basket, name='remove_from_basket'),
path('product_remove/<int:product_pk>', views.remove_all_products, name='remove_all_products'),
path('delete_all/', views.delete_all, name='delete_all'),
path('order/', views.order, name='order'),
path('orders_history/', views.orders_history, name='orders_history'),
path('add_to_favorites/<int:product_pk>', views.add_to_favorites, name='add_to_favorites')
]
|
meeeeeeeh/djangoblog
|
shop/urls.py
|
urls.py
|
py
| 855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24930679414
|
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.contrib import messages
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, JsonResponse
from .models import *
from .forms import questionForm
from django.core import serializers
# Create your views here.
def main(request):
# allQuestions = questions.objects.all()
# order by vote
if request.method == 'POST':
title = request.POST['title']
squestion = questions.objects.filter(title__icontains=title)
squestion = serializers.serialize('json', squestion)
return JsonResponse({'questions':squestion})
# squestions = questions.objects.filter(time__gt = datetime.now() - timedelta(days=100))[0:30]
# squestions = questions.objects.filter(answered=False)[0:30]
squestions = questions.objects.all()
context = {'questions':squestions}
return render(request,'main.html',context)
@login_required(login_url='login')
def addQuestion(request,pk):
if request.method == 'POST':
questioner = User.objects.get(id=pk)
if request.FILES:
newquestion = questions.objects.create(
questioner=questioner,
title = request.POST['title'],
question_image = request.FILES['question_image']
)
else:
newquestion = questions.objects.create(
questioner=questioner,
title = request.POST['title'],
)
messages.info(request,'Question Added Successfully')
return redirect('main')
return render(request,'main.html')
def loadQuestion(request,pk):
question = questions.objects.get(id=pk)
sanswers = answers.objects.filter(question=question)
if request.method == 'POST':
answerer = User.objects.get(id=request.POST['userid'])
if request.FILES:
newanswer = answers.objects.create(
answerer = answerer,
question=question,
answer = request.POST['answer'],
answer_image=request.FILES['answer_image']
)
else:
newanswer = answers.objects.create(
answerer = answerer,
question=question,
answer = request.POST['answer']
)
messages.info(request,'Answer Added Successfully')
return HttpResponseRedirect(request.path_info)
context={'question':question,'answers':sanswers}
print(request.path_info)
return render(request,'answers/question.html',context)
@login_required(login_url='login')
def userQuestions(request):
user = User.objects.get(id=request.user.id)
userquestions = questions.objects.filter(questioner=user)
context={'questions':userquestions}
return render(request,'answers/userquestion.html',context)
@login_required(login_url='login')
def editQuestion(request,pk):
question = questions.objects.get(id=pk)
form = questionForm(instance=question)
if request.method == 'POST':
editedquestion = questionForm(request.POST,request.FILES,instance=question)
if editedquestion.is_valid():
editedquestion.save()
return redirect('userquestions')
context={'form':form,'question':question}
return render(request,'answers/editquestion.html',context)
@login_required(login_url='login')
def deleteQuestion(request,pk):
question = questions.objects.get(id=pk)
question.delete()
messages.info(request,'Question deleted successfully')
return redirect('userquestions')
|
SachinBhattarai0/QueAns
|
answers/views.py
|
views.py
|
py
| 3,662 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26140999116
|
arr = list(map(int, input().split()))
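# collect the distinct values and answer "Yes" only if exactly two distinct values appear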
found = []
for n in arr:
flag = True
for f in found:
if n == f:
flag = False
if flag:
found.append(n)
print("Yes" if len(found) == 2 else "No")
|
tomon9086/atcoder
|
python/abc155/abc155_a/Main.py
|
Main.py
|
py
| 203 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16208817026
|
#-*- coding: UTF-8 -*-
'''
@author: chenwuji
Reads the raw data file and saves the records into per-day files.
'''
import tools
alldata = {}
map_dict = {}
global_count = 1
def read_data(filename):
f = open(filename)
for eachline in f:
if(len(eachline.split('values (')) < 2):
continue
eachline = eachline.decode('GBK').encode('UTF-8')
# print eachline
basic_list1 = eachline.split('\n')[0].split('\t')[0].split('values (')[1].split('to_timestamp')[0].split(',')
pass
intersection_name = basic_list1[0].split('\'')[1]
lane_num = basic_list1[1]
if len(basic_list1[2].split('\'')) > 1:
direction = basic_list1[2].split('\'')[1]
else:
direction = basic_list1[2]
id = basic_list1[3].split('\'')[1]
if id == '-':
pass
if id in map_dict:
id = map_dict[id]
else:
global global_count
map_dict[id] = global_count
id = global_count
global_count = global_count + 1
vehicle_color = basic_list1[4].split('\'')[1]
time = eachline.split('to_timestamp(\'')[1].split('.')[0]
speed = int(eachline.split('HH24:MI:SS.ff\'),\'')[1].split('\'')[0])
tools.writeToFile('data_sort_by_date/' + time.split(' ')[0] + '.csv', str(id) + ',' + intersection_name + ',' +lane_num
+ ',' +direction + ',' +vehicle_color + ',' + time + ',' + str(speed))
if __name__ == '__main__':
filename = 'data/20160301-10.sql'
read_data(filename)
filename = 'data/20160311-20.sql'
read_data(filename)
filename = 'data/20160320-31.sql'
read_data(filename)
tools.toFileWithPickle('mapping_dict', map_dict)
|
chenwuji91/vehicle
|
src_1_sql_to_day_data/data_process.py
|
data_process.py
|
py
| 1,769 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41945752494
|
# Big Hollywood studios often watermark each copy of a script with an actor's name.
# If that actor leaks the script, the watermark reveals whose copy it was,
# so the studio knows who leaked it.
# Here we apply a watermark across every page of the PDF in the same way.
import PyPDF2
template = PyPDF2.PdfFileReader(open('super.pdf', 'rb'))
watermark = PyPDF2.PdfFileReader(open('wtr.pdf', 'rb'))
output = PyPDF2.PdfFileWriter()
for i in range(template.getNumPages()):
page = template.getPage(i)
page.mergePage(watermark.getPage(0))
output.addPage(page)
with open('watermarked.pdf', 'wb') as file:
output.write(file)
print('watermark merged with pdf')
|
hyraja/python-starter
|
12.scripting python (projects)/pdf with python/03.pdf watermark.py
|
03.pdf watermark.py
|
py
| 702 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3439984211
|
class Solution(object):
def anagramMappings(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
dic = {}
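# map each value in B to all of its indices; duplicates are handed out from the back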
for i, num in enumerate(B):
if num in dic:
dic[num].append(i)
else:
dic[num] = [i]
res = [-1 for i in range(len(A))]
for i, num in enumerate(A):
res[i] = dic[num][-1]
dic[num].pop()
return res
|
cuiy0006/Algorithms
|
leetcode/760. Find Anagram Mappings.py
|
760. Find Anagram Mappings.py
|
py
| 487 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70422466107
|
from linked_list import MyList, ListNode
from myclass import Circle
a=Circle(10,1,2)
b=Circle(8,2,2)
c=Circle(12,3,1)
lst = MyList()
lst.enqueue(a)
lst.enqueue(b)
lst.enqueue(c)
print("obj is added")
for i in lst:
i.values()
lst.dequeue()
print("Dequeued")
for i in lst:
i.values()
|
meruert111/seminar3
|
main.py
|
main.py
|
py
| 290 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5528516422
|
from datetime import datetime
from finnhub import Client
from settings.constants import FINHUB_API_KEY
class FinhubFetcher:
def __init__(self, symbol: str) -> None:
self.symbol = symbol
def _init_client(self) -> None:
return Client(FINHUB_API_KEY)
def _get_params(self, resolution: str) -> dict:
now = datetime.now()
_from = int(datetime(now.year, 1, 1, 0, 0, 0, 0).timestamp()) # 1 January of current year
to = int(now.timestamp())
return {
"symbol": f"BINANCE:{self.symbol}USDT",
"resolution": resolution,
"_from": _from,
"to": to,
}
def get_crypto_candles(self, resolution: str = "D") -> dict:
client = self._init_client()
params = self._get_params(resolution)
response = client.crypto_candles(**params)
if response.get("s") == "no_data":
return {}
return response
|
VladisIove/darkstore
|
portfolio_manager/services/fetchers/finnhub.py
|
finnhub.py
|
py
| 949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1720170439
|
from wazirx_sapi_client.rest import Client
from wazirx_sapi_client.websocket import WebsocketClient
import time
import websocket,json, pprint
from websocket import create_connection
from time import sleep
import logging
import pandas as pd
import asyncio
import socket, threading
import json, sys, os, time, csv, requests, subprocess  # subprocess is used by gen_sign()
from flask import Flask,request
from flask import render_template
from flask import current_app as app
from os.path import exists
file_exists = exists("config.py")
if file_exists:
import config
api_key = config.API_KEY
secret_key = config.SECRET_KEY
client = Client(api_key=api_key, secret_key=secret_key)
print(client.send("ping"))
global wes
wes = create_connection("wss://stream.wazirx.com/stream")
print(wes)
print("connection true")
# api_key = config.API_KEY
# secret_key = config.SECRET_KEY
app = Flask(__name__)
app.app_context().push()
def sellTrail(price):
return(client.send('create_order',
{"symbol": "ethinr", "side": "sell", "type": "stoplimit", "price": (price-price*0.003), "stopPrice":price,"quantity": quan, "recvWindow": 5000,
"timestamp": int(time.time()*1000)}))
def updateTrail(tick,orderID,price,trail_tage,quan):
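# trailing stop: cancel the existing stop-limit sell and re-place it trail_tage below the new price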
try:
print(client.send('cancel_order',
{"symbol": tick, "orderId": orderID, "recvWindow": 5000, "timestamp": int(time.time()*1000)}))
except:
sleep(10)
print(client.send('cancel_order',
{"symbol": tick, "orderId": orderID, "recvWindow": 5000, "timestamp": int(time.time()*1000)}))
sleep(5)
try:
return(client.send('create_order',
{"symbol": tick, "side": "sell", "type": "stop_limit", "price": (price-price*trail_tage), "stopPrice":(price-price*(trail_tage-0.001)),"quantity": quan, "recvWindow": 5000,
"timestamp": int(time.time()*1000)}))
except:
sleep(5)
return(client.send('create_order',
{"symbol": tick, "side": "sell", "type": "stop_limit", "price": (price-price*trail_tage), "stopPrice":(price-price*(trail_tage-0.001)),"quantity": quan, "recvWindow": 5000,
"timestamp": int(time.time()*1000)}))
def gen_sign(query):
t=int(time.time())
echo = subprocess.Popen(['echo','-n',query], stdout=subprocess.PIPE, shell=False)
hmac_key=subprocess.Popen(['openssl','dgst','-sha256','-hmac',API_SECRET],stdin=echo.stdout,stdout=subprocess.PIPE,shell=False)
output = hmac_key.communicate()[0]
output=str(output.strip())
output=output.replace("b'(stdin)= ",'')
output=output.replace("'" ,'')
print(output)
def get_order(orderID):
try:
return(client.send('query_order',
{"orderId": orderID, "recvWindow": 10000, "timestamp": int(time.time() * 1000)}))
except:
sleep(10)
return(client.send('query_order',
{"orderId": orderID, "recvWindow": 10000, "timestamp": int(time.time() * 1000)}))
async def send_heartbeat( *args):
while True:
print(wes.send(json.dumps({'event': 'ping'})))
print("Beat sent")
await asyncio.sleep(10*60)
@app.route("/", methods=["GET"])
def home():
file_exists = exists("config.py")
if file_exists:
# api_key = config.API_KEY
# secret_key = config.SECRET_KEY
# global client
# global wes
# client = Client(api_key=api_key, secret_key=secret_key)
# print(client.send("ping"))
# wes = create_connection("wss://stream.wazirx.com/stream")
# print(wes)
open_ord=client.send('open_orders',
{"recvWindow": 5000,
"timestamp": int(time.time()*1000)})
# print("Ticker: ")
# tick=input()
# print("Quantity: ")
# quan=float(input())
# print("Trail %: ")
# trail_tage=float(input())
# print("orderId: ")
# orderId=int(input())
# print("sellPrice: ")
# sPrice=float(input())
sleep(5)
wes.send(json.dumps({
"event": "subscribe",
"streams": ["!ticker@arr"]
}))
print(file_exists)
return render_template("dashboard.html",open_ord=open_ord,action="parameters")
else:
return render_template("login.html")
# @app.route("/dashboard", methods=["GET"])
# def dashboardShow():
# return trail(tick,quan,trail_tage,orderId,sPrice,wes)
@app.route("/dashboard", methods=["POST"])
def dashboard():
global tick
global quan
global trail_tage
global orderId
global sPrice
tick=request.form['tick']
quan=float(request.form['quan'])
trail_tage=float(request.form['trail_tage'])
orderId=request.form['orderId']
sPrice=float(request.form['sPrice'])
r=get_order(orderId)
# render_template("dashboard.html",stat=r,action="display")
trail(tick,quan,trail_tage,orderId,sPrice,wes)
# render_template("dashboard.html",stat=r,action="display")
@app.route("/login", methods=["POST"])
def login():
api_key=request.form['apiKey']
secret_key=request.form['secretKey']
save=request.form['save']
if save=='True':
file = open("config.py", "w")
file.write("API-KEY = '"+api_key+"'\n")
file.write("SECRET_KEY = '"+secret_key+"'\n")
file.close()
# global client
# global wes
client = Client(api_key=api_key, secret_key=secret_key)
print(client.send("ping"))
wes = create_connection("wss://stream.wazirx.com/stream")
print(wes)
return render_template("dashboard.html",open_ord=open_ord,action="parameters")
# _thread = threading.Thread(target=asyncio.run, args=(self.send_heartbeat(),))
# _thread.start()
def trail(tick,quan,trail_tage,orderId,sPrice,wes):
# connections = dict()
# connections["websocket"] = wes
_thread = threading.Thread(target=asyncio.run, args=(send_heartbeat(),))
_thread.start()
result = wes.recv()
res = json.loads(result)
data={}
recvd=False
while not recvd:
result = wes.recv()
res = json.loads(result)
stream=res['data']
for dc in stream:
if isinstance(dc,dict):
# print(dc['s'])
# for keys in dc:
# print(keys)
if dc['s']==tick:
data=dc
recvd=True
print("data",data['b'])
col_heads=['Bought','MinSell','SoldP','Comp','BuyOrderID','BuyStatus','SellOrderID','SellStatus']
ob = []
prices=[]
buy_order={}
rows={}
# print(data)
bestSell=float(data['a'])
bestBuy=float(data['b'])
rows['serverTime']=data['E']
rows['bestBuy']=bestBuy
rows['bestSell']=bestSell
df=pd.DataFrame()
row=pd.DataFrame()
row = row.append(rows, ignore_index=True, sort=False)
row['serverTime']= pd.to_datetime(row['serverTime'], unit='ms')
df = df.append(row, ignore_index=True, sort=False)
print(row.loc[0])
row_ls=row.values.tolist()
# print(row_ls)
prices.append(row_ls[0])
print('prices',prices)
while True:
recvd=False
while not recvd:
try:
result = wes.recv()
except:
sleep(30)
wes = create_connection("wss://stream.wazirx.com/stream")
print(wes)
sleep(5)
wes.send(json.dumps({
"event": "subscribe",
"streams": ["!ticker@arr"]
}))
# connections = dict()
# connections["websocket"] = wes
res = json.loads(result)
# pprint.pprint(res)
stream=res['data']
for dc in stream:
if isinstance(dc,dict):
if dc['s']==tick:
data=dc
recvd=True
bestBuy=float(data['b'])
bestSell=float(data['a'])
times=data['E']
rows={}
rows['serverTime']=data['E']
rows['bestBuy']=bestBuy
rows['bestSell']=bestSell
row=pd.DataFrame()
row = row.append(rows, ignore_index=True, sort=False)
row['serverTime']= pd.to_datetime(row['serverTime'], unit='ms')
df = df.append(row, ignore_index=True, sort=False)
print("Best sell price",bestSell)
row_ls=row.values.tolist()
prices.append(row_ls[0])
try:
r=get_order(orderId)
except:
sleep(10)
r=get_order(orderId)
status=r[1]
# print(status)
if status['status']=="done":
print("complete")
# render_template("dashboard.html",action="complete")
break
elif bestSell>int(sPrice):
r=updateTrail(tick,orderId,bestSell,trail_tage,quan)
stat=r[1]
orderId=stat['id']
sPrice=bestSell
print(stat)
# return render_template("dashboard.html",stat=stat,action="display")
sleep(15)
sleep(5)
app.app_context().push()
if __name__ == '__main__':
app.run(host='localhost',port=8080, debug=True)
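
# --- Added illustration (not part of the original app) ---
# A minimal, standalone sketch of the trailing-stop rule that the trail()
# loop above relies on: whenever the best ask rises above the current
# reference price, the stop is ratcheted up and never moved back down.
# updateTrail() is defined elsewhere in this project and may compute the new
# stop differently; treating trail_tage as a percentage is an assumption here.
def next_stop_price(best_ask, current_stop, trail_tage):
    """Return the updated stop price, only ever moving it upwards."""
    candidate = best_ask * (1 - trail_tage / 100.0)
    return max(current_stop, candidate)

# Example: next_stop_price(105.0, 98.0, 2.0) -> 102.9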
[record: repo=deysanjeeb/wazirX-trailstop | path=trail.py | file=trail.py | ext=py | size=9,134 bytes | program_lang=python | lang=en | doc_type=code | stars=0 | dataset=github-code | pt=6]

[next record: seq_id=15799665340]
#7,7 map --> https://etc.usf.edu/clipart/42600/42671/grid_42671_lg.gif
class Map(object):
def __init__(self, max_size=[6,6], monster_locations=[[2, 2],[4, 4]], players={}):
self.size = {'X':[0,max_size[0]],'Y':[0,max_size[1]]}
self.players = players
self.monster_locations = monster_locations
def generate_map(self):
for cell_x in range(self.size['X'][0],self.size['X'][1]):
#print('~~~~~~~~~~~~~~~~')
for cell_y in range(self.size['Y'][0],self.size['Y'][1]):
#print([cell_x,cell_y])
if [cell_x,cell_y] in self.monster_locations:
face = '👾'
elif [cell_x,cell_y] in self.player_locations():
index = self.player_locations().index([cell_x,cell_y])
face = self.players_icons()[index]
else:
face = '--'
print(f' {face} ', end="")
print('\n')
def player_locations(self):
return [self.players[n].location for n in self.players]
def players_icons(self):
return [self.players[n].icon for n in self.players]
#for i in range(0,6):
# print(' - ', end="")
class Error(Exception):
"""Base class for other exceptions"""
pass
class PlayerAlreadyRegistered(Error):
"""A player with that name already exists. Can't add it"""
pass
class Player(object):
def __init__(self,name,starting_location,armor,weapon,strenght, stamina,icon,map):
self.name = name
self.armor = armor
self.weapon = weapon
self.strenght = strenght
self.level = stamina
self.icon = icon
self.location = starting_location
self.map = map
self.register_in_the_map(self.name)
def register_in_the_map(self,name):
if name not in self.map.players:
self.map.players[name] = self
else:
raise PlayerAlreadyRegistered
def get_name(self):
return self.name
def attack(self,player):
self.weapon.attack(player)
def get_location(self):
return self.location
def newpos(self,move):
coord_change = {'N':(0,1), 'S':(0,-1), 'W':(-1,0), 'E':(1,0) }
move = coord_change[move]
self.location[0] = self.location[0] + move[0]
self.location[1] = self.location[1] + move[1]
return True
def hit(self):
pass
#class NPC(Player):
# pass
#
#class Humans(Player):
# pass
class Weapons(object):
def __init__(self,weapon_name,weapon_strenght, weapon_use_level):
self.name = weapon_name
self.strenght = weapon_strenght
        self.level = weapon_use_level
def attack(self,player):
if self.level > 0:
self.level = self.level -1
return self.strenght
else:
return 0
#More unicode icons: https://emojipedia.org/
m1 = ['Alien',[2,2],30,'Tentacules',10, 10,'👾']
m2 = {"name":'Shark',"starting_location":[4,4],"armor":5,"weapon":'mouth',"strenght":13, "stamina":10, 'icon': '🦈'}
p1 = ['David',[0,0],30,'axe',10, 10,'🚂']
p2 = {"name":'Mark',"starting_location":[5,5],"armor":20,"weapon":'knife',"strenght":11, "stamina":10, 'icon':'🏎️'}
p3 = ['Elliot',[0,5],10,'sword',30, 10,'🚎']
p4 = {"name":'Marco',"starting_location":[5,0],"armor":5,"weapon":'dagger',"strenght":13, "stamina":10, 'icon': '🚴'}
p5 = ['Felicity',[3,3],40,'shield',14, 10,'🛩️']
l = [p1,p2,p3,p4,p5]
map = Map()
list_of_players = []
for player in l:
if type(player) == type([]):
list_of_players.append(Player(*player, map=map))
elif type(player) == type({}):
list_of_players.append(Player(**player,map=map))
else:
print('incorrect input for player')
#print(list_of_players)
#print(map.player_locations())
#print(map.players['David'].location)
map.generate_map()
map.players['David'].newpos('N')
print(map.players['David'].location)
map.generate_map()
#print(map.players['David'])
#print(map.player_locations().index([3,3]))
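
# --- Added usage sketch (not part of the original script) ---
# Minimal illustration of the Weapons class: attack() returns the weapon's
# strength and consumes one use until the use level runs out. The name and
# numbers below are made up for demonstration.
axe = Weapons('axe', 10, 3)
print([axe.attack(None) for _ in range(4)])  # expected: [10, 10, 10, 0]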
[record: repo=aragaod/yaDnD | path=main.py | file=main.py | ext=py | size=4,206 bytes | program_lang=python | lang=en | doc_type=code | stars=0 | dataset=github-code | pt=6]

[next record: seq_id=8708126012]
from __future__ import unicode_literals
import datetime
import logging
import os
import tweepy as tp
from twiker.modules.tauth import Auth
class Engine(object):
"""
The main engine class for the Twiker Bot.This class includes all the api methods
Copyright (c) 2021 The Knight All rights reserved.
"""
def __init__(self, config, verbose=False):
auth = Auth(config)
# configure logging
log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# every log file will be named with the current date and would be name differently
date = str(datetime.datetime.now().strftime("%Y-%m-%d"))
time = str(datetime.datetime.now().strftime("%H-%M-%S"))
log_file = os.path.join(log_dir, "twiker_bot_" + date + "_" + time + ".log")
if auth.file_exists(log_file):
log_file = os.path.join(log_dir, "twiker_bot_" + date + "_" + time + ".log")
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=log_file,
filemode='w')
self.logger = logging.getLogger()
if verbose:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
self.logger.info("Starting Twiker Bot")
self.api = auth.access()
# print all information about a user
def user_info(self, username=None):
"""
Get User info by username
:param username: username to get info on twitter
:return:
"""
if username is None:
username = self.api.me().screen_name
try:
info = self.api.get_user(screen_name=username)
print("Name: " + str(info.name))
print("Screen Name: " + str(info.screen_name))
print("User ID: " + str(info.id))
print("Location: " + str(info.location))
print("Description: " + str(info.description))
print("URL: " + str(info.url))
print("Followers: " + str(info.followers_count))
print("Following: " + str(info.friends_count))
print("Tweets: " + str(info.statuses_count))
print("Favorites: " + str(info.favourites_count))
print("Created at: " + str(info.created_at))
print("Time zone: " + str(info.time_zone))
print("Geo enabled: " + str(info.geo_enabled))
print("Verified: " + str(info.verified))
print("Lang: " + str(info.lang))
try:
print("Status: " + str(info.status.text))
except:
print("Status: " + "None")
print("Profile background color: " + str(info.profile_background_color))
print("Profile background image: " + str(info.profile_background_image_url))
print("Profile background image url: " + str(info.profile_background_image_url_https))
print("Profile background tile: " + str(info.profile_background_tile))
print("Profile link color: " + str(info.profile_link_color))
print("Profile sidebar border color: " + str(info.profile_sidebar_border_color))
print("Profile sidebar fill color: " + str(info.profile_sidebar_fill_color))
print("Profile text color: " + str(info.profile_text_color))
print("Profile use background image: " + str(info.profile_use_background_image))
print("Profile image: " + str(info.profile_image_url))
print("Profile image url: " + str(info.profile_image_url_https))
print("Profile image url: " + str(info.profile_background_image_url_https))
print("Profile image url: " + str(info.profile_background_image_url))
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# tweet a message
def tweet(self, message, media=None):
"""
Tweet a message
:param message: message to tweet
:param media: media to tweet
:return:
"""
self.logger.debug("Tweeting message: %s", message)
self.logger.info("Tweeting message: %s", message)
try:
if media is None:
self.api.update_status(status=message)
else:
self.api.update_with_media(media, status=message)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get user timeline
    def get_timeline(self, username):
        """
        Get user timeline (minimal implementation: prints each tweet's text)
        :param username: username to get timeline
        :return:
        """
        try:
            for tweet in tp.Cursor(self.api.user_timeline, screen_name=username).items():
                print("Tweet: %s" % tweet.text)
        except tp.TweepError as e:
            logging.error("Error: %s", e)
            print(e)
def retweet(self, tweet_id):
"""
Retweet a tweet by tweet.id
:param tweet_id: tweet id to retweet a tweet
:return:
"""
self.logger.debug("Retweeting tweet with id: %s", tweet_id)
try:
self.api.retweet(tweet_id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def reply(self, message, tweet_id):
"""
Reply to a tweet by tweet.id
:param message: message to reply
:param tweet_id: tweet id to reply
:return:
"""
try:
self.api.update_status(status=message,
in_reply_to_status_id=tweet_id,
auto_populate_reply_metadata=True)
logging.debug("Replied to tweet with id: %s", tweet_id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def follow(self, username):
"""
Follow a user on twitter
:param username: username to follow on twitter
:return:
"""
try:
self.api.create_friendship(screen_name=username)
logging.debug("Followed user: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unfollow(self, username):
"""
Unfollow a user on twitter
:param username: username to unfollow on twitter
:return:
"""
try:
self.api.destroy_friendship(screen_name=username)
logging.debug("Unfollowed user: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def block(self, username):
"""
Block a user on twitter
:param username: username to block on twitter
:return:
"""
try:
self.api.create_block(screen_name=username)
logging.debug("Blocked user: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unblock(self, username):
"""
Unblock a user on twitter
:param username: username to unblock on twitter
:return:
"""
try:
self.api.destroy_block(screen_name=username)
logging.debug("Unblocked user: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def get_user_id(self, username):
"""
Get User id by username
:param username: username to get id on twitter
"""
try:
            return self.api.get_user(screen_name=username).id
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# reply on a hashtag
def reply_hashtag(self, message, hashtag):
"""
Reply to all tweet on a hashtag
:param message: message to in reply with hashtag
:param hashtag: hashtag on which method have to reply
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.reply(message, tweet_id.id)
logging.debug("Replied to tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# retweet on a hashtag
def retweet_hashtag(self, hashtag):
"""
Retweet all tweet on a hashtag
:param hashtag: hashtag on which method have to retweet
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.retweet(tweet_id.id)
logging.debug("Retweeted tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unretweet_hashtag(self, hashtag):
"""
Unretweet all tweet on a hashtag
:param hashtag: hashtag on which method have to unretweet
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
                self.api.unretweet(tweet_id.id)
logging.debug("Unretweeted tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# like all tweet on a hashtag
def like_hashtag(self, hashtag):
"""
Like all tweet on a hashtag
:param hashtag: hashtag on which method have to like
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
try:
self.api.create_favorite(tweet_id.id)
except Exception as e:
logging.error("Error: %s", e)
print(e)
logging.debug("Liked tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
logging.info("Liked tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unlike_hashtag(self, hashtag):
"""
Unlike all tweet on a hashtag
:param hashtag: hashtag on which method have to unlike
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.api.destroy_favorite(tweet_id.id)
logging.debug("Unliked tweet on hashtag: %s tweet_id: %s", hashtag, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# follow all user on a hashtag
def follow_hashtag(self, hashtag):
"""
Follow all user on a hashtag
:param hashtag: hashtag on which method have to follow
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.follow(tweet_id.user.screen_name)
logging.debug("Followed user: %s", tweet_id.user.screen_name)
logging.info("Followed user: %s", tweet_id.user.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def unfollow_hashtag(self, hashtag):
"""
Unfollow all user on a hashtag
:param hashtag: hashtag on which method have to unfollow
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
self.unfollow(tweet_id.user.screen_name)
logging.debug("Unfollowed user: %s", tweet_id.user.screen_name)
logging.info("Unfollowed user: %s", tweet_id.user.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# like all tweet of a user
def like_user(self, username):
"""
Like all tweet of a user
:param username: username on which method have to like
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.user_timeline, screen_name=username).items():
self.api.create_favorite(tweet_id.id)
logging.debug("Liked tweet of user: %s tweet_id: %s", username, tweet_id.id)
logging.info("Liked tweet of user: %s tweet_id: %s", username, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# unlike all tweet of a user
def unlike_user(self, username):
"""
Unlike all tweet of a user
:param username: username on which method have to unlike
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.user_timeline, screen_name=username).items():
self.api.destroy_favorite(tweet_id.id)
logging.debug("Unliked tweet of user: %s tweet_id: %s", username, tweet_id.id)
logging.info("Unliked tweet of user: %s tweet_id: %s", username, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# retweet all tweet of a user
def retweet_user(self, username):
"""
Retweet all tweet of a user
:param username: username on which method have to retweet
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.user_timeline, screen_name=username).items():
self.api.retweet(tweet_id.id)
logging.debug("Retweeted tweet of user: %s tweet_id: %s", username, tweet_id.id)
logging.info("Retweeted tweet of user: %s tweet_id: %s", username, tweet_id.id)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# dm to single user
def dm(self, username, message, media=None):
"""
Direct message to single user
:param username: username on which method have to dm
:param message: message to dm
:param media: media to dm
:return:
"""
try:
recipient = self.api.get_user(username)
self.api.send_direct_message(recipient_id=recipient.id_str, text=message)
logging.debug("Direct message to user sent: %s", recipient.screen_name)
logging.info("Direct message to user sent: %s", recipient.screen_name)
if media:
self.api.media_upload(media)
self.api.send_direct_message(recipient_id=recipient.id_str, text=message, attachment_type="media",
attachment_media_id=self.api.media_upload(media).media_id)
logging.debug("Direct message to user with media sent: %s", recipient.screen_name)
logging.info("Direct message to user with media sent: %s", recipient.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# dm to multiple user
def dm_multiple(self, usernames, message, media=None):
"""
Direct message to multiple user
:param usernames: list(usernames) on which method have to dm ["username1", "username2", ...]
:param message: message to dm
:return:
"""
try:
for user in usernames:
recipient = self.api.get_user(user)
try:
self.api.send_direct_message(recipient_id=recipient.id, text=message)
logging.debug("Direct message to user sent: %s", recipient.screen_name)
logging.info("Direct message to user sent: %s", recipient.screen_name)
except Exception as e:
logging.error("Error: %s", e)
print(e)
if media:
try:
self.api.media_upload(media)
self.api.send_direct_message(recipient_id=recipient.id_str, text=message,
attachment_type="media",
attachment_media_id=self.api.media_upload(media).media_id)
except Exception as e:
logging.error("Error: %s", e)
print(e)
logging.debug("Direct message to user with media sent: %s", recipient.screen_name)
logging.info("Direct message to user with media sent: %s", recipient.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# dm all user of a hashtag
def dm_hashtag(self, hashtag, message, media=None):
"""
Direct message to all user of a hashtag
:param hashtag: hashtag on which method have to dm
:param message: message to dm
:return:
"""
try:
for tweet_id in tp.Cursor(self.api.search, q=hashtag).items():
recipient = self.api.get_user(tweet_id.user.screen_name)
#
# check if user is protected or dm is disabled
users = []
if not recipient.protected:
users.append(recipient.id)
for user in users:
try:
self.api.send_direct_message(recipient_id=user, text=message)
except Exception as e:
logging.error("Error: %s", e)
print(e)
logging.debug("Direct message to user sent: %s", recipient.screen_name)
logging.info("Direct message to user sent: %s", recipient.screen_name)
print(recipient.screen_name)
if media:
try:
self.api.media_upload(media)
self.api.send_direct_message(recipient_id=user, text=message, attachment_type="media",
attachment_media_id=self.api.media_upload(media).media_id)
except Exception as e:
logging.error("Error: %s", e)
print(e)
logging.debug("Direct message to user with media sent: %s", recipient.screen_name)
logging.info("Direct message to user with media sent: %s", recipient.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def update_profile(self, *args):
"""
Update profile
possible args:
name, url, location, description, profile_link_color, include_entities, skip_status
:param args:
:return:
"""
try:
self.api.update_profile(*args)
logging.debug("Profile updated")
logging.info("Profile updated")
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# follow all followers who have followed you
def follow_followers(self):
"""
Follow all followers who have followed you
:return:
"""
try:
for follower in tp.Cursor(self.api.followers).items():
if not follower.following:
follower.follow()
logging.debug("Followed: %s", follower.screen_name)
logging.info("Followed: %s", follower.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# unfollow all followers who have followed you
def unfollow_followers(self):
"""
Unfollow all followers who have followed you
:return:
"""
try:
for follower in tp.Cursor(self.api.followers).items():
if follower.following:
follower.unfollow()
logging.debug("Unfollowed: %s", follower.screen_name)
logging.info("Unfollowed: %s", follower.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# follow users with a keyword
def follow_keyword(self, keyword, count=1):
"""
Follow users with a keyword
:param keyword: keyword to search
:param count: number of users to follow
:return:
"""
try:
for tweet in tp.Cursor(self.api.search, q=keyword).items(count):
if not tweet.user.following:
tweet.user.follow()
logging.debug("Followed: %s", tweet.user.screen_name)
logging.info("Followed: %s", tweet.user.screen_name)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get latest tweets from twitter
def get_tweets(self, count=1):
"""
Get latest tweets from twitter
:param count: number of tweets to get
:return:
"""
try:
for tweet in tp.Cursor(self.api.home_timeline).items(count):
print("Tweet: %s" % tweet.text)
print("User: %s" % tweet.user.screen_name)
print("User id: %s" % tweet.user.id)
print("Date: %s" % tweet.created_at)
logging.debug("Tweet: Feched")
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get all followers of a user
def get_followers(self, username=None):
"""
Get all followers of a user
        Note: the Twitter API rate-limits how many requests can be fetched at a time
:param username: username of user
:param count: number of followers to get
:return:
"""
if username is None:
username = self.api.me().screen_name
try:
count = 1
for follower in tp.Cursor(self.api.followers, screen_name=username).items():
# get total followers
print("Follower: %s" % follower.screen_name)
print("Follower Count: %s" % count)
print("Follower id: %s" % follower.id)
print("Follower date: %s" % follower.created_at)
count += 1
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def get_following(self, username=None):
"""
Get all following of a user
        Note: the Twitter API rate-limits how many requests can be fetched at a time
:param username: username of user
"""
if username is None:
username = self.api.me().screen_name
try:
count = 1
for follower in tp.Cursor(self.api.friends, screen_name=username).items():
# get total followers
print("Following: %s" % follower.screen_name)
print("Following Count: %s" % count)
print("Following id: %s" % follower.id)
print("Following date: %s" % follower.created_at)
count += 1
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def search_user(self, username):
"""
Search a user on twitter by username
:param username: username to search
:return:
"""
try:
self.user_info(username=username)
logging.debug("User searched: %s", username)
logging.info("User searched: %s", username)
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get random user and return username
def get_random_user(self, *args, count=1):
"""
Get random user
:param args:
        possible args:
keyword
:param count: number of users to get
:return: username
"""
try:
# get random user
            for tweet in tp.Cursor(self.api.search, q=' '.join(str(a) for a in args)).items(count):
return tweet.user.screen_name
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
def get_hashtag_tweets(self, hashtag):
"""
Get tweets by hashtag
:param hashtag: hashtag to search
:return:
"""
try:
for tweet in tp.Cursor(self.api.search, q=hashtag).items():
print("Tweet: %s" % tweet.text)
print("User: %s" % tweet.user.screen_name)
print("User id: %s" % tweet.user.id)
print("Date: %s" % tweet.created_at)
logging.debug("Tweet: Feched")
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
# get all direct messages
def get_messages(self, count=1):
"""
Get all direct messages
:param count: number of messages to get
:return:
"""
try:
message = self.api.list_direct_messages(count=count)
for msg in reversed(message):
sender = msg.message_create['sender_id']
recipient = msg.message_create['target']['recipient_id']
sender_name = self.api.get_user(sender).screen_name
recipient_name = self.api.get_user(recipient).screen_name
print("Sender: %s" % sender)
print("Sender name: %s" % sender_name)
print("Recipient: %s" % recipient)
print("Recipient name: %s" % recipient_name)
print("Message: %s" % msg.message_create['message_data']['text'])
except tp.TweepError as e:
logging.error("Error: %s", e)
print(e)
#
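
# --- Added usage sketch (not part of the original module) ---
# Rough illustration of how Engine is meant to be driven. The config value
# handed to Auth() is an assumption -- whatever twiker.modules.tauth.Auth
# actually expects (e.g. a path to a credentials file) should go there.
if __name__ == "__main__":
    engine = Engine(config="config.ini", verbose=True)  # hypothetical config
    engine.user_info("Twitter")            # print public info for an account
    engine.tweet("Hello from Twiker!")     # post a status update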
[record: repo=Twiker-Bot/twiker | path=twiker/modules/engine.py | file=engine.py | ext=py | size=26,243 bytes | program_lang=python | lang=en | doc_type=code | stars=1 | dataset=github-code | pt=6]

[next record: seq_id=38504427534]
import requests
from scraper.get_strava_access_token import refreshed_access_token
BASE_URL = 'https://www.strava.com'
ACCESS_TOKEN = refreshed_access_token()
def get_starred_segments():
print('Getting segement list')
request_dataset_url = BASE_URL + '/api/v3/segments/starred' # check https://developers.strava.com/docs/reference/ for STRAVA API REQUESTS
header = {'Authorization': 'Bearer ' + ACCESS_TOKEN}
param = {'per_page': 200, 'page': 1}
result_list = requests.get(request_dataset_url, headers=header, params=param).json()
segments_id_list = []
for i in result_list:
segments_id_list.append(i['id'])
return segments_id_list
def get_segment_details():
starred_segment_list = get_starred_segments()
detailed_segment_list = []
for i in range(len(starred_segment_list)):
request_dataset_url = BASE_URL + f'/api/v3/segments/{starred_segment_list[i]}'
header = {'Authorization': 'Bearer ' + ACCESS_TOKEN}
segment_details = requests.get(request_dataset_url, headers=header).json()
detailed_segment_list.append(segment_details)
print(f'Segment no. {starred_segment_list[i]} fetched')
return detailed_segment_list
if __name__ == '__main__':
print(get_starred_segments())
print(get_segment_details())
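
# --- Added extension sketch (not part of the original script) ---
# One way to persist the fetched segment details for later analysis; the
# output filename is an arbitrary choice.
def save_segment_details(path='starred_segments.json'):
    import json
    details = get_segment_details()
    with open(path, 'w') as f:
        json.dump(details, f, indent=2)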
[record: repo=ADV-111/Srodunia | path=scraper/request_dataset_through_api.py | file=request_dataset_through_api.py | ext=py | size=1,312 bytes | program_lang=python | lang=en | doc_type=code | stars=0 | dataset=github-code | pt=6]

[next record: seq_id=1662811500]
import csv
import os
import numpy as np
import json
from pyexcel_ods import get_data
PATTERN_TYPE = '1'
COURSE_TYPE = 'speed'
DATA_DIR = os.path.join(PATTERN_TYPE, COURSE_TYPE)
fieldnames = ['min_speed','max_speed','delay','spacing','min_angle','max_angle','num_rows','min_b_scale','max_b_scale','clear_threshold','hard_clear_threshold', 'burst_spacing', 'burst_spacing_dur', 'burst_spacing_ratio', 'bskip_hang']
# ods to csv
book = get_data(os.path.join(DATA_DIR, 's5_data.ods'))
data = list(book.values())[0]
with open('tmp.csv','w') as f:
writer = csv.writer(f)
for row in data:
writer.writerow(row)
with open('tmp.csv','r') as f:
reader = csv.DictReader(f)
for s5row in reader:
if 'X' in s5row.values():
break
s5row['num_rows'] = int(s5row['num_rows'])
for key in ['min_speed', 'max_speed', 'delay*speed']:
s5row[key] = float(s5row[key])
l1_min_speed = s5row['min_speed']*.9 - 2.5
l1_max_speed = s5row['max_speed']*.9 - 2.5
l10_min_speed = s5row['min_speed']*1.1+1.5
l10_max_speed = s5row['max_speed']*1.1+1.5
min_speeds = list(
np.append(np.linspace(
l1_min_speed, s5row['min_speed'], num=4, endpoint=False
), np.linspace(
s5row['min_speed'], l10_min_speed, num=6, endpoint=True
))
)
max_speeds = list(
np.append(np.linspace(
l1_max_speed, s5row['max_speed'], num=4, endpoint=False
), np.linspace(
s5row['max_speed'], l10_max_speed, num=6, endpoint=True
))
)
out_dir = os.path.join(DATA_DIR, 'pattern' + s5row['pattern'])
if not os.path.exists(out_dir):
os.mkdir(out_dir)
with open(os.path.join(out_dir, 'metadata.json'),'w') as f:
json.dump({
'title':'(%sS%s) Pattern %s' % (
PATTERN_TYPE
, s5row['pattern'].zfill(2)
, s5row['pattern'].zfill(2)
)
, 'pattern type': PATTERN_TYPE
, 'course type': COURSE_TYPE
}, f)
out_fn = os.path.join(out_dir, 'stages.txt')
with open(out_fn, 'w') as outf:
writer = csv.DictWriter(outf, fieldnames = fieldnames)
writer.writeheader()
for i in range(10):
min_speed = min_speeds[i]
max_speed = max_speeds[i]
avg_speed = (min_speed + max_speed)/2
delay = s5row['delay*speed'] / avg_speed
out_data = {
'min_speed': min_speed
, 'max_speed': max_speed
, 'delay': delay
, 'clear_threshold': 2000
, 'hard_clear_threshold': 4000
}
for fieldname in fieldnames:
if fieldname not in out_data:
out_data[fieldname] = s5row[fieldname]
writer.writerow(out_data)
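
# --- Worked example (comments only; the numbers are made up) ---
# For a hypothetical stage-5 row with min_speed = 10.0 and delay*speed = 30.0:
#   level-1 endpoint  = 10.0 * 0.9 - 2.5 = 6.5
#   level-10 endpoint = 10.0 * 1.1 + 1.5 = 12.5
# np.linspace(6.5, 10.0, num=4, endpoint=False) supplies levels 1-4 and
# np.linspace(10.0, 12.5, num=6, endpoint=True) supplies levels 5-10, so the
# original stage-5 value reappears exactly at level 5. Each level's delay is
# then delay*speed divided by that level's average of min and max speed,
# e.g. 30.0 / 10.0 = 3.0 at level 5 if max_speed were also 10.0.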
[record: repo=gebgebgeb/bdt | path=courses/write_speed_courses.py | file=write_speed_courses.py | ext=py | size=3,180 bytes | program_lang=python | lang=en | doc_type=code | stars=0 | dataset=github-code | pt=6]

[next record: seq_id=38269743748]
import socket
import ssl
# The server's IP address and port
HOST = "127.0.0.1"
PORT = 3131
# Path to the SSL certificate used to verify the server
ssl_cert = "server.crt"
# Create the socket and wrap it with TLS
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ssl_context.load_verify_locations(ssl_cert)
client_sock = ssl_context.wrap_socket(client_sock, server_hostname=HOST)
# Connect to the server
client_sock.connect((HOST, PORT))
print("Connected to the server")
# Send messages and read the replies
while True:
message = input("Mesajınızı girin: ")
client_sock.send(message.encode())
data = client_sock.recv(1024)
if not data:
break
print("Server'dan mesaj geldi:", data.decode())
# Close the connection
client_sock.close()
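
# --- Added sketch of a matching server side (not part of the original repo) ---
# Shows how the certificate referenced above would be used on the server.
# The key file name "server.key" is an assumption.
def run_server(host="127.0.0.1", port=3131):
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_cert_chain("server.crt", "server.key")
    with socket.create_server((host, port)) as sock:
        with ctx.wrap_socket(sock, server_side=True) as ssock:
            conn, addr = ssock.accept()
            print("Client connected:", addr)
            while True:
                data = conn.recv(1024)
                if not data:
                    break
                conn.send(b"echo: " + data)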
[record: repo=Varp0s/private_chat | path=client.py | file=client.py | ext=py | size=798 bytes | program_lang=python | lang=tr | doc_type=code | stars=0 | dataset=github-code | pt=6]

[next record: seq_id=42663409589]
import copy
import math #needed for calculation of weight and bias initialization
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import torch, torch.nn as nn, torch.nn.functional as F
import torchvision
from torchvision import transforms, models, utils
#Set seeds
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
#Import components
from . import components as cts
from . import custom_models_diseasereps as cmdr
class AxialNet_Mask(nn.Module):
"""Identical implementation to the one in custom_models_base.py except
that it returns an intermediate calculation of the convolution step
which will be used in calculating a mask-related loss.
(1) ResNet18 [slices, 512, 14, 14]
(2) conv_final to [slices, 16, 6, 6]
(3) FC layer (implemented via conv) to [n_outputs, 1, 1]
(4) Avg pooling over slices to get [n_outputs]"""
def __init__(self, n_outputs, slices):
super(AxialNet_Mask, self).__init__()
self.slices = slices #equal to 15 for 9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
self.fc = nn.Conv2d(16, n_outputs, kernel_size = (6,6), stride=(6,6), padding=0)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
assert list(x.shape)==[1,self.slices,3,420,420]
x1 = x.squeeze() #out shape [slices,3,420,420]
x1 = self.features(x1) #out shape [slices,512,14,14]
x1f = self.conv2d(x1) #out shape [slices, 16, 6, 6]
x2 = self.fc(x1f) #out shape [slices,n_outputs,1,1]
x2 = torch.squeeze(x2) #out shape [slices, n_outputs]
x2_perslice_scores = x2.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
x2 = self.avgpool_1d(x2_perslice_scores) #out shape [1, n_outputs, 1]
x2f = torch.squeeze(x2, dim=2) #out shape [1, n_outputs]
#Now calculate what the disease specific representation is in the
#intermediate calculation of the fc layer.
#First, make n_outputs copies of the slices x 16 x 6 x 6 representation:
x1_repeated = x1f.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
#Now select the fc_weights:
fc_weights = self.fc.weight #shape [132, 16, 6, 6], where 132 is n_outputs
fc_weights_unsq = fc_weights.unsqueeze(dim=1) #out shape [n_outputs, 1, 16, 6, 6]
#Now multiply element wise. Broadcasting will occur.
#we have [n_outputs, slices, 16, 6, 6] x [n_outputs, 1, 16, 6, 6]
disease_reps = torch.mul(x1_repeated, fc_weights_unsq) #out shape [n_outputs, slices, 16, 6, 6]
out = {'out':x2f,
'x_perslice_scores':x2_perslice_scores,
'disease_reps':disease_reps}
return out
class AxialNet_Mask_VanillaGradCAM(nn.Module):
"""Identical implementation to the one in custom_models_base.py except
that it returns an intermediate calculation of the convolution step
which will be used in calculating a mask-related loss; this intermediate
calculation is based on vanilla Grad-CAM.
(1) ResNet18 [slices, 512, 14, 14]
(2) conv_final to [slices, 16, 6, 6]
(3) FC layer (implemented via conv) to [n_outputs, 1, 1]
(4) Avg pooling over slices to get [n_outputs]"""
def __init__(self, n_outputs, slices):
super(AxialNet_Mask_VanillaGradCAM, self).__init__()
self.slices = slices #equal to 15 for 9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
self.fc = nn.Conv2d(16, n_outputs, kernel_size = (6,6), stride=(6,6), padding=0)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
assert list(x.shape)==[1,self.slices,3,420,420]
x1 = x.squeeze() #out shape [slices,3,420,420]
x1 = self.features(x1) #out shape [slices,512,14,14]
x1f = self.conv2d(x1) #out shape [slices, 16, 6, 6]
x2 = self.fc(x1f) #out shape [slices,n_outputs,1,1]
x2 = torch.squeeze(x2) #out shape [slices, n_outputs]
x2_perslice_scores = x2.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
x2 = self.avgpool_1d(x2_perslice_scores) #out shape [1, n_outputs, 1]
x2f = torch.squeeze(x2, dim=2) #out shape [1, n_outputs]
#Now calculate what the disease specific representation is in the
#intermediate calculation of the fc layer.
#First, make n_outputs copies of the slices x 16 x 6 x 6 representation:
x1_repeated = x1f.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
#Now select the fc_weights. These weights are also the gradients leaving
#the last layer.
fc_weights = self.fc.weight #shape [80, 16, 6, 6], where 80 is n_outputs
#To calculate the alpha_ks, we need to take the mean across the height
#and width so that we get one alpha_k per feature per disease:
#(confirmed that this is the mean across the 6x6 in the gradcam code)
alpha_ks = torch.mean(fc_weights,dim=(2,3)) #out shape [n_outputs, 16]
alpha_ks_unsq = alpha_ks.unsqueeze(dim=1).unsqueeze(dim=3).unsqueeze(dim=3) #out shape [n_outputs, 1, 16, 1, 1]
#Now multiply element wise. Broadcasting will occur.
#we have [n_outputs, slices, 16, 6, 6] x [n_outputs, 1, 16, 1, 1]
disease_reps = torch.mul(x1_repeated, alpha_ks_unsq) #out shape [n_outputs, slices, 16, 6, 6]
#the summing over the feature dimension takes place in the loss
#calculation
out = {'out':x2f,
'x_perslice_scores':x2_perslice_scores,
'disease_reps':disease_reps}
return out
class AxialNet_Mask_Final3DConv(nn.Module):
"""Identical implementation to the one in custom_models_base.py except
that it returns an intermediate calculation of the convolution step
which will be used in calculating a mask-related loss.
(1) ResNet18 [slices, 512, 14, 14]
(2) conv_final to [slices, 16, 6, 6]
(3) Final FC layer implemented via 3D convolution to produce [n_outputs]"""
def __init__(self, n_outputs, slices):
super(AxialNet_Mask_Final3DConv, self).__init__()
self.slices = slices #equal to 15 for 9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv() #out shape [slices, 16, 6, 6]
#Final step is 3D convolution!
#Rep is first reshaped to [1, 16, slices, 6, 6]
self.fc = nn.Conv3d(16, n_outputs, kernel_size=(self.slices,6,6), stride=(self.slices,6,6), padding=0)
def forward(self, x):
assert list(x.shape)==[1,self.slices,3,420,420]
x1 = x.squeeze() #out shape [slices,3,420,420]
x1 = self.features(x1) #out shape [slices,512,14,14]
x1 = self.conv2d(x1) #out shape [slices, 16, 6, 6]
#Reshape:
x1f = x1.transpose(0,1).unsqueeze(0) #out shape [1, 16, slices, 6, 6]
#Final classification
x2 = self.fc(x1f) #out shape [1,n_outputs,1,1,1]
x2f = x2.squeeze(dim=2).squeeze(dim=2).squeeze(dim=2) #out shape [1,n_outputs]
#TODO TEST THIS (or at least make visualizations of disease_reps)
#Now calculate what the disease specific representation is in the
#intermediate calculation of the fc layer.
#First, make n_outputs copies of the 16 x slices x 6 x 6 representation:
x1_repeated = x1f.squeeze(dim=0).repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, 16, slices, 6, 6]
#Now select the fc_weights:
fc_weights = self.fc.weight #shape [n_outputs, 16, slices, 6, 6]
assert x1_repeated.shape==fc_weights.shape
#Now multiply element wise. Broadcasting will occur.
#we have [n_outputs, 16, slices, 6, 6] x [n_outputs, 16, slices, 6, 6]
disease_reps_orig = torch.mul(x1_repeated, fc_weights) #out shape [n_outputs, 16, slices, 6, 6]
#But for the attention ground truth calculation we assume that the
#disease_reps has shape [n_outputs, slices, 16, 6, 6], so transpose!
disease_reps = disease_reps_orig.transpose(1,2) #out shape [n_outputs, slices, 16, 6, 6]
out = {'out':x2f,
'disease_reps':disease_reps}
return out
class BodyLocationAttn3Mask(nn.Module): #7/2/2020, updated 7/7/2020, redone for mask 8/27/2020
"""Modification on 8/27 involves the shape of the attention calculated.
Old version calculated [1,1,1,6,6 attention]. This version calculates
[1,slices,1,6,6] attention (i.e. fully 3d spatially.)
There is also a special loss associated with this model which requires the
model to match the organ attention to ground truth organ masks.
OLD DOCUMENTATION from model that this model was based on,
BodyLocationAttn3 in custom_models_diseasereps.py:
See AxialNetDiseaseFeatureAttn for more documentation including code comments.
Difference from AxialNetDiseaseFeatureAttn: uses spatial attention instead of
feature attention. Specifically there is right lung, heart, and left lung
spatial attention. Also, instead of being fixed weights every time, the
weights are learned based on using the center slices (since the center
slices are most indicative of where the right lung, heart, and left
lung are located.) So this is trainable soft self-attention."""
def __init__(self, n_outputs_lung, n_outputs_heart):
super(BodyLocationAttn3Mask, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = (2*n_outputs_lung)+n_outputs_heart
self.n_outputs_lung = n_outputs_lung
self.n_outputs_heart = n_outputs_heart
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate the spatial attention based on ALL the slices
in_size = self.slices*16*6*6
out_size = self.slices*6*6
self.heart_attn_fc = nn.Sequential(nn.Linear(in_size, out_size),nn.Sigmoid())
self.left_lung_attn_fc = nn.Sequential(nn.Linear(in_size, out_size),nn.Sigmoid())
self.right_lung_attn_fc = nn.Sequential(nn.Linear(in_size, out_size),nn.Sigmoid())
self.fclayers_weights, self.fclayers_biases = cmdr.init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
#Calculate attention mask based on all slices
#This attention mask is basically doing low-dimensional organ
#segmentation. The nice thing about doing the segmentation this way
#is that the model can still look at both lungs when predicting a
#lung disease but it's forced to look MORE at the relevant lung.
all_slices_flat = x.flatten().unsqueeze(dim=0) #out shape [1,8640]
#The spatial maps must be able to be broadcast multiplied against
#a Tensor of shape [slices, n_outputs_organ, 16, 6, 6]
self.heart_spatial = self.heart_attn_fc(all_slices_flat).reshape(self.slices,1,1,6,6) #out shape [slices,1,1,6,6]
self.left_lung_spatial = self.left_lung_attn_fc(all_slices_flat).reshape(self.slices,1,1,6,6) #out shape [slices,1,1,6,6]
self.right_lung_spatial = self.right_lung_attn_fc(all_slices_flat).reshape(self.slices,1,1,6,6) #out shape [slices,1,1,6,6]
#Repeat x
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, n_outputs, 16, 6, 6]
#Apply the attention maps
#Must follow ground truth label order, which is heart, left_lung, right_lung
x_heart = torch.mul(x[:,0:self.n_outputs_heart,:,:,:],self.heart_spatial)
x_left_lung = torch.mul(x[:,self.n_outputs_heart:self.n_outputs_heart+self.n_outputs_lung,:,:,:],self.left_lung_spatial)
x_right_lung = torch.mul(x[:,-1*self.n_outputs_lung:,:,:,:],self.right_lung_spatial)
x = torch.cat((x_heart,x_left_lung,x_right_lung),dim=1) #out shape [slices, n_outputs, 16, 6, 6]
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, n_outputs, 16*6*6] = [slices, n_outputs, 576]
slice_preds = cmdr.apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
x = self.avgpool_1d(x) #out shape [1, n_outputs, 1]
x = torch.squeeze(x, dim=2) #out shape [1, n_outputs]
out = {'out':x,
'heart_spatial':self.heart_spatial,
'left_lung_spatial':self.left_lung_spatial,
'right_lung_spatial':self.right_lung_spatial}
return out
class BodyDiseaseSpatialAttn4Mask(nn.Module): #7/7/2020 #TODO test this #Updated 8/27/2020 for mask
"""In this model a 3D attention mask of shape [slices,6,6] is calculated for
each disease, before the classification step.
Note that this model is identical to BodyDiseaseSpatialAttn4 except for
its usage:
(a) custom loss function: in the loss, the location information is used to
determine what locations the disease-specific attention is allowed to
look at. e.g. if there is atelectasis only in the left lung then the
attention for atelectasis for that scan should be only in the place
demarcated as left lung in the segmentation ground truth.
Furthermore, if there is NO atelectasis present, then the attention
for atelectasis should all be zero.
In order to calculate this custom loss, this model has to return
the attention maps in addition to the predictions.
(b) custom labels: this model is different from everything else I have
been doing because it assumes that we just want to predict lung
diseases generically and so it only makes
n_outputs_lung+n_outputs_heart predictions, rather than
(2*n_outputs_lung+n_outputs_heart) predictions.
OLD DOCUMENTATION from model that this model was based on,
BodyDiseaseSpatialAttn4 in custom_models_diseasereps.py
See AxialNetDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyLocationAttn3: while 4 also uses spatial
attention (like 3), 4 does spatial attention per disease instead of per
location."""
def __init__(self, n_outputs_lung, n_outputs_heart):
super(BodyDiseaseSpatialAttn4Mask, self).__init__()
self.slices = 15 #9 projections
#NOTE that here, we have only n_outputs_lung overall! We are not doing
#separate predictions for the right and left lungs!
self.n_outputs = n_outputs_lung+n_outputs_heart
self.n_outputs_lung = n_outputs_lung
self.n_outputs_heart = n_outputs_heart
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate per-disease spatial attention based on ALL the slices
#Repeated representation: [slices, n_outputs, 16, 6, 6]
#Attention shape we want: [slices, n_outputs, 1, 6, 6]
self.nonlinearity = nn.Sigmoid()
#FC layers for calculating the disease-specific spatial attention
#For each disease and each element of the 6x6 I learn a different FC layer:
self.fcattns_weights, self.fcattns_biases = cmdr.init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs*6*6, in_features = 16)
#FC layers for calculating the final disease predictions
self.fclayers_weights, self.fclayers_biases = cmdr.init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [n_outputs, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, n_outputs, 16, 6, 6]
#Calculate the disease-specific spatial attention:
attn_raw_list = []
for slice_num in range(self.slices):
slice_data = x[slice_num,:,:,:,:] #out shape [n_outputs, 16, 6, 6]
slice_data = slice_data.flatten(start_dim=2,end_dim=3).transpose(1,2) #out shape [n_outputs, 6*6, 16]
slice_data = slice_data.flatten(start_dim=0,end_dim=1) #out shape [n_outputs*6*6, 16]
temp1 = torch.mul(slice_data,self.fcattns_weights) #out shape [n_outputs*6*6, 16]
temp2 = torch.sum(temp1,dim=1) #out shape [n_outputs*6*6]
temp3 = (temp2+self.fcattns_biases).unsqueeze(0) #out shape [n_outputs*6*6]
attn_raw_list.append(temp3)
attn_raw = torch.cat(attn_raw_list,dim=0) #out shape [slices, n_outputs*6*6]
attn_raw = torch.reshape(attn_raw,(self.slices,self.n_outputs,6*6)) #out shape [slices, n_outputs, 6*6]
attn = self.nonlinearity(attn_raw) #out shape [slices, n_outputs, 6*6]
attn = torch.reshape(attn,(self.slices,self.n_outputs,6,6)).unsqueeze(2) #out shape [slices, n_outputs, 1, 6, 6]
#Apply the attention
x_times_attn = torch.mul(x, attn) #out shape [slices, n_outputs, 16, 6, 6]
#Disease predictions
        x = x_times_attn.flatten(start_dim=2,end_dim=4) #out shape [slices, n_outputs, 16*6*6] = [slices, n_outputs, 576]
slice_preds = cmdr.apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, n_outputs, slices]
x = self.avgpool_1d(x) #out shape [1, n_outputs, 1]
x = torch.squeeze(x, dim=2) #out shape [1, n_outputs]
out = {'out':x,
'attn':attn} #attn out shape [slices, n_outputs, 1, 6, 6]
return out
class BodyDiseaseSpatialAttn5Mask(nn.Module): #7/7/2020 #TODO test this
#On the natural images dataset, this model had better performance
#than model 4
"""Exactly the same as the BodyDiseaseSpatialAttn5 model except that
this returns the attn so that it can be trained with a loss function that
acts on the attn as well.
OLD DOCUMENTATION from model that this model was based on,
BodyDiseaseSpatialAttn5 in custom_models_diseasereps.py:
See AxialNetDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyDiseaseSpatialAttn4: whereas 4 learns a different
mapping of 16 features -> 1 spatial attn value for each element of the 6x6
square, 5 uses a convolution layer such that the mapping of 16 -> 1 is
the same for each element of the 6x6 square"""
def __init__(self, n_outputs, nonlinearity):
super(BodyDiseaseSpatialAttn5Mask, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate the spatial attention based on center slices
if nonlinearity == 'softmax':
self.nonlinearity = nn.Softmax(dim=2)
elif nonlinearity == 'sigmoid':
self.nonlinearity = nn.Sigmoid()
#Conv layer for calculating the disease-specific spatial attention
#For each disease and each element of the 6x6 I learn a different FC layer:
self.attn_conv = nn.Sequential(
nn.Conv2d(16, self.n_outputs, kernel_size = (1,1), stride=(1,1), padding=0),
self.nonlinearity)
#FC layers for calculating the final disease predictions
self.fclayers_weights, self.fclayers_biases = cmdr.init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
#Calculate the disease-specific spatial attention:
attn = self.attn_conv(x).unsqueeze(2) #out shape [slices, 83, 1, 6, 6]
#Apply the attention
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
x_times_attn = torch.mul(x, attn) #out shape [slices, 83, 16, 6, 6]
#Disease predictions
        x = x_times_attn.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = cmdr.apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
out = {'out':x,
'attn':attn} #attn out shape [slices, n_outputs, 1, 6, 6]
return out
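
# --- Added shape-check sketch (not part of the original module) ---
# The relative imports above mean this file must be run from inside its
# package; n_outputs=80 and slices=15 are arbitrary illustration values.
def _smoke_test_axialnet_mask():
    model = AxialNet_Mask(n_outputs=80, slices=15)
    dummy = torch.randn(1, 15, 3, 420, 420)
    with torch.no_grad():
        out = model(dummy)
    assert out['out'].shape == (1, 80)
    assert out['disease_reps'].shape == (80, 15, 16, 6, 6)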
[record: repo=rachellea/explainable-ct-ai | path=src/models/custom_models_mask.py | file=custom_models_mask.py | ext=py | size=21,512 bytes | program_lang=python | lang=en | doc_type=code | stars=3 | dataset=github-code | pt=6]

[next record: seq_id=34839501596]
import numpy as np
import torch
import torchvision
import PIL
import os
def save_video(img, outdir, drange, fname="video0.mp4", normalize=True):
    _, C, T, H, W = img.shape
    # print (f'Saving Video with {T} frames, img shape {H}, {W}')
    img = img.detach().cpu().numpy()
if normalize:
lo, hi = drange
img = np.asarray(img, dtype=np.float32)
img = (img - lo) * (255 / (hi - lo))
img = np.rint(img).clip(0, 255).astype(np.uint8)
# gw, gh = grid_size
# _N, C, T, H, W = img.shape
# img = img.reshape(gh, gw, C, T, H, W)
img = np.squeeze(img)
img = img.transpose(1,2,3,0)
# img = img.reshape(T, H, W, C)
# assert C in [3]
if C == 3:
torchvision.io.write_video(os.path.join(outdir,fname), torch.from_numpy(img), fps=8)
# imgs = [PIL.Image.fromarray(img, 'RGB') for i in range(len(img))]
# imgs[0].save(fname, quality=95, save_all=True, append_images=imgs[1:], duration=100, loop=0)
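
# --- Added usage example (not part of the original module) ---
# Writes a short random RGB clip; the output location and name are arbitrary.
# torchvision.io.write_video needs the PyAV backend to be installed.
if __name__ == "__main__":
    fake = torch.rand(1, 3, 16, 64, 64)   # [N, C, T, H, W] with values in [0, 1]
    save_video(fake, outdir=".", drange=(0, 1), fname="demo.mp4")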
[record: repo=interiit-Team10/HP_BO_DIGAN | path=src/scripts/__init__.py | file=__init__.py | ext=py | size=979 bytes | program_lang=python | lang=en | doc_type=code | stars=0 | dataset=github-code | pt=6]