seq_id (str, 7–11 chars) | text (str, 156–1.7M chars) | repo_name (str, 7–125 chars) | sub_path (str, 4–132 chars) | file_name (str, 4–77 chars) | file_ext (str, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (1 class) | lang (38 classes) | doc_type (1 class) | stars (int64, 0–24.2k, may be null) | dataset (1 class) | pt (1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
23012946135
|
import pandas as pd
import numpy as np
import geopandas as gpd
from helper_functions import add_subset_address_cols, interpolate_polygon
from data_constants import default_crs, make_data_dict
from name_parsing import combine_names
from address_parsing import clean_parse_address
from helper_functions import make_panel
from pathos.multiprocessing import ProcessingPool as Pool
import re
import fiona
import warnings
warnings.filterwarnings("ignore", 'This pattern has match groups')
# function for reading in corrupted gdb files. really only relevant for LA CAMS data
def readShp_nrow(path, numRows):
fiona_obj = fiona.open(str(path))
toReturn = gpd.GeoDataFrame.from_features(fiona_obj[0:numRows])
toReturn.crs = fiona_obj.crs
return (toReturn)
# classify & clean name columns + clean & parse primary and mailing addresses
# function that runs code in parallel
def parallelize_dataframe(df:pd.DataFrame, func, n_cores=4) -> pd.DataFrame:
df_split = np.array_split(df, n_cores)
pool = Pool(n_cores)
df = pd.concat(pool.map(func, df_split))
pool.close()
pool.join()
# have to include this to prevent leakage and allow multiple parallel function calls
pool.terminate()
pool.restart()
return df
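# A minimal usage sketch (illustration only, not part of the original pipeline): any
# per-chunk function with the signature f(df) -> df can be parallelized this way, e.g.
#   cleaned = parallelize_dataframe(df=some_address_df, func=clean_parse_parallel, n_cores=4)
# where some_address_df is a hypothetical dataframe that already has the columns
# clean_parse_parallel expects (see add_subset_address_cols / data_constants).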
# wrapper function to run each city in parallel
def clean_parse_parallel(df:pd.DataFrame) -> pd.DataFrame:
df = clean_parse_address(
dataframe=df, address_col='address_fa',st_name="address_sn", st_sfx="address_ss",
st_d="address_sd", unit='address_u', st_num='address_n1',
country='address_country', state='address_state', st_num2 ='address_n2',city='address_city',
zipcode='address_zip', prefix2='parsed_', prefix1='cleaned_'
)
return df
# ADDRESS CLEANING FUNCTIONS #
# takes an address df (geopandas or pandas), standardizes and cleans it, and returns a standardized pandas dataframe
# these functions get address dataframes to be in standardized formats (renamed columns, added variables, etc)
# such that the dataframe can be passed to clean_parse_parallel and exported
# see address cols in data constants for full list of necessary columns needed for clean_parse_parallel
# I'll note if there is anything special with the function, but otherwise assume that it follows a standard flow of
# 1. rename columns -> add columns -> subset to only needed columns -> clean_parse_parallel -> return
# chicago cleaning functions:
# chicago address files come in two separate files that together represent a full set of addresses in cook county
# clean chi_add_points cleans a points file that represents centroid points for cook county parcel polygons
def clean_chi_add_points(df):
chicago_rename_dict = {
'ADDRNOCOM': 'address_n1',
'STNAMEPRD': 'address_sd',
'STNAME': 'address_sn',
'STNAMEPOT': 'address_ss',
'PLACENAME': 'address_city',
'ZIP5': 'address_zip',
'CMPADDABRV': 'address_fa',
'PIN': 'parcelID',
'XPOSITION': 'long',
'YPOSITION': 'lat'
}
df.rename(columns=chicago_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
# basically the same as the address points, but these are for parcel polygons (lat/long are centroid points, so it is
# basically equivalent); each file just has some addresses not in the other df and vice versa
def clean_chi_add_parcels(df):
chicago_rename_dict = {
'property_address':'address_fa',
'property_city': 'address_city',
'property_zip': 'address_zip',
'pin': 'parcelID',
'latitude': 'lat',
'longitude': 'long'
}
df.rename(columns=chicago_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=4)
return df
def concat_chi_add(df1, df2):
df1 = pd.concat([df1, df2]).drop_duplicates(subset = [
'parcelID',
"parsed_addr_n1",
"parsed_addr_sn",
"parsed_addr_ss",
"parsed_city"
])
return df1
# saint louis is a little strange because they provide parcel polygons for entire streets,
# e.g. main st 100-900. This is fine for small streets, as it's not problematic to take polygon centroids, but
# it becomes an issue for larger streets. For larger streets I take a best guess on which way the street runs and
# linearly interpolate lat/long between the bottom and top of the address span,
# so if main st 100-900 runs NW, that means it has its smallest numbers in the southeast and increases going northwest
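# A rough worked example of the interpolation idea (illustrative numbers only, not from the data):
# if main st 100-900 runs NE with its low end near (lat 38.600, long -90.200) and its high end near
# (lat 38.610, long -90.190), then address 500 is halfway through the span and would get roughly
# lat 38.605, long -90.195. The actual interpolation is done below by interpolate_polygon() from
# helper_functions, using the bounding box and the inferred direction.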
def clean_stl_add(df):
df = df.rename(
columns = {
"STREETNAME": "address_sn", "STREETTYPE": "address_ss", "PREDIR": "address_sd", "ZIP_CODE": "address_zip"
}
)
df['index'] = np.arange(df.shape[0])
df = df.to_crs(default_crs)
df.crs = default_crs
bounds = df.bounds
df['address_city'] = 'saint louis'
df['latitude_min'] = bounds["miny"]
df['latitude_max'] = bounds["maxy"]
df['longitude_min'] = bounds["minx"]
df['longitude_max'] = bounds["maxx"]
df['direction'] = np.where(
((df['FROMLEFT'] < df['TOLEFT']) & (df['FROMRIGHT'] < df['TORIGHT'])),
"NE",
np.where(
((df['FROMLEFT'] < df['TOLEFT']) & (df['FROMRIGHT'] > df['TORIGHT'])),
"NW",
np.where(
((df['FROMLEFT'] > df['TOLEFT']) & (df['FROMRIGHT'] < df['TORIGHT'])),
"SE",
np.where(
((df['FROMLEFT'] > df['TOLEFT']) & (df['FROMRIGHT'] > df['TORIGHT'])),
"SW",
"SW"
)
)
)
)
df_r = df[[col for col in df.columns if not bool(re.search("LEFT", col))]]
df_r['address_n1'] = np.where(
df_r['FROMRIGHT'] > df_r['TORIGHT'],
df_r['TORIGHT'],
df_r['FROMRIGHT']
)
df_r['address_n2'] = np.where(
df_r['TORIGHT'] > df_r['FROMRIGHT'],
df_r['TORIGHT'],
df_r['FROMRIGHT']
)
df_l = df[[col for col in df.columns if not bool(re.search("RIGHT", col))]]
df_l['address_n1'] = np.where(
df_l['FROMLEFT'] > df_l['TOLEFT'],
df_l['TOLEFT'],
df_l['FROMLEFT']
)
df_l['address_n2'] = np.where(
df_l['TOLEFT'] > df_l['FROMLEFT'],
df_l['TOLEFT'],
df_l['FROMLEFT']
)
df = pd.concat([df_r, df_l])
df = df[~((df['address_n1'] <= 0) & (df['address_n2'] <= 0))]
df = make_panel(df,start_year="address_n1", end_year="address_n2", current_year=df['address_n2'],
evens_and_odds=True ).rename(columns = {'year': 'address_n1'})
# interpolate lat long
df = interpolate_polygon(df, "index", "direction")
df['lat'] = df['lat_interpolated']
df['long'] = df["long_interpolated"]
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_la_add(df):
la_rename_dict = {
'AIN': 'parcelID',
'UnitName': 'address_u',
'Number': 'address_n1',
'PostType': 'address_ss',
'PreDirAbbr': 'address_sd',
'ZipCode': 'address_zip',
'LegalComm': 'address_city',
}
df.rename(columns=la_rename_dict, inplace=True)
combine_names(df, name_cols=['PreType', 'StArticle', 'StreetName'], newCol="address_sn")
df = df.to_crs(default_crs)
df.crs = default_crs
df['long'] = df.geometry.centroid.x
df['lat'] = df.geometry.centroid.y
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_sd_add(df):
sd_rename_dict = {
'addrunit': 'address_u',
'addrnmbr': 'address_n1',
'addrpdir':'address_sd',
'addrname': 'address_sn',
'addrsfx': 'address_ss',
'addrzip': 'address_zip',
'community': 'address_city',
'PIN': 'parcelID',
}
df.rename(columns=sd_rename_dict, inplace=True)
df = df.to_crs(default_crs)
df.crs = default_crs
df['long'] = df.geometry.centroid.x
df['lat'] = df.geometry.centroid.y
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_sf_add(df):
sf_rename_dict = {
"Parcel Number": 'parcelID',
'Unit Number': 'address_u',
'Address Number': 'address_n1',
'Street Name': 'address_sn',
'Street Type': 'address_ss',
'ZIP Code': 'address_zip',
'Address': 'address_fa',
#'PIN': 'parcelID',
'Longitude': 'long',
'Latitude': 'lat'
}
df.rename(columns=sf_rename_dict, inplace=True)
df['address_city'] = "San Francisco"
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_seattle_add(df):
seattle_rename_dict = {
'PIN': 'parcelID',
'ADDR_NUM': 'address_n1',
'ADDR_SN': 'address_sn',
'ADDR_ST': 'address_ss',
'ADDR_SD': 'address_sd',
'ZIP5': 'address_zip',
'CTYNAME': 'address_city',
'ADDR_FULL': 'address_fa',
'LON': 'long',
'LAT': 'lat'
}
df.rename(columns=seattle_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_orlando_add(df):
orlando_rename_dict = {
'OFFICIAL_P': 'parcelID',
"COMPLETE_A": 'address_fa',
"ADDRESS__1": 'address_n1',
"ADDRESS__2": "address_n2",
"BASENAME": "address_sn",
"POST_TYPE":"address_ss",
"POST_DIREC": "address_sd",
"MUNICIPAL_": 'address_city',
"ZIPCODE": "address_zip",
"LATITUDE": "lat",
"LONGITUDE": "long",
}
df.rename(columns=orlando_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
def clean_baton_rouge_add(df):
baton_rouge_rename_dict = {
'ADDRNOCOM': 'address_n1',
'ASTREET PREFIX DIRECTION': 'address_sd',
'STREET NAME': 'address_sn',
'STREET SUFFIX TYPE': 'address_ss',
'CITY': 'address_city',
'ZIP': 'address_zip',
'FULL ADDRESS': 'address_fa'
}
df.rename(columns=baton_rouge_rename_dict, inplace=True)
lat_long = df['GEOLOCATION'].str.extract('([0-9\.]+),([0-9\.]+)')
df['lat'] = lat_long.iloc[:,0]
df['long'] = lat_long.iloc[:,1]
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=4)
return df
def merge_sac_parcel_id(sac_add: pd.DataFrame, xwalk: pd.DataFrame) -> pd.DataFrame:
return pd.merge(
sac_add,
xwalk[xwalk['Parcel_Number'].notna()][["Address_ID", "Parcel_Number"]].drop_duplicates(),
left_on = "Address_ID", right_on = "Address_ID", how = "left"
)
def clean_sac_add(df):
sac_rename_dict = {
'APN': 'parcelID',
"Address_Number": 'address_n1',
"Street_Name": "address_sn",
"Street_Suffix":"address_ss",
"Pre_Directiona;": "address_sd",
"Postal_City": 'address_city',
"Zip_Code": "address_zip",
"Latitude_Y": "lat",
"Longitude_X": "long",
}
df.rename(columns=sac_rename_dict, inplace=True)
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
# used to reclean data in the event that you don't want to read in a shapefile
# mostly used because it's faster to read in a csv than a shp
def clean_int_addresses(df):
df = add_subset_address_cols(df)
df = parallelize_dataframe(df=df, func=clean_parse_parallel, n_cores=2)
return df
if __name__ == "__main__":
print("hello")
data_dict = make_data_dict(use_seagate=False)
# stl_add = gpd.read_file(data_dict['raw']['stl']['parcel'] + 'streets/tgr_str_cl.shp')
# stl_add = clean_stl_add(stl_add)
# stl_add.to_csv(data_dict['intermediate']['stl']['parcel'] + 'addresses.csv', index=False)
# baton_rouge_add = pd.read_csv(
# data_dict['raw']['baton_rouge']['parcel'] + 'addresses_Property_Information_ebrp.csv')
# baton_rouge_add = clean_baton_rouge_add(baton_rouge_add)
# baton_rouge_add.to_csv(data_dict['intermediate']['baton_rouge']['parcel'] + 'addresses.csv', index=False)
# chicago_add1 = pd.read_csv(data_dict['raw']['chicago']['parcel'] + 'Cook_County_Assessor_s_Property_Locations.csv')
# chicago_add2 = pd.read_csv(data_dict['raw']['chicago']['parcel'] + 'Address_Points_cook_county.csv')
# orlando_add = gpd.read_file(data_dict['raw']['orlando']['parcel'] + "Address Points/ADDRESS_POINT.shp")
# clean_orlando_add(orlando_add).to_csv(data_dict['intermediate']['orlando']['parcel'] + 'addresses.csv', index=False)
# la_add = gpd.read_file("/Users/JoeFish/Desktop/la_addresspoints.gdb", nrows = 100)
# la_add = pd.read_csv(data_dict['intermediate']['la']['parcel'] + 'addresses.csv')
# file is corrupted so we have to read it in this way...
# print(la_add.head())
#sd_add = gpd.read_file(data_dict['raw']['sd']['parcel'] + 'addrapn_datasd_san_diego/addrapn_datasd.shp')
# sf_add = pd.read_csv(
# data_dict['raw']['sf']['parcel'] + 'Addresses_with_Units_-_Enterprise_Addressing_System_san_francisco.csv')
# seattle_add = gpd.read_file(data_dict['raw']['seattle']['parcel'] +
# 'Addresses_in_King_County___address_point/Addresses_in_King_County___address_point.shp')
#
# # clean_baton_rouge_add(baton_rouge_add).to_csv(data_dict['intermediate']['baton_rouge']['parcel'] + 'addresses.csv', index=False)
# clean_chi_add_parcels(chicago_add1).to_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_from_parcels.csv', index=False)
# clean_chi_add_points(chicago_add2).to_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_from_points.csv', index=False)
# clean_int_addresses(la_add).to_csv(data_dict['intermediate']['la']['parcel'] + 'addresses_temp.csv', index=False)
# clean_sf_add(sf_add).to_csv(data_dict['intermediate']['sf']['parcel'] + 'addresses.csv', index=False)
# #clean_sd_add(sd_add).to_csv(data_dict['intermediate']['sd']['parcel'] + 'addresses.csv', index=False)
# clean_seattle_add(seattle_add).to_csv(data_dict['intermediate']['seattle']['parcel'] + 'addresses.csv', index=False)
# chi1 = pd.read_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_from_parcels.csv', dtype={"parsed_addr_n1": str})
# chi2 = pd.read_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_from_points.csv', dtype={"parsed_addr_n1": str})
# concat_chi_add(chi1,chi2).to_csv(data_dict['intermediate']['chicago']['parcel'] + 'addresses_concat.csv', index=False)
sac_add = pd.read_csv(data_dict['raw']['sac']['parcel'] + 'Address.csv')
sac_xwalk = pd.read_csv(data_dict['raw']['sac']['parcel'] + 'Address_parcel_xwalk.csv')
sac_add = merge_sac_parcel_id(sac_add=sac_add, xwalk=sac_xwalk)
clean_sac_add(sac_add).to_csv(data_dict['intermediate']['sac']['parcel'] + 'addresses_concat.csv', index=False)
pass
|
jfish-fishj/boring_cities
|
python_modules/clean_address_data.py
|
clean_address_data.py
|
py
| 15,391 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70714254267
|
from .constants import MAX_STACK_DEPTH, MAX_UINT256
class Stack:
def __init__(self, max_depth=MAX_STACK_DEPTH) -> None:
self.stack = []
self.max_depth = max_depth
def push(self, item: int) -> None:
if item < 0 or item > MAX_UINT256:
raise InvalidStackItem({"item": item})
if (len(self.stack) + 1) > self.max_depth:
raise StackOverflow()
self.stack.append(item)
def pop(self) -> int:
if len(self.stack) == 0:
raise StackUnderflow()
return self.stack.pop()
def peek(self, i) -> int:
"""returns a stack element without popping it -- peek(0) is the top element, peek(1) is the next one, etc."""
if len(self.stack) <= i:
raise StackUnderflow()
return self.stack[-(i + 1)]
def swap(self, i: int) -> None:
"""swaps the top of the stack with the i+1th element"""
if i == 0:
return
if len(self.stack) < i + 1:
raise StackUnderflow()
self.stack[-1], self.stack[-i - 1] = self.stack[-i - 1], self.stack[-1]
def __str__(self) -> str:
return str(self.stack)
def __repr__(self) -> str:
return str(self)
class StackUnderflow(Exception):
...
class StackOverflow(Exception):
...
class InvalidStackItem(Exception):
...
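# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    stack = Stack()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert stack.peek(0) == 3 and stack.peek(2) == 1
    stack.swap(2)            # swap the top with the 3rd element: stack becomes [3, 2, 1]
    assert stack.pop() == 1
    print(stack)             # [3, 2]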
|
karmacoma-eth/smol-evm
|
src/smol_evm/stack.py
|
stack.py
|
py
| 1,394 |
python
|
en
|
code
| 165 |
github-code
|
6
|
42319245603
|
from setuptools import setup, find_packages
import codecs
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
import prefetch_generator
# use the package docstring as the long description
long_description = prefetch_generator.__doc__
version_string = '1.0.2'
setup(
name="prefetch_generator",
version=version_string,
description="a simple tool to compute arbitrary generator in a background thread",
long_description=long_description,
# Author details
author_email="[email protected]",
url="https://github.com/justheuristic/prefetch_generator",
# Choose your license
license='The Unlicense',
packages=find_packages(),
classifiers=[
# Indicate who your project is intended for
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: The Unlicense (Unlicense)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
# What does your project relate to?
keywords='background generator, prefetch generator, parallel generator, prefetch, background,' + \
'deep learning, theano, tensorflow, lasagne, blocks',
# List run-time dependencies here. These will be installed by pip when your project is installed.
install_requires=[
#nothing
],
)
|
justheuristic/prefetch_generator
|
setup.py
|
setup.py
|
py
| 1,969 |
python
|
en
|
code
| 260 |
github-code
|
6
|
72441337147
|
# Tip Calculator
# print welcome to the tip calculator
#what was the total bill ?
#what percentage tip would you like to give? 10, 12, 15
#how many people to split the bill?
#each person should pay
print("##Welcome to the tip calculator##")
bill = float(input("what is total bill amount:"))
tip = int(input("what percentage tip would you like to give, 10, 12 or 15?"))
people = int(input("how many people split the bill ?"))
bill_with_tip = tip/100*bill+bill
print(bill_with_tip)
total = bill_with_tip/people
print("each pearosn payable amount is:\n",total)
|
pravinpawar17/Python_100_days
|
day2.2.py
|
day2.2.py
|
py
| 586 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32638070044
|
def voto(ano):
from datetime import datetime
atual = datetime.now().year
idade = atual - ano
if 16 <= idade <= 17 or idade > 60:
return idade, 'VOTO OPCIONAL!'
elif 18 <= idade < 60:
return idade, 'VOTO OBRIGATÓRIO!'
else:
return idade, 'NÃO VOTA!'
nas = int(input('Em que ano voce nasceu? '))
print(f'Com {voto(nas)[0]} anos: {voto(nas)[1]}')
|
LeoWshington/Exercicios_CursoEmVideo_Python
|
ex101.py
|
ex101.py
|
py
| 396 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
73643617788
|
from __future__ import absolute_import
import math
from collections import OrderedDict
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
import torch.utils.model_zoo as model_zoo
from .res2net import res2net50_26w_4s
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'TempoAvgPooling', 'TempoWeightedSum', 'TempoRNN']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class TempoAvgPooling(nn.Module):
""" Temporal Average Pooling """
def __init__(self, num_classes):
super(TempoAvgPooling, self).__init__()
# resnet50 = torchvision.models.resnet50(pretrained=True)
resnet50 = res2net50_26w_4s(pretrained=True)
self.backbone = nn.Sequential(*list(resnet50.children())[:-2])
self.last_layer_ch = 2048
self.classifier = nn.Linear(self.last_layer_ch, num_classes, bias=False)
nn.init.normal_(self.classifier.weight, std=0.01)
def forward(self, x):
"""
Args:
x: (b t 3 H W)
"""
b, t = x.size(0), x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
x = self.backbone(x) # (b*t c h w)
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(b, t, -1).permute(0, 2, 1) # (b t c) to (b c t)
feature = F.avg_pool1d(x, t) # (b c 1)
feature = feature.view(b, self.last_layer_ch)
if not self.training:
return feature
logits = self.classifier(feature)
return logits, feature
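def _tap_shape_example():
    """Illustrative sketch only (not part of the original module): expected tensor shapes for
    TempoAvgPooling, assuming a batch of 2 clips with t=4 RGB frames at 224x112. num_classes=625
    is a placeholder value; instantiating the model loads pretrained Res2Net weights."""
    model = TempoAvgPooling(num_classes=625)
    clip = torch.randn(2, 4, 3, 224, 112)    # (b t 3 H W)
    logits, feature = model(clip)            # in training mode both are returned
    # logits: (2, 625), feature: (2, 2048)
    return logits, feature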
class TempoWeightedSum(nn.Module):
def __init__(self, num_classes):
super(TempoWeightedSum, self).__init__()
resnet50 = torchvision.models.resnet50(pretrained=True)
self.backbone = nn.Sequential(*list(resnet50.children())[:-2])
self.att_gen = 'softmax' # method for attention generation: softMax or sigmoid
self.last_layer_ch = 2048 # feature dimension
self.middle_dim = 256 # middle layer dimension
self.classifier = nn.Linear(self.last_layer_ch, num_classes, bias=False)
nn.init.normal_(self.classifier.weight, std=0.01)
# (7,4) corresponds to (224, 112) input image size
self.spatial_attn = nn.Conv2d(self.last_layer_ch, self.middle_dim, kernel_size=[7, 4])
self.temporal_attn = nn.Conv1d(self.middle_dim, 1, kernel_size=3, padding=1)
def forward(self, x):
b, t = x.size(0), x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
featmaps = self.backbone(x) # (b*t c h w)
attn = F.relu(self.spatial_attn(featmaps)).view(b, t, -1).permute(0, 2, 1) # (b*t c 1 1) to (b t c) to (b c t)
attn = F.relu(self.temporal_attn(attn)).view(b, t) # (b 1 t) to (b t)
if self.att_gen == 'softmax':
attn = F.softmax(attn, dim=1)
elif self.att_gen == 'sigmoid':
attn = F.sigmoid(attn)
attn = F.normalize(attn, p=1, dim=1)
else:
raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
feature = F.avg_pool2d(featmaps, featmaps.size()[2:]).view(b, t, -1) # (b*t c 1 1) to (b t c)
att_x = feature * attn.unsqueeze(dim=-1) # (b t c)
att_x = torch.sum(att_x, dim=1)
feature = att_x.view(b, -1) # (b c)
if not self.training:
return feature
logits = self.classifier(feature)
return logits, feature
class TempoRNN(nn.Module):
def __init__(self, num_classes):
super(TempoRNN, self).__init__()
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.hidden_dim = 512
self.feat_dim = 2048
self.classifier = nn.Linear(self.hidden_dim, num_classes, bias=False)
nn.init.normal_(self.classifier.weight, std=0.01)
self.lstm = nn.LSTM(input_size=self.feat_dim, hidden_size=self.hidden_dim, num_layers=1, batch_first=True)
def forward(self, x):
b = x.size(0)
t = x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
x = self.base(x)
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(b, t, -1)
output, (h_n, c_n) = self.lstm(x)
output = output.permute(0, 2, 1)
f = F.avg_pool1d(output, t)
f = f.view(b, self.hidden_dim)
if not self.training:
return f
y = self.classifier(f)
return y, f
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, conv1_ch=3, conv5_stride=1, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(conv1_ch, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=conv5_stride)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
if __name__ == '__main__':
model = resnet50()
print(model)
for block in model.layer2:
print(block)
|
DeepAlchemist/video-person-reID
|
lib/model/resnet.py
|
resnet.py
|
py
| 11,011 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31528905029
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# $Id: setup.py 30 2005-10-30 07:24:38Z oli $
import os, sys
from setuptools import setup, find_packages
sys.path.insert(0, 'package/lib')
from scapy import VERSION
PACKAGE_NAME = 'scapy'
DESCRIPTION="""Packet manipulation tool, packet generator, network scanner, packet sniffer, and much more."""
LONG_DESCRIPTION="""Powerful interactive packet... manipulation tool, packet generator, \
network... scanner, network discovery tool, and packet... sniffer."""
def find_data_files():
files = [
('/usr/local/share/doc/scapy/', ['package/doc/LICENSE']),
('/usr/local/share/doc/scapy/', ['package/doc/ChangeLog']),
('/usr/local/share/doc/scapy/', ['package/doc/TODO']),
('/usr/local/bin/', ['package/usr/bin/iscapy'])
]
if os.path.exists('package/doc/scapy.info.gz'):
files.append( ('/usr/local/info/', ['package/doc/scapy.info.gz']) )
if os.path.exists('package/doc/scapy.1.gz'):
files.append( ('/usr/local/man/man1', ['package/doc/scapy.1.gz']) )
return files
setup(name=PACKAGE_NAME,
version=VERSION,
license = """GNU General Public License (GPL)""",
platforms = ['POSIX'],
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
url = "http://www.secdev.org/projects/scapy/",
download_url = "http://www.secdev.org/projects/scapy/files/scapy.py",
author = "Philippe Biondi",
author_email = "[email protected]",
classifiers = ["""Development Status :: 4 - Beta""",
"""Environment :: Console""",
"""Intended Audience :: Developers""",
"""Intended Audience :: Education""",
"""Intended Audience :: End Users/Desktop""",
"""Intended Audience :: Information Technology""",
"""Intended Audience :: Other Audience""",
"""Intended Audience :: Science/Research""",
"""Intended Audience :: System Administrators""",
"""License :: OSI Approved :: GNU General Public License (GPL)""",
"""Natural Language :: English""",
"""Operating System :: POSIX""",
"""Programming Language :: Python""",
"""Topic :: Education :: Testing""",
"""Topic :: Internet""",
"""Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator""",
"""Topic :: Security""",
"""Topic :: Software Development :: Libraries :: Python Modules""",
"""Topic :: Software Development :: Testing""",
"""Topic :: Software Development :: Testing :: Traffic Generation""",
"""Topic :: System""",
"""Topic :: System :: Networking""",
"""Topic :: System :: Networking :: Firewalls""",
"""Topic :: System :: Networking :: Monitoring"""],
package_dir = {'':'package/lib'},
py_modules = ['scapy'],
zip_safe=True,
data_files = find_data_files()
)
|
BackupTheBerlios/gruik-svn
|
trunk/projects/packaging_scapy/setup.py
|
setup.py
|
py
| 3,232 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11165153113
|
from uuid import uuid4
from demo.data_loading.data_fetching import get_countries_data
from demo.data_loading.fixes import fix_alpha2_value, fix_alpha3_value, fix_string_value
from demo.server.config import get_pyorient_client
def load_countries_and_regions(countries_df):
graph = get_pyorient_client()
country_cls = graph.registry['Country']
region_cls = graph.registry['Region']
subarea_cls = graph.registry['GeographicArea_SubArea']
area_name_and_type_to_vertex = dict()
# Create all countries.
for _, country_item in countries_df.iterrows():
name = fix_string_value(country_item['CLDR display name'])
uuid = str(uuid4())
alpha2 = fix_alpha2_value(country_item['ISO3166-1-Alpha-2'])
alpha3 = fix_alpha3_value(country_item['ISO3166-1-Alpha-3'])
props = {
'name': name,
'uuid': uuid,
'alpha2': alpha2,
'alpha3': alpha3,
}
vertex = graph.create_vertex(country_cls, **props)
area_name_and_type_to_vertex[(name, 'Country')] = vertex
# Create all non-country regions.
for _, country_item in countries_df.iterrows():
for region_column in ('Intermediate Region Name', 'Sub-region Name', 'Region Name'):
name = fix_string_value(country_item[region_column])
if name is None or (name, 'Region') in area_name_and_type_to_vertex:
# Don't create regions with no name, or regions that were already added.
continue
uuid = str(uuid4())
props = {
'name': name,
'uuid': uuid,
}
vertex = graph.create_vertex(region_cls, **props)
area_name_and_type_to_vertex[(name, 'Region')] = vertex
# Create all relationships between countries/regions.
created_edges = set()
for _, country_item in countries_df.iterrows():
hierarchy_order = (
('CLDR display name', 'Country'),
('Intermediate Region Name', 'Region'),
('Sub-region Name', 'Region'),
('Region Name', 'Region'),
)
regions_in_order = [
(region_name, kind)
for region_name, kind in (
(fix_string_value(country_item[column_name]), kind)
for column_name, kind in hierarchy_order
)
if region_name is not None
]
for index, (parent_region_name, parent_region_kind) in enumerate(regions_in_order):
if index == 0:
continue
child_region_name, child_region_kind = regions_in_order[index - 1]
uniqueness_key = (
parent_region_name,
parent_region_kind,
child_region_name,
child_region_kind,
)
if uniqueness_key not in created_edges:
graph.create_edge(
subarea_cls,
area_name_and_type_to_vertex[(parent_region_name, parent_region_kind)],
area_name_and_type_to_vertex[(child_region_name, child_region_kind)])
created_edges.add(uniqueness_key)
# Link all currently parent-less regions to the World region.
all_region_names = set(area_name_and_type_to_vertex.keys())
all_regions_with_parents = {
(child_region_name, child_region_kind)
for _, _, child_region_name, child_region_kind in created_edges
}
all_regions_without_parents = all_region_names - all_regions_with_parents
world_vertex = graph.create_vertex(region_cls, name='World', uuid=str(uuid4()))
for region_name, region_kind in all_regions_without_parents:
graph.create_edge(
subarea_cls,
world_vertex,
area_name_and_type_to_vertex[(region_name, region_kind)])
def orientdb_load_all():
countries_df = get_countries_data()
load_countries_and_regions(countries_df)
if __name__ == '__main__':
orientdb_load_all()
|
obi1kenobi/graphql-compiler-cross-db-example
|
demo/data_loading/orientdb_loading.py
|
orientdb_loading.py
|
py
| 4,019 |
python
|
en
|
code
| 3 |
github-code
|
6
|
31498010379
|
import os
import string
import Database.DbAccess
import Model.Gobject
import Model.Global
import Model.Exceptions
import Model.Books
import gettext
t = Model.Global.getTrans()
if t != None:
_= t.gettext
else:
def _(x):
return x
def createTable(dataBase):
db= Database.DbAccess.DbAccess(dataBase)
db.createTable(VatList._tableName, Vat._varsType)
db.close()
class Vat(Model.Gobject.Gobject):
"""Vat variables from database table:<br>
'id:' The Vat's unique database table index, int<br>
'vatCode:' A numeric identifier used somewhere, 1 char<br>
'vatName:' Name of this variant, ≤15 char <br>
'vatRate:' The rate in percent, ≤10 char (int or float)<br>
'vatAccount:' The account number to post the VAT of this variant,
4 char<br>
'salesAccount:' For sale variants: The account number where the net amount
shall be posted, 4 char.
"""
_varsType= (('id','INDEX.0'),
('vatCode','BCHAR.1'),
('vatName','BCHAR.15'),
('vatRate','BCHAR.10'),
('vatAccount','BCHAR.4'),
('salesAccount', 'BCHAR.4'))
_vars= ()
for i in _varsType:
_vars= _vars+ (i[0],)
# Generate the format, a number of '%s:%s:%s:...'
_fmt= ("%s:"*len(_vars))[:-1]
# Generate the expression, '(self._var1, self._var2,...)'
_tuple=(('('+'self._%s,'*len(_vars))[:-1] + ')'
)%_vars
# compiled expressions
_cToTuple= compile(_tuple, '<string>', 'eval')
_cToObject= compile(_tuple+'= t','<string>', 'exec')
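# For the six Vat fields above these expand to:
#   _fmt   == "%s:%s:%s:%s:%s:%s"
#   _tuple == "(self._id,self._vatCode,self._vatName,self._vatRate,self._vatAccount,self._salesAccount)"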
def __init__(self, t= None):
Model.Gobject.Gobject.__init__(self, t)
def copyOfVat(self):
t= self.objectToDbTuple()
return Vat(t)
def __cmp__(self, o2):
return cmp(self._vatCode, o2._vatCode)
# property actions
def getVatCode(self):
return self._vatCode
def setVatCode(self, vc):
self._vatCode= vc
vatCode= property(getVatCode, setVatCode, None, None)
def getVatName(self):
return self._vatName
def setVatName(self, vc):
self._vatName= vc
vatName= property(getVatName, setVatName, None, None)
def getVatRate(self):
return self._vatRate
def setVatRate(self, vc):
self._vatRate= vc
vatRate= property(getVatRate, setVatRate, None, None)
def getVatAccount(self):
return self._vatAccount
def setVatAccount(self, vc):
self._vatAccount= vc
vatAccount= property(getVatAccount, setVatAccount, None, None)
def getSalesAccount(self):
return self._salesAccount
def setSalesAccount(self, vc):
self._salesAccount= vc
salesAccount= property(getSalesAccount, setSalesAccount, None, None)
def getId(self):
return self._id
def setId(self, dummy):
self._id= None
id= property(getId, setId, None, None)
### VatList ###
class VatList(Model.Gobject.GList):
_tableName= 'vat'
_objectName= None
def __init__(self, database=None):
self._objectName= 'Vat'
self._vars= Vat._vars
self._init= Vat
self._database= database
if database != None:
self._connection= Database.DbAccess.DbAccess(database)
else:
self._connection= None
Model.Gobject.GList.__init__(self, self._connection)
def fixup(self, lists):
self.sort()
def getByCode(self, code):
"""
Return the object with vatCode == code<br>
'code:' vatCode, 1 digit char
'return:' The found object, None if not found
"""
for e in self:
if e._vatCode == code: return e
return None
#For some reason the Vat-objects are only referenced to by the vatCode
# id is only used to follow the Gobject template and to make a distinction
# between saved and new objects
def __repr__(self):
return 'VatList'
def _readFile(f):
vatL= []
all= f.readlines()
for a in all:
li= string.strip(a)
if len(li) < 3 or li[0] == '#' : continue
fields= string.split(a, ':') # raises ValueError
if len(a) < 10: continue
vr= Vat()
try:
#raises IndexError if not 5 fields
vr.vatCode= string.strip(fields[0])
vr.vatName= string.strip(fields[1])
vr.vatRate= string.strip(fields[2])
vr.vatAccount= string.strip(fields[3])
vr.salesAccount= string.strip(fields[4])
except IndexError:
raise(Model.Exceptions.FileError(_(
"Missing field in file of default VAT")))
vatL.append(vr)
return vatL
def readFile(fn):
try:
f= open(fn, 'r')
vL= _readFile(f)
f.close()
vatL= Model.Books.getList('vat')
for v in vL:
vatL.saveEntry(v)
except IOError:
raise(Model.Exceptions.FileError(_("Could not read file ") + fn))
except ValueError:
raise(Model.Exceptions.FileError(_("Syntax error in file ") + fn))
|
BackupTheBerlios/gryn-svn
|
trunk/Model/Vat.py
|
Vat.py
|
py
| 5,052 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5821379456
|
# Given a sequence of numbers, determine the greatest length of a monotonically increasing fragment
# of the sequence (that is, a fragment where every element is greater than the previous one).
n = int(input('Enter the amount of numbers '))
m = float(input('Enter the first number '))
prev = m
lenght = 1
max_lenght = 0
for k in range(2, n + 1):
m = float(input('Enter the next number '))
if m > prev:
lenght = lenght + 1
# A monotonically increasing fragment of the sequence has ended. Compare its length with the maximum length
else:
if lenght > max_lenght:
max_lenght = lenght
# A "new" fragment begins
lenght = 1
# The entered number becomes the previous one for the next number
prev = m
# Check the length of the last monotonically increasing fragment of the sequence
if lenght > max_lenght:
max_lenght = lenght
print(max_lenght)
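# Example (illustrative): for n = 6 and the inputs 1 3 5 2 4 6, the increasing runs are
# 1 3 5 and 2 4 6, both of length 3, so the program prints 3.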
|
GarryG6/PyProject
|
32.py
|
32.py
|
py
| 1,354 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
27264187100
|
"""
GenT2_Rulebase.py
Created 9/1/2022
"""
from juzzyPython.generalType2zSlices.system.GenT2Engine_Intersection import GenT2Engine_Intersection
from juzzyPython.generalType2zSlices.system.GenT2Engine_Union import GenT2Engine_Union
from juzzyPython.generalType2zSlices.system.GenT2_Rule import GenT2_Rule
from juzzyPython.intervalType2.system.IT2_Rulebase import IT2_Rulebase
from juzzyPython.generalType2zSlices.system.GenT2_Antecedent import GenT2_Antecedent
from typing import List
from collections import OrderedDict
from juzzyPython.testing.timeRecorder import timeDecorator
class GenT2_Rulebase():
"""
Class GenT2_Rulebase
Keeps track of rules and generates results
Parameters:
None
Functions:
addRule
addRules
getRules
getFuzzyLogicType
get_GenT2zEngine_Intersection
get_GenT2zEngineUnion
getOverallOutput
evaluateGetCentroid
evaluate
getIT2Rulebases
getRule
changeRule
removeRule
getNumberOfRules
containsRule
getRulesWithAntecedents
getImplicationMethod
setImplicationMethod
toString
"""
def __init__(self) -> None:
self.rules = []
self.outputs = []
self.DEBUG = False
self.CENTEROFSETS = 0
self.CENTROID = 1
self.implicationMethod = 1
self.PRODUCT = 0
self.MINIMUM = 1
self.gzEU = GenT2Engine_Union()
self.gzEI = GenT2Engine_Intersection()
def addRule(self,r: GenT2_Rule) -> None:
"""Add a new rule to the rule set"""
self.rules.append(r)
it = r.getConsequents()
for i in it:
o = i.getOutput()
if not o in self.outputs:
self.outputs.append(o)
def addRules(self,r: List[GenT2_Rule]) -> None:
"""Add multiple new rules to the rule set"""
for rule in r:
self.addRule(rule)
def getRules(self) -> List[GenT2_Rule]:
"""Return all the rules in the set"""
return self.rules
def getRule(self,ruleNum: int) -> GenT2_Rule:
"""Return a specific rule"""
return self.rules[ruleNum]
def getNumberOfRules(self) -> int:
"""Get the number of rules in the set"""
return len(self.rules)
def getFuzzyLogicType(self) -> int:
"""Returns the type of fuzzy logic that is employed.
return 0: type-1, 1: interval type-2, 2: zSlices based general type-2"""
return 2
def containsRule(self,rule: GenT2_Rule) -> bool:
"""Check if a rule in the ruleset"""
return rule in self.rules
def getGenT2zEngineIntersection(self) -> GenT2Engine_Intersection:
"""Return the intersection engine"""
return self.gzEI
def getGenT2zEngineUnion(self) -> GenT2Engine_Union:
"""Return the union engine"""
return self.gzEU
def removeRule(self,ruleNumber: int) -> None:
"""Remove a rule based on its index"""
del self.rules[ruleNumber]
def getImplicationMethod(self) -> str:
"""Return if the implication is product or minimum"""
if self.implicationMethod == self.PRODUCT:
return "product"
else:
return "minimum"
def setImplicationMethod(self,implicationMethod: int) -> None:
"""Sets the implication method, where by implication, we mean the implementation
of the AND logical connective between parts of the antecedent.
The desired implication method is applied for all rules."""
if implicationMethod == self.PRODUCT:
self.implicationMethod = self.PRODUCT
elif implicationMethod == self.MINIMUM:
self.implicationMethod = self.MINIMUM
else:
raise Exception("Only product (0) and minimum (1) implication is currently supported.")
def toString(self) -> str:
"""Convert the class to string"""
s = "General Type-2 Fuzzy Logic System with "+str(self.getNumberOfRules())+" rules:\n"
for i in range(self.getNumberOfRules()):
s += str(self.rules[i].toString())+"\n"
return s
def getOverallOutput(self) -> dict:
"""Return the overall output of the rules"""
returnValue = OrderedDict()
for r in range(len(self.rules)):
temp = self.rules[r].getRawOutput()
for o in self.outputs:
if r == 0:
returnValue[o] = temp[o]
else:
returnValue[o] = self.gzEU.getUnion(returnValue.get(o),temp.get(o))
return returnValue
def evaluateGetCentroid(self,typeReductionType: int) -> dict:
"""Returns the output of the FLS after type-reduction, i.e. the centroid.
param: typeReductionType
return: A TreeMap where Output is used as key and the value is an Object[]
where Object[0] is a Tuple[] (the centroids, one per zLevel) and Object[1] is a Double holding the associated yValues for the centroids. If not rule fired for the given input(s),
then null is returned as an Object[]."""
returnValue = OrderedDict()
rbsIT2 = self.getIT2Rulebases()
zValues = self.rules[0].getAntecedents()[0].getSet().getZValues()
for i in range(len(rbsIT2)):
temp = rbsIT2[i].evaluateGetCentroid(typeReductionType)
for o in temp.keys():
if i == 0:
returnValue[o] = [[],[]]
returnValue[o][0].append(temp[o][0])
returnValue[o][1].append(zValues[i])
return returnValue
def evaluate(self,typeReductionType: int) -> dict:
"""The current evaluate function is functional but inefficient. It creates an IT2
version of all the rules in the rulebase and computes each IT2 rule separately...
param typeReductionType: 0: Center Of Sets, 1: Centroid
param discretizationLevel: The discretization level on the xAxis"""
returnValue = OrderedDict()
rbsIT2 = self.getIT2Rulebases()
rawOutputValues = []
for i in range(len(rbsIT2)):
rawOutputValues.append(rbsIT2[i].evaluate(typeReductionType))
zValues = self.rules[0].getAntecedents()[0].getSet().getZValues()
for o in self.outputs:
i=0
numerator = 0.0
denominator = 0.0
for outputValue in rawOutputValues:
numerator += outputValue[o] * zValues[i]
denominator += zValues[i]
i+= 1
returnValue[o] = numerator/denominator
return returnValue
def getIT2Rulebases(self) -> List[IT2_Rulebase]:
"""Returns the whole zSlices based rulebase as a series of interval type-2
rule bases (one per zLevel) which can then be computed in parallel.
param typeReductionMethod: The type-reduction method to be used at the IT2 level
0: Center Of Sets, 1: Centroid.
param discretizationLevelXAxis: The number of discretizations to be used at the IT2 level."""
rbs = [0] * self.rules[0].getAntecedents()[0].getSet().getNumberOfSlices()
for i in range(len(rbs)):
rbs[i] = IT2_Rulebase()
for currentRule in range(self.getNumberOfRules()):
rbs[i].addRule(self.rules[currentRule].getRuleasIT2Rules()[i])
rbs[i].setImplicationMethod(self.implicationMethod)
return rbs
def getRulesWithAntecedents(self,antecedents: List[GenT2_Antecedent]) -> List[GenT2_Rule]:
""" Returns all rules with a matching (i.e. equal) set of antecedents."""
matches = []
for i in range(len(self.rules)):
if self.rules[i].getAntecedents()==antecedents:
matches.append(self.rules[i])
return matches
|
LUCIDresearch/JuzzyPython
|
juzzyPython/generalType2zSlices/system/GenT2_Rulebase.py
|
GenT2_Rulebase.py
|
py
| 7,915 |
python
|
en
|
code
| 4 |
github-code
|
6
|
32661723209
|
from decimal import Decimal
from fractions import Fraction
from typing import Generator
from numeric_methods.language import TRANSLATE
from numeric_methods.language.docs.one_variable import SECANT_METHOD_DOCS
from numeric_methods.mathematics import compare, convert, widest_type
NUMBER = Decimal | float | Fraction
@TRANSLATE.documentation(SECANT_METHOD_DOCS)
def secant_method(function, x_prev: NUMBER, x: NUMBER, epsilon: NUMBER) -> Generator[tuple[NUMBER] | NUMBER, None, None]:
# Type normalization
Number = widest_type(x_prev, x, epsilon)
x_prev = convert(x_prev, Number)
x = convert(x, Number)
epsilon = convert(epsilon, Number)
step = 1
next_x = x - (x - x_prev) * function(x) / (function(x) - function(x_prev))
yield step, next_x
while not compare(abs(next_x - x), "<", epsilon):
step += 1
x_prev = x
x = next_x
next_x = x - (x - x_prev) * function(x) / (function(x) - function(x_prev))
yield step, next_x
yield next_x
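# Minimal usage sketch (illustrative, not part of the original module): approximating sqrt(2)
# as the positive root of f(x) = x*x - 2, starting from x_prev=1.0 and x=2.0. Each yielded
# tuple is (step, next_x); the final bare value is the accepted root.
if __name__ == "__main__":
    for result in secant_method(lambda x: x * x - 2, 1.0, 2.0, 1e-10):
        print(result)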
|
helltraitor/numeric-methods
|
numeric_methods/one_variable/secant_method.py
|
secant_method.py
|
py
| 1,013 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15622117384
|
from aws_cdk import (
aws_ec2 as ec2,
aws_ecs as ecs,
cdk,
)
class GhostOnEcsStack(cdk.Stack):
def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Create VPC and Fargate Cluster
# NOTE: Limit AZs to avoid reaching resource quotas
vpc = ec2.VpcNetwork(
self, "MyGhostOnEcsVpc",
max_a_zs=2
)
cluster = ecs.Cluster(
self, 'Ec2GhostOnEcsCluster',
vpc=vpc
)
fargate_service = ecs.LoadBalancedFargateService(
self, "FargateGhostOnEcsService",
cluster=cluster,
image=ecs.ContainerImage.from_registry("ghost")
)
cdk.CfnOutput(
self, "LoadBalancerDNS",
value=fargate_service.load_balancer.dns_name
)
|
samkeen/aws-cdk-python-ecs-fargate
|
ghost_on_ecs/ghost_on_ecs_stack.py
|
ghost_on_ecs_stack.py
|
py
| 866 |
python
|
en
|
code
| 6 |
github-code
|
6
|
29963241712
|
import os
from BadParser import BadParser
proxies = []
class ProxyWorker(object):
def handler(path):
if os.path.isfile(path=path) != True:
return False
if os.path.splitext(path)[1] != '.txt':
return False
f = open(path, 'r', encoding='utf-8')
line = f.readline().replace('\\n', '')
line = line.strip()
while line:
proxies.append(line)
line = f.readline().replace('\\n', '')
line = line.strip()
f.close()
return proxies
def AutoGrabber(pages):
print('\n')
prox = BadParser(3, pages)
data = prox.Grab()
if ProxyWorker.FileCreating(data) is False:
print('Can\'t save proxies.\n')
return data
def DirectoryChecker():
try:
if not os.path.exists('./proxies/'):
os.mkdir('./proxies/')
return True
except:
print('Can\'t save proxies.\n')
return False
def FileCreating(data):
if ProxyWorker.DirectoryChecker() is False:
return False
f = open('./proxies/last_proxies.txt', 'w', encoding='utf-8')
for i in data:
f.write(i + '\n')
f.close()
|
icYFTL/ShadowServants-Brute-Python
|
sources/ProxyWorker.py
|
ProxyWorker.py
|
py
| 1,267 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5508352220
|
import random, os, shutil, yaml, gzip
import pandas as pd
import numpy as np
import prepare.configs as configs
from google.cloud import storage
import pickle
import time
storage_client = storage.Client()
bucket = storage_client.bucket(configs.bucketName)
def encodeConfigs(_confs):
return [
_confs['sim time settings']['time step'],
_confs['sim time settings']['total time'],
_confs['sim time settings']['sampling rate'],
int(_confs['change Na mem']['event happens']),
_confs['change Na mem']['change start'],
_confs['change Na mem']['change finish'],
_confs['change Na mem']['change rate'],
_confs['change Na mem']['multiplier'],
int(_confs['change K mem']['event happens']),
_confs['change K mem']['change start'],
_confs['change K mem']['change finish'],
_confs['change K mem']['change rate'],
_confs['change K mem']['multiplier']
]
def generateDataset():
srcFolder = "storage/processed"
destFolder = "storage/ready"
inputMaxCellsNumber = 250
retryCounter = 0
# Fetch available simulation folders
runsIdxs = []
for blob in bucket.list_blobs(prefix=srcFolder):
folderName = blob.name.split("/")
if (folderName[2] not in runsIdxs and folderName[2] != ""):
runsIdxs.append(folderName[2])
# Fetch simulation folders already processed
with open("./prepare/processed.txt","r") as f:
processedRunsIdxs = f.readlines()
processedRunsIdxs = [folder.strip() for folder in processedRunsIdxs]
availableFolders = []
for runIdx in runsIdxs:
if (runIdx not in processedRunsIdxs):
availableFolders.append(runIdx)
print("[GENERATE DATASET] Folders {} | Processed {} | Left {}".format(len(runsIdxs), len(processedRunsIdxs), len(availableFolders)))
for i, runFolderIdx in enumerate(availableFolders):
# Keep track of the progress
if (i in [int(len(availableFolders)*0.25), int(len(availableFolders)*0.5), int(len(availableFolders)*0.75)]):
print(">> {} %".format(int(i / len(availableFolders) * 100)))
try:
data = pd.read_csv('gs://{}/{}/{}/simulation.csv'.format(configs.bucketName, srcFolder, runFolderIdx))
print(">> {} | {}".format(runFolderIdx, data['folderName'][0]))
# 1. Download Sim Config File and encode It
fileDest = '/tmp/rawSimConfig.yml'
bucket.blob('storage/raw/{}/configs.yml'.format(data['folderName'][0])).download_to_filename(fileDest)
with open(fileDest, 'r') as stream:
simConfigRaw = yaml.safe_load(stream)
simConfigsEncoded = np.asarray(encodeConfigs(simConfigRaw))
simConfigsEncoded = np.append(simConfigsEncoded, [0]) # Add timestamp information
# 2. Download Sim.betse File and open it ( to extract Membrane permeabilities values)
fileDest = '/tmp/sim_1.betse.gz'
bucket.blob('storage/raw/{}/sim_1.betse.gz'.format(data['folderName'][0])).download_to_filename(fileDest)
with gzip.open(fileDest, "rb") as f:
sim, cells, params = pickle.load(f)
# 3. Generate training examples files. One for each simulation timestep using sim config, sim.betse & vmems
for timestampIdx in range(len(sim.time)):
inputVmem = np.asarray(data[data['timestamp'] == timestampIdx]['vmem'])
outputVmem = np.asarray(data[data['timestamp'] == timestampIdx + 1]['vmem'])
# Update timestamp information
simConfigsEncoded[simConfigsEncoded.shape[0] - 1] = timestampIdx
# 1. Compute cells perms values from cells membranes perms values. From {3, 6} values to 1 (average)
cellsPopulationSize = inputVmem.shape[0]
cells_mems = [[] for cell in range(cellsPopulationSize)]
for memUniqueIdx, cellIdx in enumerate(cells.mem_to_cells):
cells_mems[cellIdx].append(sim.dd_time[timestampIdx][:, memUniqueIdx])
cells_permeabilities = []
for cellMembranes in cells_mems:
cells_permeabilities.append(np.mean(cellMembranes, axis=0))
cells_permeabilities = np.asarray(cells_permeabilities) # N, 4 # K, Na, M-, Proteins-
# concat Vmem values with perms values
inputVmem = np.concatenate((inputVmem.reshape((-1, 1)), cells_permeabilities), axis=1) # N, 5
# concat cells centers to input vector
inputVmem = np.concatenate((inputVmem, cells.cell_centres), axis=1) # N, 7
# Concat env concentrations
env_cc = np.transpose(sim.cc_env_time[timestampIdx])[ : inputVmem.shape[0]] # get only same shape as inputVmem since env cc all the same
inputVmem = np.concatenate((inputVmem, env_cc), axis=1) # N, 11
# Concat cytosilic concentrations
cytosolic_cc = np.transpose(sim.cc_time[timestampIdx])
inputVmem = np.concatenate((inputVmem, cytosolic_cc), axis=1) # N, 15
#Pad Input
'''
TODO:
- Not pad with 0 since it is a possible Vmem value.
'''
if (inputVmem.shape[0] < inputMaxCellsNumber):
inputVmemPad = np.zeros((inputMaxCellsNumber, inputVmem.shape[1]))
inputVmemPad[:inputVmem.shape[0]] = inputVmem
inputVmem = inputVmemPad
outputVmemPad = np.zeros((inputMaxCellsNumber))
outputVmemPad[:outputVmem.shape[0]] = outputVmem
outputVmem = outputVmemPad
#Discard Input
elif (inputVmem.shape[0] > inputMaxCellsNumber):
print("<<ATTENTION>> Found Input with Numbers of cells higher that current Max: {} > {}".format(inputVmem.shape[0], inputMaxCellsNumber))
continue
# Discard example if data
# - Vmem < - 100 || > 100
# - K_env, Na_env, M_env, X_env, K_cc, Na_cc, M_cc, X_cc > 1000
#
if (np.any(inputVmem[:, 0] < -100) or np.any(inputVmem[:, 0] > 100)):
print("Discard example, Vmem {}".format(np.max(np.abs(inputVmem))))
continue
if (np.any(inputVmem[: , 7:] > 1000)):
print("Discard example, Concentration {}".format(np.max(inputVmem[: , 7:])))
continue
if (np.any(outputVmem[:, 0] < -100) or np.any(outputVmem[:, 0] > 100)):
print("Discard example, Vmem Output {}".format(np.max(np.abs(outputVmem))))
continue
#print("inputVmem length: {}".format(inputVmem.shape[0]))
#print("Configs length: {}".format(configs.shape[0]))
#print("outputVmem length: {}".format(outputVmem.shape[0]))
filePath = '/tmp/example.npy'
np.save(filePath, np.asarray([
inputVmem,
simConfigsEncoded,
outputVmem
], dtype="object"))
blob = bucket.blob('{}/{}/{}.npy'.format(destFolder, runFolderIdx, timestampIdx))
blob.upload_from_filename(filePath)
retryCounter = 0
with open("./prepare/processed.txt","a+") as f:
f.write(runFolderIdx + "\n")
# If for some reason processing fails. Handle it. It will not save on the processed.txt allowing to be processed at the next restart
except:
print("Handle Excpetion | Sleeping for {}".format(2 ** retryCounter))
time.sleep(2 ** retryCounter) # sleep since may be due to too many requests
retryCounter += 1
continue
|
R-Stefano/betse-ml
|
prepare/utils.py
|
utils.py
|
py
| 7,976 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73730161788
|
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from dae.dae import DAE
from beta_vae.beta_vae import BetaVAE
from history import History
# hyperparameters
num_epochs = 100
batch_size = 128
lr = 1e-4
beta = 4
save_iter = 20
shape = (28, 28)
n_obs = shape[0] * shape[1]
# create DAE and ß-VAE and their training history
dae = DAE(n_obs, num_epochs, batch_size, 1e-3, save_iter, shape)
beta_vae = BetaVAE(n_obs, num_epochs, batch_size, 1e-4, beta, save_iter, shape)
history = History()
# fill autoencoder training history with examples
print('Filling history...', end='', flush=True)
transformation = transforms.Compose([
transforms.ColorJitter(),
transforms.ToTensor()
])
dataset = MNIST('data', transform=transformation)
dataloader = DataLoader(dataset, batch_size=1, shuffle=True)
for data in dataloader:
img, _ = data
img = img.view(img.size(0), -1).numpy().tolist()
history.store(img)
print('DONE')
# train DAE
dae.train(history)
# train ß-VAE
beta_vae.train(history, dae)
|
BCHoagland/DARLA
|
train.py
|
train.py
|
py
| 1,115 |
python
|
en
|
code
| 8 |
github-code
|
6
|
30354806111
|
import sys
# Enthought library imports
from pyface.qt import QtCore, QtGui
# Local imports
from tvtk.util.gradient_editor import (
ColorControlPoint, ChannelBase, FunctionControl, GradientEditorWidget
)
##########################################################################
# `QGradientControl` class.
##########################################################################
class QGradientControl(QtGui.QWidget):
"""Widget which displays the gradient represented by an GradientTable
object (and does nothing beyond that)"""
def __init__(self, parent=None, gradient_table=None, width=100, height=100):
"""master: panel in which to place the control. GradientTable is the
Table to which to attach."""
super(QGradientControl, self).__init__(parent=parent)
self.resize(width, height)
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
self.width = width
self.height = height
self.gradient_table = gradient_table
assert gradient_table.size == width
self.setMinimumSize(100, 50)
# currently only able to use gradient tables in the same size as the
# canvas width
def paintEvent(self, event):
"""Paint handler."""
super(QGradientControl, self).paintEvent(event)
painter = QtGui.QPainter(self)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0) )
painter.setBrush(brush)
painter.setBackgroundMode(QtCore.Qt.OpaqueMode)
sz = self.size()
width, height = sz.width(), sz.height()
xform = self.gradient_table.scaling_function
start_y = 0
end_y = height
if xform:
# if a scaling transformation is provided, paint the original
# gradient under the scaled gradient.
start_y = height/2
# paint the original gradient as it stands in the table.
color = QtGui.QColor()
for x in range(width):
(r,g,b,a) = self.gradient_table.get_pos_rgba_color_lerped(float(x)/(width-1))
color.setRgb(int(255*r), int(255*g), int(255*b))
painter.setPen(color)
brush.setColor(color)
painter.drawLine(x, start_y, x, end_y)
if xform:
# paint the scaled gradient below
end_y = start_y
start_y = 0
for x in range(width):
f = float(x)/(width-1)
(r,g,b,a) = self.gradient_table.get_pos_rgba_color_lerped(xform(f))
                color.setRgb(int(255*r), int(255*g), int(255*b))
brush.setColor(color)
painter.drawLine(x, start_y, x, end_y)
##########################################################################
# `Channel` class.
##########################################################################
class Channel(ChannelBase):
def paint(self, painter):
"""Paint current channel into Canvas (a canvas of a function control
object).
Contents of the canvas are not deleted prior to painting,
so more than one channel can be painted into the same canvas.
"""
table = self.control.table
# only control points which are active for the current channel
# are to be painted. filter them out.
relevant_control_points = [
x for x in table.control_points if self.name in x.active_channels
]
# lines between control points
color = QtGui.QColor(*self.rgb_color)
painter.setPen(color)
brush = QtGui.QBrush(color)
painter.setBrush(brush)
painter.setBackgroundMode(QtCore.Qt.OpaqueMode)
for k in range( len(relevant_control_points) - 1 ):
cur_point = relevant_control_points[k]
next_point = relevant_control_points[1+k]
painter.drawLine(self.get_pos_index(cur_point.pos),
self.get_value_index(cur_point.color),
self.get_pos_index(next_point.pos),
self.get_value_index(next_point.color))
        # control points themselves.
color = QtCore.Qt.black
painter.setPen(color)
for control_point in relevant_control_points:
x = self.get_pos_index( control_point.pos )
y = self.get_value_index( control_point.color )
radius=6
#print(x,y)
painter.drawRect(x-(radius/2.0), y-(radius/2.0), radius, radius)
painter.drawRect(100,80,6,6)
##########################################################################
# `QFunctionControl` class.
##########################################################################
class QFunctionControl(QtGui.QWidget, FunctionControl):
"""Widget which displays a rectangular regions on which hue, sat, val
or rgb values can be modified. An function control can have one or more
attached color channels."""
# Radius around a control point center in which we'd still count a
# click as "clicked the control point"
control_pt_click_tolerance = 4
ChannelFactory = Channel
def __init__(self, master=None, gradient_table=None, color_space=None,
width=100, height=100):
"""Initialize a function control widget on tkframe master.
Parameters:
-----------
master: The master widget. Note that this widget *must* have
the methods specified in the `AbstractGradientEditorWidget`
interface.
on_table_changed: Callback function taking a bool argument of meaning
'FinalUpdate'. FinalUpdate is true if a control point is dropped,
created or removed and false if the update is due to a control point
        currently being dragged (but not yet dropped)
color_space: String which specifies the channels painted on this control.
May be any combination of h,s,v,r,g,b,a in which each channel
occurs only once.
set_status_text: a callback used to set the status text
when using the editor.
"""
kw = dict(
master=master, gradient_table=gradient_table,
color_space=color_space, width=width,
height=height
)
super().__init__(**kw)
self.resize(width, height)
self.setMinimumSize(100, 50)
######################################################################
# Qt event handlers.
######################################################################
def paintEvent(self, event):
super(QFunctionControl, self).paintEvent(event)
painter = QtGui.QPainter(self)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
painter.setBrush(brush)
width, height = self.size().width(), self.size().height()
painter.drawRect(0, 0, width, height)
for channel in self.channels:
channel.paint(painter)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.cur_drag = self.find_control_point(event.x(), event.y())
super(QFunctionControl, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
if self.cur_drag:
self.table_config_changed( final_update = True )
self.cur_drag = None
elif event.button() == QtCore.Qt.RightButton:
# toggle control point. check if there is a control point
# under the mouse. If yes, delete it, if not, create one
# at that point.
cur_control_point = self.find_control_point(event.x(), None)
if cur_control_point:
# found a marker at the click position. delete it and return,
# unless it is a fixed marker (at pos 0 or 1)..
if ( cur_control_point[1].fixed ):
# in this case do nothing. Fixed markers cannot be deleted.
return
self.table.control_points.remove(cur_control_point[1])
self.table_config_changed(final_update=True)
else:
# since there was no marker to remove at the point, we assume
# that we should place one there
new_control_point = ColorControlPoint(active_channels=self.active_channels_string)
new_control_point.set_pos(self.channels[0].get_index_pos(event.x()))
# set new control point color to the color currently present
# at its designated position
new_control_point.color = self.table.get_pos_color(new_control_point.pos)
self.table.insert_control_point(new_control_point)
self.table_config_changed(final_update = True)
if isinstance(event, QtGui.QMouseEvent):
super(QFunctionControl, self).mouseReleaseEvent(event)
def leaveEvent(self, event):
if self.cur_drag:
self.table_config_changed( final_update = True )
self.cur_drag = None
super(QFunctionControl, self).leaveEvent(event)
def resizeEvent(self, event):
sz = self.size()
self.width = sz.width()
self.height = sz.height()
def mouseMoveEvent(self, event):
# currently dragging a control point?
channel = None
point = None
if self.cur_drag:
channel = self.cur_drag[0]
point = self.cur_drag[1]
if ( not point.fixed ):
point.set_pos( channel.get_index_pos(event.x()) )
point.activate_channels( self.active_channels_string )
self.table.sort_control_points()
channel.set_value_index( point.color, event.y() )
self.table_config_changed( final_update = False )
screenX = event.x()
screenY = event.y()
width, height = self.size().width(), self.size().height()
master = self.master
s1, s2 = master.get_table_range()
if channel is not None:
name = self.text_map[channel.name]
pos = s1 + (s2 - s1)*point.pos
val = channel.get_value(point.color)
txt = '%s: (%.3f, %.3f)'%(name, pos, val)
else:
x = s1 + (s2 - s1)*float(screenX)/(width-1)
y = 1.0 - float(screenY)/(height-1)
txt = "position: (%.3f, %.3f)"%(x, y)
self.master.set_status_text(txt)
##########################################################################
# `QGradientEditorWidget` class.
##########################################################################
class QGradientEditorWidget(QtGui.QWidget, GradientEditorWidget):
"""A Gradient Editor widget that can be used anywhere.
"""
def __init__(self, master, vtk_table, on_change_color_table=None,
colors=None):
"""
Parameters:
-----------
vtk_table : the `tvtk.LookupTable` or `tvtk.VolumeProperty` object
to set.
on_change_color_table : A callback called when the color table
changes.
colors : list of 'rgb', 'hsv', 'h', 's', 'v', 'a'
(Default : ['rgb', 'hsv', 'a'])
'rgb' creates one panel to edit Red, Green and Blue
colors.
'hsv' creates one panel to edit Hue, Saturation and
Value.
'h', 's', 'v', 'r', 'g', 'b', 'a' separately
specified creates different panels for each.
"""
kw = dict(master=master, vtk_table=vtk_table,
on_change_color_table=on_change_color_table,
colors=colors)
super().__init__(**kw)
gradient_preview_width = self.gradient_preview_width
gradient_preview_height = self.gradient_preview_height
channel_function_width = self.channel_function_width
channel_function_height = self.channel_function_height
# set up all the panels in a grid
# 6x2 size: 6 rows, 2 columns...
grid = QtGui.QGridLayout()
grid.setColumnStretch(0, 0)
grid.setColumnStretch(1, 1)
# "Gradient Viewer" panel, in position (0,1) for sizer
self.gradient_control = QGradientControl(self,
self.gradient_table,
gradient_preview_width,
gradient_preview_height)
self.setToolTip('Right click for menu')
grid.addWidget(QtGui.QLabel("", self), 0, 0)
grid.addWidget(self.gradient_control, 0, 1)
# Setup the context menu to fire for the Gradient control alone.
gc = self.gradient_control
gc.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
gc.customContextMenuRequested.connect(self.contextMenuEventOnGradient)
# Add the function controls:
function_controls = self.function_controls
editor_data = self.editor_data
row = 1
for color in self.colors:
data = editor_data[color]
control = QFunctionControl(self, self.gradient_table, color,
channel_function_width,
channel_function_height)
txt = data[0] + self.tooltip_text
control.setToolTip(txt)
# Add name of editor (to left side of editor)
grid.addWidget(QtGui.QLabel(data[1], self), row, 0)
# Add the "RGB" control point editor
grid.addWidget(control, row, 1)
function_controls.append(control)
row += 1
# The status text.
self.text = QtGui.QLabel('status', self)
grid.addWidget(self.text, row, 0, 1, 2)
self.setLayout(grid)
self.show()
######################################################################
# `GradientEditorWidget` interface.
######################################################################
def set_status_text(self, msg):
self.text.setText(msg)
######################################################################
# Qt event methods.
######################################################################
def contextMenuEventOnGradient(self, pos):
menu = QtGui.QMenu(self)
saveAction = menu.addAction("Save as")
loadAction = menu.addAction("Load")
action = menu.exec_(self.mapToGlobal(pos))
if action == saveAction:
self.on_save()
elif action == loadAction:
self.on_load()
def on_save(self, event=None):
"""
Open "Save" dialog, write lookuptable to 3 files: ``*.lut``
(lookuptable) ``*.grad`` (gradient table for use with this program),
and ``*.jpg`` (image of the gradient)
"""
wildcard = "Gradient Files (*.grad);;All Files (*.*)"
filename, filter = QtGui.QFileDialog.getSaveFileName(self,
"Save LUT to...",
'',
wildcard)
if filename:
self.save(filename)
def on_load(self, event=None):
"""
Load a ``*.grad`` lookuptable file.
"""
wildcard = "Gradient Files (*.grad);;All Files (*.*)"
filename, filter = QtGui.QFileDialog.getOpenFileName(self,
"Open gradient file...",
'',
wildcard)
if filename:
self.load(filename)
##########################################################################
# `QGradientEditor` class.
##########################################################################
class QGradientEditor(QtGui.QMainWindow):
""" QMainWindow that displays the gradient editor window,
i.e. the thing that contains the gradient display, the function
controls and the buttons.
"""
def __init__(self, vtk_table, on_change_color_table=None, colors=None):
"""Initialize the gradient editor window.
Parameters
----------
vtk_table: Instance of vtkLookupTable, designating the table which is
to be edited.
on_change_color_table: Callback function taking no arguments. Called
when the color table was changed and rendering is
requested.
"""
super(QGradientEditor, self).__init__()
self.setWindowTitle("Color Gradient Editor")
self.widget = QGradientEditorWidget(
master=self, vtk_table=vtk_table,
on_change_color_table=on_change_color_table,
colors=colors
)
self.setCentralWidget(self.widget)
self.resize(300, 500)
self.statusBar()
## Set up the MenuBar
menu = self.menuBar()
file_menu = menu.addMenu("&File")
file_action = QtGui.QAction("&Save", self)
file_action.setStatusTip("Save CTF")
file_action.triggered.connect(self.widget.on_save)
file_menu.addAction(file_action)
load_action = QtGui.QAction("&Load", self)
load_action.setStatusTip("Load CTF")
load_action.triggered.connect(self.widget.on_load)
file_menu.addAction(load_action)
quit_action = QtGui.QAction("&Quit", self)
quit_action.setStatusTip("Quit application")
quit_action.triggered.connect(QtGui.QApplication.instance().quit)
file_menu.addAction(quit_action)
help_menu = menu.addMenu("&Help")
action = QtGui.QAction("&Help", self)
action.setStatusTip("Help")
action.triggered.connect(self.on_help)
help_menu.addAction(action)
action = QtGui.QAction("&About", self)
action.setStatusTip("About application")
action.triggered.connect(self.on_about)
help_menu.addAction(action)
def on_help(self, event=None):
""" Help defining the mouse interactions """
message = "Right click to add control points. Left click to move control points"
QtGui.QMessageBox.information(self, 'Help', message)
def on_about(self, event=None):
""" Who wrote the program?"""
message = 'tk Gradient Editor for MayaVi1: Gerald Knizia ([email protected])\n'\
'wxPython port: Pete Schmitt ([email protected])\n'\
'Qt port: Prabhu Ramachandran\n'\
'Enhanced for Mayavi2: Prabhu Ramachandran'
QtGui.QMessageBox.information(self, 'About gradient editor', message)
def main():
from tvtk.util.traitsui_gradient_editor import make_test_table
import sys
table, ctf, otf = make_test_table(lut=False)
# the actual gradient editor code.
def on_color_table_changed():
"""If we had a vtk window running, update it here"""
# print("Update Render Window")
pass
app = QtGui.QApplication.instance()
editor = QGradientEditor(table,
on_color_table_changed,
colors=['rgb', 'a', 'h', 's', 'v'],
)
editor.setWindowTitle("Gradient editor")
editor.show()
sys.exit(app.exec_())
##########################################################################
# Test application.
##########################################################################
if __name__ == "__main__":
main()
|
enthought/mayavi
|
tvtk/util/qt_gradient_editor.py
|
qt_gradient_editor.py
|
py
| 19,600 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
10609649346
|
from createProtocol import ARP, EthernetII
from parseProtocol import Parser
from optparse import OptionParser
from helper import subnet_creator, get_mac_address, get_ip_address
from rich.progress import track
from time import sleep
import socket
import netifaces
import threading
def get_user_parameters():
parse_options = OptionParser()
parse_options.add_option("-n", "--network", dest="sub_network", help="Enter Network Address \n[+] Example : 192.168.1.0/24")
parse_options.add_option("-i", "--interface", dest="interface", help="Enter Your Interface")
options, _ = parse_options.parse_args()
interfaces = netifaces.interfaces()
if not options.interface and not options.sub_network:
print("\nPlease enter parameters. You can use '--help' for parameters.")
if options.interface not in interfaces:
print("\nThere is no such interface.")
if not options.sub_network:
print("\nEnter network address.")
return options
def send_packet(interface, ip):
ethernet = EthernetII(src_mac=get_mac_address(interface))
arp = ARP(dst_mac="00:00:00:00:00:00", src_mac=get_mac_address(interface), src_ip=get_ip_address(interface))
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0806))
sock.bind((interface, 0x0806))
arp._dst_ip = ip
packet = ethernet() + arp()
sock.send(packet)
def receive_packet(interface):
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0806))
sock.bind((interface, 0x0806))
parser = Parser()
while True:
data, _ = sock.recvfrom(65535)
_, _, _, otherData = parser.ethernetFrame(data)
opcode, dst_mac, dst_ip, src_mac, src_ip = parser.arp_frame(otherData)
if opcode == 2:
parser.print_frame(dst_mac=dst_mac, dst_ip=dst_ip)
def main():
user_params = get_user_parameters()
user_network = user_params.sub_network
user_interface = user_params.interface
ip_list = subnet_creator(user_network)
receive_thread = threading.Thread(target=receive_packet, args=(user_interface,), daemon=True)
receive_thread.start()
sleep(1.5)
for ip in track(ip_list, "Sending Packet => "):
send_packet(user_interface,ip)
if __name__ == "__main__":
main()
|
oguzhan-kurt/Network-Scanner
|
main.py
|
main.py
|
py
| 2,295 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17212386207
|
def report(dtgen, predicts, metrics, total_time, plus=""):
"""Calculate and organize metrics and predicts informations"""
e_corpus = "\n".join([
f"Total test sentences: {dtgen.size['test']}",
f"{plus}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics (before):",
f"Character Error Rate: {metrics[0][0]:.8f}",
f"Word Error Rate: {metrics[0][1]:.8f}",
f"Sequence Error Rate: {metrics[0][2]:.8f}\n",
f"Metrics (after):",
f"Character Error Rate: {metrics[1][0]:.8f}",
f"Word Error Rate: {metrics[1][1]:.8f}",
f"Sequence Error Rate: {metrics[1][2]:.8f}"
])
p_corpus = []
for i in range(dtgen.size['test']):
p_corpus.append(f"GT {dtgen.dataset['test']['gt'][i]}")
p_corpus.append(f"DT {dtgen.dataset['test']['dt'][i]}")
p_corpus.append(f"PD {predicts[i]}\n")
return (p_corpus, e_corpus)
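# Hedged usage sketch (added for illustration): dtgen is assumed to expose size['test'] and
# dataset['test']['gt'|'dt'] as in the surrounding project; the toy values below are invented.
if __name__ == "__main__":
    from types import SimpleNamespace
    dtgen = SimpleNamespace(size={"test": 1},
                            dataset={"test": {"gt": ["ground truth"], "dt": ["decoded text"]}})
    metrics = [(0.10, 0.20, 0.30), (0.05, 0.10, 0.20)]
    p_corpus, e_corpus = report(dtgen, ["predicted text"], metrics, total_time=1.5)
    print(e_corpus)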
|
u1956242/GEC
|
src/lib/utils/report.py
|
report.py
|
py
| 1,002 |
python
|
en
|
code
| 0 |
github-code
|
6
|
47036004516
|
import time
from sqlalchemy import Column, Integer, String, Float, Boolean, ForeignKey
import sqlalchemy.types as types
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import func
from sqlalchemy import or_, and_, desc
from marshmallow import Schema, fields
from database import Base
class KycRequestSchema(Schema):
date = fields.Float()
token = fields.String()
greenid_verification_id = fields.String()
status = fields.String()
class KycRequest(Base):
__tablename__ = 'kyc_requests'
id = Column(Integer, primary_key=True)
date = Column(Float, nullable=False, unique=False)
token = Column(String, nullable=False, unique=True)
greenid_verification_id = Column(String, nullable=False, unique=True)
status = Column(String )
def __init__(self, token, greenid_verification_id):
self.date = time.time()
self.token = token
self.greenid_verification_id = greenid_verification_id
self.status = "created"
@classmethod
def count(cls, session):
return session.query(cls).count()
@classmethod
def from_token(cls, session, token):
return session.query(cls).filter(cls.token == token).first()
def __repr__(self):
return '<KycRequest %r>' % (self.token)
def to_json(self):
schema = KycRequestSchema()
return schema.dump(self).data
|
djpnewton/zap-merchant
|
models.py
|
models.py
|
py
| 1,387 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19809314779
|
import os
from PIL import Image
from typing import Dict, List
from preprocessing.image_metadata import ImageMetadata
class ImagesReader:
def __init__(self, base_path: str) -> None:
self.__basePath = base_path
def read_train_images(self) -> Dict[str, List[ImageMetadata]]:
images = {}
dataset_dir = os.path.join(self.__basePath, 'train')
for root, dirs, files in os.walk(dataset_dir, topdown=False):
if root not in [self.__basePath, dataset_dir]:
files = [img for img in files if img.endswith('.jpg') or img.endswith('.JPEG')]
class_id = self.__get_class_id__(root)
images[class_id] = []
for name in files:
image = self.__get_image_metadata__(os.path.join(root, name))
images[class_id].append(image)
return images
def read_test_images(self) -> List[ImageMetadata]:
images = []
dataset_dir = os.path.join(self.__basePath, 'test')
files = [img for img in os.listdir(dataset_dir) if img.endswith('.jpg') or img.endswith('.JPEG')]
for name in files:
image = self.__get_image_metadata__(os.path.join(dataset_dir, name))
images.append(image)
return images
@staticmethod
def __get_image_metadata__(image_path: str) -> ImageMetadata:
image = Image.open(image_path)
return ImageMetadata(image.filename, (image.width, image.height), image.layers, image.mode)
@staticmethod
def __get_class_id__(dir_path: str) -> str:
class_id = dir_path.split(os.sep)[-1].split('.')[0]
return class_id
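# Hedged usage sketch (added for illustration; the base path below is a placeholder, and a
# 'train/<class_dir>' plus 'test' folder layout is assumed, mirroring what this reader expects):
if __name__ == "__main__":
    reader = ImagesReader("/data/caltech_256")
    train_images = reader.read_train_images()
    test_images = reader.read_test_images()
    print("train classes:", len(train_images), "test images:", len(test_images))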
|
sachokFoX/caltech_256
|
code/preprocessing/images_reader.py
|
images_reader.py
|
py
| 1,666 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71913785148
|
import qrcode as qr
from PIL import Image
q=qr.QRCode(version=1,
error_correction=qr.constants.ERROR_CORRECT_H,
box_size=10,
border=4,)
q.add_data("https://youtu.be/NaQ_4ZvCbOE")
q.make(fit=True)
img= q.make_image(fill_color='darkblue', back_color='steelblue')
img.save("x.png")
|
Xander1540/Python-Projects
|
QRcode/QRcode.py
|
QRcode.py
|
py
| 316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3037458100
|
import sys
sys.setrecursionlimit(10**6)
input = sys.stdin.readline
# initialize variables
n, m = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(n)]
dx, dy = [-1, 1, 0, 0], [0, 0, -1, 1]
answer = 0
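# Added note: the idea is to flood-fill the outside air from (0, 0) every hour; each cheese
# cell touched by outside air is incremented, remove() melts cells that reached >= 3
# (original 1 plus two air contacts) and resets the rest to 1, and the loop counts hours
# until check() finds no cheese left.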
def dfs(x, y):
for i in range(4):
nx, ny = x+dx[i], y+dy[i]
if 0<=nx<n and 0<=ny<m and not visited[nx][ny]:
if arr[nx][ny] != 0:
arr[nx][ny] += 1
else:
visited[nx][ny]=1
dfs(nx, ny)
def remove():
for i in range(n):
for j in range(m):
if arr[i][j] >= 3:
arr[i][j] = 0
elif arr[i][j] == 2:
arr[i][j] = 1
def check():
for i in range(n):
for j in range(m):
if arr[i][j]==1:
return False
return True
while True:
if check():
print(answer)
break
visited = [[0]*m for _ in range(n)]
dfs(0, 0)
remove()
visited[0][0] = 1
answer += 1
|
sunyeongchoi/sydsyd_challenge
|
argorithm/2638_re.py
|
2638_re.py
|
py
| 998 |
python
|
en
|
code
| 1 |
github-code
|
6
|
11016530679
|
import os
import discord
import requests
import asyncio
from dotenv import load_dotenv
from discord.utils import get
from discord.ext import commands
compteur = 301
nbConnected = 0
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
SERVER_IP = os.getenv('SERVER_IP')
SERVER_PORT = os.getenv('SERVER_PORT')
CHANNEL_ID = int(os.getenv('CHANNEL_ID'))
VOCAL_ID = int(os.getenv('VOCAL_ID'))
bot = commands.Bot(command_prefix="!")
# Background task
async def background_task():
global compteur
await bot.wait_until_ready()
while not bot.is_closed():
await call_api()
await asyncio.sleep(30)
compteur += 30
async def call_api():
global nbConnected
global compteur
for guild in bot.guilds:
if (guild.id == CHANNEL_ID):
channel = discord.utils.get(guild.channels, id=VOCAL_ID)
response = requests.get('https://minecraft-api.com/api/ping/online/' + SERVER_IP + '/' + str(SERVER_PORT))
nbConnected2 = response.content.decode("utf-8")
if nbConnected != nbConnected2 and compteur > 300:
nbConnected = nbConnected2
message = 'Il y a ' + str(nbConnected) + (' connectés' if int(nbConnected) > 1 else ' connecté')
compteur = 0
await channel.edit(name=message)
bot.loop.create_task(background_task())
# Start bot
bot.run(TOKEN)
|
AudricCh/minecraft-discord-bot
|
bot/main.py
|
main.py
|
py
| 1,395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6166872296
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 15 09:32:16 2017
@author: Francesco
"""
from sklearn.preprocessing import StandardScaler
import numpy as np
import threading as th
import time
import re
import matplotlib.pyplot as plt
movement_kind = ["wrist up",
"wrist down",
"wrist rotation out",
"wrist rotation inside",
"hand open",
"hand closed"]
class up(object):
index_array = []
value_array = []
def __init__(self):
self.index_array.append(0)
def update(self,index,value):
self.index_array.append(index)
self.value_array.append(value)
def load_from_C_formatting(file):
f = open(file,'r').read()
temp = f.split('\n')
Ltemp = len(temp)
state = int(temp[0][-1])
start_index = 0
X = []
Y = []
for j in range(1,Ltemp-1):
new_state = int(temp[j][-1])
if(int(new_state != state)):
Xraw = temp[start_index:j-1]
start_index = j
L = len(Xraw)
X_current = np.zeros((L,8))
i = 0
k = 0
for line in Xraw:
#print(line[:-2])
for value in line[:-2].split(','):
X_current[i,k] = value
k+=1
i+=1
k=0
Y.append(state)
X.append(X_current)
state = new_state
#last start index is the index of the last recording
Xraw = temp[start_index:-1]
L = len(Xraw)
X_current = np.zeros((L,8))
i = 0
k = 0
for line in Xraw:
#print(line[:-2])
for value in line[:-2].split(','):
X_current[i,k] = value
k+=1
i+=1
k=0
Y.append(state)
X.append(X_current)
figures = []
for movement in np.unique(Y):
figures.append(plt.subplots(1,1))
for p in [2,3,4,5,6,7]:
y = X[movement][:,p]
moving_average(y,10)
            #figures is a tuple (fig, axes) and we need to
            #plot on the axes
movement = int(movement)
figures[movement][1].plot(y,color=colorarray[p],label='ch'+str(p))
legend = figures[movement][1].legend(loc='upper left', shadow=True)
figures[movement][0].suptitle(movement_kind[movement])
plt.show()
return (X,Y)
def load_dataset(file):
""" RIFARE INIZIO """
n_channels = 8
f = open(file,'r')
#jump to the second block(the first is corrupted)
while(1):
if(f.read(1) == '-'):
start = f.tell()+2
break
f.seek(start)
#now we are ready to read the first block, which is the first feature actually
#understand the block length, must be equal for each block
dataset = f.read()
n_linee = 0
for line in dataset.split('\n'):
n_linee+=1
if(line == '-'):
n_linee -= 1
break
len_blocco = n_linee+1
#create the structure that will hold the features
    #each feature is a matrix n_linee*9 (n_channels + movement class)
n_blocks = (len(dataset.split('\n'))-1)/len_blocco
features = np.zeros((n_linee,n_channels+1,int(n_blocks)+1))
i = 0
j = 0
block = 0
for line in dataset.split('\n'):
if(len(line)<5):
block+=1
i = 0
#print(line)
else:
for value in line.split(','):
features[i,j,block] = value
j+=1
#print(line)
j=0
i+=1
return features
def gradient(data,channels):
der = np.zeros((len(data),channels))
for i in range(1,len(data)):
der[i,:] = data[i,:]-data[i-1,:]
return der
def moving_average(data,samp_for_average):
n_windows = int(len(data)/samp_for_average)
for i in range(n_windows):
data[i*samp_for_average:(i+1)*samp_for_average] = np.average(data[i*samp_for_average:(i+1)*samp_for_average])
def open_outfile(file,rep):
f = open(file,'r').read()
lines = f.split('\n')
info_decoded = lines[rep].split('\t')
first_matrix = info_decoded[:-1]
n_cluster = int(info_decoded[-1])
    #a simple float pattern fails on numbers without decimals: "2." would not match it, since it
    #expects more digits after the dot (that is also the reason for the second "try" below).
    #To catch this behaviour the regex allows two alternative patterns, so "3.0" is recognized as well as "3.".
patterns=re.compile(r'-\d+\.\d+|\d+\.\d+|-\d+\.|\d+\.')
#as a note: we search for both positive or negative(minus sign) but the order is important,
#because if -\d+\. was before -\d+\.\d+, the number -2.3 would be recognized as -2.
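    # Hedged illustration (sample row invented, not from the original data): on a string like
    #   "2.5 -0.75 3."
    # patterns.findall(...) returns ['2.5', '-0.75', '3.'] -- the trailing "3." is caught by the
    # extra alternatives, while a bare integer such as "4" would still not be matched.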
matrix = np.array(patterns.findall(first_matrix[0]),dtype='f')
for row in first_matrix[1:]: #the first has alread been taken
try:
temp = np.array(patterns.findall(row),dtype='f')
matrix = np.vstack((matrix,temp))
except ValueError:
print("Error:",row)
return (matrix,n_cluster)
#load data
def load_data_into_matrix(file,startline=0,endline=-1,cols=8,mode="signal"):
graph_data = open(file,'r').read()
lines = graph_data.split('\n')
n_channels = cols
n_lines = (len(lines))
vertical_lines = len(lines[startline:endline])
data = np.zeros((vertical_lines,n_channels)) #read all channels (8), plot only the desired
#the last acquisition may be corrupted, sudden termination of serial comm
    #the first lines may be corrupted by jittering of the sensors / the serial port reading garbage
if mode == "signal":
i=0
j=0
for line in lines[startline:endline]:
if(len(line)>1):
t = line.split(',')
for value in t:
data[i,j] = t[j]
j+=1
j=0
i+=1
return data
if mode == "encoded":
i=0
j=0
data = np.chararray((n_lines - (startline-endline),n_channels))
for line in lines[startline:endline]:
if(len(line)>1):
t = line.split(',')
for value in t:
data[i,j] = t[j]
j+=1
j=0
i+=1
return data
def unsigned_derivative(x_t,x_tmen1):
return np.abs((x_t - x_tmen1)/x_t)
colorarray = ['b','g','r','c','m','y','k','0.75']
mode = {'polso_piegato_alto':[0,1,4], #extensors
        'polso_piegato_basso':[2,3,7], #flexors
        'polso_ruotato_esterno':[0,3], #ulnar muscles
        'polso_ruotato_interno':[1,2], #radial muscles
'updown':[0,1],
'intest':[2,3],
'tutti':range(8)}
class track(object):
def __init__(self,data):
self.data = data
self.channels = data.shape[1] #number of channels
self.samples = data.shape[0] #number of samples
def set_baseline(self,number_of_samples = 30):
        #define the baseline for each channel; with this code we
        #don't care how many channels there are, 2 or 3 or n
#the shape of baseline will be 1xn
#basically this code is doing this: for each column sum the first
        #30 values and take the average, then subtract this value from
#all the values
self.baseline = np.sum(self.data[0:number_of_samples,:],axis=0)/number_of_samples
self.data -= self.baseline
def moving_avg(self,samp_for_average):
n_windows = int(len(self.data)/samp_for_average)
for s in range(self.channels):
for i in range(n_windows):
self.data[i*samp_for_average:(i+1)*samp_for_average,s] = np.average(self.data[i*samp_for_average:(i+1)*samp_for_average,s])
def __getitem__(self,index):
return self.data[index[0]][index[1]]
def read_channel(self,channel):
return self.data[:,channel]
def shape(self):
return self.data.shape
class computation(th.Thread):
def __init__(self,name,signal):
th.Thread.__init__(self)
self.signal = signal
self.name = name
def run(self):
#we use alto/basso together with esterno/interno since
        #they aren't mutually exclusive movements, the wrist can
        #in fact go up while it may be extern/intern, but cannot go
#up while it is down
#we somehow simulate the fact that we are reading a stream of data
#so we don't use all the data together, but once more at each step
#feature extraction: position: baseline and movement: derivative
#t represents time that goes by
""" !!!! MUST BE A MULTIPLE OF 10 !!!! """
windows_length = 10
n_chann = self.signal.shape()[1]
encoder = (lambda Y: 'a' if Y > windows_length/100 else 'b' if Y > -windows_length/100 else 'c')
encoded = ['x']*8
t = 0
outfile = open('thread_data'+self.name+'.txt','w')
#outfilerrr = open('prova_pos'+self.name+'.txt','w')
flag = 1
print("%s: samples %d, channels %d"%(self.name,self.signal.shape()[0],self.signal.shape()[1]) )
try:
while(1):
der_ = self.signal[t,:] - self.signal[t+windows_length,:]
#print(der_[0], self.signal[t,0], self.signal[t+windows_length,0] )
                #if deltaY > deltaX ... see the calculations in the notebook;
                #here I only add deltaX, which is always "window length" because that is the distance between the sampled points
i=0
encoded[0] = encoder(der_[0])
outfile.write("%c"%encoded[0])
for i in range(1,8):
encoded[i] = encoder(der_[i])
outfile.write(',')
outfile.write("%c"%encoded[i])
#slide window
                t += windows_length #must be at least larger than the moving-average window
#print(line)
flag+=1
outfile.write('\n')
#print(time.time()-start_time)
except IndexError:
outfile.close()
print(flag)
"""
*********************** MAIN **********************
"""
class offline_process(object):
def __init__(self,filename):
""" LOAD DATA """
data = load_data_into_matrix(filename,0,-1,8)
""" DIVIDE INTO MEANINGFUL CHANNELS """
self.polso_updown = track(data[:,mode['tutti']])
#self.polso_intest = track(data[:,mode['intest']])
""" REMOVE BASELINE """
# self.polso_alto_track.set_baseline()
# self.polso_basso_track.set_baseline()
# self.polso_esterno_track.set_baseline()
# self.polso_interno_track.set_baseline()
""" LOW PASS FILTER """
self.polso_updown.moving_avg(10)
#self.polso_intest.moving_avg(30)
""" START TWO THREADS TO COMPUTE"""
self.thread_updown = computation("-encoding",self.polso_updown)
#self.thread_leftright = computation("intest",self.polso_updown)
def __call__(self):
#start a thread for each computation, which is left-right or
#up down
try:
self.thread_updown.start()
#self.thread_leftright.start()
self.thread_updown.join()
#self.thread_leftright.join()
except KeyboardInterrupt:
self.thread_updown.join()
#self.thread_leftright.join()
class occurrence_table(object):
def __init__(self):
self.items = []
self.number_of_occurrence = []
self.l = 0
self.total = 0
def __repr__(self):
return "Object filled with %d items"%self.l
def __str__(self):
for i in range(self.l):
print("%s: %d"%(self.items[i],self.number_of_occurrence[i]))
return "----- End ------ "
def append(self,item):
j=0
for occurrence in self.items:
if occurrence != item:
j=j+1
else:
self.number_of_occurrence[j]+=1
self.total += 1
return
        #if the whole for loop finished without entering the else branch,
        #it means the item is new, so append it
self.items.append(item)
        #of course, record in the count that one item was added
self.number_of_occurrence.append(1)
self.l += 1
self.total += 1
        #counts and items are two separate lists, but the j-th element
        #of number_of_occurrence indicates how many times the j-th
        #element of items is present
def get(self):
return (self.items,self.number_of_occurrence)
def prob(self):
temp = [1]*self.l
for i in range(self.l):
temp[i] = self.number_of_occurrence[i]/self.total
return temp
if __name__ == "__main__":
p = offline_process("model_updown.txt")
p()
encoded_signal = load_data_into_matrix("thread_data-encoding.txt",mode="encoded")
entropy = lambda p: 0 if p==0 else -p*np.log2(p)
symbols_taken = 3
n_samples = encoded_signal.shape[0]
window_len = 30
start = 0
start_for_plot = 0
channel = encoded_signal.shape[1]
n_steps= window_len - symbols_taken + 1
print("n_steps:",n_steps)
ch = np.zeros((n_samples - window_len,channel),dtype='f')
outfile = open('entropy.txt','w')
while(start < n_samples - window_len):
table = []
for i in range(channel):
table.append(occurrence_table())
window = encoded_signal[start:start+window_len,:]
for i in range(n_steps):
for j in range(channel):
table[j].append(window[i:i+symbols_taken,j].tostring())
        entropy_per_channel = [0]*channel #entropy is maximal when everything is equally likely; 3**3 because there are 3 symbols over 3 positions
for j in range(channel):
list_of_prob = table[j].prob()
#print(list_of_prob)
for i in range(len(list_of_prob)):
entropy_per_channel[j] += entropy(list_of_prob[i])
ch[start_for_plot,j] = entropy_per_channel[j]
outfile.write(str(entropy_per_channel[j]))
outfile.write('\t')
start += 1
start_for_plot += 1
outfile.write('\n')
#print(table[0])
outfile.close()
fig2, ax2 = plt.subplots(1,1)
for p in range(channel):
y = ch[:,p]
ax2.plot(y,color=colorarray[p],label='ch'+str(p))
legend = ax2.legend(loc='upper left', shadow=True)
plt.show()
#
|
FrancesoM/UnlimitedHand-Learning
|
python_side/utilities.py
|
utilities.py
|
py
| 16,063 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18659651890
|
import models
import typing
import sqlite3
import os
import sys
class Storage:
def __init__(self):
self._conn = sqlite3.connect('v_store.db')
self._cursor = self._conn.cursor()
self._queries: dict[str, str] = self.read_queries()
def __del__(self):
self._cursor.close()
self._conn.close()
def read_queries(self) -> dict[str, str]:
queries = {}
current_key = None
current_query = []
with open("./queries.sql", "r") as f:
for line in f:
line = line.strip()
if line.startswith("--"):
if current_key is not None:
queries[current_key] = "\n".join(current_query)
current_query = []
current_key = line[2:].strip()
else:
current_query.append(line)
if current_key is not None:
queries[current_key] = "\n".join(current_query)
return queries
def make_table(self) -> int:
try:
self._cursor.execute(self._queries["make_vector_table"])
return 0
except Exception as e:
raise e
def add_point(self, point: models.Point) -> int:
try:
query = self._queries["add_point"].format(str(point), repr(point))
self._cursor.execute(query)
return 0
except Exception as e:
raise e
def get_point(self, id) -> int | models.Vector:
try:
query = self._queries["get_point_by_id"].format(id)
self._cursor.execute(query)
vec: models.Vector = self._cursor.fetchone()
if vec:
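                # Added note: exec() here always returns None; reconstructing the point from its
                # stored repr (e.g. via eval or a proper deserializer) is probably what was intended.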
return exec(vec)
else:
raise ValueError("Point with that id does not exist")
except Exception as e:
raise e
def delete_point(self, id) -> int:
try:
query = self._queries["delete_point_by_id"].format(id)
self._cursor.execute(query)
return 0
except Exception as e:
raise e
s = Storage()
print(s.read_queries())
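# Hedged sketch of the queries.sql layout read_queries() expects: a "--" comment line names
# each query, and the following lines up to the next "--" form its body. The statements below
# are illustrative placeholders, not the project's actual SQL.
#
#   -- make_vector_table
#   CREATE TABLE IF NOT EXISTS points (id INTEGER PRIMARY KEY, body TEXT);
#   -- add_point
#   INSERT INTO points (body, repr) VALUES ('{}', '{}');
#   -- get_point_by_id
#   SELECT repr FROM points WHERE id = {};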
|
Ayon-Bhowmick/Vec4Rec
|
src/storage.py
|
storage.py
|
py
| 2,150 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26964335493
|
from setuptools import setup
from hostinfo.version import __version__ as VERSION
from build_utils import BuildCommand
from build_utils import PublishCommand
from build_utils import BinaryDistribution
PACKAGE_NAME = 'pimjpeg'
BuildCommand.pkg = PACKAGE_NAME
# BuildCommand.py2 = False
# BuildCommand.py3 = False
PublishCommand.pkg = PACKAGE_NAME
PublishCommand.version = VERSION
README = open('README.rst').read()
GITHUB = "https://github.com/walchko/{}".format(PACKAGE_NAME)
INSTALL_REQ = open("requirements.txt").readlines()
TEST_REQ = ['nose']
CMDS = {'publish': PublishCommand, 'make': BuildCommand}
setup(
name=PACKAGE_NAME,
version=VERSION,
author="Kevin J. Walchko",
keywords=['package', 'keywords'],
author_email="[email protected]",
description="raspbery pi camera mjpeg streamer",
license="MIT",
package_data={
'package': ['templates', 'static'],
},
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Operating System :: Unix',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Topic :: Utilities',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: System :: Shells',
'Environment :: Console'
],
install_requires=INSTALL_REQ,
tests_require=TEST_REQ,
url=GITHUB,
long_description=README,
cmdclass=CMDS,
packages=[PACKAGE_NAME],
# scripts=[
# 'bin/hello.py'
# ]
)
|
walchko/mjpeg
|
setup.py
|
setup.py
|
py
| 1,527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71179089147
|
class Solution:
def isPalindrome(self, x: int) -> bool:
l=str(x)
f=""
for i in range(len(l)-1,0,-1):
f=f+l[i]
f=f+l[0]
if(f==l):
return True
else:
return False
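# Hedged usage sketch (sample inputs invented for illustration):
if __name__ == "__main__":
    s = Solution()
    print(s.isPalindrome(121))   # True  -- "121" reads the same reversed
    print(s.isPalindrome(-121))  # False -- the reversal "121-" differs from "-121"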
|
Yoheniy/competitive
|
leetcode_solution/palindrome-number.py
|
palindrome-number.py
|
py
| 263 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36766609482
|
# date: 2021/09/06
# link: https://programmers.co.kr/learn/courses/30/lessons/17680
from collections import deque
def solution(cacheSize, cities):
answer = 0
status = deque()
if cacheSize == 0:
answer = len(cities) * 5
else:
for city in cities:
city = city.upper()
if city in status:
answer += 1
status.remove(city)
else:
answer += 5
if len(status) >= cacheSize:
status.popleft()
status.append(city)
return answer
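# Hedged usage sketch: with cache size 3 and the city list below, the function returns 21
# (3 misses at cost 5, then 6 hits at cost 1 each).
if __name__ == "__main__":
    cities = ["Jeju", "Pangyo", "Seoul", "Jeju", "Pangyo", "Seoul", "Jeju", "Pangyo", "Seoul"]
    print(solution(3, cities))  # 21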
|
jiyoung-dev/Algorithm
|
Kakao 기출문제/캐시.py
|
캐시.py
|
py
| 608 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39262703446
|
from eums import settings_export
from eums.models import Alert
from eums.services.exporter.abstract_csv_exporter import AbstractCSVExporter
class AlertCSVExporter(AbstractCSVExporter):
def __init__(self, host_name, alert_type):
self.export_category = 'alert'
self.export_label = 'Alerts'
self.file_name = 'Alerts_by_%s' % alert_type
self.alert_type = alert_type
super(AlertCSVExporter, self).__init__(host_name)
def assemble_csv_data(self, alerts=None):
total_rows = [self._init_header()]
for alert in alerts:
total_rows.append(self.__export_row_data(alert))
return total_rows
def __export_row_data(self, alert):
reported_by = "%s\n%s" % (alert.contact['contact_name'], alert.contact['contact_phone'])
keys = {Alert.ITEM: [alert.issue_display_name(), alert.created_on, alert.order_number, alert.date_shipped(),
alert.quantity_delivered(), alert.total_value(), alert.item_description, reported_by,
alert.consignee_name, alert.location(), alert.remarks, alert.is_resolved],
Alert.DELIVERY: [alert.issue_display_name(), alert.created_on, alert.order_number, alert.date_shipped(),
alert.total_value(), reported_by, alert.consignee_name, alert.location(),
alert.remarks,
alert.is_resolved, alert.is_retriggered()],
Alert.DISTRIBUTION: [alert.created_on, alert.order_number, alert.date_shipped(), alert.date_received,
alert.total_value(), reported_by, alert.consignee_name, alert.location(),
alert.remarks, alert.is_resolved]}
return keys.get(self.alert_type, [])
def _subject(self):
return settings_export.EMAIL_COMMON_SUBJECT
def _init_header(self):
headers = {Alert.ITEM: ['STATUS', 'ALERT DATE', 'PO/WAYBILL', 'DATE SHIPPED', 'QTY', 'VALUE', 'ITEM',
'REPORTED BY', 'IMPLEMENTING PARTNER', 'DISTRICT', 'UNICEF REMARKS', 'RESOLVED'],
Alert.DELIVERY: ['STATUS', 'ALERT DATE', 'PO/WAYBILL', 'DATE SHIPPED', 'VALUE', 'REPORTED BY',
'IMPLEMENTING PARTNER', 'DISTRICT', 'UNICEF REMARKS', 'RESOLVED', 'RETRIGGERED'],
Alert.DISTRIBUTION: ['DISTRIBUTION DEADLINE', 'PO/WAYBILL', 'DATE SHIPPED', 'DATE RECEIVED',
'VALUE', 'REPORTED BY', 'IMPLEMENTING PARTNER', 'DISTRICT', 'UNICEF REMARKS',
'RESOLVED']}
return headers.get(self.alert_type, [])
|
unicefuganda/eums
|
eums/services/exporter/alert_csv_exporter.py
|
alert_csv_exporter.py
|
py
| 2,737 |
python
|
en
|
code
| 9 |
github-code
|
6
|
35395847394
|
from django.core.paginator import InvalidPage
class AlphabetGlossary(object):
"""Алфавитный глоссарий"""
def __init__(self, object_list, on=None, num_groups=7):
        self.object_list = object_list # list of objects
        self.count = len(object_list) # number of objects in the list
        self.max_groups = num_groups # number of alphabet groups
        self.groups = [] # list of alphabet groups
        # Dictionary whose keys are letters of the alphabet and whose values are the lists of objects from object_list starting with that letter
chunks = {}
for obj in self.object_list:
if on:
obj_str = str(getattr(obj, on))
else:
obj_str = str(obj)
letter = str.upper(obj_str[0])
if letter not in chunks:
chunks[letter] = []
chunks[letter].append(obj)
        # Compute the expected number of objects per alphabet group
per_group = self.count / num_groups
for letter in chunks:
chunk_len = len(chunks[letter])
if chunk_len > per_group:
per_group = chunk_len
        # Distribute the objects across the alphabet groups
current_group = AlphabetGroup(self)
for letter in sorted(chunks.keys()):
            sub_list = chunks[letter] # objects whose names start with this letter
            # Decide whether sub_list fits into the current alphabet group or has to
            # be carried over to a new one. A new group is created if:
            # - adding sub_list would overflow the current group
            # - the free space left in the current group is smaller than the overflow
            # - the current group is not empty (otherwise it would mean that len(sub_list) > per_group)
new_group_count = len(sub_list) + current_group.count
if new_group_count > per_group and \
abs(per_group - current_group.count) < abs(per_group - new_group_count) and \
current_group.count > 0:
self.groups.append(current_group)
current_group = AlphabetGroup(self)
current_group.add(sub_list, letter)
        # If a non-empty group is left after the loop, add it to the glossary
if current_group.count > 0:
self.groups.append(current_group)
def group(self, num):
"""Возвращает объект алфавитной группы"""
if len(self.groups) == 0:
return None
elif num > 0 and num <= len(self.groups):
return self.groups[num - 1]
else:
raise InvalidPage
@property
def num_groups(self):
"""Возвращает количество алфавитных групп"""
return len(self.groups)
class AlphabetGroup(object):
"""Алфавитная группа глоссария"""
def __init__(self, glossary):
self.glossary = glossary
self.object_list = []
self.letters = []
@property
def count(self):
"""Возвращает количество объектов в группе"""
return len(self.object_list)
@property
def start_letter(self):
"""Возвращает первую букву группы"""
if len(self.letters) > 0:
self.letters.sort(key=str.upper)
return self.letters[0]
else:
return None
@property
def end_letter(self):
"""Возвращает последнюю букву группы"""
if len(self.letters) > 0:
self.letters.sort(key=str.upper)
return self.letters[-1]
else:
return None
@property
def number(self):
"""Возвращает номер группы в глоссарии"""
return self.glossary.groups.index(self) + 1
def add(self, new_list, letter=None):
"""Добавляет список объектов в группу"""
if len(new_list) > 0:
self.object_list = self.object_list + new_list
if letter:
self.letters.append(letter)
def __repr__(self):
"""Возвращает метку группы"""
if self.start_letter == self.end_letter:
return self.start_letter
else:
return '%c-%c' % (self.start_letter, self.end_letter)
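# Hedged usage sketch (sample words invented for illustration): split a small list into two
# alphabet groups and print their labels.
if __name__ == "__main__":
    glossary = AlphabetGlossary(["apple", "avocado", "banana", "cherry"], num_groups=2)
    print(glossary.num_groups)                  # 2
    print([repr(g) for g in glossary.groups])   # ['A', 'B-C']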
|
zarmoose/eastwood_test
|
employees/glossary.py
|
glossary.py
|
py
| 5,105 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
3578166823
|
import os
import torch
import csv
import pandas as pd
from config import FoldersConfig
def txt_to_csv(input_path, output_path):
with open(input_path, 'r') as in_file:
stripped = (line.strip() for line in in_file)
lines = (line.split() for line in stripped if line)
with open(output_path, 'w') as out_file:
writer = csv.writer(out_file)
writer.writerows(lines)
def get_categories_and_path(input_path, output_path):
with open(input_path, 'r') as in_file:
reader = csv.reader(in_file)
next(reader)
row0 = next(reader)
with open(output_path, 'w') as out_file:
writer = csv.writer(out_file)
writer.writerow(["path", "deep_fashion_category_name", "dataset"])
for r in reader:
split_r = r[0].split('_')[-2]
category = split_r.split('/')[-2]
r.append(r[0])
r.append(category)
r.append('deep_fashion')
writer.writerow( (r[2], r[3], r[4]) )
def add_column_with_article_type_equivalence(deep_fashion, map_to_product_fashion, output_path):
deep_fashion_df = pd.read_csv(deep_fashion, error_bad_lines=False)
map_to_product_fashion_df = pd.read_csv(map_to_product_fashion)
deep_fashion_with_article_type_df = deep_fashion_df.merge(map_to_product_fashion_df, on='deep_fashion_category_name', how='left')
deep_fashion_with_article_type_df['id'] = deep_fashion_with_article_type_df.index + 100000
deep_fashion_with_article_type_df = deep_fashion_with_article_type_df[['id', 'path', 'deep_fashion_category_name', 'product_fashion_article_type', 'dataset']]
deep_fashion_with_article_type_df.columns = ['id', 'path', 'categoryName', 'articleType', 'dataset']
deep_fashion_with_article_type_df.to_csv(output_path, index=False)
def prepare_datasets():
resources = FoldersConfig.RESOURCES_DIR
list_categories_path = resources + 'deep_fashion/list_category_img.txt'
list_categories_output_path = resources + 'deep_fashion/list_category_img.csv'
path_category_dataset = resources + 'deep_fashion/path_category_dataset.csv'
map_to_product_fashion_path = resources + 'map_deep_fashion_to_product_fashion.csv'
deep_fashion_with_article_type_path = resources + 'deep_fashion/deep_fashion_with_article_type.csv'
if not os.path.exists(list_categories_output_path):
txt_to_csv(list_categories_path, list_categories_output_path)
if not os.path.exists(path_category_dataset):
get_categories_and_path(list_categories_output_path, path_category_dataset)
if not os.path.exists(deep_fashion_with_article_type_path):
add_column_with_article_type_equivalence(path_category_dataset, map_to_product_fashion_path, deep_fashion_with_article_type_path)
if __name__ == "__main__":
prepare_datasets()
|
ferran-candela/upc-aidl-2021-image-retrieval
|
imageretrieval/src/prepare_datasets.py
|
prepare_datasets.py
|
py
| 2,898 |
python
|
en
|
code
| 3 |
github-code
|
6
|
13895478896
|
########################################################################################
# Run examples from our paper in RBEF
########################################################################################
import sys
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
import scipy
import scipy.signal
from scipy.integrate import simps
from joblib import Parallel, delayed
from ar_model import *
import pygc.pySpec
import pygc.parametric
import pygc.non_parametric
import pygc.granger
import plot_results
p = int(sys.argv[-1])
if p == 0:
# Generates figure 3 from the paper
print('Generating Figure 3 from RBEF paper...')
N = 5000 # Number of observations
Fs = 200 # Sampling frequency
dt = 1.0 / Fs # Time resolution
C = 0.25 # Coupling parameter
Trials = 5000 # Number of trials
# Covariance matrix
cov = np.array([ [1.00, 0.00],
[0.00, 1.00] ])
f = pygc.pySpec.compute_freq(N, Fs)
S = np.zeros([2,2,N//2+1]) + 1j*np.zeros([2,2,N//2+1])
print('Generating AR model time series...')
Z = ar_model_dhamala(N=N, Trials = Trials, C=C, Fs=Fs, t_start=0, t_stop=None, cov=cov)
print('Estimating spectral matrix from ' + str(Trials) + ' trials...')
for i in range(Trials):
if i%500 == 0:
print('Trial = ' + str(i))
S[0,0] += pygc.pySpec.cxy(X=Z[0,i,:], Y=[], f=f, Fs=Fs) / Trials
S[0,1] += pygc.pySpec.cxy(X=Z[0,i,:], Y=Z[1,i,:], f=f, Fs=Fs) / Trials
S[1,0] += pygc.pySpec.cxy(X=Z[1,i,:], Y=Z[0,i,:], f=f, Fs=Fs) / Trials
S[1,1] += pygc.pySpec.cxy(X=Z[1,i,:], Y=[], f=f, Fs=Fs) / Trials
print('Computing Granger Causality...')
Snew, Hnew, Znew = pygc.non_parametric.wilson_factorization(S, f, Fs, Niterations=30)
Ix2y, Iy2x, Ixy = pygc.granger.granger_causality(S, Hnew, Znew)
print('Saving data...')
np.save('data/fig3.npy', {'f': f, 'S': S, 'H': Hnew, 'Z': Znew, 'Ix2y': Ix2y, 'Iy2x': Iy2x, 'Ixy': Ixy})
print('Plotting results...')
plot_results.fig3()
if p == 1:
# Generates figure 4 from the paper
N = 900 # Number of observations
Fs = 200 # Sampling frequency
dt = 1.0 / Fs # Time resolution
C = 0.25 # Coupling parameter
Trials = 5000 # Number of trials
cov = np.array([ [1.00, 0.00],
[0.00, 1.00] ])
f = pygc.pySpec.compute_freq(N, Fs)
S = np.zeros([2,2,N,N//2+1]) + 1j*np.zeros([2,2,N,N//2+1])
print('Generating AR model time series...')
Z = ar_model_dhamala(N=N, Trials = Trials, C=C, Fs=Fs, t_start=0, t_stop=2.25, cov=cov)
print('Estimating wavelet matrix from ' + str(Trials) + ' trials...')
for i in range(Trials):
if i%500 == 0:
print('Trial = ' + str(i))
Wx = pygc.pySpec.morlet(Z[0,i,:], f, Fs)
Wy = pygc.pySpec.morlet(Z[1,i,:], f, Fs)
S[0,0] += Wx*np.conj(Wx) / Trials
S[0,1] += Wx*np.conj(Wy) / Trials
S[1,0] += Wy*np.conj(Wx) / Trials
S[1,1] += Wy*np.conj(Wy) / Trials
# S = S[:,:,idx,:]
print('Computing Granger Causality...')
def save_granger(S, idx):
Snew, Hnew, Znew = pygc.non_parametric.wilson_factorization(S[:,:,idx,:], f, Fs, Niterations=30, verbose=False)
Ix2y, Iy2x, Ixy = pygc.granger.granger_causality(S[:,:,idx,:], Hnew, Znew)
np.save('data/fig4_'+str(idx)+'.npy', {'f': f, 'Ix2y': Ix2y, 'Iy2x': Iy2x, 'Ixy': Ixy})
Parallel(n_jobs=40, backend='loky', max_nbytes=1e6)(delayed(save_granger)(S, idx) for idx in range(N))
print('Plotting results...')
plot_results.fig4()
if p == 2:
# Generates figure 7 and 8 from the paper
N = 5000 # Number of observations
Trials = 1000 # Number of trials
nvars = 5 # Number of variables
Fs = 2*np.pi
dt = 1.0 / Fs
f = pygc.pySpec.compute_freq(N, Fs)
print('Generating AR model time series...')
Y = ar_model_baccala(nvars, N, Trials)
print('Estimating spectral matrix from ' + str(Trials) + ' trials...')
S = np.zeros([nvars, nvars, N//2 + 1]) * (1 + 1j)
for trial in range(Trials):
if (trial % 100 == 0):
print('Trial = ' + str(trial))
for i in range(nvars):
for j in range(nvars):
S[i,j] += pygc.pySpec.cxy(X=Y[i,:,trial], Y=Y[j,:,trial], f=f, Fs=Fs) / Trials
    print('Estimating pairwise Granger causalities')
GC = np.zeros([nvars, nvars])
for i in range(nvars):
for j in range(nvars):
if i == j:
continue
else:
S_aux = np.array([[S[i,i], S[i,j]],[S[j,i], S[j,j]]])
_, H, Z = pygc.non_parametric.wilson_factorization(S_aux, f, Fs, Niterations=10, tol=1e-12, verbose=False)
Ix2y, Iy2x, _ = pygc.granger.granger_causality(S_aux, H, Z)
GC[i,j] = simps(Ix2y, f) / 2*np.pi
GC[j,i] = simps(Iy2x, f) / 2*np.pi
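                # Added note: "/ 2*np.pi" evaluates as (value / 2) * np.pi due to operator
                # precedence; if the intent is to normalize by 2*pi, parentheses would be needed.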
    print('Estimating conditional Granger causalities')
F = pygc.granger.conditional_granger_causality(S, f, Fs, Niterations = 10, verbose=False)
cGC = pygc.granger.conditional_spec_granger_causality(S, f, Fs, Niterations=100, tol=1e-12, verbose=False)
print('Saving data...')
np.save('data/fig_7_8.npy', {'f':f,'GC': GC, 'F': F, 'cGC': cGC})
print('Plotting results...')
plot_results.fig7_8()
if p == 3:
# Fits an AR model by solving YW equations as in appendix A of the paper.
Trials = 1000 # Number of trials
Fs = 200 # Sampling frequency
N = 1000 # Number of data points
X = np.zeros([1,N, Trials]) # Data matrix
tsim = N/Fs # Simulation time
# Coefficients of the ar model
c = [0.7, 0.2, -0.1, -0.3]
print('Generating AR model time series...')
for T in range(Trials):
X[0,:,T] = scipy.signal.lfilter([1], -np.array([-1]+c), np.random.randn(N))
print('Estimating AR model coefficients for ' + str(Trials) + ' trials')
for m in [2, 3, 4, 5, 6]:
print()
AR = np.zeros([1,1,m])
SIG = np.zeros([1,1])
for T in range(Trials):
aux1, aux2 = pygc.parametric.YuleWalker(X[:,:,T], m, maxlags=100)
AR += aux1.T/Trials
SIG += aux2.T/Trials
AR = np.round(AR, 2)
SIG = np.round(SIG, 2)
print('Using order = ' + str(m)+ '. Original coefficients: ' + str(c) + '. Estimated coefficients ' + str(AR[0][0]) + '. Noise variace: ' + str(SIG[0][0]))
if p == 4:
    # Generates figure 3C from the paper, but using a parametric method
print('Generating Figure 3 from RBEF paper...')
N = 5000 # Number of observations
Fs = 200 # Sampling frequency
dt = 1.0 / Fs # Time resolution
C = 0.25 # Coupling parameter
Trials = 5000 # Number of trials
# Covariance matrix
cov = np.array([ [1.00, 0.00],
[0.00, 1.00] ])
print('Generating AR model time series...')
X = ar_model_dhamala(N=N, Trials = Trials, C=C, Fs=Fs, t_start=0, t_stop=None, cov=cov)
print('Estimating VAR coefficients using oreder m=2...')
m = 2
AR = np.zeros([m, 2,2])
SIG = np.zeros([2,2])
for T in range(Trials):
aux1, aux2 = pygc.parametric.YuleWalker(X[:,T,:], m, maxlags=100)
AR += aux1/Trials
SIG += aux2/Trials
print('Computing Granger Causality...')
f = pygc.pySpec.compute_freq(N, Fs)
H, S = pygc.parametric.compute_transfer_function(AR, SIG, f, Fs)
Ix2y, Iy2x, _ = pygc.granger.granger_causality(S, H, SIG)
plt.figure(figsize=(6,2))
plt.plot(f, Ix2y)
plt.plot(f, Iy2x)
plt.xlim([0, 100])
plt.ylim([-0.01, 1.2])
plt.ylabel('GC')
plt.xlabel('Frequency [Hz]')
plt.legend([r'$X_{1}\rightarrow X_{2}$', r'$X_{2}\rightarrow X_{1}$'])
plt.savefig('figures/fig9.pdf', dpi = 600)
plt.close()
|
ViniciusLima94/pyGC
|
runRBEF.py
|
runRBEF.py
|
py
| 7,302 |
python
|
en
|
code
| 30 |
github-code
|
6
|
74795986426
|
import openpyxl
import tkinter as tk
def add_data_to_excel(roll_number, name):
# Open the Excel file or create a new one if it doesn't exist
try:
workbook = openpyxl.load_workbook('data.xlsx')
except FileNotFoundError:
workbook = openpyxl.Workbook()
# Select the active sheet (default: first sheet)
sheet = workbook.active
# Append the data to the Excel sheet
row = [roll_number, name]
sheet.append(row)
# Save the changes to the Excel file
workbook.save('data.xlsx')
def on_submit():
roll_number = roll_entry.get()
name = name_entry.get()
try:
add_data_to_excel(roll_number, name)
result_label.config(text="Data successfully stored in Excel!", fg="green")
except Exception as e:
result_label.config(text=f"Error occurred: {e}", fg="red")
# Create the tkinter window
root = tk.Tk()
root.title("Data Entry")
# Labels and Entry widgets for roll number and name
roll_label = tk.Label(root, text="Roll Number:")
roll_label.pack()
roll_entry = tk.Entry(root)
roll_entry.pack()
name_label = tk.Label(root, text="Name:")
name_label.pack()
name_entry = tk.Entry(root)
name_entry.pack()
submit_button = tk.Button(root, text="Submit", command=on_submit)
submit_button.pack()
result_label = tk.Label(root, text="", fg="green")
result_label.pack()
# Run the tkinter main loop
root.mainloop()
|
Chandravarma2004/Push-the-data-given-to-excel-
|
project3.py
|
project3.py
|
py
| 1,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44831632974
|
#!/usr/bin/env python3
import json
import re
DEFAULT_PRICE = 9999
DEFAULT_SQFT = 0
def parse(apartment_data, apartment_filters = []):
apartment_list = []
for apartment in apartment_data:
add_apartment = False
for apartment_filter in apartment_filters:
add_apartment = apt_filter(apartment, apartment_filter)
if add_apartment == True:
break
if add_apartment == True:
apartment_list.append(apartment)
    return apartment_list
def apt_filter(apartment_data, apartment_filter):
bedrooms = apartment_filter['bedrooms'] if 'bedrooms' in apartment_filter else bedroom_filter_error()
price = apartment_filter['price'] if 'price' in apartment_filter else DEFAULT_PRICE
sqft = apartment_filter['sqft'] if 'sqft' in apartment_filter else DEFAULT_SQFT
return (
int(apartment_data['Bd']) == bedrooms and
int(re.sub(r'[$,]', '', apartment_data['Price'])) < price and
int(apartment_data['Size (sq.ft.)']) >= sqft
)
def bedroom_filter_error():
raise ValueError('Bedroom filter required')
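# Hedged usage sketch: the listing dict below is invented, but mirrors the keys this module
# reads ('Bd', 'Price', 'Size (sq.ft.)').
if __name__ == "__main__":
    listings = [{"Bd": "2", "Price": "$1,850", "Size (sq.ft.)": "900"}]
    print(parse(listings, [{"bedrooms": 2, "price": 2000, "sqft": 800}]))  # keeps the listing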
| juanmaberrocal/apartment-scraper | app/modules/parsedata.py | parsedata.py | py | 1,116 | python | en | code | 0 | github-code | 6 |
| 14539682858 |
class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
if x >= 0:
            reversed_x = int(str(x)[::-1])
        else:
            reversed_x = int(str(abs(x))[::-1]) * -1
        if -pow(2, 31) <= reversed_x <= pow(2, 31) - 1:
            return reversed_x
        else:
            return 0
# x = int(str(x)[::-1]) if x >= 0 else - int(str(-x)[::-1])
# return x if x < 2147483648 and x >= -2147483648 else 0
if __name__ == '__main__':
s = Solution()
x = s.reverse(-123)
print(x)
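    # Added sketch (not in the original solution): a reversal that overflows
    # the 32-bit range comes back as 0.
    print(s.reverse(1534236469))  # 0, since 9646324351 > 2**31 - 1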
| Rainphix/LeetCode | 007_reverse_integer.py | 007_reverse_integer.py | py | 581 | python | en | code | 0 | github-code | 6 |
| 31651402247 |
"""
Ian Dansereau
GroupMeReddit
runBot.py
5/5/16
"""
import sys
from groupmebot.RedditBot import RedditBot as rb
"""
Main method
"""
def main():
    if not sys.version_info >= (3, 5):
        print("Python 3.5+ is required. This version is %s" % sys.version.split()[0])
        sys.exit(1)
try:
bot = rb()
bot.run()
except Exception as e:
print(e.__str__())
if __name__ == '__main__':
main()
| imd8594/GroupMeReddit | runBot.py | runBot.py | py | 436 | python | en | code | 3 | github-code | 6 |
| 1523636284 |
'''
Given a password as a character array A.
Check if it is valid or not.
Password should have at least one numerical digit(0-9).
Password's length should be in between 8 to 15 characters.
Password should have at least one lowercase letter(a-z).
Password should have at least one uppercase letter(A-Z).
Password should have at least one special character ( @, #, %, &, !, $, ., *)
'''
class Solution:
# @param A : string
# @return an integer
def solve(self, A):
len_cond = False
numerical_cond = False
lower_cond = False
upper_cond = False
special_cond = False
special_chars = ['@', '#', '%', '&', '!', '$' , '.', '*']
if len(A) > 7 and len(A) < 16:
len_cond = True
for char in A:
if char.isdigit():
numerical_cond = True
if char.islower():
lower_cond = True
if char.isupper():
upper_cond = True
if char in special_chars:
special_cond = True
result = len_cond and numerical_cond and lower_cond and upper_cond and special_cond
return int(result)
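# A minimal usage sketch (not part of the original file): one password that
# satisfies all five rules and one that fails most of them.
if __name__ == '__main__':
    sol = Solution()
    print(sol.solve('Passw0rd!'))  # 1
    print(sol.solve('password'))   # 0 -> no digit, uppercase or special character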
| kartikwar/programming_practice | strings/valid_password.py | valid_password.py | py | 1,209 | python | en | code | 0 | github-code | 6 |
| 8499225984 |
from booleano.exc import InvalidOperationError
from booleano.operations.operands import Operand
__all__ = ["String", "Number", "Arithmetic", "Set"]
class Constant(Operand):
"""
Base class for constant operands.
The only operation that is common to all the constants is equality (see
:meth:`equals`).
Constants don't rely on the context -- they are constant!
.. warning::
This class is available as the base for the built-in :class:`String`,
:class:`Number` and :class:`Set` classes. User-defined constants aren't
supported, but you can assign a name to a constant (see
:term:`binding`).
"""
operations = {'equality'}
def __init__(self, constant_value):
"""
:param constant_value: The Python value represented by the Booleano
constant.
:type constant_value: :class:`object`
"""
self.constant_value = constant_value
def to_python(self, context):
"""
Return the value represented by this constant.
"""
return self.constant_value
def equals(self, value, context):
"""
Check if this constant equals ``value``.
"""
return self.constant_value == value
def check_equivalence(self, node):
"""
Make sure constant ``node`` and this constant are equivalent.
:param node: The other constant which may be equivalent to this one.
:type node: Constant
:raises AssertionError: If the constants are of different types or
represent different values.
"""
super(Constant, self).check_equivalence(node)
assert node.constant_value == self.constant_value, \
u'Constants %s and %s represent different values' % (self,
node)
class String(Constant):
u"""
Constant string.
These constants only support equality operations.
.. note:: **Membership operations aren't supported**
        Although both sets and strings are item collections, the former is
        unordered and the latter is ordered. If they were supported, there would
        be some ambiguities to sort out, because users would expect the following
operation results:
- ``"ao" ⊂ "hola"`` is false: If strings were also sets, then the
resulting operation would be ``{"a", "o"} ⊂ {"h", "o", "l", "a"}``,
which is true.
- ``"la" ∈ "hola"`` is true: If strings were also sets, then the
resulting operation would be ``{"l", "a"} ∈ {"h", "o", "l", "a"}``,
which would be an *invalid operation* because the first operand must
be an item, not a set. But if we make an exception and take the first
operand as an item, the resulting operation would be
``"la" ∈ {"h", "o", "l", "a"}``, which is not true.
The solution to the problems above would involve some magic which
contradicts the definition of a set: Take the second operand as an
*ordered collection*. But it'd just cause more trouble, because both
operations would be equivalent!
Also, there would be other issues to take into account (or not), like
case-sensitivity.
Therefore, if this functionality is needed, developers should create
functions to handle it.
"""
def __init__(self, string):
"""
:param string: The Python string to be represented by this Booleano
string.
:type string: :class:`basestring`
``string`` will be converted to :class:`unicode`, so it doesn't
have to be a :class:`basestring` initially.
"""
import sys
if sys.version_info >= (3, 0):
string = str(string)
else:
string = unicode(string)
super(String, self).__init__(string)
def equals(self, value, context):
"""Turn ``value`` into a string if it isn't a string yet"""
value = str(value)
return super(String, self).equals(value, context)
def __unicode__(self):
"""Return the Unicode representation of this constant string."""
return u'"%s"' % self.constant_value
def __hash__(self):
return id(self)
def __repr__(self):
"""Return the representation for this constant string."""
return '<String "%s">' % self.constant_value.encode("utf-8")
class ArithmeticVariable(object):
def __init__(self, number, namespace, namespace_separator=":"):
self.namespace_separator = namespace_separator
self.parsed_results = number
self._namespace = namespace
self.variables = {}
self.__define_variables()
number = self.flatten(self.parsed_results)
self.__full_expression = "".join(number)
def __str__(self):
number = self.flatten(self.parsed_results)
return "".join(number)
def __define_variables(self):
number = self.parsed_results
temp = []
for n in number:
t = self.__get_variable_names(n)
if isinstance(t, list):
temp.extend(t)
else:
temp.append(t)
self.required_variables = temp
temp = {}
for v in self.required_variables:
for k, val in v.items():
temp[k] = val
self.required_variables = temp
def __get_variable_names(self, number):
from pyparsing import ParseResults
import re
temp = []
if isinstance(number, ParseResults):
for n in number:
t = self.__get_variable_names(n)
if isinstance(t, list):
temp.extend(t)
else:
temp.append(t)
return temp
elif len(re.findall("[a-zA-Z" + self.namespace_separator + "]+", number)) > 0:
var = str(number).split(self.namespace_separator)
variable_namespaces = var[0:-1]
variable_name = var[-1]
return {str(number): self._namespace.get_object(variable_name, variable_namespaces)}
return temp
@classmethod
def flatten(cls, s):
from pyparsing import ParseResults
if s == []:
return s
if isinstance(s[0], ParseResults):
return cls.flatten(s[0]) + cls.flatten(s[1:])
return s[:1] + cls.flatten(s[1:])
def replace(self, num, context, namespace=True):
for k, v in self.required_variables.items():
if namespace and self.namespace_separator not in k:
continue
num = num.replace(k, str(v.to_python(context)))
return num
def evaluate(self, context):
number = self.__full_expression
# Replace all variables with numbers
# First replace variables with namespaces defined to avoid clobbering
number = self.replace(number, context)
# Then replace variables with no namespace
number = self.replace(number, context, False)
number = number.replace("^", "**")
from booleano import SafeEval
answer = SafeEval.eval_expr(number)
return answer
class Arithmetic(Constant):
"""
Numeric constant.
These constants support inequality operations; see :meth:`greater_than`
and :meth:`less_than`.
"""
operations = Constant.operations | {'inequality'}
def __init__(self, number, namespace, namespace_separator=":"):
"""
:param number: The number to be represented, as a Python object.
:type number: :class:`object`
        ``number`` is wrapped in an :class:`ArithmeticVariable` internally and
        is evaluated lazily against the context.
"""
        self.namespace_separator = namespace_separator
super(Arithmetic, self).__init__(ArithmeticVariable(number, namespace, namespace_separator))
def equals(self, value, context):
"""
Check if this numeric constant equals ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
print("Constant equals")
return super(Arithmetic, self).equals(self._to_number(value), context)
def greater_than(self, value, context):
"""
Check if this numeric constant is greater than ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
print("Constant gt")
return self.constant_value > self._to_number(value)
def less_than(self, value, context):
"""
Check if this numeric constant is less than ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
print("Constant lt")
return self.constant_value < self._to_number(value)
def to_python(self, context):
return self.constant_value.evaluate(context)
def _to_number(self, value):
"""
Convert ``value`` to a Python float and return the new value.
:param value: The value to be converted into float.
:return: The value as a float.
:rtype: float
:raises InvalidOperationError: If ``value`` can't be converted.
"""
print("Constant to_num")
try:
return float(value)
except ValueError:
raise InvalidOperationError('"%s" is not a number' % value)
def __unicode__(self):
"""Return the Unicode representation of this constant number."""
print("constant unicode")
return str(self.constant_value)
def __repr__(self):
"""Return the representation for this constant number."""
return '<Arithmetic %s>' % self.constant_value
class Number(Constant):
"""
Numeric constant.
These constants support inequality operations; see :meth:`greater_than`
and :meth:`less_than`.
"""
operations = Constant.operations | {'inequality'}
def __init__(self, number):
"""
:param number: The number to be represented, as a Python object.
:type number: :class:`object`
``number`` is converted into a :class:`float` internally, so it can
        be a :class:`string <basestring>` initially.
"""
number = float(number)
super(Number, self).__init__(number)
def equals(self, value, context):
"""
Check if this numeric constant equals ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
return super(Number, self).equals(self._to_number(value), context)
def greater_than(self, value, context):
"""
Check if this numeric constant is greater than ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
return self.constant_value > self._to_number(value)
def less_than(self, value, context):
"""
Check if this numeric constant is less than ``value``.
:raises InvalidOperationError: If ``value`` can't be turned into a
float.
``value`` will be turned into a float prior to the comparison, to
support strings.
"""
return self.constant_value < self._to_number(value)
def _to_number(self, value):
"""
Convert ``value`` to a Python float and return the new value.
:param value: The value to be converted into float.
:return: The value as a float.
:rtype: float
:raises InvalidOperationError: If ``value`` can't be converted.
"""
try:
return float(value)
except ValueError:
raise InvalidOperationError('"%s" is not a number' % value)
def __unicode__(self):
"""Return the Unicode representation of this constant number."""
return str(self.constant_value)
def __repr__(self):
"""Return the representation for this constant number."""
return '<Number %s>' % self.constant_value
class Set(Constant):
"""
Constant sets.
These constants support membership operations; see :meth:`contains` and
:meth:`is_subset`.
"""
operations = Constant.operations | {"inequality", "membership"}
def __init__(self, *items):
"""
:raises booleano.exc.InvalidOperationError: If at least one of the
``items`` is not an operand.
"""
for item in items:
if not isinstance(item, Operand):
raise InvalidOperationError('Item "%s" is not an operand, so '
'it cannot be a member of a set' %
item)
super(Set, self).__init__(set(items))
def to_python(self, context):
"""
Return a set made up of the Python representation of the operands
contained in this set.
"""
items = set(item.to_python(context) for item in self.constant_value)
return items
def equals(self, value, context):
"""Check if all the items in ``value`` are the same of this set."""
value = set(value)
return value == self.to_python(context)
def less_than(self, value, context):
"""
Check if this set has less items than the number represented in
``value``.
:raises InvalidOperationError: If ``value`` is not an integer.
"""
value = self._to_int(value)
return len(self.constant_value) < value
def greater_than(self, value, context):
"""
Check if this set has more items than the number represented in
``value``.
:raises InvalidOperationError: If ``value`` is not an integer.
"""
value = self._to_int(value)
return len(self.constant_value) > value
def belongs_to(self, value, context):
"""
Check that this constant set contains the ``value`` item.
"""
for item in self.constant_value:
try:
if item.equals(value, context):
return True
except InvalidOperationError:
continue
return False
def is_subset(self, value, context):
"""
Check that the ``value`` set is a subset of this constant set.
"""
for item in value:
if not self.belongs_to(item, context):
return False
return True
def check_equivalence(self, node):
"""
Make sure set ``node`` and this set are equivalent.
:param node: The other set which may be equivalent to this one.
:type node: Set
:raises AssertionError: If ``node`` is not a set or it's a set
with different elements.
"""
Operand.check_equivalence(self, node)
unmatched_elements = list(self.constant_value)
assert len(unmatched_elements) == len(node.constant_value), \
u'Sets %s and %s do not have the same cardinality' % \
(unmatched_elements, node)
# Checking that each element is represented by a mock operand:
for element in node.constant_value:
for key in range(len(unmatched_elements)):
if unmatched_elements[key] == element:
del unmatched_elements[key]
break
assert 0 == len(unmatched_elements), \
u'No match for the following elements: %s' % unmatched_elements
def __unicode__(self):
"""Return the Unicode representation of this constant set."""
elements = [str(element) for element in self.constant_value]
elements = u", ".join(elements)
return "{%s}" % elements
def __repr__(self):
"""Return the representation for this constant set."""
elements = [repr(element) for element in self.constant_value]
elements = ", ".join(elements)
if elements:
elements = " " + elements
return '<Set%s>' % elements
@classmethod
def _to_int(cls, value):
"""
        Convert ``value`` to an integer if possible.
:param value: The value to be verified.
:return: ``value`` as integer.
:rtype: int
:raises InvalidOperationError: If ``value`` is not an integer.
This is a workaround for Python < 2.6, where floats didn't have the
``.is_integer()`` method.
"""
try:
value_as_int = int(value)
is_int = value_as_int == float(value)
except (ValueError, TypeError):
is_int = False
if not is_int:
raise InvalidOperationError("To compare the amount of items in a "
"set, the operand %s has to be an "
"integer" % repr(value))
return value_as_int
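# A minimal usage sketch (assumed, not part of the original module): Number and
# String coerce the compared value, and Set membership delegates to each item's
# equals(), so plain Python values can be tested against the constants.
if __name__ == '__main__':
    ctx = {}
    print(Number("5").greater_than("4.5", ctx))   # True -- both sides coerced to float
    print(String("hola").equals("hola", ctx))     # True
    s = Set(String("a"), String("b"))
    print(s.belongs_to("a", ctx))                 # True
    print(s.is_subset(["a", "b"], ctx))           # True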
| MikeDombo/Stock_Backtester | booleano/operations/operands/constants.py | constants.py | py | 15,020 | python | en | code | 3 | github-code | 6 |
| 26683410836 |
#!/usr/bin/python3
'''Defines a Base class
'''
import json
from os import path
class Base:
'''Represents a base class
Attributes:
__nb_objects: holds the number of Base instances created
'''
__nb_objects = 0
def __init__(self, id=None):
'''Instantiates a Base object
Args:
id: type int. Defaults to None
'''
if id is not None:
self.id = id
else:
type(self).__nb_objects += 1
self.id = type(self).__nb_objects
@staticmethod
def to_json_string(list_dictionaries):
'''Returns the JSON string representation of list_dictionaries
'''
if list_dictionaries is None:
return '[]'
if type(list_dictionaries) is not list:
raise TypeError('to_json_string argument must be a list of dicts')
for obj in list_dictionaries:
if type(obj) is not dict:
raise TypeError('items in to_json_string arg must be dicts')
return json.dumps(list_dictionaries)
@classmethod
def save_to_file(cls, list_objs):
'''Writes the JSON string representation of list_objs to a file
'''
        if list_objs is not None and type(list_objs) is not list:
            raise TypeError('list_objs must be of type list')
        for obj in list_objs or []:
            if type(obj) is not cls:
                raise TypeError('items in list_objs must be of same type as cls')
filename = cls.__name__ + '.json'
with open(filename, 'w', encoding='utf-8') as f:
if list_objs is None:
f.write('[]')
else:
list_dicts = [obj.to_dictionary() for obj in list_objs]
# json.dump(list_dicts, f) achieves same thing as next line
f.write(Base.to_json_string(list_dicts))
@staticmethod
def from_json_string(json_string):
'''Returns the list of JSON string representation
'''
if json_string is None or json_string == '':
return []
if type(json_string) is not str:
raise TypeError('json_string must be str repr of a list of dicts')
return json.loads(json_string)
@classmethod
def create(cls, **dictionary):
'''Returns an instance with all attributes already set
'''
# Create a dummy Rectangle or Square instance
if cls.__name__ == 'Rectangle':
dummy = cls(1, 1)
elif cls.__name__ == 'Square':
dummy = cls(1)
dummy.update(**dictionary)
return dummy
@classmethod
def load_from_file(cls):
'''Returns a list of instances
'''
filename = cls.__name__ + '.json'
if path.exists(filename):
with open(filename, 'r', encoding='utf-8') as f:
json_string = f.read()
list_dict = cls.from_json_string(json_string)
return [cls.create(**d) for d in list_dict]
return []
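# A small usage sketch (not part of the original module): round-tripping a list
# of dictionaries through the two JSON helpers.
if __name__ == '__main__':
    s = Base.to_json_string([{'id': 1, 'width': 4}])
    print(s)                          # '[{"id": 1, "width": 4}]'
    print(Base.from_json_string(s))   # [{'id': 1, 'width': 4}]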
| nzubeifechukwu/alx-higher_level_programming | 0x0C-python-almost_a_circle/models/base.py | base.py | py | 2,989 | python | en | code | 0 | github-code | 6 |
| 9324609377 |
from flask import Blueprint, render_template
redspine = Blueprint('redspine',
__name__,
template_folder='./',
static_folder='./',
static_url_path='/')
redspine.display_name = "Redspine"
redspine.published = False
redspine.description = "A red-spine notebook. Art that folds in on itself across pages by bleeding through."
@redspine.route('/')
def _redspine():
return render_template('redspine.html')
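# Registration sketch (the application object below is assumed, not part of
# this module): the blueprint is mounted like any other Flask blueprint.
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(redspine, url_prefix='/redspine')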
| connerxyz/exhibits | cxyz/exhibits/redspine/redspine.py | redspine.py | py | 491 | python | en | code | 0 | github-code | 6 |
| 6387062201 |
from jupyterthemes import install_theme, get_themes
from jupyterthemes import stylefx
# NOTE: mf, nf and tc are used below but never defined in this snippet;
# None is assumed here so install_theme falls back to its own defaults.
mf = nf = tc = None
def install_themes():
    themes = get_themes()
    for t in themes:
        try:
            install_theme(theme=t, monofont=mf, nbfont=nf, tcfont=tc)
except Exception:
return False
return True
def install_fonts():
    # The original snippet uses an undefined theme `t` below; the first
    # available theme is assumed here so the install_theme calls can run.
    t = get_themes()[0]
    fonts = stylefx.stored_font_dicts('', get_all=True)
fontvals = [list(fonts[ff]) for ff in ['mono', 'sans', 'serif']]
monotest, sanstest, seriftest = [fv[:4] for fv in fontvals]
for i in range(4):
mono, sans, serif = monotest[i], sanstest[i], seriftest[i]
try:
install_theme(theme=t, monofont=mono, nbfont=sans, tcfont=serif)
except Exception:
return False
try:
install_theme(theme=t, monofont=mono, nbfont=serif, tcfont=sans)
except Exception:
return False
return True
install_themes()
install_fonts()
| dunovank/jupyter-themes | tests/test_themes.py | test_themes.py | py | 939 | python | en | code | 9,665 | github-code | 6 |
| 35175789251 |
from django.shortcuts import render
from .models import Book, Shope
def home(request):
# qs = Post.objects.all()
# # The DB query has not been executed at this point
# x = qs
# # Just assigning variables doesn't do anything
# for x in qs:
# print(x)
# # The query is executed at this point, on iteration
# for x in qs:
# print("%d" % x.id)
# # The query is not executed this time, due to caching
post_qs = Book.objects.order_by('id')
for start, end, total, qs in batch_qs(post_qs):
print("Now processing %s - %s of %s" % (start + 1, end, total))
for post in qs:
print(post.name)
return render(request, 'query_optimization/home.html')
def batch_qs(qs, batch_size=10):
"""
Returns a (start, end, total, queryset) tuple for each batch in the given
queryset.
Usage:
# Make sure to order your querset
article_qs = Article.objects.order_by('id')
for start, end, total, qs in batch_qs(article_qs):
print "Now processing %s - %s of %s" % (start + 1, end, total)
for article in qs:
print article.body
"""
total = qs.count()
for start in range(0, total, batch_size):
end = min(start + batch_size, total)
yield (start, end, total, qs[start:end])
# def home(request):
# books = Book.objects.all().only("name", "create_date")
# for each in books:
# print(each.name)
# print(f"Cache {books._result_cache}")
# return render(request, 'query_optimization/home.html')
def home(request):
queryset = Shope.objects.prefetch_related('book').all()
stores = []
for store in queryset:
books = [book.name for book in store.book.all()]
stores.append({'id': store.id, 'name': store.name, 'books': books})
return render(request, 'query_optimization/home.html')
# Dangling example left in the source (kept as a comment so the module still
# imports): `Prefetch` is not imported above and `Store` is not one of this
# app's models -- with the models defined here it would read roughly as:
# from django.db.models import Prefetch
# queryset = Shope.objects.prefetch_related(
#     Prefetch('book', queryset=Book.objects.filter(price__range=(250, 300))))
| Azhar-inexture-1/django_practice_models | query_optimization/views.py | views.py | py | 2,036 | python | en | code | 0 | github-code | 6 |
| 10134904630 |
import mysql.connector as ms
db=ms.connect(host="localhost",user="root",passwd="1234",database='school')
cn=db.cursor()
cn.execute("create table students ( rno int(3) not null unique, name char(30), marks int(3), grade char(1) )")
db.commit()
def insert_rec():
while True:
rn=int(input("Enter roll number:"))
sname=input("Enter name:")
marks=float(input("Enter marks:"))
gr=input("Enter grade:")
cn.execute("insert into students values({},'{}',{},'{}')".format(rn,sname,marks,gr))
db.commit()
ch=input("Want more records? Press (N/n) to stop entry:")
if ch in 'Nn':
break
def update_rec():
rn=int(input("Enter rollno to update:"))
marks=float(input("Enter new marks:"))
gr=input("Enter Grade:")
cn.execute("update students set marks={},grade='{}' where rno={}".format(marks,gr,rn))
db.commit()
def delete_rec():
rn=int(input("Enter rollno to delete:"))
cn.execute("delete from students where rno={}".format(rn))
db.commit()
def view_rec():
cn.execute("select * from students;")
data = cn.fetchall()
for row in data:
print(row)
while True:
print("MENU\n1. Insert Record\n2. Update Record \n3. Delete Record\n4. Display Record \n5. Exit")
ch=int(input("Enter your choice: "))
if ch==1:
insert_rec()
elif ch==2:
update_rec()
elif ch==3:
delete_rec()
elif ch==4:
view_rec()
elif ch==5:
break
else:
print("Invalid Option")
| shreykuntal/Cbse-12-project | practical+project/pysql/1/pr.py | pr.py | py | 1,542 | python | en | code | 0 | github-code | 6 |
| 9270626576 |
from dataclasses import dataclass
@dataclass
class block:
name: str
seperatorStart: str
seperatorEnd: str
def getBlock(blocks: list, input: list):
strings = list()
index = 0
offsetindex = 0
foundBlock = False
dontAppend = False
for string in input:
dontAppend = False
if (foundBlock and blocks[index].name == "*"):
if (type(strings) == list): strings = dict()
if (string.__contains__(blocks[index].seperatorStart)):
if (offsetindex == 0):
foundBlockName = string.split(blocks[index].seperatorStart)[0].strip().lower()
dontAppend = True
offsetindex += 1
elif (string.__contains__(blocks[index].seperatorEnd)):
if (offsetindex == 0): break
else: offsetindex -= 1
if (dontAppend == False and offsetindex > 0):
if (strings.__contains__(foundBlockName)):
strings[foundBlockName].append(string)
else:
strings[foundBlockName] = [string]
elif (foundBlock == True):
if (string.__contains__(blocks[index].seperatorStart)):
offsetindex += 1
elif (string.__contains__(blocks[index].seperatorEnd)):
if (offsetindex == 0): break
else: offsetindex -= 1
strings.append(string)
else:
if (string.__contains__(blocks[index].seperatorStart)):
stringSplit = string.split(blocks[index].seperatorStart, 1)
if (stringSplit[0].strip().lower() == blocks[index].name.lower().strip() and offsetindex == 0):
if (len(stringSplit[1].strip()) > 0):
strings.append(stringSplit[1].strip())
if (index + 1 <= len(blocks) - 1 ): index += 1
if (index == len(blocks) - 1): foundBlock = True
else:
offsetindex += 1
elif (string.__contains__(blocks[index].seperatorEnd)):
if (offsetindex > 0):
offsetindex -= 1
else: index -= 1
return strings
def getVariable(name: str, blocks: list, seperator: str, input: list):
if (len(blocks) > 0): block = getBlock(blocks, input)
else: block = input
if (name == "*"):
output = dict()
for string in block:
if (string.__contains__(seperator)):
stringSplit = [ stringSplit.strip() for stringSplit in string.split(seperator)]
output[stringSplit[0].lower()] = stringSplit[1]
    else:
        output = None  # returned as-is if no matching variable is found
        for string in block:
if (string.__contains__(seperator)):
stringSplit = [ stringSplit.strip() for stringSplit in string.split(seperator)]
if (stringSplit[0].lower() == name.lower()):
output = stringSplit[1]
break
return output
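# A small usage sketch (input format assumed, not part of the original file):
# pull values out of a "settings { ... }" block made of "key = value" lines.
if __name__ == '__main__':
    lines = ["settings {", "name = Foo", "speed = 3", "}"]
    print(getBlock([block("settings", "{", "}")], lines))                  # ['name = Foo', 'speed = 3']
    print(getVariable("name", [block("settings", "{", "}")], "=", lines))  # Foo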
| superboo07/TextAdventure | TAUtilities.py | TAUtilities.py | py | 2,998 | python | en | code | 0 | github-code | 6 |
| 21325119133 |
#!/usr/bin/python3
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
#import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# Flatten the 28x28 images to 784-length vectors and scale to [0, 1] so they
# match the Dense input_shape used below.
train_images = train_images.reshape(-1, 784).astype('float32') / 255.0
test_images = test_images.reshape(-1, 784).astype('float32') / 255.0
# Define a simple sequential model
def create_model():
model = tf.keras.models.Sequential([
keras.layers.Dense(512, activation='relu', input_shape=(784,)),
keras.layers.Dropout(0.2),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
if __name__ == "__main__":
model = create_model()
model.summary()
model.fit(train_images, train_labels, epochs=10)
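    # Added sketch (not in the original script): evaluate the trained model on
    # the held-out test split loaded above.
    test_loss, test_acc = model.evaluate(test_images, test_labels)
    print("Test accuracy:", test_acc)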
| NJUleo/Software-Testing-Lab-ML | buildModel.py | buildModel.py | py | 780 | python | en | code | 0 | github-code | 6 |
| 72331238589 |
import gc
import numpy as np
import xarray as xr
import scipy.ndimage.filters as conv
from . import dc_utilities as utilities
from datetime import datetime
####################################################
# | TSM |
####################################################
# 0.0001 for the scale of ls7 data.
def _tsmi(dataset):
return (dataset.red.astype('float64') + dataset.green.astype('float64'))*0.0001 / 2
def tsm(dataset_in, clean_mask=None, no_data=0):
# Create a clean mask from cfmask if the user does not provide one
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
tsm = 3983 * _tsmi(dataset_in)**1.6246
tsm.values[np.invert(clean_mask)] = no_data # Contains data for clear pixels
# Create xarray of data
time = dataset_in.time
latitude = dataset_in.latitude
longitude = dataset_in.longitude
dataset_out = xr.Dataset({'tsm': tsm},
coords={'time': time,
'latitude': latitude,
'longitude': longitude})
return dataset_out
def mask_tsm(dataset_in, wofs):
wofs_criteria = wofs.copy(deep=True).normalized_data.where(wofs.normalized_data > 0.8)
wofs_criteria.values[wofs_criteria.values > 0] = 0
kernel = np.array([[1,1,1],[1,1,1],[1,1,1]])
mask = conv.convolve(wofs_criteria.values, kernel, mode ='constant')
mask = mask.astype(np.float32)
dataset_out = dataset_in.copy(deep=True)
dataset_out.normalized_data.values += mask
dataset_out.total_clean.values += mask
dataset_out.normalized_data.values[np.isnan(dataset_out.normalized_data.values)] = 0
dataset_out.total_clean.values[np.isnan(dataset_out.total_clean.values)] = 0
return dataset_out
| ceos-seo/Data_Cube_v2 | ui/django_site_v2/data_cube_ui/utils/dc_tsm.py | dc_tsm.py | py | 1,835 | python | en | code | 26 | github-code | 6 |
| 38907427149 |
import tkinter as tk
try:
import pygame
except ImportError:
audio = None
else:
audio = True
import sys
import random
import time
### Stopped Let's code: Tetris episode 19 by TigerhawkT3 at 00:58:42
### Use score_lines or high_score_lines to increase level and speed etc.
class Shape:
def __init__(self, shape, key, piece, row, column, coords):
self.shape = shape
self.key = key
self.piece = piece
self._row = row
self.kicked = False
self._rotation_index = 0
self.column = column
self.coords = coords
self.hover_time = self.spin_time = time.perf_counter()
@property
def row(self):
return self._row
@row.setter
def row(self, x):
if x != self._row and not self.kicked:
self._row = x
self.hover_time = time.perf_counter()
@property
def rotation_index(self):
return self._rotation_index
@rotation_index.setter
def rotation_index(self, x):
self._rotation_index = x
self.spin_time = time.perf_counter()
@property
def hover(self):
return time.perf_counter() - self.hover_time < 0.5
@property
def spin(self):
return time.perf_counter() - self.spin_time < 0.5
class Tetris:
def __init__(self, parent, audio):
self.debug = 'debug' in sys.argv[1:]
self.random = 'random' in sys.argv[1:]
self.hover = 'nohover' not in sys.argv[1:]
self.spin = 'spin' in sys.argv[1:]
self.kick = 'kick' in sys.argv[1:]
parent.title('Pythris')
self.parent = parent
self.audio = audio
if self.audio:
pygame.mixer.init(buffer=512)
try:
self.sounds = {name: pygame.mixer.Sound(name) for name in ('music.ogg',
'settle.ogg',
'clear.ogg',
'lose.ogg')}
except pygame.error as err:
self.audio = None
print(err)
else:
self.audio = {'m': True, 'e': True}
for char in 'mMeE':
self.parent.bind(char, self.toggle_audio)
self.sounds['music.ogg'].play(loops=-1)
self.board_width = 10
self.board_height = 24
self.high_score = 0
self.high_score_lines = 0
self.width = 200
self.height = 480
self.square_width = self.width//10
self.max_speed_score = 5000
self.speed_factor = 250
self.shapes = {'S':[['*', ''],
['*', '*'],
['', '*']],
'Z':[['', '*'],
['*', '*'],
['*', '']],
'J':[['*', '*'],
['*', ''],
['*', '']],
'L':[['*', ''],
['*', ''],
['*', '*']],
'O':[['*', '*'],
['*', '*']],
'I':[['*'],
['*'],
['*'],
['*']],
'T':[['*', '*', '*'],
['', '*', '']]
}
self.colours = {'S': '#6495ED',
'Z': '#F08080',
'J': '#B0C4DE',
'L': '#FFDAB9',
'O': '#DB7093',
'I': '#BA55D3',
'T': '#40E0D0'}
for key in ('<Down>', '<Left>', '<Right>'):
self.parent.bind(key, self.shift)
self.parent.bind('<Up>', self.rotate)
for key in ('a', 'A', 'd', 'D', 's', 'S'):
self.parent.bind(key, self.snap)
self.parent.bind('<Escape>', self.pause)
for key in ('<Control-n>', '<Control-N>'):
self.parent.bind(key, self.draw_board)
for key in ('g', 'G'):
self.parent.bind(key, self.toggle_guides)
self.canvas = None
self.preview_canvas = None
self.ticking = None
self.spawning = None
self.guide_fill = ''
self.score_var = tk.StringVar()
self.score_label = tk.Label(ROOT,
textvariable=self.score_var,
width=25,
height=5,
font=('Helvetica', 12))
self.score_label.grid(row=2, column=1, sticky="S")
self.high_score_var = tk.StringVar()
self.high_score_var.set('High Score:\n0 (0)')
self.high_score_label = tk.Label(ROOT,
textvariable=self.high_score_var,
width=25,
height=5,
font=('Helvetica', 12))
self.high_score_label.grid(row=3, column=1, sticky="N")
self.preview_label = tk.Label(ROOT,
text='Next Piece',
width=25,
height=5,
font=('Helvetica', 12))
self.preview_label.grid(row=0, column=1, sticky="S")
self.draw_board()
def tick(self):
if self.piece_is_active and not (self.spin and self.active_piece.spin):
self.shift()
self.ticking = self.parent.after(self.tickrate, self.tick)
def shift(self, event=None):
if not self.piece_is_active:
return
r = self.active_piece.row
c = self.active_piece.column
l = len(self.active_piece.shape)
w = len(self.active_piece.shape[0])
direction = (event and event.keysym) or 'Down'
# use event-keysym to check event/direction
if direction == 'Down':
rt = r+1 # row temporary
ct = c # column temporary
elif direction == 'Left':
rt = r
ct = c-1
elif direction == 'Right':
rt = r
ct = c+1
success = self.check_and_move(self.active_piece.shape, rt, ct, l, w)
        if direction == 'Down' and not success and not (self.hover and self.active_piece.hover):
self.settle()
def draw_board(self, event=None):
if self.ticking:
self.parent.after_cancel(self.ticking)
if self.spawning:
self.parent.after_cancel(self.spawning)
self.score_var.set('Score:\n0')
self.board = [['' for column in range(self.board_width)]
for row in range(self.board_height)]
self.field = [[None for column in range(self.board_width)] for row in range(self.board_height)]
if self.canvas:
self.canvas.destroy()
self.canvas = tk.Canvas(ROOT, width=self.width, height=self.height)
self.canvas.grid(row=0, column=0, rowspan=4)
self.border = self.canvas.create_rectangle(2,
2,
self.width - 2,
self.height - 2,
width=2)
self.h_separator = self.canvas.create_line(0,
self.height//6,
self.width,
self.height//6,
width=2)
self.v_separator = self.canvas.create_line(self.width,
0,
self.width,
self.height,
width=2)
if self.preview_canvas:
self.preview_canvas.destroy()
self.preview_canvas = tk.Canvas(ROOT,
width=5*self.square_width,
height=5*self.square_width)
self.preview_canvas.grid(row=1, column=1)
self.tickrate = 1000
self.score = 0
self.score_lines = 0
self.piece_is_active = False
self.paused = False
self.bag = []
self.preview()
self.guides = [self.canvas.create_line(0, 0, 0, self.height),
self.canvas.create_line(0, 0, self.width, self.height)]
self.spawning = self.parent.after(self.tickrate, self.spawn)
self.ticking = self.parent.after(self.tickrate*2, self.tick)
def toggle_guides(self, event=None):
self.guide_fill = '' if self.guide_fill else 'black'
self.canvas.itemconfig(self.guides[0], fill=self.guide_fill)
self.canvas.itemconfig(self.guides[1], fill=self.guide_fill)
def toggle_audio(self, event=None):
if not event:
return
key = event.keysym.lower()
self.audio[key] = not self.audio[key]
if key == 'm':
if not self.audio['m']:
self.sounds['music.ogg'].stop()
else:
self.sounds['music.ogg'].play(loops=-1)
def pause(self, event=None):
if self.piece_is_active and not self.paused:
self.paused = True
self.piece_is_active = False
self.parent.after_cancel(self.ticking)
elif self.paused:
self.paused = False
self.piece_is_active = True
self.ticking = self.parent.after(self.tickrate, self.tick)
def print_board(self):
for row in self.board:
print(*(cell or ' ' for cell in row))
def check(self, shape, r, c, l, w):
for row, squares in zip(range(r, r+l), shape):
for column, square in zip(range(c, c+w), squares):
if ((row not in range(self.board_height))
or (column not in range(self.board_width))
or (square and self.board[row][column] == 'x')):
return
return True
def move(self, shape, r, c, l, w):
square_idxs = iter(range(4))
for row in self.board:
row[:] = ['' if cell == '*' else cell for cell in row]
for row, squares in zip(range(r, r+l), shape):
for column, square in zip(range(c, c+w), squares):
if square:
self.board[row][column] = square
square_idx = next(square_idxs)
coord = (column*self.square_width,
row*self.square_width,
(column+1)*self.square_width,
(row+1)*self.square_width)
self.active_piece.coords[square_idx] = coord
self.canvas.coords(self.active_piece.piece[square_idx], coord)
self.active_piece.row = r
self.active_piece.column = c
self.active_piece.shape = shape
self.move_guides(c, (c+w))
if self.debug:
self.print_board()
return True
def check_and_move(self, shape, r, c, l, w):
return self.check(shape, r, c, l, w) and self.move(shape, r, c, l, w)
def rotate(self, event=None):
if not self.active_piece:
return
if len(self.active_piece.shape) == len(self.active_piece.shape[0]):
self.active_piece.rotation_index = self.active_piece.rotation_index
return
r = self.active_piece.row
c = self.active_piece.column
l = len(self.active_piece.shape)
w = len(self.active_piece.shape[0])
x = c + w//2
y = r + l//2
direction = event.keysym
'''if direction in ('a', 'A'): # left
shape = rotate_array(self.active_piece.shape, -90)
rotation_index = (self.active_piece.rotation_index - 1) % 4
ra, rb = self.active_piece.rotation[rotation_index]
rotation_offsets = -ra, -rb
else: # right'''
shape = rotate_array(self.active_piece.shape, 90)
rotation_index = self.active_piece.rotation_index
rotation_offsets = self.active_piece.rotation[rotation_index]
rotation_index = (rotation_index + 1) % 4
l = len(shape)
w = len(shape[0])
rt = y - l//2
ct = x - w//2
x_correction, y_correction = rotation_offsets
rt += y_correction
ct += x_correction
if self.check_and_move(shape, rt, ct, l, w):
self.active_piece.rotation_index = rotation_index
if self.active_piece.kicked:
self.snap()
return
if self.kick:
for a, b in zip((0, 0, -1, 0, 0, -2, -1, -1, -1, -1, -2, -2, -2, -2),
(-1, 1, 0, -2, 2, 0, -1, 1, -2, 2, -1, 1, -2, 2)):
if self.check_and_move(shape, rt+a, ct+b, l, w):
self.active_piece.rotation_index = rotation_index
if not self.active_piece.kicked:
self.active_piece.kicked = a
if self.active_piece.kicked and not a:
self.snap()
return
def settle(self):
self.piece_is_active = False
for row in self.board:
row[:] = ['x' if cell == '*' else cell for cell in row]
if self.debug:
self.print_board()
for (x1, y1, x2, y2), id in zip(self.active_piece.coords, self.active_piece.piece):
self.field[y1//self.square_width][x1//self.square_width] = id
indices = [idx for idx, row in enumerate(self.board) if all(row)]
if indices:
self.score += (40, 100, 300, 1200)[len(indices) - 1]
self.score_lines += len(indices)
self.clear(indices)
if all(not cell for row in self.board for cell in row):
self.score += 2000
self.high_score = max(self.score, self.high_score)
self.high_score_lines = max(self.score_lines, self.high_score_lines)
self.score_var.set(f"Score:\n{self.score} ({self.score_lines})")
self.high_score_var.set(f"High Score:\n{self.high_score} ({self.high_score_lines})")
if self.score < self.max_speed_score:
self.tickrate = 1000 // (self.score//self.speed_factor + 1)
if any(any(row) for row in self.board[:4]):
self.lose()
return
if self.audio['e'] and not indices:
self.sounds['settle.ogg'].play()
self.spawning = self.parent.after(500 if indices and self.tickrate < 500 else self.tickrate, self.spawn)
def preview(self):
self.preview_canvas.delete(tk.ALL)
if not self.bag:
if self.random:
self.bag.append(random.choice('IJLOSTZ'))
else:
self.bag = random.sample('IJLOSTZ', 7)
key = self.bag.pop()
shape = rotate_array(self.shapes[key], random.choice((0, 90, 180, 270)))
self.preview_piece = Shape(shape, key, [], 0, 0, [])
width = len(shape[0])
half = self.square_width//2
for y, row in enumerate(shape):
for x, cell in enumerate(row):
if cell:
self.preview_piece.coords.append((self.square_width*x + half,
self.square_width*y + half,
self.square_width*(x+1) + half,
self.square_width*(y+1) + half))
self.preview_piece.piece.append(self.preview_canvas.create_rectangle(self.preview_piece.coords[-1],
fill=self.colours[key],
width=2))
self.preview_piece.rotation_index = 0
self.preview_piece.i_nudge = (len(shape) < len(shape[0])) and 4 in (len(shape), len(shape[0]))
self.preview_piece.row = self.preview_piece.i_nudge
if 3 in (len(shape), len(shape[0])):
self.preview_piece.rotation = [(0, 0),
(1, 0),
(-1, 1),
(0, -1)]
else:
self.preview_piece.rotation = [(1, -1),
(0, 1),
(0, 0),
(-1, 0)]
if len(shape) < len(shape[0]):
self.preview_piece.rotation_index += 1
def move_guides(self, left, right):
self.canvas.coords(self.guides[0], left*self.square_width, 0, left*self.square_width, self.height)
self.canvas.coords(self.guides[1], right*self.square_width, 0, right*self.square_width, self.height)
def spawn(self):
self.piece_is_active = True
self.active_piece = self.preview_piece
self.preview()
width = len(self.active_piece.shape[0])
start = (10-width)//2
self.active_piece.column = start
self.active_piece.start = start
self.active_piece.coords = []
self.active_piece.piece = []
for y, row in enumerate(self.active_piece.shape):
self.board[y+self.active_piece.i_nudge][start:start+width] = self.active_piece.shape[y]
for x, cell in enumerate(row, start=start):
if cell:
self.active_piece.coords.append((self.square_width*x,
self.square_width*(y+self.active_piece.i_nudge),
self.square_width*(x+1),
self.square_width*(y+self.active_piece.i_nudge+1)))
self.active_piece.piece.append(self.canvas.create_rectangle(self.active_piece.coords[-1],
fill=self.colours[self.active_piece.key],
width=2))
self.move_guides(start, (start+width))
if self.debug:
self.print_board()
def lose(self):
self.piece_is_active = False
if self.audio['e']:
self.sounds['lose.ogg'].play()
self.parent.after_cancel(self.ticking)
self.parent.after_cancel(self.spawning)
self.clear_iter(range(len(self.board)))
def snap(self, event=None):
down = {'s', 'S'}
left = {'a', 'A'}
right = {'d', 'D'}
if not self.piece_is_active:
return
r = self.active_piece.row
c = self.active_piece.column
l = len(self.active_piece.shape)
w = len(self.active_piece.shape[0])
direction = event.keysym if event is not None else 's'
while 1:
if self.check(self.active_piece.shape,
r+(direction in down),
c + (direction in right) - (direction in left),
l,
w):
r += direction in down
c += (direction in right) - (direction in left)
else:
break
self.move(self.active_piece.shape, r, c, l, w)
if direction in down:
self.settle()
def clear(self, indices):
if self.audio['e']:
self.sounds['clear.ogg'].play()
for idx in indices:
self.board.pop(idx)
self.board.insert(0, ['' for column in range(self.board_width)])
self.clear_iter(indices)
def clear_iter(self, indices, current_column=0):
for row in indices:
if row%2:
cc = current_column
else:
cc = self.board_width - current_column - 1
id = self.field[row][cc]
self.field[row][cc] = None
self.canvas.delete(id)
if current_column < self.board_width-1:
self.parent.after(50, self.clear_iter, indices, current_column+1)
else:
for idx, row in enumerate(self.field):
offset = sum(r > idx for r in indices)*self.square_width
for square in row:
if square:
self.canvas.move(square, 0, offset)
for row in indices:
self.field.pop(row)
self.field.insert(0, [None for x in range(self.board_width)])
def rotate_array(array, angle, wide=False):
'''
Rotates a rectangular or diamond 2D array in increments of 45 degrees.
Parameters:
array (list): a list containing sliceable sequences, such as list, tuple, or str
angle (int): a positive angle for rotation, in 45-degree increments.
wide (bool): whether a passed diamond array should rotate into a wide array
instead of a tall one (tall is the default). No effect on square matrices.
'''
angle = angle%360
if angle < 1:
return [list(row) for row in array]
lengths = list(map(len, array))
rect = len(set(lengths)) == 1
width = max(lengths)
height = sum(lengths)/width
if wide:
width, height = height, width
if not rect:
array = [list(row) for row in array]
array = [[array[row+col].pop() for row in range(width)] for col in range(height)]
angle += 45
nineties, more = divmod(angle, 90)
if nineties == 3:
array = list(zip(*array))[::-1]
else:
for i in range(nineties):
array = list(zip(*array[::-1]))
if more:
ab = abs(len(array)-len(array[0]))
m = min(len(array), len(array[0]))
tall = len(array) > len(array[0])
array = [[array[r][c] for r,c in zip(range(row-1, -1, -1), range(row))
] for row in range(1, m+1)
] + [[array[r][c] for r,c in zip(range(m-1+row*tall, row*tall-1, -1),
range(row*(not tall), m+row*(not tall)+1))
] for row in range(1, ab+(not tall))
] + [[array[r][c] for r,c in zip(range(len(array)-1, ab*tall+row-1, -1),
range(ab*(not tall)+row, len(array[0])+(not tall)))
] for row in range((not tall), m)
]
return array
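# Example (added, not in the original file): a 90-degree rotation turns the
# 2x3 T piece into a 3x2 layout; note the rotated rows come back as tuples.
#
#     rotate_array([['*', '*', '*'],
#                   ['',  '*', '' ]], 90)
#     # -> [('', '*'), ('*', '*'), ('', '*')]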
ROOT = tk.Tk()
TETRIS = Tetris(ROOT, audio)
ROOT.mainloop()
| Jack-Evitts/Pythtris | Tetris.py | Tetris.py | py | 22,949 | python | en | code | 0 | github-code | 6 |
| 44426650716 |
import glob
import re
from test_framework.util import (
assert_equal,
p2p_port,
wait_until,
count_log_msg
)
from test_framework.mininode import (
NetworkThread,
NodeConn,
NodeConnCB,
msg_block,
ToHex
)
from test_framework.test_framework import BitcoinTestFramework, ChainManager
from test_framework.blocktools import create_transaction, PreviousSpendableOutput
from test_framework.script import CScript, OP_TRUE
class Send_node():
def __init__(self, tmpdir, log, node_no, p2p_connection, rpc_connection):
self.p2p = p2p_connection
self.rpc = rpc_connection
self.node_no = node_no
self.tmpdir = tmpdir
self.log = log
def send_block(self, block, expect_reject = False):
self.rpc.submitblock(ToHex(block))
if expect_reject:
assert(self.check_frozen_tx_log(block.hash))
else:
assert_equal(block.hash, self.rpc.getbestblockhash())
assert(self.check_frozen_tx_log(block.hash) == False);
def check_frozen_tx_log(self, hash):
for line in open(glob.glob(self.tmpdir + f"/node{self.node_no}" + "/regtest/blacklist.log")[0]):
if hash in line:
self.log.debug("Found line in blacklist.log: %s", line)
return True
return False
def check_log(self, line_text):
for line in open(glob.glob(self.tmpdir + f"/node{self.node_no}" + "/regtest/bitcoind.log")[0]):
if re.search(line_text, line) is not None:
self.log.debug("Found line in bitcoind.log: %s", line.strip())
return True
return False
class FrozenTXOReindex(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.chain = ChainManager()
self.block_count = 0
def _init(self):
node_no = 0
# Create a P2P connections
node = NodeConnCB()
connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[node_no], node)
node.add_connection(connection)
NetworkThread().start()
# wait_for_verack ensures that the P2P connection is fully up.
node.wait_for_verack()
self.chain.set_genesis_hash(int(self.nodes[node_no].getbestblockhash(), 16))
block = self.chain.next_block(self.block_count)
self.block_count += 1
self.chain.save_spendable_output()
node.send_message(msg_block(block))
for i in range(100):
block = self.chain.next_block(self.block_count)
self.block_count += 1
self.chain.save_spendable_output()
node.send_message(msg_block(block))
self.log.info("Waiting for block height 101 via rpc")
self.nodes[node_no].waitforblockheight(101)
return node
def _create_tx(self, tx_out, unlock, lock):
unlock_script = b'' if callable(unlock) else unlock
return create_transaction(tx_out.tx, tx_out.n, unlock_script, 1, lock)
def _mine_and_send_block(self, tx, node, expect_reject = False):
block = self.chain.next_block(self.block_count)
self.chain.update_block(self.block_count, [tx] if tx else [])
self.log.debug(f"attempting mining block: {block.hash}")
node.send_block(block, expect_reject)
self.block_count += 1
return block.hash
def _remove_last_block(self):
# remove last block from chain manager
del self.chain.block_heights[self.chain.blocks[self.block_count-1].sha256]
del self.chain.blocks[self.block_count-1]
self.block_count -= 1
self.chain.set_tip(self.block_count-1)
def _mine_and_check_rejected(self, tx, node):
self.log.info(f"Mining block with transaction {tx.hash} spending TXO {tx.vin[0].prevout.hash:064x},{tx.vin[0].prevout.n} and checking that it is rejected")
old_tip = self.chain.tip
rejected_block_hash = self._mine_and_send_block(tx, node, True)
assert_equal(node.rpc.getbestblockhash(), old_tip.hash)
assert(node.check_frozen_tx_log(self.chain.tip.hash));
assert(node.check_log("Block was rejected because it included a transaction, which tried to spend a frozen transaction output.*"+self.chain.tip.hash));
# remove rejected block from test node - the only remaining copy after this point is on remote node disk
self._remove_last_block()
return rejected_block_hash
def _create_policy_freeze_block(self, spendable_out, node):
freeze_tx = self._create_tx(spendable_out, b'', CScript([OP_TRUE]))
self.log.info(f"Mining block with transaction {freeze_tx.hash} whose output will be frozen later")
self._mine_and_send_block(freeze_tx, node)
self.log.info(f"Freezing TXO {freeze_tx.hash},0 on policy blacklist")
result = node.rpc.addToPolicyBlacklist({
"funds": [
{
"txOut" : {
"txId" : freeze_tx.hash,
"vout" : 0
}
}]
});
assert_equal(result["notProcessed"], [])
spend_frozen_tx = self._create_tx(PreviousSpendableOutput(freeze_tx, 0), b'', CScript([OP_TRUE]))
self.log.info(f"Mining block with transaction {spend_frozen_tx.hash} spending frozen TXO {freeze_tx.hash},0 and checking that is accepted")
self._mine_and_send_block(spend_frozen_tx, node)
# block is accepted as consensus freeze is not in effect
assert_equal(node.rpc.getbestblockhash(), self.chain.tip.hash)
def _create_consensus_freeze_block(self, spendable_out, node):
freeze_tx = self._create_tx(spendable_out, b'', CScript([OP_TRUE]))
self.log.info(f"Mining block with transaction {freeze_tx.hash} whose output will be frozen later")
self._mine_and_send_block(freeze_tx, node)
self.log.info(f"Freezing TXO {freeze_tx.hash},0 on consensus blacklist")
result=node.rpc.addToConsensusBlacklist({
"funds": [
{
"txOut" : {
"txId" : freeze_tx.hash,
"vout" : 0
},
"enforceAtHeight": [{"start": 0}],
"policyExpiresWithConsensus": False
}]
});
assert_equal(result["notProcessed"], [])
spend_frozen_tx = self._create_tx(PreviousSpendableOutput(freeze_tx, 0), b'', CScript([OP_TRUE]))
# block is rejected as consensus freeze is in effect
rejected_block_hash = self._mine_and_check_rejected(spend_frozen_tx, node)
return (freeze_tx.hash, rejected_block_hash)
def run_test(self):
node = self._init()
out_policy_freeze_txo = self.chain.get_spendable_output()
out_consensus_freeze_txo = self.chain.get_spendable_output()
send_node = Send_node(self.options.tmpdir, self.log, 0, node, self.nodes[0])
self._create_policy_freeze_block(out_policy_freeze_txo, send_node)
[freeze_tx_hash, rejected_block_hash] = self._create_consensus_freeze_block(out_consensus_freeze_txo, send_node)
node_chain_info = send_node.rpc.getblockchaininfo()
old_tip_hash = node_chain_info['bestblockhash']
old_tip_height = node_chain_info['blocks']
assert(rejected_block_hash != old_tip_hash)
# Make sure that we get to the same height:
# best block with transactions policy frozen - should get to this point
# best block with transactions consensus frozen - should not get to this block
self.stop_node(0)
self.start_node(0, extra_args=["-reindex=1"])
        # Waiting for last valid block. Waiting just for old_tip_height is not enough because next block (to be rejected) may not be processed yet.
send_node.rpc.waitforblockheight(old_tip_height)
# Wait for next block to be rejected. We need to wait for the second occurrence of the same log because one rejection happens before
wait_until(lambda: count_log_msg(self, "InvalidChainFound: invalid block="+rejected_block_hash+" height=105", "/node0") == 2, timeout=5)
assert_equal(send_node.rpc.getbestblockhash(), old_tip_hash)
# Unfreeze and reconsider block to show that the block was still stored on disk
result = self.nodes[0].clearBlacklists( { "removeAllEntries": True } )
assert_equal(result["numRemovedEntries"], 2)
self.stop_node(0)
self.start_node(0, extra_args=["-reindex=1"])
send_node.rpc.waitforblockheight(old_tip_height + 1)
self.log.info(send_node.rpc.getblockchaininfo())
assert_equal(send_node.rpc.getbestblockhash(), rejected_block_hash)
if __name__ == '__main__':
FrozenTXOReindex().main()
| bitcoin-sv/bitcoin-sv | test/functional/bsv-frozentxo-reindex.py | bsv-frozentxo-reindex.py | py | 8,778 | python | en | code | 597 | github-code | 6 |
| 86366418129 |
import numpy as np
import matplotlib.pyplot as plt
def radial_kernel(x0, X, tau):
return np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * tau * tau))
def local_regression(x0, X, Y, tau):
# add bias term
x0 = np.r_[1, x0]
X = np.c_[np.ones(len(X)), X]
# fit model: normal equations with kernel
xw = X.T * radial_kernel(x0, X, tau)
beta = np.linalg.pinv(xw @ X) @ xw @ Y
# predict value
return x0 @ beta
def generate_data():
n = 1000
X = np.linspace(-3, 3, num=n)
Y = np.log(np.abs(X ** 2 - 1) + .5)
# Y = np.sin(X) + 0.3 * np.random.randn(n) #
plt.scatter(X, Y, s=5, color="green")
plt.savefig("LocalWeightedLinearRegression2-DataInitial.png")
plt.cla() # Clear axis
plt.clf() # Clear figure
plt.close() # Close a figure window
# jitter X
X += np.random.normal(scale=.1, size=n)
plt.scatter(X, Y, s=5, color="green")
plt.savefig("LocalWeightedLinearRegression2-DatawithGitter.png")
plt.cla() # Clear axis
plt.clf() # Clear figure
plt.close() # Close a figure window
return X, Y
def create_plot(X, Y, tau):
fig, axes = plt.subplots(3, 2, figsize=(16, 8), sharex=False, sharey=False, dpi=120)
# plt.subplots(3, 2 ) means display data in 3 rows and 2 columns
# Plot each axes
for i, ax in enumerate(axes.ravel()):
domain = np.linspace(-3, 3, num=40)
prediction = [local_regression(x0, X, Y, tau[i]) for x0 in domain]
ax.scatter(X, Y, s=5, color="green", label="actual")
ax.scatter(domain, prediction, s=5, color='red', label="prediction")
ax.set(
title="tau=" + str(tau[i]),
xlabel='X',
ylabel='Y',
)
ax.legend(loc='best')
plt.suptitle('Local Weight Linear regression', size=10, color='blue')
plt.savefig("LocalWeightedLinearRegression2-DataAndPrediction.png")
return plt
if __name__ == "__main__":
X, Y = generate_data()
tau = [800, 10, .1, .01, .08, .9]
myplot = create_plot(X, Y, tau)
myplot.show()
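    # Added sketch (not in the original script): a single-point prediction with
    # the same generated data and a moderate bandwidth.
    print(local_regression(0.0, X, Y, tau=0.5))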
| MitaAcharya/MachineLeaning | AndrewNG/Week2/Week2_LWR_Extra/LocalWeightedLinearRegression.py | LocalWeightedLinearRegression.py | py | 2,053 | python | en | code | 0 | github-code | 6 |
| 42154628409 |
from Filters.BubbleFilter import BubbleFilter
from Entities.BubbleCandidate import BubbleCandidate
class ShapeFilter(BubbleFilter):
def __init__(self, source_width, source_height, logger, min_ratio_component_area=None, min_pa_ratio=None,
max_pa_ratio=None, w_min=None, w_max=None, h_min=None, h_max=None):
BubbleFilter.__init__(self, "ShapeFilter", source_width, source_height, logger)
# using default values if value not provided
self.min_ratio_component_area = min_ratio_component_area if min_ratio_component_area is not None else 0.45
self.min_pa_ratio = min_pa_ratio if min_pa_ratio is not None else 0.0075
self.max_pa_ratio = max_pa_ratio if max_pa_ratio is not None else 0.15
self.w_min = w_min if w_min is not None else 0.005
self.w_max = w_max if w_max is not None else 0.6
self.h_min = h_min if h_min is not None else 0.005
self.h_max = h_max if h_max is not None else 0.6
def is_accepted(self, bubble_candidate: BubbleCandidate) -> bool:
contour_area = bubble_candidate.get_contour_area()
perimeter = bubble_candidate.get_perimeter()
if contour_area == 0:
self.log_reject_reason(bubble_candidate.component_id, "contour area > 0", 0, 1)
return False
ratio_component_area = bubble_candidate.component_area / contour_area
if ratio_component_area < self.min_ratio_component_area:
self.log_reject_reason(bubble_candidate.component_id, "component / contour area", ratio_component_area,
self.min_ratio_component_area)
return False
pa_ratio = perimeter / contour_area
if pa_ratio < self.min_pa_ratio:
self.log_reject_reason(bubble_candidate.component_id, "min perimeter / contour area", pa_ratio,
self.min_pa_ratio)
return False
if pa_ratio > self.max_pa_ratio:
self.log_reject_reason(bubble_candidate.component_id, "max perimeter / contour area", pa_ratio,
self.max_pa_ratio)
return False
x, y, w, h = bubble_candidate.get_bounding_box()
if w < self.source_width * self.w_min:
self.log_reject_reason(bubble_candidate.component_id, "min width", w,
self.source_width * self.w_min)
return False
if w > self.source_width * self.w_max:
self.log_reject_reason(bubble_candidate.component_id, "max width", w,
self.source_width * self.w_max)
return False
if h < self.source_height * self.h_min:
self.log_reject_reason(bubble_candidate.component_id, "min height", h,
self.source_height * self.h_min)
return False
if h > self.source_height * self.h_max:
self.log_reject_reason(bubble_candidate.component_id, "max height", h,
self.source_height * self.h_max)
return False
return True
| lukasvlc3k/comics-text-recognition | Filters/ShapeFilter.py | ShapeFilter.py | py | 3,134 | python | en | code | 0 | github-code | 6 |
| 17515420038 |
from log_parser import LogParser
from producer import KafkaProducer
if __name__ == "__main__":
logFile = LogParser.read_log_file()
logFileGen = LogParser.fetch_log(logFile)
producer = KafkaProducer()
while True:
try:
data = next(logFileGen)
serialized_data = LogParser.serialize_log(data)
print("Message is :: {}".format(serialized_data))
producer.produce(serialized_data)
except StopIteration:
exit()
except KeyboardInterrupt:
print("Printing last message before exiting :: {}".format(serialized_data))
exit()
|
BountyHunter1999/Dashboard-App
|
main.py
|
main.py
|
py
| 636 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41913795360
|
import logging
from functools import partial
from datasets import load_dataset
from transformers import (
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from src.callbacks import ShuffleCallback
from src.config import Config, TrainingArgumentsConfig
from src.data_collator import DataCollatorSpeechSeq2SeqWithPadding
from src.metrics import compute_metrics
from src.prepare_dataset import prepare_dataset
logging.basicConfig(level=logging.INFO)
def train():
config = Config()
training_args_config = TrainingArgumentsConfig()
training_args = Seq2SeqTrainingArguments(**training_args_config.dict())
if config.prepare_dataset:
dataset, _ = prepare_dataset(config)
else:
dataset = load_dataset(config.dataset_name, config.dataset_lang)
logging.info("Training model...")
model = WhisperForConditionalGeneration.from_pretrained(config.model_name)
processor = WhisperProcessor.from_pretrained(
config.model_name, task=config.task, language=config.model_lang
)
compute_metrics_fn = partial(compute_metrics, processor=processor)
trainer = Seq2SeqTrainer(
args=training_args,
model=model,
train_dataset=dataset["train"],
eval_dataset=dataset["validation"],
data_collator=DataCollatorSpeechSeq2SeqWithPadding(processor=processor),
compute_metrics=compute_metrics_fn,
tokenizer=processor,
callbacks=[ShuffleCallback()],
)
trainer.train()
trainer.push_to_hub()
if __name__ == "__main__":
train()
|
Giorgi-Sekhniashvili/geo_whisper
|
train.py
|
train.py
|
py
| 1,605 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11941164377
|
#!/usr/bin/env python
#
# fast_mr ->
#
# Fast molecular replacement in the spirit of fast_dp, starting from coordinate
# files and using brute force (and educated guesses) to get everything going.
#
# fast_mr - main program.
import os
import sys
import time
import shutil
import math
import traceback
from multiprocessing import Pool
from iotbx import mtz
from iotbx import pdb
from libtbx.phil import parse
from cctbx.sgtbx import space_group, space_group_symbols
from iotbx.scalepack import merge as merge_scalepack
from libtbx import introspection
if not 'FAST_EP_ROOT' in os.environ:
raise RuntimeError('FAST_EP_ROOT not set')
fast_ep_lib = os.path.join(os.environ['FAST_EP_ROOT'], 'lib')
if not fast_ep_lib in sys.path:
sys.path.append(fast_ep_lib)
from xml_output import write_ispyb_xml
from generate_possible_spacegroups import generate_chiral_spacegroups, \
spacegroup_enantiomorph, spacegroup_full, sanitize_spacegroup
from run_job import run_job, run_job_cluster, is_cluster_job_finished
from fast_mr_phaser import run_phaser_cluster
from parse_pdb import pdb_file_nres
class logger:
def __init__(self):
self._fout = open('fast_mr.log', 'w')
return
def __del__(self):
self._fout.close()
self._cout = None
return
def __call__(self, _line):
sys.stdout.write('%s\n' % _line)
self._fout.write('%s\n' % _line)
return
class Fast_mr:
def __init__(self, hklin, xyzin_and_ids):
self._hklin = os.path.abspath(hklin)
self._xyzins = [os.path.abspath(xyzin_and_id[0])
for xyzin_and_id in xyzin_and_ids]
self._ids = [xyzin_and_id[1] for xyzin_and_id in xyzin_and_ids]
self._cpu = 2
self._machines = 10
self._wd = os.getcwd()
self._log = logger()
self._log('Using %d cpus / %d machines' % (self._cpu, self._machines))
self._full_command_line = ' '.join(sys.argv)
# pull information we'll need from the input MTZ file - the unit cell,
# the pointgroup and the number of reflections in the file. select
# first Miller array in file which has native data
# --- SAD DATA ---
m = mtz.object(self._hklin)
mas = m.as_miller_arrays()
self._data = None
for ma in mas:
if str(ma.observation_type()) != 'xray.amplitude':
continue
self._data = ma
break
if not self._data:
raise RuntimeError('no intensity data found in %s' % \
self._hklin)
self._pointgroup = self._data.space_group().type().number()
self._unit_cell = self._data.unit_cell().parameters()
self._nrefl = m.n_reflections()
self._spacegroups = generate_chiral_spacegroups(self._pointgroup)
# write out a nice summary of the data set properties and what columns
# were selected for analysis
self._log('Input: %s' % self._hklin)
self._log('Columns: %s' % self._data.info().label_string())
self._log('Unit cell: %.2f %.2f %.2f %.2f %.2f %.2f' % \
self._unit_cell)
self._log('Pointgroup: %s' % m.space_group().type().lookup_symbol())
self._log('Resolution: %.2f - %.2f' % self._data.resolution_range())
self._log('Nrefl: %d' % self._nrefl)
self._log('Spacegroups: %s' % ' '.join(self._spacegroups))
self._log('Input coordinate files:')
self._nres = []
for xyzin, _id in zip(self._xyzins, self._ids):
nres = pdb_file_nres(xyzin)
self._nres.append(nres)
self._log('%40s %8d %.3f' % (os.path.split(xyzin)[1], nres, _id))
total_nres = sum(self._nres)
# FIXME calculate probable number of complexes in here
self._copies = 1
return
def do_mr(self):
t0 = time.time()
cluster = True
njobs = self._machines
ncpu = self._cpu
# set up N phaser jobs
jobs = [ ]
for spacegroup in self._spacegroups:
wd = os.path.join(self._wd, spacegroup)
if not os.path.exists(wd):
os.makedirs(wd)
commands = ['mode mr_auto',
'spacegroup %s' % spacegroup,
'hklin %s' % self._hklin,
'labin F=F SIGF=SIGF',
'root mr%s' % spacegroup]
for j, (xyzin, _id, nres) in enumerate(
zip(self._xyzins, self._ids, self._nres)):
commands.append('ensemble m%d pdb %s identity %f' %
(j, xyzin, 100 * _id))
for j, (xyzin, _id, nres) in enumerate(
zip(self._xyzins, self._ids, self._nres)):
commands.append('composition protein nres %d num %d' %
(nres, self._copies))
for j, (xyzin, _id, nres) in enumerate(
zip(self._xyzins, self._ids, self._nres)):
commands.append('search ensemble m%d num %d' %
(j, self._copies))
jobs.append((wd, commands))
# actually execute the tasks - either locally or on a cluster, allowing
# for potential for fewer available machines than jobs
self._log('Running %d x phaser jobs' % len(jobs))
pool = Pool(min(njobs, len(jobs)))
if cluster:
pool.map(run_phaser_cluster, jobs)
else:
            raise NotImplementedError('running phaser locally (non-cluster) is not implemented')
# now look for the results
worked = []
for job in jobs:
wd = job[0]
spacegroup = os.path.split(wd)[-1]
if os.path.exists(os.path.join(wd, 'mr%s.sol' % spacegroup)):
worked.append(os.path.join(wd, 'mr%s.sol' % spacegroup))
for w in worked:
sol = open(w).read()
for record in sol.split('\n'):
if 'SOLU SPAC' in record:
spacegroup = record.replace(
'SOLU SPAC', '').replace(' ', '')
if 'SOLU SET' in record:
tfz = float(record.replace('=', ' ').split()[5])
print('Solution: %s %.2f' % (spacegroup, tfz))
t1 = time.time()
self._log('Time: %.2f' % (t1 - t0))
if __name__ == '__main__':
xyzin_and_ids = []
for arg in sys.argv[2:]:
if ':' in arg:
xyzin = arg.split(':')[0]
_id = float(arg.split(':')[1])
if _id > 1.0:
_id /= 100.0
xyzin_and_ids.append((xyzin, _id))
else:
xyzin_and_ids.append((arg, 1.0))
fast_mr = Fast_mr(sys.argv[1], xyzin_and_ids)
try:
fast_mr.do_mr()
except RuntimeError as e:
fast_mr._log('*** MR: %s ***' % str(e))
traceback.print_exc(file = open('fast_mr.error', 'w'))
sys.exit(1)
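# Example invocation (added; file names are hypothetical). Based on the argument
# parsing above: the first argument is the merged MTZ file, and every following
# argument is a coordinate file optionally suffixed with ':identity', given either
# as a fraction or a percentage (values > 1 are divided by 100).
#
#   fast_mr.py native.mtz search_model.pdb:90 second_domain.pdb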
|
DiamondLightSource/fast_ep
|
src/fast_mr.py
|
fast_mr.py
|
py
| 6,927 |
python
|
en
|
code
| 2 |
github-code
|
6
|
7089066120
|
# import modules
import timeit
import numpy as np
import matplotlib.pyplot as plt
import time
# Question 1 ------
# selection sort
def selectionsort(arr):
for i in range(len(arr)): # outer loop to traverse array
minInd = i # minimum index starts at i
for j in range(i+1,len(arr)): # inner loop begins at i + 1, ends at the end
if arr[j] < arr[minInd]: # if a smaller value is found
minInd = j # the minimum index becomes the index that it was found
if minInd != i: # when the min index changes
# swap positions so that the minimum index is at the beginning of the inner loop
temp = arr[i]
arr[i] = arr[minInd]
arr[minInd] = temp
return arr # return the sorted array
# insertion sort
def insertionsort(arr):
for i in range(len(arr)): # outer loop to traverse array
j = i # inner loop starts at i
while(j>0 and arr[j-1]> arr[j]): # while the value at j is greater than min and greater than 0
# swap
temp = arr[j]
arr[j] = arr[j - 1]
arr[j-1] = temp
j -= 1 # j steps back
return arr # return sorted array
# merge sort
def mergesort(arr):
n = len(arr) # record array length
# new arrays for splitting
left = []
right = []
if n <= 1: # when the array length becomes 1, return the array
return arr
#split the array into left and right sides
for i in range(0,n):
if(i<(n-1)/2):
left.append(arr[i])
else:
right.append(arr[i])
leftsort = mergesort(left) # take all the left sides
rightsort = mergesort(right) # take all the right sides
return merge(leftsort,rightsort) # merge them back together
# helper function for merge sort, merges arrays together in sorted order
def merge(l,r):
    # sentinel values appended to the end of each array so the merge loop knows when to stop (assumes real values are smaller than 9999999)
l.append(9999999)
r.append(9999999)
D = [] # merged array
i = j = 0 # start at index 0
while l[i] < 9999999 or r[j] < 9999999: # while not at the end of either array
if l[i] < r[j]: # find the lower of the two values in the two arrays
D.append(l[i]) # add it to the new array
i = i + 1 # increase index
else: # same as above
D.append(r[j])
j = j + 1
return D # return merged array
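# Quick sanity check (added, left commented out so it does not interfere with the
# timing experiments below): all three sorts should agree with Python's built-in sorted().
# sample = list(np.random.rand(100))
# assert selectionsort(list(sample)) == sorted(sample)
# assert insertionsort(list(sample)) == sorted(sample)
# assert mergesort(list(sample)) == sorted(sample)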
index = [] # array for recording array size
# Question 2 ----
'''
# arrays for holding time values and numbers for sorting
sortedarr = []
reversearr = []
reversearrtwo = []
selectsortedtime = []
selectreversetime = []
insertsortedtime = []
insertreversetime = []
mergesortedtime = []
mergereversetime = []
# how many numbers in the array at the given time; used for incrementing so we don't have to keep making the ordered
# and reverse ordered arrays over and over again
sortedarrayCount = 1
reversearrayCount = 0
for i in range(1, 101): # outer loop; creates arrays of size n = 100 to n = 10000
index.append(i*100) # add the size of the array currently being created to the list for plotting
print(i) # progress report; displays the current size being worked on to console
for j in range(sortedarrayCount,i*100 + 1): # creates the sorted array of size i*100
sortedarrayCount += 1 # used so that we don't have to keep making new arrays
sortedarr.append(j) # add the new values up to i*100
for k in range(i*100, reversearrayCount, -1): # creates the reverse sorted array of size i*100
reversearrayCount += 1
reversearrtwo.append(k)
reversearrtwo.extend(reversearr) # because it is reverse sorted, we need to add the new values to the beginning
reversearr = list(reversearrtwo) # take that list and prep it for the next iteration i
reversearrtwo = []
# for this section, make code that you don't want to see on the plot comments
# for example if you want a selection sort plot, make all insertion and merge sort related code comments
# if you just want to compare sorted array times, make all reverse sorted related code comments
# selectionsorted = list(sortedarr) # duplicate sorted array
selectionreverse = list(reversearr) # duplicate reverse sorted array
# ssort = timeit.Timer (lambda: selectionsort(selectionsorted)) # time selection sort for sorted array
srevt = timeit.Timer (lambda: selectionsort(selectionreverse)) # time selection sort for reverse sorted array
# selectsortedtime.append(ssort.timeit(number=1)) # add the time to the appropriate array
selectreversetime.append(srevt.timeit(number=1)) # same
# same as selection sort but for insertion sort
# insertionsorted = list(sortedarr)
insertionreverse = list(reversearr)
# isort = timeit.Timer (lambda: insertionsort(insertionsorted))
irevt = timeit.Timer (lambda: insertionsort(insertionreverse))
# insertsortedtime.append(isort.timeit(number=1))
insertreversetime.append(irevt.timeit(number=1))
# same as selection sort but for merge sort
# mergesorted = list(sortedarr)
# mergereverse = list(reversearr)
# msort = timeit.Timer (lambda: mergesort(mergesorted))
# mrevt = timeit.Timer (lambda: mergesort(mergereverse))
# mergesortedtime.append(msort.timeit(number=1))
# mergereversetime.append(mrevt.timeit(number=1))
# plotting the times
# for this section, make sure to call plot however many times you want to have a function on the graph
# if you want one for forward sort and reverse sort, you call it twice and label both
plt.plot(index, selectreversetime, label='Selection Sort') # selection sort function, blue line
plt.plot(index, insertreversetime, color='r', label='Insertion Sort') # insertion sort function, red line
plt.ylabel('Time') # y axis holds time
plt.xlabel('n') # x axis holds n
plt.title('Average Random Array Sort Time') # title of plot
plt.legend() # display legend
plt.show() # display plot
'''
np.random.seed(101) # seed for random number generator
# Question 4 -----
'''
selectionTotalTime = [] # array to hold all average times for selection sort
insertionTotalTime = [] # array to hold all average times for insertion sort
mergeTotalTime = [] # array to hold all average times for merge sort
for i in range(2, 101, 2): # setup for n = 200 to 10000; n = i*100 for each iteration; only every n = 200
# arrays to hold the times for 100 permutations at size n
selectionIndexTime = []
insertionIndexTime = []
mergeIndexTime = []
index.append(i*100) # records n for the plot
print(i) # progress report printed to console
arr = np.random.rand(i*100) # generates an array of i*100 random numbers
# creates 100 permutations for the random array arr and times each algorithm's sorting time for each permutation
for j in range(0, 100):
array = np.random.permutation(arr) # duplicate array
selectionArr = list(array) # duplicate array
ssort = timeit.Timer (lambda: selectionsort(selectionArr)) # time selection sort
selectionIndexTime.append(ssort.timeit(number=1)) # add the time to the appropriate array
# same follows for the next two algorithms
insertionArr = list(array)
isort = timeit.Timer (lambda: insertionsort(insertionArr))
insertionIndexTime.append(isort.timeit(number=1))
mergeArr = list(array)
msort = timeit.Timer (lambda: mergesort(mergeArr))
mergeIndexTime.append(msort.timeit(number=1))
# variables to add up all the times for index i
avgSelection = 0
avgInsertion = 0
avgMerge = 0
# add up everything for each algorithm
for k in range(0, 100):
avgSelection += selectionIndexTime[k]
avgInsertion += insertionIndexTime[k]
avgMerge += mergeIndexTime[k]
# calculate averages and add them to the average time array to be plotted
selectionTotalTime.append(avgSelection/100)
insertionTotalTime.append(avgInsertion/100)
mergeTotalTime.append(avgMerge/100)
print('%s seconds' % (time.time() - start ))
# plotting using matplotlib
plt.plot(index, selectionTotalTime, label='Selection Sort') # selection sort function, blue line
plt.plot(index, insertionTotalTime, color='r', label='Insertion Sort') # insertion sort function, red line
plt.plot(index, mergeTotalTime, color='g', label='Merge Sort') # merge sort function, green line
plt.ylabel('Time') # y axis holds time
plt.xlabel('n') # x axis holds n
plt.title('Average Random Array Sort Time') # title of plot
plt.legend() # display legend
plt.show() # display plot
'''
'''
# Question 5 ----
arr = np.random.rand(1000000) # generate the array with n = 10^6 random numbers
selectionarr = list(arr) # duplicate array
selectionstart = time.time() # start time
selectionsort(selectionarr) # selection sort it
print('%s seconds selection' % (time.time() - selectionstart )) # print the time it took by current time - start time
# same but for insertion sort
insertionarr = list(arr)
insertionstart = time.time()
insertionsort(insertionarr)
print('%s seconds insertion' % (time.time() - insertionstart ))
# same but for merge sort
mergearr = list(arr)
mergestart = time.time()
mergesort(mergearr)
print('%s seconds merge' % (time.time() - mergestart ))
'''
|
auyen/CMPS-101
|
hw2/hw2.py
|
hw2.py
|
py
| 9,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8454526301
|
# Path to scripts folder
path = "C:/Users/j_ber/root/blender_scripts/blender_modules/panels/"
# Names of python files to import
filenames = ["vertex_tools.py"]
for file in filenames:
exec(compile(open(path + file).read(), path + file, 'exec'))
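# Alternative sketch (added): the same effect can be achieved with importlib, which
# avoids exec() and yields a proper module object. The path and file name reuse the
# hypothetical values above.
#
#   import importlib.util
#   spec = importlib.util.spec_from_file_location("vertex_tools", path + "vertex_tools.py")
#   vertex_tools = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(vertex_tools)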
|
jbernrd2/blender-scripts
|
blender_modules/script_loader.py
|
script_loader.py
|
py
| 268 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36484764773
|
import cv2
import glob
from matplotlib import pyplot as plt
faceDet = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
faceDet_two = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
faceDet_three = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
faceDet_four = cv2.CascadeClassifier("haarcascade_frontalface_alt_tree.xml")
fishface = cv2.face.FisherFaceRecognizer_create()
fishface.read('fish.xml')
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
for files in glob.glob("C:\\Users\\HP\\Desktop\\classify\\*"):
gray = cv2.imread(files)
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
face = faceDet.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
face_two = faceDet_two.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
face_three = faceDet_three.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
face_four = faceDet_four.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
if len(face) == 1:
facefeatures = face
elif len(face_two) == 1:
facefeatures = face_two
elif len(face_three) == 1:
facefeatures = face_three
elif len(face_four) == 1:
facefeatures = face_four
else:
facefeatures = ""
for (x, y, w, h) in facefeatures:
gray = gray[y:y+h, x:x+w]
try:
gray = cv2.resize(gray, (350, 350))
except:
pass
plt.subplot(132)
plt.title('img')
plt.imshow(gray, 'gray')
plt.xticks([])
plt.yticks([])
plt.show()
Class, abc = fishface.predict(gray)
print(emotions[Class])
|
dishavarshney9/uhack
|
classi.py
|
classi.py
|
py
| 1,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4818248752
|
import random
suits = ('Heart', 'Clubs', 'Diamond', 'Spades')
ranks = ('two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'two': 2, 'three':3, 'four': 4, 'five': 5, 'six':6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'Jack': 11, 'Queen': 12, 'King': 13, 'Ace': 14}
class Card():
def __init__(self, suit, rank):
self.suit = suit
self.rank = rank
self.value = values[rank]
def __str__(self):
return self.rank + " of " + self.suit
class Deck():
def __init__(self):
self.cardList = []
for suit in suits:
for rank in ranks:
myCard = Card(suit, rank)
self.cardList.append(myCard)
def randomze(self):
random.shuffle(self.cardList)
class Player():
def __init__(self, name, indicator, origdeck):
self.name = name
self.deck = []
if indicator == 1:
self.deck = origdeck.cardList[0:53:2]
else:
self.deck = origdeck.cardList[1:53:2]
print("Welcome players to the WarZone! A new deck of card is being made ready for you!")
gameDeck = Deck()
gameDeck.randomze()
player1name = input("Player 1! Give me your name: ")
player1 = Player(player1name, 1, gameDeck)
player2name = input("Player 2! Give me your name: ")
player2 = Player(player2name, 2, gameDeck)
print(f"Thank you {player1name} and {player2name}! Your cards have been alternatively distributed starting from {player1name}")
smallDeck1 = []
smallDeck2 = []
tie = False
print(f"Game has been started!")
while True:
if (player1.deck.__len__() == 0 or player2.deck.__len__() == 0) and tie == False:
break
gar1 = input(f"{player1name} press space bar to place your card(s): ")
smallDeck1.append(player1.deck.pop())
    print(smallDeck1[-1])
    gar2 = input(f"{player2name} press space bar to place your card(s): ")
    smallDeck2.append(player2.deck.pop())
    print(smallDeck2[-1])
    print(f"{player1name}'s {smallDeck1[-1]} vs {player2name}'s {smallDeck2[-1]}")
if smallDeck1[len(smallDeck1)-1].value > smallDeck2[len(smallDeck2)-1].value:
print(f"{player1name} won the round")
for card in smallDeck1:
player1.deck.append(card)
for card in smallDeck2:
player1.deck.append(card)
tie = False
smallDeck1.clear()
smallDeck2.clear()
random.shuffle(player1.deck)
elif smallDeck1[len(smallDeck1)-1].value < smallDeck2[len(smallDeck2)-1].value:
print(f"{player2name} won the round")
for card in smallDeck1:
player2.deck.append(card)
for card in smallDeck2:
player2.deck.append(card)
tie = False
smallDeck1.clear()
smallDeck2.clear()
random.shuffle(player2.deck)
else:
print(f"It's a draw! War has been triggered. Both players, risk in 3 more cards!:")
risk = 2
if len(player1.deck) < 3 or len(player2.deck)< 3:
if(len(player1.deck) < len(player2.deck)):
print(f"It seems like {player1name} has less than 3 cards. So, we risk {len(player1.deck)} cards only")
risk = len(player1.deck) - 1
else:
print(f"It seems like {player2name} has less than 3 cards. So, we risk {len(player2.deck)} cards only")
risk = len(player2.deck) -1
if risk < 0:
if len(player1.deck) < len(player2.deck):
print(f"{player1name}, you cant continue no more! You're out of cards")
else:
print(f"{player2name}, you cant continue no more! You're out of cards")
tie = False
else:
            i = 0
            while i < risk:
                smallDeck1.append(player1.deck.pop())
                smallDeck2.append(player2.deck.pop())
                i += 1  # advance the counter so the war loop terminates
tie = True
if len(player1.deck) == 0:
print(f"{player2name} won the battle")
else:
print(f"{player1name} won the battle")
|
slama0077/Games
|
War.py
|
War.py
|
py
| 4,172 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42656181560
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import argparse
import logging
from tarfile import TarFile
from thirdparty.dagflow import ParallelTask, Task, DAG, do_dag
from ontbc.common import mkdir, touch, read_tsv
from ontbc.parser import add_barcode_parser
from ontbc.config import PORECHOP_BIN, QUEUE
from ontbc import __file__, __version__, __email__, __author__
LOG = logging.getLogger(__name__)
def read_tar(file):
a = os.path.dirname(file)
return [os.path.join(a, i) for i in TarFile(file).getnames()]
def scan_cell(cell):
fastqs = []
summarys = []
fast5s = []
for root, dirs, files in os.walk(cell, followlinks=True, topdown=False):
for name in files:
path = os.path.join(root, name)
if name.endswith(".fastq"):
fastqs.append(path)
elif name.endswith(".txt"):
summarys.append(path)
elif name.endswith(".fast5"):
fast5s.append(path)
elif name.endswith(".tar"):
fast5s += read_tar(path)
else:
pass
return fastqs, summarys, fast5s
def create_porechop_tasks(cell, barcodes, job_type, work_dir, out_dir):
LOG.info("find fastq, summary and fast5 files in %r" % cell)
fastq_fofn = os.path.join(work_dir, "fastq.fofn")
summary_fofn = os.path.join(work_dir, "summary.fofn")
fast5_fofn = os.path.join(work_dir, "fast5.fofn")
find_done = os.path.join(work_dir, "find_done")
if not os.path.exists(find_done):
fastqs, summarys, fast5s = scan_cell(cell)
for i, j in zip([fastq_fofn, summary_fofn, fast5_fofn], [fastqs, summarys, fast5s]):
with open(i, "w") as fh:
fh.write("%s\n" % "\n".join(j))
del fastqs, summarys, fast5s
touch(find_done)
fastqs = [i[0] for i in read_tsv(fastq_fofn)]
summarys = [i[0] for i in read_tsv(summary_fofn)]
fast5s = [i[0] for i in read_tsv(fast5_fofn)]
LOG.info("%s fastq, %s summary and %s fast5 files found" % (len(fastqs), len(summarys), len(fast5s)))
del summarys, fast5s
if job_type == "local":
_option = ""
else:
_option = "-q %s" % ",".join(QUEUE)
tasks = ParallelTask(
id="bc",
work_dir="%s/{id}" % work_dir,
type=job_type,
option=_option,
script="""
{ontbc}/ontbc.py clean {{fastq}} > clean.fastq
{porechop}/porechop-runner.py -i clean.fastq -b . -t 1 --verbosity 2 --no_split > porechop.log
rm -rf clean.fastq
""".format(
porechop=PORECHOP_BIN,
ontbc=os.path.join(os.path.dirname(__file__), "..")
),
fastq=fastqs,
)
summary = os.path.join(work_dir, "all.summary.txt")
join_summary = Task(
id="join_summary",
work_dir=work_dir,
type=job_type,
script="""
less {summary} | xargs cat - > all.summary.txt
""".format(
summary=summary_fofn
),
)
join_tasks = ParallelTask(
id="join",
work_dir=work_dir,
type=job_type,
script="""
mkdir -p {out}/{{barcode}}
if [ ! -e {{barcode}}_cat_done ]; then
cat */{{barcode}}.fastq > {out}/{{barcode}}/{{barcode}}.fastq
touch {{barcode}}_cat_done
fi
rm -rf */{{barcode}}.fastq
cd {out}/{{barcode}}
{ontbc}/ontbc.py filter --fastq {{barcode}}.fastq --summary {summary} --fast5 {fast5} \\
--min_score -100 --min_length 0 --out {{barcode}}
rm {{barcode}}.filtered.fastq
mv {{barcode}}.filtered.summary.txt {{barcode}}.summary.txt
""".format(
summary=summary,
ontbc=os.path.join(os.path.dirname(__file__), ".."),
fast5=fast5_fofn,
out=out_dir
),
barcode=barcodes
)
for i in join_tasks:
i.set_upstream(*tasks)
i.set_upstream(join_summary)
return tasks, join_tasks, join_summary
def run_porechop(cell, barcodes, job_type, threads, work_dir, out_dir):
assert os.path.isdir(cell), "%r not exist" % cell
out_dir = mkdir(out_dir)
work_dir = mkdir(work_dir)
tasks, join_tasks, join_summary = create_porechop_tasks(
cell=cell,
barcodes=barcodes,
job_type=job_type,
work_dir=work_dir,
out_dir=out_dir
)
dag = DAG("porechop")
dag.add_task(*tasks)
dag.add_task(join_summary)
dag.add_task(*join_tasks)
do_dag(dag, concurrent_tasks=threads, refresh_time=30)
def barcode(args):
run_porechop(
cell=args.cell,
barcodes=args.barcode,
job_type=args.job_type,
threads=args.threads,
work_dir=args.work_dir,
out_dir=args.out_dir
)
def main():
logging.basicConfig(
stream=sys.stderr,
level=logging.INFO,
format="[%(levelname)s] %(message)s"
)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
version: %s
contact: %s <%s>\
""" % (__version__, " ".join(__author__), __email__))
parser = add_barcode_parser(parser)
args = parser.parse_args()
barcode(args)
if __name__ == "__main__":
main()
|
FlyPythons/ontbc
|
ontbc/barcode.py
|
barcode.py
|
py
| 5,174 |
python
|
en
|
code
| 5 |
github-code
|
6
|
18218820071
|
import optparse
from .parser import *
def get_args():
parser = optparse.OptionParser()
    parser.add_option('-i', '--html',
action="store", dest="html",
help="html to parse")
parser.add_option('-o', '--output',
action="store", dest="output",
help="place to output messages", default="messages.csv")
parser.add_option('-s', '--show',
action="store", dest="show",
help="show messages as they are being written", default=True)
options, args = parser.parse_args()
return options, args
def main():
options, args = get_args()
parser = Parser(options.html, options.output, options.show)
parser.parse_all_messages_into_single_file()
if __name__ == "__main__":
main()
|
Andrew-Pynch/dappi
|
dappi/__main__.py
|
__main__.py
|
py
| 769 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15018029353
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 5 16:56:55 2022
@author: josephbriggs
"""
import pathlib
import argparse
import cv2
def main():
'''
Converts files to greyscale.
'''
parser = argparse.ArgumentParser(description='Convert files to greyscale.')
parser.add_argument('--input_path', "-i", type=str,
help='path to the image or directory of images. \
If converting a directory, use *')
parser.add_argument('--output_path', "-o", type=str,
help='output path where images will be saved.')
parser.add_argument('--res', "-r", type=int,
help='downscale factor.')
args = parser.parse_args()
pathlib.Path(args.output_path).mkdir(parents=True, exist_ok=True)
# files = pathlib.Path(args.input_path).glob(r'/*.png|')
    file_extensions = ['png', 'jpeg', 'jpg']
    files = []
    for file_extension in file_extensions:
        files += pathlib.Path(args.input_path).glob(fr'*.{file_extension}')
for file in files:
file_name = file.name
image = cv2.imread(str(file))
image_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
file_name_to_save = args.output_path+"/"+file_name
print(file_name_to_save)
cv2.imwrite(file_name_to_save, image_gs)
print('converted files to greyscale')
if __name__ == "__main__":
main()
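# Example invocation (added; directory names are hypothetical):
#
#   python imgs_to_gs.py -i ./photos -o ./photos_greyscale -r 2
#
# Note: the --res/-r downscale factor is parsed but not currently used by main().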
|
jhb123/enhance_greyscale
|
imgs_to_gs.py
|
imgs_to_gs.py
|
py
| 1,430 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40471740091
|
import os
import cv2
import numpy as np
import shutil
import sys
sys.path.insert(0,os.path.realpath('..'))
sys.path.insert(0,os.path.join(os.path.realpath('..'),'piano_utils'))
from tools.warper import order_points
from config import cfg
from piano_utils.networks import PSPNet
from piano_utils.util import colorize_mask
from piano_utils.keyboard import KeyBoard
from PIL import Image
from tqdm import tqdm
import shapely
from shapely.geometry import Polygon,MultiPoint
import time
from skimage.measure import label, regionprops
from collections import Counter
import json
from IPython import embed
import pickle
exp_cfg = {
'exp_imgs':'/home/data/lj/Piano/experment/keyboard/exp_imgs',
'tmp_dir':'/home/data/lj/Piano/experment/keyboard/tmp_dir',
'figure_dir':'/home/data/lj/Piano/experment/keyboard_figure'
}
class HoughKeyBoard(object):
def __init__(self):
self.theta_thersh = 0.08
def hough_transform(self,img):
res = {}
img_ori = img.copy()
h, w = img.shape[:2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.Canny(gray, 50, 150, 3)
lines = cv2.HoughLines(edges, 1.0, np.pi / 180, 120)
thetas = [x[0][1] for x in lines if not (x[0][1] < (np.pi / 4.) or
x[0][1] > (3.*np.pi/4.0))]
dic = dict(Counter(thetas))
theta = sorted(dic.items(), key=lambda x: x[1], reverse=True)
        if len(theta) > 0 and theta[0][1] > 1:  # pick the line angle that repeats most often
most_theta = theta[0][0]
else:
return
x_loc, y_loc, pts = [], [], []
for line in lines:
rho, theta = line[0]
            if abs(most_theta * 180 / np.pi - 90) > 1.5:  # the keyboard is tilted
                if abs(theta - most_theta) > self.theta_thersh:
                    continue
            else:  # otherwise (keyboard roughly horizontal)
if not theta == most_theta:
continue
pt1 = (0, max(int(rho / np.sin(theta)), 0))
pt2 = (img_ori.shape[1], max(int((rho - img_ori.shape[1] * np.cos(theta)) / np.sin(theta)),0))
cv2.line(img_ori, pt1, pt2, (0, 255, 0), 1)
pts.append((pt1, pt2))
return img_ori
def get_img_box_dict():
img_box_dict = dict()
file_name = '/home/data/lj/Piano/Segment/train.txt'
with open(file_name,'r') as fr:
items = [l.strip() for l in fr.readlines()]
mask_lists = []
for item in items:
item = item.split()
if 'tools' in item[0]:
#mask_dir = item[0].split('/')[-2]
continue
else:
mask_dir = os.path.basename(item[0]).split('_img_') [0]
if 'segment' in item[0]:continue
if mask_dir in mask_lists:continue
mask_lists.append(mask_dir)
img_mask = cv2.imread(item[1],cv2.IMREAD_GRAYSCALE)
img_mask[img_mask==2] = 1
img_mask[img_mask==1] = 255
contours,_ = cv2.findContours(img_mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
assert len(contours)==1,'value wrong'
contours = np.squeeze(contours)
rect = order_points(contours).reshape(-1,1,2).astype(int)
img_box_dict[mask_dir] = rect
json_path = os.path.join(exp_cfg['exp_imgs'],'need_labels')
json_files = [os.path.join(json_path,x) for x in os.listdir(json_path) if x.endswith('json')]
json_files.sort()
for json_file in json_files:
with open(json_file,'r') as fr:
items = json.load(fr)
basename = os.path.basename(json_file).split('.')[0]
points = np.array(items['shapes'][0]['points'])
rect = order_points(points).reshape(-1,1,2).astype(int)
img_box_dict[basename] = rect
return img_box_dict
class KeyBoard_Exp(KeyBoard):
def __init__(self):
KeyBoard.__init__(self)
print('KeyBoard load finish')
def detect(self,img):
image = img.convert('RGB')
self.prediction = self.inference(img)
contours,_ = self.find_contours(image,self.prediction)
rect = order_points(contours).reshape(-1,1,2).astype(int)
return rect
def mask2image(self,image):
image = Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
w, h = image.size
colorized_mask = colorize_mask(self.prediction, self.palette)
output_im = Image.new('RGB', (w*2, h))
output_im.paste(image, (0,0))
output_im.paste(colorized_mask, (w,0))
output_im = cv2.cvtColor(np.asarray(output_im),cv2.COLOR_RGB2BGR)
return output_im
def cal_iou(gt_rect, det_rect):
    # compute the IoU of two irregular quadrilaterals (they are no longer axis-aligned rectangles)
gt_rect = gt_rect.reshape(4, 2)
poly1 = Polygon(gt_rect).convex_hull
det_rect = det_rect.reshape(4,2)
poly2 = Polygon(det_rect).convex_hull
union_poly = np.concatenate((gt_rect,det_rect))
if not poly1.intersects(poly2):
iou = 0
else:
try:
inter_area = poly1.intersection(poly2).area
union_area = MultiPoint(union_poly).convex_hull.area
if union_area == 0:
iou= 0
iou=float(inter_area) / union_area
except shapely.geos.TopologicalError:
print('shapely.geos.TopologicalError occured, iou set to 0')
iou = 0
return iou
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def main():
seg_pickle_file = os.path.join(exp_cfg['tmp_dir'],'seg.pkl')
hour_pickle_file = os.path.join(exp_cfg['tmp_dir'],'hourgh.pkl')
path = exp_cfg['exp_imgs']
save_seg_dir = os.path.join(exp_cfg['figure_dir'],'segment')
save_hourgh_dir = os.path.join(exp_cfg['figure_dir'],'hourgh')
ensure_dir(save_seg_dir)
ensure_dir(save_hourgh_dir)
img_files = [os.path.join(path,x) for x in os.listdir(path)]
gt_box_dict = get_img_box_dict()
with open(seg_pickle_file,'rb') as f1:
seg_box_dict = pickle.load(f1)
with open(hour_pickle_file,'rb') as f2:
hour_box_dict = pickle.load(f2)
seg_ious = []
for img_mark,det_rect in seg_box_dict.items():
gt_rect = gt_box_dict[img_mark]
iou = cal_iou(gt_rect,det_rect)
if iou>0.5:
seg_ious.append(iou)
else:print(img_mark)
hour_detector = HoughKeyBoard()
keyboard_net = KeyBoard_Exp()
hour_ious = []
for img_mark,det_rect in hour_box_dict.items():
gt_rect = gt_box_dict[img_mark]
iou = cal_iou(gt_rect,det_rect)
if iou>0.5:
hour_ious.append(iou)
else:
img = cv2.imread(os.path.join(path,img_mark+'.jpg'))
img_copy = img.copy()
img_input = Image.fromarray(cv2.cvtColor(img_copy,cv2.COLOR_BGR2RGB))
seg_rect = keyboard_net.detect(img_input)
for rect in det_rect:
rect = rect[0]
cv2.circle(img,(rect[0],rect[1]),5,(0,255,0),3)
for rect in seg_rect:
rect = rect[0]
cv2.circle(img_copy,(rect[0],rect[1]),5,(0,255,0),3)
img_copy = keyboard_net.mask2image(img_copy)
img = hour_detector.hough_transform(img)
cv2.imwrite(os.path.join(save_hourgh_dir,img_mark+'.jpg'),img)
cv2.imwrite(os.path.join(save_seg_dir,img_mark+'.jpg'),img_copy)
if __name__=='__main__':
main()
|
yxlijun/vision-piano-amt
|
figures/plt_keyboard.py
|
plt_keyboard.py
|
py
| 7,430 |
python
|
en
|
code
| 2 |
github-code
|
6
|
13085423175
|
import json
# this will create a tweet, with the possibility of adding media and replying to other tweets
def create_tweet(tas, message, media_ids = None, reply_ids = None):
payload = {"status": message}
if media_ids != None:
payload["media_ids"] = media_ids
if reply_ids != None:
payload["in_reply_to_status_id"] = reply_ids
r = tas.post("https://api.twitter.com/1.1/statuses/update.json", data = payload)
resp = json.loads(r.text)
if r.status_code == 200:
tweet_id = resp["id"]
return 0, (tweet_id,)
return 1, (r.text,)
# this will delete a tweet based on a given tweet-id
def delete_tweet(tas, tweet_id):
r = tas.delete(f"https://api.twitter.com/2/tweets/{tweet_id}")
resp = json.loads(r.text)
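# Illustrative usage sketch (added): `tas` is assumed to be an authenticated session
# object exposing .post()/.delete(), for example a requests_oauthlib.OAuth1Session
# built from app and user credentials (the key names below are placeholders).
#
#   from requests_oauthlib import OAuth1Session
#   tas = OAuth1Session("API_KEY", "API_SECRET", "ACCESS_TOKEN", "ACCESS_SECRET")
#   status, payload = create_tweet(tas, "hello world")
#   if status == 0:
#       delete_tweet(tas, payload[0])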
|
filming/Twitter
|
src/Twitter/tweet/tweet.py
|
tweet.py
|
py
| 723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21368565276
|
import numpy as np
import time
import matplotlib.pyplot as plt
a=np.loadtxt('meas2/magnitude_0to40.0mA_freq_sweep.csv', delimiter=',')
c=np.loadtxt('meas2/phase_0to40.0mA_freq_sweep.csv', delimiter=',')
b=np.loadtxt('meas2/sweep_feq.csv', delimiter=',')
cstart=0 #start current
cstop=40E-3 # stop current
cstep=5E-3 # current step
csteps=int((cstop-cstart)/cstep)
fig, (ax0,ax1)=plt.subplots(2,1, sharex=True)
for i in range(csteps):
current=cstart+i*cstep
ax0.plot(b,a[:,i], label="{0:d}mA".format(int((current)*1000)))
ax1.plot(b,c[:,i], label="{0:d}mA".format(int((current)*1000)))
ax1.set_xlabel("Larmor frequency in kHz")
ax1.axhline(y=0, color='r', ls='--')
plt.legend(prop={'size':6})
#plt.savefig('meas2/phase_amplitude.png')
plt.show()
|
physikier/magnetometer
|
src/plot.py
|
plot.py
|
py
| 775 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12598627326
|
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
"""
Runs data quality check by passing test SQL
Parameters
redshift_conn_id: Redshift Connection ID
test_sql: SQL query to run on Redshift for data validation
expected_result: Expected result to match the test result.
"""
ui_color = '#89DA59'
@apply_defaults
def __init__(self,
redshift_conn_id="",
test_sql="",
expected_result="",
*args, **kwargs):
super(DataQualityOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id=redshift_conn_id
self.test_sql=test_sql
self.expected_result=expected_result
def execute(self, context):
self.log.info("Start data validation...")
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
self.log.info("Got credentials.")
records=redshift_hook.get_records(self.test_sql)
if records[0][0] != self.expected_result:
raise ValueError(f"Data quality check failed. {records[0][0]} does not equal {self.expected_result}")
else:
self.log.info("Data quality check passed!!!")
|
ljia-ch/airflow_data_pipeline_project
|
plugins/operators/data_quality.py
|
data_quality.py
|
py
| 1,362 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26581884850
|
CODE_OK = 200
CODE_CREATED = 201
CODE_NO_CONTENT = 204
CODE_BAD_REQUEST = 400
CODE_FORBIDDEN = 403
CODE_NOT_FOUND = 404
CODE_METHOD_NOT_ALLOWED = 405
CODE_NOT_ACCEPTABLE = 406
CODE_CONFLICT = 409
response_ok = {
"status": "OK",
"code": CODE_OK,
"error": "",
"data": "",
}
response_fail = {
"status": "FAIL",
"code": CODE_BAD_REQUEST,
"error": "",
"data": "",
}
# not used, may deprecate later
errors = {
'UserAlreadyExistsError': {
'message': "A user with that username already exists.",
'status': CODE_CONFLICT,
'extra': "NA",
},
'ResourceDoesNotExist': {
'message': "A resource with that ID no longer exists.",
'status': CODE_NOT_FOUND,
'extra': "NA",
},
'InvalidParams': {
'message': "Not valid param is given",
'status': CODE_NOT_ACCEPTABLE,
'extra': "NA",
},
'InvalidOperations': {
'message': "Invalid operation method is given",
'status': CODE_METHOD_NOT_ALLOWED,
'extra': "NA",
},
}
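# Minimal usage sketch (added): handlers typically copy one of the templates above and
# fill in the fields before returning it, so the shared dictionaries stay unmodified.
if __name__ == "__main__":
    response = dict(response_ok)
    response["data"] = {"id": 42}
    print(response)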
|
lafenicecc/cello
|
src/common/error.py
|
error.py
|
py
| 1,054 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72988129467
|
import numpy as np
def upper_confidence_bound(data):
N = data.shape[0] # number of samples (Example: How many times add was shown, How many times machine was played)
d = data.shape[1] # number of dfferent objects (Examples: Different ads, Bandit machines)
#Ni(n) - the number of times the object 'i' was selected up until the nubmer n
#Ri(n) - the sum of rewards of the object 'i' up to the number n
#The idea is to have the best object at each iteration 1-n
N_i_n = {}
R_i_n = {}
iter_object_selected = []
#Setting starting values
for i in range(d):
N_i_n[i] = 0
R_i_n[i] = 0
total_results = 0
#The main loop
for sample in range(N):
object_selected = 0
max_upper_bound = 0
for object_ in range(d):
if N_i_n[object_] > 0:
average_reward = R_i_n[object_]/N_i_n[object_]
delta_of_object = np.sqrt(3/2 * np.log(sample+1) / N_i_n[object_])
upper_bound = average_reward + delta_of_object
else:
upper_bound = float("+inf")
if upper_bound > max_upper_bound:
max_upper_bound = upper_bound
object_selected = object_
iter_object_selected.append(object_selected)
N_i_n[object_selected] += 1
reward = data[sample, object_selected]
R_i_n[object_selected] += reward
total_results += reward
return iter_object_selected, total_results
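# Minimal self-contained demo (added): synthetic Bernoulli rewards for five "objects"
# (e.g. ads); the true click-through rates below are made up for illustration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    true_rates = [0.05, 0.13, 0.22, 0.10, 0.30]
    data = rng.binomial(1, true_rates, size=(10000, len(true_rates)))
    selections, total_reward = upper_confidence_bound(data)
    print("total reward:", total_reward)
    print("times the best object (index 4) was chosen:", selections.count(4))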
|
lucko515/ads-strategy-reinforcement-learning
|
Upper confidence bound/upper_confidence_bound.py
|
upper_confidence_bound.py
|
py
| 1,321 |
python
|
en
|
code
| 7 |
github-code
|
6
|
12190600050
|
#
# db.py
#
import os
import sqlite3
import time
import datetime
from flask import g
import db_init
def test_db_conn():
    """ Check if db connection can open, create and init db if doesn't already exist """
    dbname = os.environ.get("SEVENS_DB_NAME")
    assert dbname is not None
    # remove any stale database file so the connection check starts from a clean state
    try:
        os.remove(dbname)
    except OSError:
        pass
    print("********dbname: "+dbname)
# If db file doesn't exist call the initialize module to create and init
if not os.path.isfile(dbname):
print("!!!!!!!!!dbname: "+dbname)
db_init.init(dbname)
def get_db():
dbname = os.environ.get("SEVENS_DB_NAME")
assert dbname is not None
if not hasattr(g,'sqlite_db'):
g.sqlite_db = sqlite3.connect(dbname)
return g.sqlite_db
def close_db():
if hasattr(g,'sqlite_db'):
g.sqlite_db.close()
# CREATE TABLE players (id INTEGER PRIMARY KEY AUTOINCREMENT, points INTEGER, name STRING);
def players_row_to_dict(row):
players = {}
players['id'] = row[0]
players['points'] = row[1]
players['name'] = row[2]
return players
#CREATE TABLE hands (player_id INTEGER FOREIGN KEY, clubs STRING, hearts STRING, diamonds STRING, spades STRING);
def hands_row_to_dict(row):
hands = {}
hands['player_id'] = row[0]
hands['clubs'] = str(row[1])
hands['hearts'] = str(row[2])
hands['diamonds'] = str(row[3])
hands['spades'] = str(row[4])
return hands
# CREATE TABLE board (cur_player_id INTEGER FOREIGN KEY, clubs STRING, hearts STRING, diamonds STRING, spades STRING);
def board_row_to_dict(row):
board = {}
board['cur_player_id'] = row[0]
board['clubs'] = str(row[1])
board['hearts'] = str(row[2])
board['diamonds'] = str(row[3])
board['spades'] = str(row[4])
return board
def get_game_state():
conn = get_db()
curs = conn.cursor()
rows = curs.execute ("SELECT * FROM board").fetchall()
board = []
for row in rows:
b = board_row_to_dict(row)
board.append(b)
return board
def get_games():
conn = get_db()
curs = conn.cursor()
rows = curs.execute ("SELECT * FROM games ORDER BY date").fetchall()
games = []
for row in rows:
game = game_row_to_dict(row)
games.append(game)
return games
'''
def update_game(gameid,score,lines,user):
conn = get_db()
curs = conn.cursor()
curs.execute("UPDATE games SET score=?, lines=?, user=?, haveResult=? WHERE id=?;",
(score, lines, user, True, gameid))
conn.commit()
res = curs.execute("SELECT * FROM games WHERE id=?;",(gameid,)).fetchall()
if len(res) != 0:
return game_row_to_dict(res[0])
else:
return None
def add_access_log(game_id, func, method, auth, ip, user_agent):
""" Add access log to global access_log list """
conn = get_db()
curs = conn.cursor()
curs.execute("INSERT INTO accesslog (id, function, method, date, ipaddress, useragent, user) VALUES (?,?,?,?,?,?,?);", (game_id, func, method, time.time(),ip, user_agent, auth))
conn.commit()
def get_access_logs():
conn = get_db()
curs = conn.cursor()
rows = curs.execute ("SELECT * FROM accesslog ORDER BY date").fetchall()
access_log = []
for row in rows:
access = accesslog_row_to_dict(row)
access_log.append(access)
return access_log
def get_rng_seed():
""" Generate rng seed """
return 0xDEADBEEF
'''
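# Illustrative wiring sketch (added; the Flask app object is assumed to be created in
# another module of this project):
#
#   from flask import Flask
#   import db
#
#   app = Flask(__name__)
#
#   @app.teardown_appcontext
#   def teardown_db(exception):
#       db.close_db()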
|
tolkamps1/sevens7
|
db.py
|
db.py
|
py
| 3,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30677208057
|
#Write a Python program to add 'ing' at the end of a given string (length should be at least 3).
# If the given string already ends with 'ing' then add 'ly' instead. If the string length of the given string
# is less than 3, leave it unchanged.
def add_string(str1):
length = len(str1)
if length > 2:
if str1[-3:] =='ing':
str1+= 'ly'
else:
str1 += 'ing'
return str1
print(add_string('ab'))
print(add_string('abc'))
print(add_string('string'))
#Write a Python program to find the first appearance of the substring 'not' and 'poor' from a given string,
# if 'poor' follows the 'not', replace the whole 'not'...'poor' substring with 'good'.
# Return the resulting string
def not_poor(str1):
snot = str1.find('not')
spoor = str1.find('poor')
    if spoor > snot and snot >= 0:  # 'not' was found (possibly at index 0) and 'poor' appears after it
str1 = str1.replace(str1[snot:(spoor+4)],'good')
return str1
else:
return str1
print(not_poor('the lyrics is not that poor!'))
print(not_poor('the lyrics is poor!'))
|
allen-waker/ASSIGMENT-2
|
module - 2/unchangd_string.py
|
unchangd_string.py
|
py
| 1,082 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7997489923
|
"""
Before executing this script make sure that all packages are installed properly
and also select 3 IPs from the resource pool wiki which are not in use (check using the ping command).
purpose:
-------
This script is for first time setup of dcs vm which includes
accepting the EULA, changing the password, configuring the IP and changing the schematic of the DCS VM.
"""
from re import search, IGNORECASE
from SSHLibrary import SSHLibrary
import json
import platform, os, sys
import time
import netifaces
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
from auto_loader import load_from_file
import logging
logging.basicConfig(level=logging.INFO)
class dcs(object):
def __init__(self, ipv6="", vmInterface="", user="", userpwd=""):
"""
Constructor method to RestAppliance.
We compute the correct API-Version for REST calls.
Parameters
----------
ipv6 : str
Ipv6 of the dcs vm to connect.
vmInterface: str
Interface of the ubuntu VM.
IPv6 address starts with fe80:: i.e. it's a link-local address, reachable only in the network segment it's directly connected to.
Using the NIC that connects to that segment specifically.
user: str
Username of the DCS VM name
userpwd: str
DCS VM password
"""
# builds endpoint
self.ipv6Endpoint = ipv6 + "%" + vmInterface
self.sshlib = SSHLibrary()
self.stdout = None
self.sshlib.open_connection(self.ipv6Endpoint)
self.sshlib.login(username=user, password=userpwd)
# sets API version
self.api_version = self.get_api_version()
logging.debug("The API Version utilized is {0}.".format(
self.api_version))
print(self.api_version)
# header information
self._header = "-H \"X-API-Version: {0}\" -H \"Content-Type: application/json\"".format(
self.api_version)
self._secure_header = None
def get_api_version(self):
"""
Helper method get_api_version
Gets latest API verisons supported from the appliance.
On failure, sets api_verison to 120
Parameters
----------
none
"""
api_command = "curl --request GET https://localhost/rest/version"
apiversions, exit_code = self.sshlib.execute_command(
command=api_command, return_rc=True)
if exit_code == 0:
api_version = json.loads(apiversions)
return api_version["currentVersion"]
else:
logging.warning(
"The API Version utilized is 120 as get_api_version return exit code 1"
)
return "120"
def build_command(self, url, request_type, payload={}, *options):
"""
Helper method build_command
creates the curl command along with headers for GEt and POST call to the appliance.
Parameters:
----------
url: str
URL location of the endpoint.
request_type: str
specifies the type of REST request. For isntance, Get, Post.
payload: dict
data to be sent to the appliance, only applicable when making a post call.
*options: list of strings
any arguments that needs to be concatinated with the curl command. For instance, "-i", "-s"
"""
url = "https://localhost" + url
if request_type == "GET":
command = "curl -X {0} {1} {2}".format(request_type, self._header,
url)
if self._secure_header != None:
command = "curl -X {0} {1} {2}".format(request_type,
self._secure_header,
url)
elif request_type == "POST":
payload = '{0}'.format(json.dumps(payload).replace("'", '"'))
command = 'curl -X {0} {1} -d \'{2}\' {3}'.format(
request_type, self._header, payload, url)
if self._secure_header != None:
command = 'curl -X {0} {1} -d \'{2}\' {3}'.format(
request_type, self._secure_header, payload, url)
if options:
option = ""
for op in options:
option = option + " " + op
command = "curl{0} -X {1} {2} -d '{3}' {4}".format(
option, request_type, self._header, payload, url)
if self._secure_header != None:
command = "curl{0} -X {1} {2} -d '{3}' {4}".format(
option, request_type, self._secure_header, payload, url)
logging.info('Executing URI {0} Request Type: {1}'.format(
url, request_type))
return command
def accept_eula_once(self, service_access="yes"):
"""
On initial communication with the appliance, the end user service agreement (EULA) must be accepted.
This only needs to occur once. Additional calls will not change the status of the EULA nor the status of the service access.
If a change to the service access is required, see the function change_service_access()
If the appliance returns an error status (anything outside of the 100 or 200 range), an error is raised.
No authentication on the appliance is required.
Parameters
----------
service_access (optional): str
"yes" will accept service access
"no" will not allow service access
empty value will default to "yes"
"""
url = '/rest/appliance/eula/status'
eula_command = self.build_command(url, "GET")
json_result, exit_code = self.sshlib.execute_command(eula_command,
return_rc=True)
if not json_result: # if False, eula acceptance has already occurred.
logging.warning('EULA does not need to be saved.')
if exit_code != 0 or json_result:
logging.debug(
'Call EULA Acceptance with enable service access={0}'.format(
service_access))
url = '/rest/appliance/eula/save'
payload = {"supportAccess": service_access}
save_eula_command = self.build_command(url, "POST", payload)
logging.warning(save_eula_command)
save_success, exit_code = self.sshlib.execute_command(
save_eula_command, return_rc=True)
if exit_code == 0:
logging.info('EULA Response {0}'.format(save_success))
else:
raise Exception('accept_eula failed. JSON Response {0}'.format(
json.dumps(save_success)))
def change_administrator_password(self):
"""
On initial logon, the administrator's password has to be changed from the default value.
The call to the administrator password change is attempted.
If the change administrator password call fails, then we attempt to login with the administrator password.
If successful, we log a message and the accurate administrator password.
If the administrator login is not successful, an error is raised.
The administrator data is pulled from the dictionary in this file. This needs to be moved to a more formal location.
Parameters
----------
none
"""
url = "/rest/users/changePassword"
payload = {
"userName": "Administrator",
"oldPassword": "admin",
"newPassword": "admin123"
}
change_pass_command = self.build_command(url, "POST", payload)
status, success = self.sshlib.execute_command(
command=change_pass_command, return_rc=True)
if success == 0:
logging.info('Administrator password change was accepted.')
else:
raise Exception(
'change_administrator_password failed. JSON Response: {0}'.
format(json.dumps(status)))
def get_secure_headers(self):
"""
Helper method to appliance_request().
Gives header information required by the appliance with authentication information.
Return
------
_secure_header: dict. Dictionary containing X-API-Verions, Content-Type, and Auth. The Auth parameter value is a sessionID.
"""
# Once _secure_header is defined, we can use it over and over again for the duration of its life.
# Note, the header is only good for that user (administrator), 24 hours, and until the next reboot.
if self._secure_header != None:
return self._secure_header
payload = {"userName": "Administrator", "password": "admin123"}
url = '/rest/login-sessions'
authentication_command = self.build_command(url, "POST", payload)
status, exit_code = self.sshlib.execute_command(
command=authentication_command, return_rc=True)
if exit_code != 0:
raise Exception(
"There was an issue with the HTTP Call to get headers. Exception message: {0}"
.format(status))
try:
safe_json = json.loads(status)
self._secure_header = self._header
if 'sessionID' not in safe_json:
raise Exception(
'Auth token for the header is undefined. No Session ID available. Status: {0}.'
.format(status))
self._secure_header = self._header + ' -H "Auth: {0}"'.format(
safe_json['sessionID'])
return self._secure_header
except:
raise Exception(
'Failure to access the sessionID from the response. JSON: {0}'.
format(status))
def get_mac(self):
"""
Helper method get_mac
Used when creating the payload for setting the ip address of the oneview dcs appliance.
returns mac address of the oneview dcs appliance.
Parameters:
----------
none
"""
url = "/rest/appliance/network-interfaces"
self.get_secure_headers()
mac_command = self.build_command(url, "GET")
data, exit_code = self.sshlib.execute_command(command=mac_command,
return_rc=True)
if exit_code != 0:
raise Exception(
'Failure to get mac address of the interface: {0}'.format(
data))
data = json.loads(data)
try:
return data["applianceNetworks"][0]["macAddress"]
except:
raise Exception('Failure to fetch macAddress from the reponse')
def change_ovDcs_ip(self, app1Ipv4Addr, app2Ipv4Addr, virtIpv4Addr,
ipv4Gateway, ipv4Subnet, ):
"""
Changes the Ip address of the oneview dcs appliance.
Parameters:
----------
app1Ipv4Addr: str
Node1 IPv4 address in a two-node cluster
app2Ipv4Addr: str
Node2 IPv4 address in a two-node cluster.
virtIpv4Addr: str
Virtual IPv4 address. Oneview dcs will be reachable from this IP.
ipv4Gateway: str
IPv4 gateway address.
ipv4Subnet: str
IPv4 subnet mask or CIDR bit count.
"""
url = "/rest/appliance/network-interfaces"
macAddress = self.get_mac()
payload = {
"applianceNetworks": [{
"activeNode": 1,
"app2Ipv4Addr": app2Ipv4Addr,
"app1Ipv4Addr": app1Ipv4Addr,
"confOneNode": True,
"hostname": "ThisIsAutomated.com",
"networkLabel": "Managed devices network",
"interfaceName": "Appliance",
"device": "eth0",
"ipv4Gateway": ipv4Gateway,
"ipv4Subnet": ipv4Subnet,
"ipv4Type": "STATIC",
"ipv6Type": "UNCONFIGURE",
"macAddress": macAddress,
"overrideIpv4DhcpDnsServers": False,
"unconfigure": False,
"slaacEnabled": "yes",
"virtIpv4Addr": virtIpv4Addr
}]
}
changeIp_command = self.build_command(url, "POST", payload, "-i")
data, exit_code = self.sshlib.execute_command(command=changeIp_command,
return_rc=True)
x = json.dumps(data)
time.sleep(2)
uri = search('Location: (.+?)\r\nCache-Control', x)
print(uri, x)
if uri != None:
task_uri = uri.group(1)
if (self.get_task(task_uri)):
logging.info("Oneview Ip is set to: {0}".format(virtIpv4Addr))
f = open('ipaddress.txt', 'w')
f.write(str(virtIpv4Addr))
return None
def get_task(self, uri):
"""Gets the task corresponding to a given task ID.
Will wait until the task is not completed.
No failure will rasie an exception.
On successful completion will return True
Parameters:
----------
uri: str
Uri of the task
"""
self.get_secure_headers()
task_command = self.build_command(uri, "GET")
data, exit_code = self.sshlib.execute_command(command=task_command,
return_rc=True)
if exit_code == 0:
task_data = json.loads(data)
while task_data["taskState"] == "Running":
logging.info("task \"{0}\" is running...".format(uri))
time.sleep(10)
data, exit_code = self.sshlib.execute_command(command=task_command,
return_rc=True)
task_data = json.loads(data)
if task_data["taskState"] == "Completed":
logging.info("task \"{0}\" completed".format(uri))
return True
else:
logging.warning(
"Unexpected failure. Task ended with state {0}, URI:{1}".
format(task_data["taskState"], uri))
return None
def search_task(self, param):
"""Gets all the tasks based upon filters provided. Note: filters are optional.
iterate through all the task collected and calls get_task() to check the status.
Used while running hardware discovery
Parameters:
----------
param: str
Filters for the finding the task uris. For example: ?filter="'name' = 'alertMax'"
filters are concatenated with the URI
"""
self.get_secure_headers()
uri = "/rest/tasks" + param
task_command = self.build_command(uri, "GET")
data, exit_code = self.sshlib.execute_command(command=task_command,
return_rc=True)
all_members = json.loads(data)
for i in all_members["members"]:
self.get_task(i["uri"])
def execute_command_in_dcs_and_verify(self, dcs_command, expected_output):
'''Execute the given Command in DCS and verify the response with Expected output.
Example
Execute Command In DCS And Verify | <dcs_command> | <expected_output> |
:param dcs_command: Command that need to be executed in DCS vm
:param expected_output: expected output from the DCS command executed
:raises AssertionError if output does not match with expected output
:return stdout: return response obtained after command execution
'''
logging.info("executing {0}".format(dcs_command))
self.stdout = self.sshlib.execute_command(dcs_command,
return_stdout=True)
if search(expected_output, self.stdout, IGNORECASE) is None:
raise AssertionError(
"DCS command output is not as expected: {} found: {}".format(
expected_output, self.stdout))
return self.stdout
def change_dcs_schematic(self, dcs_commands):
'''Changes DCS schematic to given schematic
Example
Change DCS Schematic | <dcs_commands> |
:param dcs_commands: DCS commands to be executed along with its expected output for changing the schematic
ex:[["dcs stop", "DCS is Stopped"]]
'''
for cmd in dcs_commands:
self.execute_command_in_dcs_and_verify(cmd[0], cmd[1])
time.sleep(60)
def dcs_hardware_setup(self):
'''Performs Hardware Setup in DCS appliance
Parameters:
none
'''
logging.info("executing appliance set up")
status, exit_code = self.sshlib.execute_command(
command=
"curl -i -s -o /dev/nul -I -w '%{http_code}\n' -X POST -H \"X-API-Version: "
+ str(self.api_version) +
"\" https://localhost/rest/appliance/tech-setup",
return_rc=True)
if exit_code != 0:
raise AssertionError(
"Failed to Invoke Sever Hardware discovery with status:{} and exit code:{}"
.format(status, exit_code))
elif status == "202":
self.search_task(
"?filter=\"'name'='Discover%20hardware'\"&sort=created:descending&count=1"
)
def dcs_network_configuration(self, app1Ipv4Addr, app2Ipv4Addr,
virtIpv4Addr, ipv4Gateway, ipv4Subnet):
"""Changes the passwordthe dcs appliance and sets new Ip of the appliamce.
Parameters:
app1Ipv4Addr: str
Node1 IPv4 address in a two-node cluster
app2Ipv4Addr: str
Node2 IPv4 address in a two-node cluster.
virtIpv4Addr: str
Virtual IPv4 address. Oneview dcs will be reachable from this IP.
ipv4Gateway: str
IPv4 gateway address.
ipv4Subnet: str
IPv4 subnet mask or CIDR bit count.
"""
self.accept_eula_once()
self.change_administrator_password()
self.change_ovDcs_ip(app1Ipv4Addr, app2Ipv4Addr, virtIpv4Addr,
ipv4Gateway, ipv4Subnet)
def dcs_schematic_configuration(self, dcs_commands):
'''Change DCS schematic then perform Hardware setup
:param dcs_commands: Sequence of DCS commands to be executed along with its expected output for changing the schematic
ex:[["dcs stop", "DCS is Stopped"]]
'''
        # need to check if the running schematic is already synergy_3encl_demo and, if so, skip this step
self.change_dcs_schematic(dcs_commands)
self.dcs_hardware_setup()
self.sshlib.close_connection()
dcs_commands = [
["dcs status", "dcs is running"],
["dcs stop", "dcs is stopped"],
["dcs status", "dcs is not running"],
["dcs start /dcs/schematic/synergy_3encl_demo cold", "DCS httpd daemon started"],
[
"dcs status",
"DCS is Running\n Schematic used: /dcs/schematic/synergy_3encl_demo",
],
]
def ping(hosts):
"""
Returns True if host (str) responds to a ping request.
"""
host=hosts.strip()
# operating_sys = platform.system().lower()
exit_code = os.system("ping6 "+host+"%"+interfaces[0]+" -c 5")
# print("ping6 "+hosts+"%"+interfaces[0]+" -c 5")
if exit_code == 0:
return True
return False
interfaces = list(filter(lambda x: "ens" in x, netifaces.interfaces()))
config = load_from_file("auto_config")["fts"]
if __name__ == "__main__":
if len(interfaces) > 0:
f=open("ipv6.txt")
ipv6=f.readline()
while ipv6:
if ping(ipv6):
ipv6=ipv6.strip()
dcs_inst = dcs(ipv6, interfaces[0],
config["dcs_username"], config["dcs_password"])
dcs_inst.dcs_network_configuration(
config["dcs_ipv4_1"],
config["dcs_ipv4_2"],
config["dcs_ipv4_3"],
config["gateway"],
config["subnet_mask"],)
dcs_inst.dcs_schematic_configuration(dcs_commands)
break
else:
ipv6=f.readline()
|
Srija-Papinwar/CD
|
scripts/dcs_fts.py
|
dcs_fts.py
|
py
| 20,662 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5759912904
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 20 09:17:19 2019
@author: if715029
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial.distance as sc
import pandas as pd
#%%
data = pd.read_excel('../data/Datos_2015.xlsx',sheet_name='Atemajac')
#%%
data = data.iloc[:,2:7].dropna()
#%%
D1 = sc.squareform(sc.pdist(data.iloc[:,2:],'euclidean'))
#%%
data_norm = (data-data.mean(axis=0))/data.std(axis=0)
#%%
plt.subplot(1,2,1)
plt.scatter(data['CO'],data['PM10'])
plt.axis('square')
plt.subplot(1,2,2)
plt.scatter(data_norm['CO'],data_norm['PM10'])
plt.axis('square')
plt.show()
|
OscarFlores-IFi/CDINP19
|
code/p6.py
|
p6.py
|
py
| 616 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27055803559
|
"""empty message
Revision ID: 810e0afb57ea
Revises: 22771e69d10c
Create Date: 2022-01-19 19:59:08.027108
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "810e0afb57ea"
down_revision = "22771e69d10c"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"techstack",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("technology", sa.String(length=50), nullable=False),
sa.Column("label", sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("technology"),
)
op.create_table(
"team_techstack",
sa.Column("team_id", sa.Integer(), nullable=True),
sa.Column("techstack_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["team_id"],
["team.id"],
),
sa.ForeignKeyConstraint(
["techstack_id"],
["techstack.id"],
),
)
op.create_table(
"user_skill",
sa.Column("user_id", sa.Integer(), nullable=False),
sa.Column("techstack_id", sa.Integer(), nullable=False),
sa.Column("skill_level", sa.Integer(), nullable=True),
sa.Column("is_learning_goal", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["techstack_id"],
["techstack.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("user_id", "techstack_id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("user_skill")
op.drop_table("team_techstack")
op.drop_table("techstack")
# ### end Alembic commands ###
|
CodeForPoznan/codeforpoznan.pl_v3
|
backend/migrations/versions/810e0afb57ea_.py
|
810e0afb57ea_.py
|
py
| 1,891 |
python
|
en
|
code
| 8 |
github-code
|
6
|
17345627172
|
import os
import glob
import torch
from torchvision import transforms as T
from torch.utils.data import DataLoader,Dataset
from torch.utils.data.distributed import DistributedSampler
from codes.utils import img_processing
from codes.data import data_utils
import math
import numpy as np
class Load_Data(Dataset):
    '''
    Read images.
    Get the Y channel of the low-light image.
    Read the low-light image, the low-light + noise image, and the normal-light image.
    '''
def __init__(self, data_root, data_son=None, img_type='jpg', is_resize=False, is_long_resize=False, resize_h=512, resize_w=512):
        if data_son != '':
            # if non-empty, read paired low-light / normal-light images
imgs_ll = glob.glob(os.path.join(data_root, data_son['ll'], '*.*' ))
imgs_ll_noise = glob.glob(os.path.join(data_root, data_son['ll_noise'], '*.*' ))
imgs_org = glob.glob(os.path.join(data_root, data_son['org'], '*.*'))
imgs_org_enhance = glob.glob(os.path.join(data_root, data_son['org_en'], '*.*'))
self.imgs_ll = imgs_ll
self.imgs_org = imgs_org
self.imgs_org_enhance = imgs_org_enhance
else:
imgs_ll_noise = glob.glob(os.path.join(data_root, '*.*'))
self.imgs_ll_noise = imgs_ll_noise
self.data_son = data_son
self.is_resize = is_resize
self.resize_h = resize_h
self.resize_w = resize_w
self.is_long_resize = is_long_resize
        # transforms applied to the images
self.img_ll_transform = data_utils.train_ll_transforms()
self.img_org_transform = data_utils.train_org_transforms()
def __getitem__(self, index):
        '''
        Read an image and apply the corresponding processing.
        :param index:
        :return:
        '''
        imgs_ll_path = self.imgs_ll[index]  # low-light: path of the low-light image at position index
        imgs_ll_noise_path = imgs_ll_path.replace(self.data_son['ll'], self.data_son['ll_noise'])  # low-light + noise
[_, name] = os.path.split(imgs_ll_path)
        suffix = name[name.find('.') + 1:]  # image type (file extension)
name = name[:name.find('.')]
img_ll = img_processing.read_image(imgs_ll_path, is_resize=self.is_resize, resize_height=self.resize_h,
resize_width=self.resize_w, normalization=True,
is_long_resize=self.is_long_resize)
img_ll_noise, y = img_processing.read_image(imgs_ll_noise_path, is_resize=self.is_resize, resize_height=self.resize_h,
resize_width=self.resize_w, normalization=True,
is_long_resize=self.is_long_resize, is_cvtColor='YCrCb')
# t0 = abs(img_ll_noise - img_ll)
# t = abs(img_ll_noise - img_ll) / (img_ll + 1e-7)
# r_max = t[:,:,0].max()
# noise_map = np.max(abs(img_ll_noise - img_ll) / img_ll_noise, axis=(0,1,2))
# noise = self.noise_map(img_ll_noise)
        noise_map = img_ll_noise - img_ll  # for non-additive noise this way of estimating it is not correct
noise_map = self.img_org_transform(noise_map)
img_ll = self.img_org_transform(img_ll)
img_ll_noise = self.img_org_transform(img_ll_noise)
        if self.data_son != '':  # read the normal-light image
imgs_org_path = imgs_ll_path.replace(self.data_son['ll'], self.data_son['org'])
            imgs_org_path = imgs_org_path.replace('png', 'jpg')  # images in the org set are jpg
img_org = img_processing.read_image(imgs_org_path, is_resize=self.is_resize, resize_height=self.resize_h,
resize_width=self.resize_w, normalization=False,
is_long_resize=self.is_long_resize)
img_org = self.img_org_transform(img_org)
imgs_org_en_path = imgs_ll_path.replace(self.data_son['ll'], self.data_son['org_en'])
img_org_en, y_en = img_processing.read_image(imgs_org_en_path, is_resize=self.is_resize, resize_height=self.resize_h,
resize_width=self.resize_w, normalization=False,
is_long_resize=self.is_long_resize, is_cvtColor='YCrCb')
img_org_en = self.img_org_transform(img_org_en)
return img_ll, img_ll_noise, img_org, img_org_en, y, noise_map, name
else:
return img_ll, y, name
def __len__(self):
        return len(self.imgs_ll)  # total number of images
def get_loader(data_root, data_son, batch_size, is_resize=False,resize_h=384, resize_w=384, img_type='jpg', is_long_resize=False):
dataset = Load_Data(data_root, data_son, is_resize=is_resize, resize_h=resize_h, resize_w=resize_w, img_type=img_type, is_long_resize=is_long_resize)
data_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
                             pin_memory=True)  # pinned (page-locked) memory: with pin_memory=True the produced
    # tensors start out in page-locked host memory, which makes copying them to GPU memory faster.
    # GPU memory is all page-locked; when the machine has plenty of RAM, pin_memory=True can be enabled
    # to skip the extra CPU staging copy and map the data directly to GPU-accessible memory, saving transfer time.
return data_loader
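# Usage sketch (assumed, not part of the original repo): the dataset root and the folder
# names inside data_son below are hypothetical placeholders.
# if __name__ == '__main__':
#     data_son = {'ll': 'low', 'll_noise': 'low_noise', 'org': 'normal', 'org_en': 'normal_enhanced'}
#     loader = get_loader(data_root='./datasets/demo', data_son=data_son, batch_size=4,
#                         is_resize=True, resize_h=384, resize_w=384)
#     for img_ll, img_ll_noise, img_org, img_org_en, y, noise_map, name in loader:
#         print(name, img_ll.shape, noise_map.shape)
#         break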
|
csxuwu/LRCR_Net
|
codes/data/data_loader4.py
|
data_loader4.py
|
py
| 5,103 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1383718733
|
from .base import metadata
from sqlalchemy import Table, Column, BigInteger,\
String, Boolean, DateTime
t_users = Table(
"users",
metadata,
Column('u_id', BigInteger), # telegram id
    Column('name', String), # surname with initials
    Column('name_tg', String), # Telegram username, if any
    Column('admin', Boolean), # whether the user is an administrator
Column('org_name', String),
Column('org_code', String),
Column('date_update', DateTime),
)
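# Usage sketch (assumed, not part of the original repo): create the table and insert one
# row with SQLAlchemy Core; the in-memory engine below is only for illustration.
# from sqlalchemy import create_engine
# engine = create_engine("sqlite://")
# metadata.create_all(engine)
# with engine.connect() as conn:
#     conn.execute(t_users.insert().values(u_id=123456789, name="Ivanov I. I.",
#                                          name_tg="ivanov", admin=False))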
|
oleg-medovikov/eventlog
|
base/users.py
|
users.py
|
py
| 618 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
72680091707
|
# Given an array, finds the element that would be at position k of the sorted array
# basically the partition algorithm used in quicksort
# O(n) time
def quick_select(index, array):
p_index, p_val = partition(choose_pivot(array), array)
if p_index == index:
return p_val
if p_index > index:
return quick_select(index, array[:p_index])
if p_index < index:
return quick_select(index - p_index - 1, array[p_index + 1:])
def choose_pivot(array):
return len(array) - 1
def partition(pivot, array):
array[-1], array[pivot] = array[pivot], array[-1]
low = 0
high = len(array) - 2
length = len(array)
while True:
while low < length - 1 and array[low] < array[-1]:
low += 1
while high >= 0 and array[high] > array[-1]:
high -= 1
if low >= high:
break
array[low], array[high] = array[high], array[low]
array[low], array[-1] = array[-1], array[low]
return low, array[low]
print(quick_select(3, [30,60,10,0,50,80,90,20,40,70]))
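# A further usage sketch (not part of the original file): quick_select is 0-indexed, so
# position len(a) // 2 of an odd-length list is its median; partition() mutates its
# argument, hence the defensive copy.
sample = [7, 2, 9, 4, 1]
print(quick_select(len(sample) // 2, list(sample)))  # 4, i.e. sorted(sample)[2]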
|
exue026/algos-and-structs
|
quick-select.py
|
quick-select.py
|
py
| 1,061 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18537345589
|
# Prob_link: https://www.codingninjas.com/studio/problems/fractional-knapsack_8230767?challengeSlug=striver-sde-challenge&leftPanelTab=0
from os import *
from sys import *
from collections import *
from math import *
def maximumValue(items, n, w):
items.sort(key=lambda x: x[1] / x[0], reverse=True)
value = 0
for item in items:
if item[0] <= w:
w -= item[0]
value += item[1]
elif item[0] > w:
value += (item[1] / item[0]) * w
w = 0
break
return value
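# Usage sketch (assumed, not part of the original solution file): each item is a
# [weight, value] pair and w is the knapsack capacity.
if __name__ == "__main__":
    items = [[10, 60], [20, 100], [30, 120]]
    # capacity 50: take items 1 and 2 whole plus 2/3 of item 3 -> 240.0
    print(maximumValue(items, len(items), 50))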
|
Red-Pillow/Strivers-SDE-Sheet-Challenge
|
P46_Fractional_Knapsack.py
|
P46_Fractional_Knapsack.py
|
py
| 565 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22951595310
|
#!/usr/bin/env python2.7
"""
A tool to update the Product Version and Code of a C# VS2010 setup package (*.vdproj). Intended
to be used with an automated build process.
"""
import re
import uuid
import argparse
import os, shutil
import tempfile
##"ProductCode" = "8:{35424778-8534-431B-9492-5CD84B1EDE03}"
productcode_re = re.compile(r"(?:\"ProductCode\" = \"8.){([\d\w-]+)}")
##"ProductVersion" = "8:1.0.89"
productversion_re = re.compile(r"(?:\"ProductVersion\" = \"8.)([\d\w\.]+)\"")
def replace_code_and_version(src_fname, version="1.0.0", code="12345678-1234-1234-1234-1233456789012"):
fd, tmp_fname = tempfile.mkstemp()
tmp = open(tmp_fname, 'w')
src = open(src_fname)
for l in src:
if productcode_re.search(l):
m = productcode_re.search(l)
l = l.replace(m.group(1), code)
if productversion_re.search(l):
m = productversion_re.search(l)
l = l.replace(m.group(1), version)
tmp.write(l)
tmp.close()
os.close(fd)
src.close()
os.remove(src_fname)
shutil.move(tmp_fname, src_fname)
def parse_commands(test_args=None):
descrip = "Utility to update ProductCode and ProductVersion of VS2010 setup projects"
parser = argparse.ArgumentParser(description=descrip)
parser.add_argument("-f", "--file", dest="vdproj", action="store", default=None,
help="The vdproj file to be 'adjusted'", required=True)
parser.add_argument("-v", "--version", action="store", dest="version", default="1.0.0",
help="The new version to be set conforming to: major, minor, build e.g '1.0.195'")
parser.add_argument("-c", "--code", dest="code", action="store", default=str(uuid.uuid4()),
help="The new product code GUID. If not provided one is generated. ")
## Don't update the UpgradeCode that needs to stay the same for the product duration
if test_args is None:
args = parser.parse_args()
else:
args = parser.parse_args(test_args)
return args
def main():
args = parse_commands()
replace_code_and_version(args.file, args.version, args.code)
if __name__ == '__main__':
main()
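# Example invocation (assumed, not part of the original file; the project path is hypothetical):
#   python pvc_changer.py -f Setup/Setup.vdproj -v 1.0.195
# When -c is omitted a fresh ProductCode GUID is generated via uuid.uuid4().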
|
wfriedl/pvc_changer
|
pvc_changer.py
|
pvc_changer.py
|
py
| 2,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3345758956
|
from tkinter import *
from tkinter import messagebox
import tkinter as tk
import time, sys
from pygame import mixer
from PIL import Image, ImageTk
def alarm():
alarm_time=user_input.get()
if alarm_time=="":
messagebox.askretrycancel("Error Message","Please Enter value")
else:
while True:
time.sleep(1)
if(alarm_time==time.strftime("%H:%M")):
playmusic()
def playmusic():
mixer.init()
mixer.music.load(' clock.mp3')
mixer.music.play()
while mixer.music.get_busy():
time.sleep(30)
mixer.music.stop()
sys.exit()
root=Tk()
root.title(" Alarm clock")
canvas=Canvas(root, width=600,height=380)
image=ImageTk.PhotoImage(Image.open("clock image .png"))
canvas.create_image(0,0,anchor=NW, image=image)
canvas.pack()
header=Frame(root)
box1=Frame(root)
box1.place(x=250,y=180)
box2=Frame(root)
box2.place(x=250,y=180)
#time taken by user
#helv36 = tkFont.Font(family="Helvetica",size=36,weight="bold")
user_input=Entry(box1,font=('ArialNarrow', 20),width=8)
user_input.grid(row=0, column=2)
#set alarm button
start_button = Button(box1, text="Set Alarm", font=('ArialNarrow', 20), command=alarm)  # completed here; parent, label and command are assumptions
start_button.grid(row=0, column=3)
root.mainloop()
|
shuchi111/Alarm_clockGUI.py
|
alarm.py
|
alarm.py
|
py
| 1,256 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9512947044
|
# encoding=utf-8
import pandas as pd
import numpy as np
import time
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
'''
1. The derivative is taken of the likelihood function, not the cost function.
2. Note: because the gradient contains exponential terms, a very small learning rate is needed.
'''
class logistic_regression(object):
def __init__(self,max_iteration=5000,learning_rate=0.00001):
self.max_iteration = max_iteration
self.learning_rate = learning_rate
def predict_(self,x):
wx = np.dot(self.w,x)
p_1 = np.exp(wx)/(1+np.exp(wx))
p_0 = 1/(1+np.exp(wx))
if p_1 >= p_0:
return 1
else:
return 0
def train(self,features,labels):
self.w = [0.0] * (len(features[0])+1)
#SGD
iteration = 0
while iteration < self.max_iteration:
index = np.random.choice(len(features),1)[0]
x = features[index]
x = np.append(x,1) #x=(x1,x2,....,xn,1)^T
y = labels[index]
            '''
            Caution:
            Reference to chp6 p79. Here we differentiate the log-likelihood function; we want to maximize
            the likelihood, which is equivalent to minimizing its negative, so the gradient must be negated.
            '''
gradient = y*x - np.exp(np.dot(self.w,x))*x / (1+np.exp(np.dot(self.w,x)))
self.w -= self.learning_rate * (-1*gradient)
iteration += 1
def predict(self,features):
labels = []
for feature in features:
x = list(feature)
x.append(1)
labels.append(self.predict_(x))
return labels
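# Usage sketch (assumed, not part of the original file; note the sklearn.cross_validation
# import above requires an older scikit-learn): fit the classifier on a tiny, linearly
# separable synthetic set and report training accuracy (expected to be close to 1.0).
if __name__ == '__main__':
    np.random.seed(0)
    X = np.vstack([np.random.randn(50, 2) + 2, np.random.randn(50, 2) - 2])
    y = np.array([1] * 50 + [0] * 50)
    clf = logistic_regression()
    clf.train(X, y)
    pred = clf.predict(X)
    print('train accuracy:', np.mean(np.array(pred) == y))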
|
guozhiqi14/Statistical-Learning
|
Logistic Regression/logistic_clf.py
|
logistic_clf.py
|
py
| 1,422 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19882708740
|
import os
import click
from guardata.utils import trio_run
from guardata.api.protocol import OrganizationID
from guardata.logging import configure_logging
from guardata.cli_utils import spinner, cli_exception_handler
from guardata.client.types import BackendAddr, BackendOrganizationBootstrapAddr
from guardata.client.backend_connection import apiv1_backend_administration_cmds_factory
async def _create_organization(debug, name, backend_addr, administration_token, expiration_date):
async with spinner("Creating group in backend"):
async with apiv1_backend_administration_cmds_factory(
backend_addr, administration_token
) as cmds:
rep = await cmds.organization_create(name, expiration_date)
if rep["status"] != "ok":
raise RuntimeError(f"Backend refused to create group: {rep}")
bootstrap_token = rep["bootstrap_token"]
organization_addr = BackendOrganizationBootstrapAddr.build(backend_addr, name, bootstrap_token)
organization_addr_display = click.style(organization_addr.to_url(), fg="yellow")
click.echo(f"Bootstrap group url: {organization_addr_display}")
@click.command(short_help="create new group")
@click.argument("name", required=True, type=OrganizationID)
@click.option("--addr", "-B", required=True, type=BackendAddr.from_url)
@click.option("--administration-token", "-T", required=True)
@click.option("--expiration-date", "-E", default=None, type=click.DateTime())
def create_organization(name, addr, administration_token, expiration_date):
debug = "DEBUG" in os.environ
configure_logging(log_level="DEBUG" if debug else "WARNING")
with cli_exception_handler(debug):
trio_run(_create_organization, debug, name, addr, administration_token, expiration_date)
|
bitlogik/guardata
|
guardata/client/cli/create_organization.py
|
create_organization.py
|
py
| 1,793 |
python
|
en
|
code
| 9 |
github-code
|
6
|
71066855869
|
from manim_express.eager import PlotObj, Size
from examples.example_imports import *
scene = EagerModeScene(screen_size=Size.bigger)
graph = Line().scale(0.2)
# t0 = time.time()
#
# delta_t = 0.5
# for a in np.linspace(3, 12, 3):
# graph2 = ParametricCurve(lambda t: [t,
# 0.8 * np.abs(t) ** (6 / 7) + 0.9 * np.sqrt(abs(a - t ** 2)) * np.sin(
# a * t + 0.2),
# 0],
# t_range=(-math.sqrt(a), math.sqrt(a))).scale(0.5)
# scene.play(Transform(graph, graph2), run_time=3)
ps = np.random.rand(10, 3)
print(ps.shape)
print(ps[:, 0].max())
theta = np.linspace(0, 2 * PI, 100)
x = np.cos(theta)
y = np.sin(theta)
p = PlotObj(x, y)
scene.play(ShowCreation(p))
s = PlotObj(theta, x).set_color(RED)
scene.play(ShowCreation(s))
grid = p.get_grid(3, 3)
scene.add(grid)
scene.play(grid.animate.shift(LEFT))
scene.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN, RED))
scene.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
# scene.play(grid.animate.apply_complex_function(np.exp), run_time=5)
scene.play(
grid.animate.apply_function(
lambda p: [
p[0] + 0.5 * math.sin(p[1]),
p[1] + 0.5 * math.sin(p[0]),
p[2]
]
),
run_time=5,
)
scene.hold_on()
|
beidongjiedeguang/manim-express
|
examples/plot/lines.py
|
lines.py
|
py
| 1,382 |
python
|
en
|
code
| 13 |
github-code
|
6
|
38126132792
|
import pandas as pd
TABLAS_REPORTE_DIARIO = {1: 'CasosConfirmadosNivelNacional', 2: 'ExamenesRealizadosNivelNacional',
3: 'HospitalizacionUCIRegion', 4: 'HospitalizacionUciEtario'}
def regionName(df):
if 'Region' in df.columns:
print('Normalizando regiones')
df['Region'] = df['Region'].str.strip()
df["Region"] = df["Region"].replace({"Arica - Parinacota": "Arica y Parinacota", "Arica": "Arica y Parinacota",
"Tarapaca": "Tarapacá",
"Valparaiso": "Valparaíso", "Santiago": "Metropolitana",
"Del Libertador General Bernardo O’Higgins": "O’Higgins",
"Ohiggins": "O’Higgins", "Libertador Bernardo O'Higgins" : "O’Higgins",
"Nuble": "Ñuble",
"Biobio": "Biobío", "La Araucania": "Araucanía", "Araucania": "Araucanía",
"Los Rios": "Los Ríos", "De los Rios": "Los Ríos", "LOs Rios" : "Los Ríos",
"De los Lagos": "Los Lagos", "LOs Lagos": "Los Lagos",
"Aysen": "Aysén", "Magallanes y la Antartica": "Magallanes",
"": "Total"})
codigoRegion = {'Tarapacá': '01', 'Antofagasta': '02', 'Atacama': '03', 'Coquimbo': '04', 'Valparaíso': '05',
'O’Higgins': '06', 'Maule': '07', 'Biobío': '08', 'Araucanía': '09', 'Los Lagos': '10',
'Aysén': '11', 'Magallanes': '12', 'Metropolitana': '13', 'Los Ríos': '14',
'Arica y Parinacota': '15', 'Ñuble': '16'}
for region in df['Region']:
if region in codigoRegion.keys():
loc = df.loc[df['Region'] == region].index
df.loc[loc, 'Codigoregion'] = codigoRegion[region]
else:
loc = df.loc[df['Region'] == region].index
df.loc[loc, 'Codigoregion'] = ''
print(region + ' no es region')
        # Codigoregion ended up as the last column. Ideally it should sit next to Region
codRegAux = df['Codigoregion']
df.drop(labels=['Codigoregion'], axis=1, inplace=True)
df.insert(1, 'Codigoregion', codRegAux)
def comunaName(df):
if 'Comuna' in df.columns:
print('Normalizando comunas')
        # Read commune IDs from the official SUBDERE website
df_dim_comunas = pd.read_excel("http://www.subdere.gov.cl/sites/default/files/documentos/cut_2018_v03.xls",
encoding="utf-8")
        # Create a column without accents, to merge with the published data
df_dim_comunas["Comuna"] = df_dim_comunas["Nombre Comuna"].str.normalize("NFKD").str.encode("ascii",
errors="ignore").str.decode("utf-8")
print(df_dim_comunas)
df = df.merge(df_dim_comunas, on="Comuna", how="outer")
return(df)
def pandizer(tables):
"""
Recibimos lista de tablas de diccionarios recursivos y las transformamos en pandas datraframes
:param tables:lista de diccionarios: cada tabla es un diccionario compuesto de diccionarios para cada fila
:return:
"""
lenTables = len(tables)
print(' Got ' + str(lenTables) + ' tables to pandize')
df_list = {}
if tables == '<b> NO Table FOUND </b>':
print(tables)
return df_list
for i in range(lenTables):
table_key = 'Table_' + str(i + 1)
each_table = tables[i]
pd_table = pd.DataFrame(each_table[table_key])
#Remove empty spaces from headers
aux = pd_table.transpose()
aux.columns = aux.iloc[0]
aux.drop(aux.index[0], inplace=True)
aux.columns = aux.columns.str.replace(' ', '')
df_list[table_key] = aux
return df_list
def dumpDict2csv(dict, source, output):
if dict:
print(source + ' had ' + str(len(dict)) + ' tables')
for k in dict.keys():
filename = output + source + '_' + str(k) + '.csv'
print(k + ' will be stored as ' + filename)
dict[k].to_csv(filename, index=False)
else:
print(source + ' has no tables')
filename = output + source + '_NOTABLES' + '.csv'
myfile = open(filename, 'a+')
myfile.write(source + ' has no tables')
myfile.close()
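# Small usage sketch (assumed, not part of the original script; the 'Casos' column is a
# made-up example): regionName normalizes 'Region' in place and inserts 'Codigoregion'.
if __name__ == '__main__':
    df_demo = pd.DataFrame({'Region': ['Arica', 'Biobio'], 'Casos': [10, 20]})
    regionName(df_demo)
    print(df_demo[['Region', 'Codigoregion']])  # 'Arica y Parinacota' -> 15, 'Biobío' -> 08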
|
gitter-badger/covid19-pdfocr
|
src/postAwsProcessing.py
|
postAwsProcessing.py
|
py
| 4,597 |
python
|
es
|
code
| 0 |
github-code
|
6
|
26971894453
|
import logging
import mtcorr
import statistics as stats
import math
import h5py
import numpy
import sys
log = logging.getLogger(__name__)
def load_from_hdf5(filename):
f = h5py.File(filename,'r')
quantiles_dict = {}
stats = {}
if 'quantiles' in f:
quantiles_dict['exp_quantiles'] = f['quantiles']['quantiles'][:,0].tolist()
quantiles_dict['quantiles'] = f['quantiles']['quantiles'][:,1].tolist()
quantiles_dict['exp_log_quantiles'] = f['quantiles']['log_quantiles'][:,0].tolist()
quantiles_dict['log_quantiles'] = f['quantiles']['log_quantiles'][:,1].tolist()
stats['quantiles_dict'] = quantiles_dict
pvals_group = f['pvalues']
method = pvals_group.attrs.get('analysis_method','')
transformation = pvals_group.get('transformation','')
if 'ks_stat' in pvals_group.attrs:
stats['ks_stats'] = {'D':pvals_group.attrs['ks_stat']}
if 'ks_pval' in pvals_group.attrs:
stats['ks_stats']['p_val'] = pvals_group.attrs['ks_pval']
if 'med_pval' in pvals_group.attrs:
stats['med_pval'] = pvals_group.attrs['med_pval']
if 'bh_thres' in pvals_group.attrs:
stats['bh_thres_d'] = {'thes_pval': math.pow(10,-pvals_group.attrs['bh_thres'])}
chromosomes = []
positions = []
scores = []
mafs = []
macs = []
additional_columns = {}
chrs = map(lambda x:x[3:],f['pvalues'].keys())
for ix,chr in enumerate(chrs):
chr_group = pvals_group['chr%s'% chr]
chromosomes.extend([chr]*len(chr_group['positions']))
positions.extend(chr_group['positions'][:].tolist())
scores.extend(chr_group['scores'][:].tolist())
mafs.extend(chr_group['mafs'][:].tolist())
macs.extend(chr_group['macs'][:].tolist())
for i,key in enumerate(chr_group.keys()):
if key not in ('positions','scores','mafs','macs'):
values = chr_group[key][:].tolist()
if key not in additional_columns:
additional_columns[key] = values
else:
additional_columns[key].extend(values)
f.close()
scores = map(lambda x:math.pow(10,-1*x), scores)
maf_dict = {'mafs':mafs,'macs':macs}
return GWASResult(chrs,chromosomes,positions,scores,maf_dict,method,transformation,stats=stats,additional_columns=additional_columns)
def load_from_csv(filename):
chromosomes = []
positions = []
pvals = []
mafs = []
macs = []
additional_columns = {}
chrs = []
chr = None
is_pval = False
with open(filename,'r') as f:
header = f.readline().rstrip()
add_header = header.split(",")[5:]
for key in add_header:
key = key.replace('"','')
additional_columns[key] = []
for row in f:
fields = row.rstrip().split(",")
if chr != fields[0]:
chr = fields[0]
chrs.append(chr)
chromosomes.append(chr)
positions.append(int(float(fields[1])))
pvals.append(float(fields[2]))
mafs.append(float(fields[3]))
macs.append(int(float(fields[4])))
if len(add_header) > 0:
for i,key in enumerate(add_header):
key = key.replace('"','')
addit_value = None
if fields[(5+i)] != '':
addit_value = float(fields[(5+i)])
additional_columns[key].append(addit_value)
is_pval = max(pvals) <= 1.0
if is_pval is False:
pvals = map(lambda x:math.pow(10,-1*x),pvals)
return GWASResult(chrs,chromosomes,positions,pvals,{'mafs':mafs,'macs':macs},additional_columns = additional_columns)
class GWASResult(object):
def __init__(self,chrs,chromosomes,positions,pvals,maf_dict,method = 'N/A',transformation = None,stats = None,additional_columns = None,step_stats = None):
self.ix_with_bad_pvalues = ix_with_bad_pvalues = numpy.where(pvals == 0.0)[0]
if len(ix_with_bad_pvalues) > 0:
pvals[ix_with_bad_pvalues] = sys.float_info.min
self.pvals = pvals
self.method = method
self.transformation = transformation
self.chrs = chrs
self.chromosomes = chromosomes
self.positions = positions
self.stats = stats
self.maf_dict = maf_dict
self.additional_columns = additional_columns
self.step_stats = step_stats
self.bonferroni_threshold = -math.log10(0.05 / len(pvals))
self.min_pval = min(pvals)
if not self.stats:
self._calculate_stats_()
def _calculate_stats_(self):
log.info('Calculating Benjamini-Hochberg threshold',extra={'progress':90})
#Calculate Benjamini-Hochberg threshold
self.stats = {}
self.stats['bh_thres_d'] = mtcorr.get_bhy_thres(self.pvals, fdr_thres=0.05)
#Calculate Median p-value
self.stats['med_pval'] = stats.calc_median(self.pvals)
#Calculate the Kolmogorov-Smirnov statistic
self.stats['ks_stats'] = stats.calc_ks_stats(self.pvals)
self.stats['quantiles_dict'] = stats.calculate_qqplot_data(self.pvals)
def get_top_snps(self,top_ratio=2500):
data = numpy.core.records.fromrecords(zip(self.chromosomes, self.positions, self.pvals, self.maf_dict['mafs'], self.maf_dict['macs'],*self.additional_columns.values()),names='chr,positions,scores,mafs,macs')
data_to_return=[]
for ix,chr in enumerate(self.chrs):
chr_data = data[numpy.where(data['chr'] == chr)]
chr_data =chr_data[chr_data['scores'].argsort()[::]][:top_ratio]
data_to_return.append(chr_data)
return numpy.concatenate(data_to_return)
def save_as_csv(self,csv_file):
data = numpy.array(zip(self.chromosomes, self.positions, self.pvals, self.maf_dict['mafs'], self.maf_dict['macs'],*self.additional_columns.values()))
data =data[numpy.lexsort((data[:,1],data[:,0]))]
additional_column_headers = self.additional_columns.keys()
header = ['chromosomes','positions','pvals','mafs','macs']
header.extend(additional_column_headers)
with open(csv_file,'w') as f:
f.write(','.join(header)+"\n")
for row in data:
rows_to_write = row.tolist()
rows_to_write[0] = int(rows_to_write[0])
rows_to_write[1] = int(rows_to_write[1])
rows_to_write[4] = int(float(rows_to_write[4]))
f.write(','.join(map(str,rows_to_write))+"\n")
def save_as_hdf5(self,hdf5_file):
positions = self.positions
chromosomes = self.chromosomes
maf_dict = self.maf_dict
scores = map(lambda x:-math.log10(x), self.pvals)
quantiles_dict = self.stats['quantiles_dict']
f = h5py.File(hdf5_file,'w')
# store quantiles
quant_group = f.create_group('quantiles')
quantiles_array = zip(quantiles_dict['exp_quantiles'],quantiles_dict['quantiles'])
log_quantiles_array = zip(quantiles_dict['exp_log_quantiles'],quantiles_dict['log_quantiles'])
quant_group.create_dataset('quantiles',(len(quantiles_dict['quantiles']), 2),'f8',data=quantiles_array)
quant_group.create_dataset('log_quantiles',(len(quantiles_dict['log_quantiles']), 2),'f8',data=log_quantiles_array)
#store pvalues
pvals_group = f.create_group('pvalues')
if len(self.ix_with_bad_pvalues) > 0:
pvals_group.attrs['ix_with_bad_pvalues'] = self.ix_with_bad_pvalues
pvals_group.attrs['numberOfSNPs'] = len(scores)
pvals_group.attrs['max_score'] = max(scores)
if self.method is not None:
pvals_group.attrs['analysis_method'] = self.method
transformation = "raw"
if self.transformation is not None:
transformation = self.transformation
pvals_group.attrs['transformation'] = transformation
pvals_group.attrs['bonferroni_threshold'] = self.bonferroni_threshold
pvals_group.attrs['ks_stat'] = self.stats['ks_stats']['D']
pvals_group.attrs['ks_pval'] = self.stats['ks_stats']['p_val']
pvals_group.attrs['med_pval'] = self.stats['med_pval']
pvals_group.attrs['bh_thres'] =-math.log10(self.stats['bh_thres_d']['thes_pval'])
data = numpy.core.records.fromrecords(zip(chromosomes, positions, scores, maf_dict['mafs'], maf_dict['macs'],*self.additional_columns.values()),names='chr,positions,scores,mafs,macs')
for ix,chr in enumerate(self.chrs):
chr_group = pvals_group.create_group("chr%s" % chr)
chr_data = data[numpy.where(data['chr'] == chr)]
chr_data =chr_data[chr_data['scores'].argsort()[::-1]]
positions = chr_data['positions']
chr_group.create_dataset('positions',(len(positions),),'i4',data=positions)
scores = chr_data['scores']
chr_group.create_dataset('scores',(len(scores),),'f8',data=scores)
mafs = chr_data['mafs']
chr_group.create_dataset('mafs',(len(mafs),),'f8',data=mafs)
macs = chr_data['macs']
chr_group.create_dataset('macs',(len(macs),),'i4',data=macs)
if len(chr_data.dtype) > 5:
for i,key in enumerate(self.additional_columns.keys()):
values = chr_data['f%s'% (5+i)]
chr_group.create_dataset(key,values.shape,values.dtype,data=values)
f.close()
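# Round-trip sketch (assumed, not part of the original module; note the file targets
# Python 2, as the map/zip usage above suggests, and the paths are hypothetical):
# result = load_from_csv("gwas_results.csv")
# print(result.min_pval, result.bonferroni_threshold)
# result.save_as_hdf5("gwas_results.hdf5")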
|
timeu/PyGWAS
|
pygwas/core/result.py
|
result.py
|
py
| 9,529 |
python
|
en
|
code
| 20 |
github-code
|
6
|
10731823816
|
import torch
from torch import nn
import torchvision.transforms as T
#######################################################################################
######################################## DRML ########################################
#######################################################################################
class RegionLayer(nn.Module):
def __init__(self, in_channels, grid=(8, 8)):
super(RegionLayer, self).__init__()
self.in_channels = in_channels
self.grid = grid
self.region_layers = dict()
for i in range(self.grid[0]):
for j in range(self.grid[1]):
module_name = 'region_conv_%d_%d' % (i, j)
self.region_layers[module_name] = nn.Sequential(
nn.BatchNorm2d(self.in_channels),
nn.ReLU(),
nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels,
kernel_size=3, stride=1, padding=1)
)
self.add_module(name=module_name, module=self.region_layers[module_name])
def forward(self, x):
"""
:param x: (b, c, h, w)
:return: (b, c, h, w)
"""
batch_size, _, height, width = x.size()
input_row_list = torch.split(x, split_size_or_sections=height//(self.grid[0]-1), dim=2)
output_row_list = []
for i, row in enumerate(input_row_list):
input_grid_list_of_a_row = torch.split(row, split_size_or_sections=width//(self.grid[1]-1), dim=3)
output_grid_list_of_a_row = []
for j, grid in enumerate(input_grid_list_of_a_row):
module_name = 'region_conv_%d_%d' % (i, j)
# print(module_name)
# print(i,j)
grid = self.region_layers[module_name](grid.contiguous()) + grid
output_grid_list_of_a_row.append(grid)
output_row = torch.cat(output_grid_list_of_a_row, dim=3)
output_row_list.append(output_row)
output = torch.cat(output_row_list, dim=2)
return output
class DRML(nn.Module):
def __init__(self, class_number=12):
super(DRML, self).__init__()
print('Init DRML... pls god work...')
self.class_number = class_number
self.extractor = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=11, stride=1),
RegionLayer(in_channels=32, grid=(8, 8)),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.BatchNorm2d(num_features=32),
nn.Conv2d(in_channels=32, out_channels=16, kernel_size=8, stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=8,),
nn.ReLU(),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=6, stride=2),
nn.ReLU(),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=5, stride=1),
nn.ReLU(),
)
self.classifier = nn.Sequential(
nn.Linear(in_features=6400, out_features=4096),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(in_features=4096, out_features=2048),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(in_features=2048, out_features=class_number)
)
def forward(self, data):
"""
:param x: (b, c, h, w)
:return: (b, class_number)
"""
x = data[0]
batch_size = x.size(0)
output = self.extractor(x)
output = output.view(batch_size, -1)
output = self.classifier(output)
return output
#######################################################################################
####################################### AlexNet ######################################
#######################################################################################
class AlexNet(nn.Module):
def __init__(self, num_classes = 12, dropout = 0.5): #0.5
super().__init__()
print('INIT AU AlexNet')
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(2304, 4096), # assuming 144x144 input
nn.ReLU(inplace=False),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=False),
nn.Linear(4096, num_classes),
)
def forward(self, data):
x = data[0]
x = self.features(x)
x = torch.flatten(x, 1)
out = self.classifier(x)
return out
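# Quick shape check (assumed, not part of the original file): the classifier's first
# Linear layer (2304 inputs) matches a 144x144 RGB crop, as the inline comment notes.
#   model = AlexNet(num_classes=12)
#   out = model([torch.randn(2, 3, 144, 144)])  # forward() reads data[0]
#   assert out.shape == torch.Size([2, 12])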
#######################################################################################
####################################### MTTSCAN ######################################
#######################################################################################
class Attention_mask(nn.Module):
def __init__(self):
super(Attention_mask, self).__init__()
def forward(self, x):
xsum = torch.sum(x, dim=2, keepdim=True)
xsum = torch.sum(xsum, dim=3, keepdim=True)
xshape = tuple(x.size())
return x / xsum * xshape[2] * xshape[3] * 0.5
def get_config(self):
"""May be generated manually. """
config = super(Attention_mask, self).get_config()
return config
class TSM(nn.Module):
def __init__(self, n_segment=10, fold_div=3):
super(TSM, self).__init__()
self.n_segment = n_segment
self.fold_div = fold_div
def forward(self, x):
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
fold = c // self.fold_div
out = torch.zeros_like(x)
out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift
return out.view(nt, c, h, w)
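# Minimal sanity check for the temporal shift (assumed, not part of the original file):
# with fold_div=3 and 3 channels, fold = 1, so within each n_segment-long clip channel 0
# is "shifted left", channel 1 "shifted right", and channel 2 is left untouched.
#   tsm = TSM(n_segment=4, fold_div=3)
#   out = tsm(torch.arange(12, dtype=torch.float32).view(4, 3, 1, 1))  # (nt, c, h, w)
#   assert out.shape == torch.Size([4, 3, 1, 1])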
class MTTS_CAN_SMALL(nn.Module):
"""MTTS_CAN is the multi-task (respiration) version of TS-CAN"""
def __init__(self, in_channels=3, nb_filters1=32, nb_filters2=64, kernel_size=3, dropout_rate1=0.25,
dropout_rate2=0.5, pool_size=(2, 2), nb_dense=128, frame_depth=20):
super(MTTS_CAN_SMALL, self).__init__()
print('init MTTS_CAN_SMALL')
self.in_channels = in_channels
self.kernel_size = kernel_size
self.dropout_rate1 = dropout_rate1
self.dropout_rate2 = dropout_rate2
self.pool_size = pool_size
self.nb_filters1 = nb_filters1
self.nb_filters2 = nb_filters2
self.nb_dense = nb_dense
# TSM layers
self.TSM_1 = TSM(n_segment=frame_depth)
self.TSM_2 = TSM(n_segment=frame_depth)
self.TSM_3 = TSM(n_segment=frame_depth)
self.TSM_4 = TSM(n_segment=frame_depth)
# Motion branch convs
self.motion_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
# Apperance branch convs
self.apperance_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
# Attention layers
self.apperance_att_conv1 = nn.Conv2d(self.nb_filters1, 1, kernel_size=1, padding=(0, 0),bias=True)
self.attn_mask_1 = Attention_mask()
self.apperance_att_conv2 = nn.Conv2d(self.nb_filters2, 1, kernel_size=1, padding=(0, 0),bias=True)
self.attn_mask_2 = Attention_mask()
# Dropout layers
self.dropout_4_y = nn.Dropout(self.dropout_rate2)
self.dropout_4_r = nn.Dropout(self.dropout_rate2)
# Dense layers
self.final_dense_1_y = nn.Linear(5184, self.nb_dense, bias=True)
self.final_dense_2_y = nn.Linear(self.nb_dense, 1, bias=True)
self.final_dense_1_r = nn.Linear(5184, self.nb_dense, bias=True)
self.final_dense_2_r = nn.Linear(self.nb_dense, 1, bias=True)
def forward(self, inputs, params=None):
big = inputs[0]
small = inputs[1]
raw_input = torch.zeros_like(small)
diff_input = small
transform = T.Resize((9,9))
for i in range(big.shape[0]):
# iterate through batch
raw_input[i,:,:,:] = transform(big[i,:,:,:])
diff_input = self.TSM_1(diff_input)
d1 = torch.tanh(self.motion_conv1(diff_input))
d1 = self.TSM_2(d1)
d2 = torch.tanh(self.motion_conv2(d1))
r1 = torch.tanh(self.apperance_conv1(raw_input))
r2 = torch.tanh(self.apperance_conv2(r1))
g1 = torch.sigmoid(self.apperance_att_conv1(r2))
g1 = self.attn_mask_1(g1)
gated1 = d2 * g1
# d3 = self.avg_pooling_1(gated1)
# d4 = self.dropout_1(d3)
# r3 = self.avg_pooling_2(r2)
# r4 = self.dropout_2(r3)
d4 = self.TSM_3(gated1)
d5 = torch.tanh(self.motion_conv3(d4))
d5 = self.TSM_4(d5)
d6 = torch.tanh(self.motion_conv4(d5))
r5 = torch.tanh(self.apperance_conv3(r2))
r6 = torch.tanh(self.apperance_conv4(r5))
g2 = torch.sigmoid(self.apperance_att_conv2(r6))
g2 = self.attn_mask_2(g2)
gated2 = d6 * g2
# d7 = self.avg_pooling_3(gated2)
# d8 = self.dropout_3(d7)
d9 = gated2.view(gated2.size(0), -1)
d10 = torch.tanh(self.final_dense_1_y(d9))
d11 = self.dropout_4_y(d10)
out_y = self.final_dense_2_y(d11)
d10 = torch.tanh(self.final_dense_1_r(d9))
d11 = self.dropout_4_r(d10)
out_r = self.final_dense_2_r(d11)
return out_y, out_r
#######################################################################################
####################################### DEEPPHYS ######################################
#######################################################################################
class DeepPhys(nn.Module):
def __init__(self, in_channels=3, nb_filters1=32, nb_filters2=64, kernel_size=3, dropout_rate1=0.25,
dropout_rate2=0.5, pool_size=(2, 2), nb_dense=128, out_size=1, img_size=36):
"""Definition of DeepPhys.
Args:
in_channels: the number of input channel. Default: 3
img_size: height/width of each frame. Default: 36.
Returns:
DeepPhys model.
"""
super(DeepPhys, self).__init__()
print("INIT DEEPPHYS")
self.in_channels = in_channels
self.kernel_size = kernel_size
self.dropout_rate1 = dropout_rate1
self.dropout_rate2 = dropout_rate2
self.pool_size = pool_size
self.nb_filters1 = nb_filters1
self.nb_filters2 = nb_filters2
self.nb_dense = nb_dense
self.out_size = out_size
# Motion branch convs
self.motion_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
# Apperance branch convs
self.apperance_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
# Attention layers
self.apperance_att_conv1 = nn.Conv2d(self.nb_filters1, 1, kernel_size=1, padding=(0, 0), bias=True)
self.attn_mask_1 = Attention_mask()
self.apperance_att_conv2 = nn.Conv2d(self.nb_filters2, 1, kernel_size=1, padding=(0, 0), bias=True)
self.attn_mask_2 = Attention_mask()
# Dropout layers
self.dropout_4 = nn.Dropout(self.dropout_rate2)
# Dense layers
self.final_dense_1 = nn.Linear(5184, self.nb_dense, bias=True)
self.final_dense_2 = nn.Linear(self.nb_dense, self.out_size, bias=True)
def forward(self, inputs, params=None):
big = inputs[0]
small = inputs[1]
raw_input = torch.zeros_like(small)
diff_input = small
transform = T.Resize((9,9))
for i in range(big.shape[0]):
raw_input[i,:,:,:] = transform(big[i,:,:,:])
d1 = torch.tanh(self.motion_conv1(diff_input))
d2 = torch.tanh(self.motion_conv2(d1))
r1 = torch.tanh(self.apperance_conv1(raw_input))
r2 = torch.tanh(self.apperance_conv2(r1))
g1 = torch.sigmoid(self.apperance_att_conv1(r2))
g1 = self.attn_mask_1(g1)
gated1 = d2 * g1
d5 = torch.tanh(self.motion_conv3(gated1))
d6 = torch.tanh(self.motion_conv4(d5))
r5 = torch.tanh(self.apperance_conv3(r2))
r6 = torch.tanh(self.apperance_conv4(r5))
g2 = torch.sigmoid(self.apperance_att_conv2(r6))
g2 = self.attn_mask_2(g2)
gated2 = d6 * g2
d9 = gated2.view(gated2.size(0), -1)
d10 = torch.tanh(self.final_dense_1(d9))
d11 = self.dropout_4(d10)
out = self.final_dense_2(d11)
return out
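# Quick shape check (assumed, not part of the original file): forward() takes a
# [big, small] pair; big frames are resized to 9x9 internally, and the flattened gated
# feature map is 64 * 9 * 9 = 5184, matching final_dense_1.
if __name__ == '__main__':
    model = DeepPhys()
    big = torch.randn(4, 3, 72, 72)    # raw frames (any size, resized to 9x9 in forward)
    small = torch.randn(4, 3, 9, 9)    # normalized difference frames, must be 9x9 here
    print(model([big, small]).shape)   # torch.Size([4, 1])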
|
girishvn/BigSmall
|
code/neural_methods/model/literature_models.py
|
literature_models.py
|
py
| 15,138 |
python
|
en
|
code
| 14 |
github-code
|
6
|
33699952990
|
from django.contrib import admin
from django.urls import path
from web.views import home_page
from django.contrib.auth.views import LoginView, LogoutView
from ckt.views import (
CircuitControlView,
CircuitStatusView,
write_api_view,
read_api_view,
to_circuit,
plot_graph,
chartview,
graph_two,
usercreation,
aboutpage,
privacy,
Register,
Method,
widgets,
)
urlpatterns = [
path("admin/", admin.site.urls),
path("", home_page, name="home"),
path("circuit/", CircuitControlView.as_view(), name="ckt"),
path("circuit_status/", CircuitStatusView.as_view(), name="ckt_status"),
path("control/", to_circuit, name="control"),
path("api_write/", write_api_view, name="write_api"),
path("api_read/", read_api_view, name="read_api"),
path("graph_plot/", plot_graph, name="plot_graph"),
path("chartview/", chartview, name="viewchart"),
path("graph_two/", graph_two, name="chartview_two"),
path("register/", usercreation, name="register"),
path("login/", LoginView.as_view(template_name="Login.html"), name="login"),
path("logout/", LogoutView.as_view(template_name="logout.html"), name="logout"),
path("aboutpage/", aboutpage, name="about_page"),
path("privacy/", privacy, name="privacy_policy"),
path("Register/", Register, name="Register_page"),
path("Method/", Method, name="Method_page"),
path("widgets/", widgets, name="widgets_page"),
]
|
sumansam312/IOT_Platform
|
iot/urls.py
|
urls.py
|
py
| 1,462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6306031331
|
import math
import socket
import time
import struct
# Set the server address
SERVER_ADDRESS = ('192.168.1.208', 5555)
# Set up the socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(SERVER_ADDRESS)
server_socket.listen(10)
print('server is running, please, press ctrl+c to stop')
# Listen for incoming connections
connection, address = server_socket.accept()
print("new connection from {address}".format(address=address))
count = 0
while count <= 100:
count = count + 1
data1 = int(100 * math.sin(2*math.pi*count / 100))
data2 = int(80 * math.sin(60*2*math.pi/360 + 2*math.pi*count / 100))
data3 = int(60 * math.sin(130*2*math.pi/360 + 2*math.pi*count / 100))
data4 = int(40 * math.sin(270*2*math.pi/360 + 2*math.pi*count / 100))
connection.sendall(data1.to_bytes(2, byteorder='big', signed=True))
connection.sendall(data2.to_bytes(2, byteorder='big', signed=True))
connection.sendall(data3.to_bytes(2, byteorder='big', signed=True))
connection.sendall(data4.to_bytes(2, byteorder='big', signed=True))
print(count)
time.sleep(1)
connection.close()
server_socket.close()
print("All data sendet!")
|
urb31075/PythonBox
|
srv.py
|
srv.py
|
py
| 1,218 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11707381858
|
#!/usr/bin/python
# coding: utf-8
from io import open
import os
import time
import re
import db
from sqlalchemy import or_, and_, not_, asc, desc, func
from datetime import datetime, timedelta
from functools import wraps # We need this to make Flask understand decorated routes.
import hashlib
import subprocess
from lxml.html.clean import Cleaner
from lxml.etree import ParserError
from werkzeug import secure_filename
from flask import Flask, Blueprint, render_template, request, flash, redirect, session, abort, url_for, make_response, g
from wtforms import Form, BooleanField, TextField, TextAreaField, PasswordField, RadioField, SelectField, SelectMultipleField, BooleanField, IntegerField, HiddenField, SubmitField, validators, ValidationError, widgets
from wtforms.fields.html5 import DateTimeLocalField
import requests
def now():
if app.config['DB'].startswith('postgresql+psycopg2'):
# https://stackoverflow.com/questions/796008/cant-subtract-offset-naive-and-offset-aware-datetimes/17752647#17752647
import psycopg2
return datetime.utcnow().replace(
tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=0, name=None))
else:
return datetime.utcnow()
dtnow = now
class MultiCheckboxField(SelectMultipleField):
"""
A multiple-select, except displays a list of checkboxes.
Iterating the field will produce subfields, allowing custom rendering of
the enclosed checkbox fields.
Shamelessly stolen from WTForms FAQ.
"""
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
app_dir = os.path.dirname(os.path.abspath(__file__))
app = Flask('rhforum', template_folder=app_dir+"/templates")
app.config.from_pyfile(app_dir+"/config.py") # XXX
BASE_URL = app.config.get("BASE_URL", "")
rhforum = Blueprint('rhforum', __name__,
template_folder='templates',
static_folder='static')
doku = None
if app.config.get("DOKU_URL", ""):
from dokuwiki import DokuWiki
try:
doku = DokuWiki(app.config['DOKU_URL'], app.config['DOKU_USER'], app.config['DOKU_PASS'])
except Exception as ex:
print("Failed to connect to DokuWiki: ", ex)
class PostForm(Form):
text = TextAreaField('Text', [validators.required()])
submit = SubmitField('Odeslat')
class EditPostForm(Form):
text = TextAreaField('Text', [validators.required()])
submit = SubmitField('Upravit')
delete = SubmitField('Smazat')
class EditThreadForm(Form):
name = TextField('Nadpis', [validators.required()])
text = TextAreaField('Text', [validators.required()])
forum_id = SelectField('Fórum', coerce=int)
wiki_article = TextField('Wiki článek')
submit = SubmitField('Upravit')
delete = SubmitField('Smazat')
class ThreadForm(PostForm):
name = TextField('Nadpis', [validators.required()])
class UserForm(Form):
fullname = TextField('Nadpis', [validators.required()])
email = TextField('Email', [validators.required()])
new_password = PasswordField('Nové heslo')
homepage = TextField('Homepage')
avatar_url = TextField('URL avataru')
profile = TextAreaField('Profil')
submit = SubmitField('Upravit')
class AdminUserForm(UserForm):
group_ids = MultiCheckboxField('Skupiny', coerce=int)
@rhforum.app_template_filter('datetime')
def datetime_format(value, format='%d. %m. %Y %H:%M:%S'):
if not value: return "-"
if isinstance(value, str): return value
return value.strftime(format)
cleaner = Cleaner(comments=False, style=False, embedded=False, annoying_tags=False)
@rhforum.app_template_filter('postfilter')
def postfilter(text):
return text
@rhforum.app_template_filter('clean')
def clean(value):
try:
return cleaner.clean_html(value)
except ParserError:
return ""
@rhforum.app_template_filter('bbcode')
def bbcode(text):
text = re.sub("\[quote=([^\]@]*)@(\d)*\]", "<blockquote><div class='quoting' data-id='\\2'>\\1</div><p>", text)
text = re.sub("\[quote=([^\]@]*)\]", "<blockquote><div class='quoting'>\\1</div><p>", text)
text = re.sub("\[quote\]", "<blockquote><p>", text)
text = re.sub("\[\/quote\]", "</blockquote>", text)
return text
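# For instance (illustrative, not from the original file):
#   bbcode('[quote=Alice@7]ahoj[/quote]') ->
#   "<blockquote><div class='quoting' data-id='7'>Alice</div><p>ahoj</blockquote>"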
@rhforum.before_request
def before_request():
if not hasattr(g, 'telegram_messages'):
g.telegram_messages = []
if not hasattr(g, 'irc_messages'):
g.irc_messages = []
if not hasattr(g, 'discord_messages'):
g.discord_messages = []
if 'user_id' in session:
g.user = db.session.query(db.User).get(session['user_id'])
if not g.user:
# TODO
pass
g.user.laststamp = now()
else:
g.user = db.Guest()
g.now = now()
g.yesterday = g.now - timedelta(days=1)
g.tomorrow = g.now + timedelta(days=1)
g.production = app.config['PRODUCTION']
@rhforum.after_request
def after_request(response):
try:
while g.telegram_messages:
message = g.telegram_messages.pop(0)
subprocess.Popen(["python", app_dir+"/report.py", "telegram", message.encode('utf-8')])
while g.irc_messages:
message = g.irc_messages.pop(0)
subprocess.Popen(["python", app_dir+"/report.py", "irc", message.encode('utf-8')])
while g.discord_messages:
message = g.discord_messages.pop(0)
subprocess.Popen(["python", app_dir+"/report.py", "discord", message.encode('utf-8')])
except Exception as ex:
print(type(ex), ex)
return response
@rhforum.teardown_request
def shutdown_session(exception=None):
db.session.close()
db.session.remove()
def sort_tasks(tasks):
    return []  # returns immediately, so the comparison logic below is unreachable (task sorting is effectively disabled)
now = g.now
def cmp_tasks(task0, task1):
# sort order:
# 0. unspecified announcements and tasks
# 1. upcoming announcements and all unfinished tasks
# 2. past announcements and tasks ("everything else")
# 3. finished unspecified tasks
def get_task_priority(task):
if not task.due_time and not task.status: return 0
if not task.due_time and task.status == "todo": return 0
if not task.status and task.due_time and task.due_time > now: return 1
if task.status == "todo": return 1
if not task.due_time and task.status == "done": return 3
return 2
task0_pri = get_task_priority(task0)
task1_pri = get_task_priority(task1)
if task0_pri < task1_pri: return -1
if task0_pri > task1_pri: return 1
if not task0.due_time: return 1;
if not task1.due_time: return 1;
return 1 if abs(now - task0.due_time) > abs(now - task1.due_time) else -1
tasks.sort(cmp_tasks)
class ForumForm(Form):
name = TextField('Jméno', [validators.required()])
description = TextField('Popisek', [validators.required()])
category_id = SelectField('Kategorie', coerce=int)
move_up = SubmitField('↑')
move_down = SubmitField('↓')
save = SubmitField('Uložit')
new_forum_id = SelectField('Nové fórum', coerce=int, default=0)
delete = SubmitField('Odstranit')
class CategoryForm(Form):
name = TextField('Jméno', [validators.required()])
group_id = SelectField('Nutná skupina', coerce=int)
move_up = SubmitField('↑')
move_down = SubmitField('↓')
save = SubmitField('Uložit')
delete = SubmitField('Odstranit')
class ForumControlsForm(Form):
mark_read = SubmitField('Označit fórum za přečtené')
class TaskForm(Form):
type = SelectField("Typ", [validators.optional()], choices=(('task', 'úkol'), ('announcement', 'oznámení')))
due_time = DateTimeLocalField('Čas', [validators.optional()], format="%Y-%m-%dT%H:%M")
text = TextField('Text', [validators.required()])
user_id = SelectField('Uživatel', coerce=int)
submit = SubmitField("Zadat")
@rhforum.errorhandler(404)
def page_not_found(e):
if not request.path.startswith("/static"):
return render_template('forum/errorpage.html', error=404), 404
else:
return "404", 404 # we don't have templates
@rhforum.errorhandler(403)
def page_not_found(e):
return render_template('forum/errorpage.html', error=403), 403
@rhforum.errorhandler(500)
def page_not_found(e):
return render_template('forum/errorpage.html', error=500), 500
@rhforum.errorhandler(400)
def page_not_found(e):
return render_template('forum/errorpage.html', error=400), 400
def get_active_threads():
threads = db.session.query(db.Thread).join(db.Forum).outerjoin(db.Category)\
.filter(or_(db.Forum.category_id==None, db.Category.group_id.in_([None, 0]), db.Category.group_id.in_(group.id for group in g.user.groups)))\
.filter(db.Forum.trash == False) \
.order_by(db.Thread.laststamp.desc())
return threads
@rhforum.route("/", methods="GET POST".split())
def index():
form = None
if g.user:
form = ForumControlsForm(request.form)
if request.method == "POST":# and form.validate():
if form.mark_read.data:
g.user.read_all()
categories = db.session.query(db.Category).order_by(db.Category.position).all()
uncategorized_fora = db.session.query(db.Forum).filter(db.Forum.category == None, db.Forum.trash == False).order_by(db.Forum.position).all()
trash = db.session.query(db.Forum).filter(db.Forum.trash == True).scalar()
if uncategorized_fora:
categories.append(None)
latest_threads = get_active_threads()[0:10]
tasks = db.session.query(db.Task).filter(db.Task.user_id.in_([g.user.id, None, 0])).all()
sort_tasks(tasks)
return render_template("forum/index.html", categories=categories, uncategorized_fora=uncategorized_fora, edit_forum = None, latest_threads=latest_threads, trash=trash, form=form, tasks=tasks)
@rhforum.route("/active", methods="GET POST".split())
def active():
form = ForumControlsForm(request.form)
active_threads = get_active_threads()[0:100]
return render_template("forum/active.html", active_threads=active_threads, form=form)
@rhforum.route("/edit-forum/<int:forum_id>", endpoint="edit_forum", methods="GET POST".split())
@rhforum.route("/edit-forum/new", endpoint="edit_forum", methods="GET POST".split())
@rhforum.route("/edit-category/<int:category_id>", endpoint="edit_category", methods="GET POST".split())
@rhforum.route("/edit-category/new", endpoint="edit_category", methods="GET POST".split())
def edit_forum_or_category(forum_id=None, category_id=None):
if not g.user.admin: abort(403) # TODO minrights decorator
categories = db.session.query(db.Category).order_by(db.Category.position).all()
uncategorized_fora = db.session.query(db.Forum).filter(db.Forum.category == None, db.Forum.trash == False).order_by(db.Forum.position)
trash = db.session.query(db.Forum).filter(db.Forum.trash == True).scalar()
if request.endpoint == 'rhforum.edit_forum':
if forum_id:
forum = db.session.query(db.Forum).get(forum_id)
#forum.last = forum.position == len(forum.category.fora) - 1 if forum.category else True
if not forum.category: forum.position = 0
else:
forum = db.Forum()
uncategorized_fora = list(uncategorized_fora) + [forum]
forum.position = 0
forum.last = True
form = ForumForm(request.form, forum)
form.category_id.choices = [(0, "-")] + [(c.id, c.name) for c in categories if c]
fora = db.session.query(db.Forum).outerjoin(db.Category).order_by(db.Category.position, db.Forum.position).all()
form.new_forum_id.choices = [(0, "-")] + [(f.id, f.name) for f in fora]
editable = forum
elif request.endpoint == 'rhforum.edit_category':
if category_id:
category = db.session.query(db.Category).get(category_id)
#category.last = category.position == len(categories) - 1
else:
category = db.Category()
categories = list(categories) + [category]
category.position = 0
category.last = True
form = CategoryForm(request.form, category)
form.group_id.choices = [(0, "-")] + [(group.id, group.name) for group in db.session.query(db.Group)]
editable = category
if request.method == "POST" and form.validate():
if request.endpoint == 'rhforum.edit_forum':
forum.name = form.name.data
forum.identifier = forum.name.lower().replace(' ', '-')
forum.description = form.description.data
forum.category_id = form.category_id.data or None
forum.category = db.session.query(db.Category).get(form.category_id.data)
elif request.endpoint == 'rhforum.edit_category':
category.name = form.name.data
category.group_id = form.group_id.data
if form.save.data:
if request.endpoint == 'rhforum.edit_forum':
if not forum_id:
if forum.category_id:
forum.position = len(forum.category.fora) - 1
db.session.add(forum)
flash("Fórum vytvořeno.")
else:
flash("Fórum upraveno.")
elif request.endpoint == 'rhforum.edit_category':
if not category_id:
category.position = len(categories) - 1
db.session.add(category)
flash("Kategorie vytvořena.")
else:
flash("Kategorie upravena.")
db.session.commit()
return redirect(url_for('.index'))
elif form.delete.data:
if request.endpoint == 'rhforum.edit_forum':
if not form.new_forum_id.data and forum.threads:
flash("Je nutno témata někam přesunout.")
else:
moved = False
if form.new_forum_id.data:
moved = True
new_forum = db.session.query(db.Forum).get(form.new_forum_id.data)
for thread in forum.threads:
thread.forum = new_forum
else:
moved = False
db.session.delete(forum)
if moved:
flash("Fórum odstraněno a témata přesunuty.")
else:
flash("Fórum odstraněno.")
db.session.commit()
return redirect(url_for('.index'))
elif request.endpoint == 'rhforum.edit_category':
db.session.delete(category)
flash("Kategorie odstraněna.")
db.session.commit()
return redirect(url_for('.index'))
else:
# moving
i = editable.position
if request.endpoint == 'rhforum.edit_forum':
items = list(forum.category.fora)
elif request.endpoint == 'rhforum.edit_category':
items = list(categories)
items.remove(editable)
if form.move_up and form.move_up.data:
items.insert(i-1, editable)
elif form.move_down and form.move_down.data:
items.insert(i+1, editable)
for i, x in enumerate(items):
x.position = i
db.session.add(x)
db.session.commit()
if request.endpoint == 'rhforum.edit_category':
categories = items
if editable.position == 0:
del form.move_up
if request.endpoint == 'rhforum.edit_forum':
if not forum.category or forum.position == len(forum.category.fora) - 1:
del form.move_down
elif request.endpoint == 'rhforum.edit_category':
if not category.id or category.position == len(categories) - 1:
del form.move_down
return render_template("forum/index.html", categories=categories+[None], uncategorized_fora=uncategorized_fora, editable=editable, form=form, new=not bool(forum_id), trash=trash)
class LoginForm(Form):
name = TextField('Jméno', [validators.required()])
password = PasswordField('Heslo', [validators.required()])
submit = SubmitField('Přihlásit se')
@rhforum.route("/login", methods="GET POST".split())
def login():
form = LoginForm(request.form)
failed = False
if request.method == 'POST' and form.validate():
user = db.session.query(db.User).filter(db.User.login == form.name.data.lower()).first()
if not user: failed = True
else:
try:
password_matches = user.verify_password(form.password.data)
except db.OldHashingMethodException:
failed = True
password_matches = False
flash("Prosím, požádejte admina o změnu hesla.")
if password_matches:
g.user = user
session['user_id'] = g.user.id
session.permanent = True
flash("Jste přihlášeni.")
return redirect(url_for('.index'))
else:
failed = True
return render_template("forum/login.html", form=form, failed=failed)
class RegisterForm(Form):
username = TextField('Nevyplňovat')
bbq = TextField('Login', [validators.required()])
fullname = TextField('Jméno', [validators.required()])
password = PasswordField('Heslo', [
validators.Required(),
        validators.EqualTo('confirm_password', message='Hesla se musí shodovat')
])
confirm_password = PasswordField('Heslo znovu')
email = TextField('Email', [validators.required()])
submit = SubmitField('Zaregistrovat se')
@rhforum.route("/register", methods="GET POST".split())
def register():
if g.user:
if g.user.admin:
flash("Pro ruční registraci účtů ostatním použijte prosím DokuWiki.")
return redirect(url_for(".index"))
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate():
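        # spam honeypot: the visible "username" field (labelled "Nevyplňovat" / "do not fill in")
        # must stay empty; the real login is taken from the "bbq" field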
if form.username.data:
return "OK"
username = form.bbq.data.lower()
if db.session.query(db.User).filter(db.User.login == username).first():
flash("Tento login je už zabraný, vyberte si prosím jiný.")
else:
user = db.User(login=username, fullname=form.fullname.data, email=form.email.data, timestamp=now(), laststamp=now())
user.set_password(form.password.data)
user_group = db.session.query(db.Group).filter(db.Group.name=="user").scalar()
if user_group:
user.groups.append(user_group)
db.session.add(user)
db.session.commit()
g.telegram_messages.append("Nová registrace: *{}* (login *{}*, email {}): {}".format(
user.fullname, user.login, user.email, BASE_URL+user.url))
#g.irc_messages.append("Nová registrace: \x0302{}\x03 (login \x0208{}\x03, email {}): {}".format(
# user.fullname, user.login, user.email, BASE_URL+user.url))
g.discord_messages.append("Nová registrace: **{}** (login **{}**, email {}): <{}>".format(
user.fullname, user.login, user.email, BASE_URL+user.url))
g.user = user
g.user.read_all()
session['user_id'] = g.user.id
session.permanent = True
flash("Registrace proběhla úspěšně.")
return redirect(url_for(".index"))
return render_template("forum/register.html", form=form)
@rhforum.route("/logout")
def logout():
if 'user_id' in session:
session.pop('user_id')
flash("Odhlášení proběhlo úspěšně.")
return redirect(url_for('.index'))
@rhforum.route("/<int:forum_id>", methods="GET POST".split())
@rhforum.route("/<int:forum_id>-<forum_identifier>", methods="GET POST".split())
def forum(forum_id, forum_identifier=None):
forum = db.session.query(db.Forum).get(forum_id)
if not forum: abort(404)
if forum.category and forum.category.group and forum.category.group not in g.user.groups: abort(403)
if forum.trash and not g.user.admin: abort(403)
threads = db.session.query(db.Thread).filter(db.Thread.forum == forum).order_by(db.Thread.archived.asc(), db.Thread.pinned.desc(), db.Thread.laststamp.desc())
form = None
if not forum.trash:
form = ThreadForm(request.form)
if g.user and request.method == 'POST' and form.validate():
now = dtnow()
thread = db.Thread(forum=forum, author=g.user, timestamp=now, laststamp=now,
name=form.name.data)
db.session.add(thread)
post = db.Post(thread=thread, author=g.user, timestamp=now,
text=form.text.data)
db.session.add(post)
db.session.commit()
g.telegram_messages.append("Nové téma od *{}*: *{}*: {}".format(
thread.author.name, thread.name, BASE_URL+thread.short_url))
if not thread.forum.category or not thread.forum.category.group or thread.forum.category.group.name == "extern":
g.discord_messages.append("Nové téma od **{}**: **{}**: <{}>".format(
thread.author.name, thread.name, BASE_URL+thread.short_url))
# g.irc_messages.append("Nové téma od \x0302{}\x03: \x0306{}\x03: {}".format(
# thread.author.name, thread.name, BASE_URL+thread.short_url))
return redirect(thread.url)
return render_template("forum/forum.html", forum=forum, threads=threads, form=form)
@rhforum.route("/users/<int:user_id>/threads")
@rhforum.route("/users/<int:user_id>-<name>/threads")
def user_threads(user_id, name=None):
user = db.session.query(db.User).get(user_id)
if not user: abort(404)
forum = db.Forum(name="Témata od {}".format(user.name))
threads = db.session.query(db.Thread).join(db.Forum)\
.filter(db.Forum.trash == False, db.Thread.author == user)\
.outerjoin(db.Category)\
.filter(or_(db.Forum.category_id==None, db.Category.group_id.in_([None, 0]), db.Category.group_id.in_(group.id for group in g.user.groups)))\
.filter(db.Forum.trash == False).order_by(db.Thread.laststamp.desc()).all()
return render_template("forum/forum.html", forum=forum, threads=threads, user=user)
# TODO <path:thread_identificator>
@rhforum.route("/<int:forum_id>/<int:thread_id>", methods="GET POST".split())
@rhforum.route("/<int:forum_id>-<forum_identifier>/<int:thread_id>-<thread_identifier>", methods="GET POST".split())
def thread(forum_id, thread_id, forum_identifier=None, thread_identifier=None):
thread = db.session.query(db.Thread).get(thread_id)
if not thread: abort(404)
if thread.forum.category and thread.forum.category.group and thread.forum.category.group not in g.user.groups: abort(403)
if thread.forum.trash and not g.user.admin: abort(403)
reply_post = None
if "reply" in request.args:
try:
reply_post_id = int(request.args["reply"])
except ValueError:
abort(400)
reply_post = db.session.query(db.Post).get(reply_post_id)
if reply_post_id and not reply_post:
abort(404)
if reply_post and reply_post.thread != thread:
abort(400)
if g.user.admin and "show_deleted" in request.args:
posts = thread.posts.filter()
else:
posts = thread.posts.filter(db.Post.deleted==False)
num_deleted = thread.posts.count() - thread.posts.filter(db.Post.deleted==False).count()
form = None
if not thread.forum.trash and not (thread.locked and not g.user.admin):
text = ""
if reply_post:
text = "[quote={}@{}]{}[/quote]\n".format(reply_post.author.login, reply_post.id, reply_post.text)
form = PostForm(request.form, text=text)
if g.user and request.method == 'POST' and form.validate():
now = dtnow()
post = db.Post(thread=thread, author=g.user, timestamp=now,
text=form.text.data)
db.session.add(post)
thread.laststamp = now
db.session.commit()
g.telegram_messages.append("Nový příspěvek od *{}* do *{}*: {}".format(
post.author.name, post.thread.name, BASE_URL+post.short_url))
if not thread.forum.category or not thread.forum.category.group or thread.forum.category.group.name == "extern":
g.discord_messages.append("Nový příspěvek od **{}** do **{}**: <{}>".format(
post.author.name, post.thread.name, BASE_URL+post.short_url))
# g.irc_messages.append("Nový příspěvek od \x0302{}\x03 do \x0306{}\x03: {}".format(
# post.author.name, post.thread.name, BASE_URL+post.short_url))
return redirect(thread.url+"#post-latest") # TODO id
if g.user:
thread_read = db.session.query(db.ThreadRead).filter(db.ThreadRead.user==g.user, db.ThreadRead.thread==thread).first()
if not thread_read:
last_read_timestamp = None
else:
last_read_timestamp = thread_read.last_post.timestamp
g.user.read(thread.last_post)
else:
last_read_timestamp = g.now
article = None
article_revisions = []
article_info = None
doku_error = None
if thread.wiki_article and doku:
try:
article = doku.pages.html(thread.wiki_article)
#article_revisions = doku.send("wiki.getPageVersions", thread.wiki_article)
article_info = doku.send("wiki.getPageInfo", thread.wiki_article)
print(article_info, 'xxx')
except Exception as ex:
print(ex)
doku_error = ex
return render_template("forum/thread.html", thread=thread, forum=thread.forum, posts=posts, form=form, now=dtnow(), last_read_timestamp=last_read_timestamp, article=article, article_revisions=article_revisions, article_info=article_info, doku_error=doku_error, reply_post=reply_post, show_deleted="show_deleted" in request.args, num_deleted=num_deleted)
@rhforum.route("/<int:forum_id>/<int:topic_id>/set", methods="POST".split())
@rhforum.route("/<int:forum_id>-<forum_identifier>/<int:thread_id>-<thread_identifier>/set", methods="POST".split())
def thread_set(forum_id, thread_id, forum_identifier=None, thread_identifier=None):
if not g.user.admin: abort(403)
thread = db.session.query(db.Thread).get(thread_id)
if not thread: abort(404)
if request.form.get("pin"):
thread.pinned = True
elif request.form.get("unpin"):
thread.pinned = False
elif request.form.get("lock"):
thread.locked = True
elif request.form.get("unlock"):
thread.locked = False
elif request.form.get("archive"):
thread.archived = True
elif request.form.get("unarchive"):
thread.archived = False
db.session.commit()
return redirect(thread.url)
@rhforum.route("/<int:forum_id>/<int:thread_id>/edit/<int:post_id>", methods="GET POST".split())
@rhforum.route("/<int:forum_id>-<forum_identifier>/<int:thread_id>-<thread_identifier>/edit/<int:post_id>", methods="GET POST".split())
def edit_post(forum_id, thread_id, post_id, forum_identifier=None, thread_identifier=None):
post = db.session.query(db.Post).get(post_id)
thread = db.session.query(db.Thread).get(thread_id)
if not post: abort(404)
if thread.forum.category and thread.forum.category.group and thread.forum.category.group not in g.user.groups: abort(403)
if post.thread != thread: abort(400)
if post.deleted:
# The user probably hit edit multiple times. Let's just be helpful.
return redirect(thread.url)
if post.author != g.user and not g.user.admin: abort(403)
if post.thread.forum.trash and not g.user.admin: abort(403)
posts = thread.posts.filter(db.Post.deleted==False)
if post == posts[0] and g.user.admin:
edit_thread = True
form = EditThreadForm(request.form, text=post.text, name=thread.name, forum_id=thread.forum_id, wiki_article=thread.wiki_article)
forums = db.session.query(db.Forum).outerjoin(db.Category).order_by(db.Category.position, db.Forum.position).all()
form.forum_id.choices = [(f.id, f.name) for f in forums]
else:
edit_thread = False
form = EditPostForm(request.form, text=post.text)
if not g.user.admin: del form.delete
if request.method == 'POST' and form.validate():
if form.submit.data:
now = dtnow()
new_post = db.Post(thread=thread, author=post.author, timestamp=post.timestamp, editstamp=now,
text=form.text.data, original=post.original if post.original else post, editor=g.user)
db.session.add(new_post)
post.deleted=True
if edit_thread:
thread.name = form.name.data
thread.forum_id = form.forum_id.data
thread.wiki_article = form.wiki_article.data
#forum.fix_laststamp() # TODO
db.session.commit()
if edit_thread:
return redirect(thread.url)
else:
return redirect(new_post.url)
elif form.delete.data:
post.deleted = True
db.session.commit()
return redirect(thread.url)
return render_template("forum/thread.html", thread=thread, forum=thread.forum, posts=posts, form=form, now=dtnow(), edit_post=post, edit_thread=edit_thread, last_read_timestamp=g.now)
@rhforum.route("/users/")
def users():
if not g.user.admin: abort(403)
users = db.session.query(db.User).order_by(db.User.fullname)
return render_template("forum/users.html", users=users)
@rhforum.route("/users/<int:user_id>")
@rhforum.route("/users/<int:user_id>-<name>")
def user(user_id, name=None):
user = db.session.query(db.User).get(user_id)
if not user: abort(404)
return render_template("forum/user.html", user=user)
@rhforum.route("/users/<int:user_id>/edit", methods="GET POST".split())
@rhforum.route("/users/<int:user_id>-<name>/edit", methods="GET POST".split())
def edit_user(user_id, name=None):
user = db.session.query(db.User).get(user_id)
if not user: abort(404)
if user != g.user and not g.user.admin: abort(403)
if g.user.admin:
form = AdminUserForm(request.form, user)
form.group_ids.choices = []
for group in db.session.query(db.Group):
form.group_ids.choices.append((group.id, group.name))
if form.group_ids.data == None:
form.group_ids.data = [group.id for group in user.groups]
else:
form = UserForm(request.form, user)
if request.method == 'POST' and form.validate():
user.fullname = form.fullname.data
user.email = form.email.data
user.homepage = form.homepage.data
user.avatar_url = form.avatar_url.data
if form.new_password.data:
user.set_password(form.new_password.data)
flash("Heslo změněno.")
if g.user.admin:
user.groups = []
for group_id in form.group_ids.data:
user.groups.append(db.session.query(db.Group).get(group_id))
db.session.commit()
flash("Uživatel upraven.")
return redirect(user.url)
return render_template("forum/user.html", user=user, edit=True, form=form)
class GroupForm(Form):
name = TextField('Jméno', [validators.required()])
symbol = TextField('Symbol')
title = TextField('Titul')
rank = IntegerField('Rank')
display = BooleanField('Zobrazovat')
submit = SubmitField('Uložit')
@rhforum.route("/groups/", methods=["GET"])
@rhforum.route("/groups/<int:edit_group_id>/edit", methods=["GET", "POST"])
def groups(edit_group_id=None):
if not g.user.admin: abort(403)
groups = db.session.query(db.Group).all()
edit_group = None
form = None
if edit_group_id == 0 and request.method == 'POST':
group = db.Group(name="")
db.session.add(group)
db.session.commit()
return redirect(url_for('.groups', edit_group_id=group.id))
if edit_group_id:
edit_group = db.session.query(db.Group).get(edit_group_id)
form = GroupForm(request.form, edit_group)
if request.method == 'POST' and form.validate():
edit_group.name = form.name.data
edit_group.symbol = form.symbol.data
edit_group.title = form.title.data
edit_group.rank = form.rank.data
edit_group.display = form.display.data
db.session.commit()
flash("Skupina {} upravena.".format(edit_group.name))
return redirect(url_for('.groups'))
return render_template("forum/groups.html", groups=groups, edit_group=edit_group, form=form)
@rhforum.route("/tasks", methods="GET POST".split())
@rhforum.route("/tasks/<int:task_id>", methods=["GET", "POST"])
def tasks(task_id=None):
if not g.user.in_group("retroherna"): error(403)
task = None
if task_id:
task = db.session.query(db.Task).get(task_id)
        if not task: abort(404)
form = TaskForm(request.form, task)
form.user_id.choices = [(0, '-')]
for user in db.session.query(db.User):
form.user_id.choices.append((user.id, user.name))
if request.method == 'POST' and form.validate():
if not form.due_time.data and (form.type.data == "announcement" or (task and not task.status)):
flash("Nelze vytvořit oznámení bez konečného času.")
else:
if not task_id:
task = db.Task()
task.created_time = now()
task.author = g.user
task.text = form.text.data
task.due_time = form.due_time.data
if form.type.data == "task":
task.status = "todo"
task.user_id = form.user_id.data
if not task_id:
db.session.add(task)
db.session.commit()
if not task_id:
flash("Úkol přidán.")
else:
flash("Úkol upraven.")
return redirect(url_for('.tasks'))
tasks = db.session.query(db.Task).all()#.order_by(func.abs(func.now() - db.Task.due_time))
sort_tasks(tasks)
return render_template("forum/tasks.html", tasks=tasks, form=form, task_id=task_id)
@rhforum.route("/tasks/<int:task_id>/status", methods=["POST"])
def change_task_status(task_id):
if not g.user.in_group("retroherna"): error(403)
task = db.session.query(db.Task).get(task_id)
    if not task: abort(404)
if request.form["status"] == "todo":
task.status = "todo"
elif request.form["status"] == "done":
task.status = "done"
db.session.commit()
return redirect(url_for(".tasks"))
class IRCSendForm(Form):
text = TextField('Text', [validators.required()])
submit = SubmitField('Odeslat')
@rhforum.route("/irc-send/", methods=["GET", "POST"])
def irc_send():
    if not g.user.admin: abort(403)
text = None
form = IRCSendForm(request.form)
if request.method == 'POST' and form.validate():
text = form.text.data
g.irc_messages.append(text)
form = IRCSendForm()
return render_template("forum/irc_send.html", form=form, text=text)
app.register_blueprint(rhforum, url_prefix='')
if not app.debug:
import logging
from logging import FileHandler
file_handler = FileHandler(app_dir+'/flask.log')
file_handler.setLevel(logging.WARNING)
formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
if __name__ == "__main__":
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.run(host="", port=8080, debug=True, threaded=True)
|
retroherna/rhweb2
|
rhforum.py
|
rhforum.py
|
py
| 36,199 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27592948212
|
import serial
inp=input("Enter the port : ")
ser=serial.Serial(inp,baudrate=230400,timeout=None)
data_old=0 # previously sampled bit; the first bit of the stream is assumed to be a fixed 0
skipped=0  # samples left to discard after a bit transition
cntr=0     # consecutive-zero counter used for preamble detection
fl=1       # phase flag: 1 = waiting for preamble, 0 = receiving data
su=0       # accumulator for the byte currently being received
cx=0       # number of bits accumulated so far
while True:
if (skipped!=0):
data_old=ser.readline().decode('ascii')[0]
data_old=int(data_old)
skipped-=1
continue
data_new=ser.readline().decode('ascii')[0]
data_new=int(data_new)
if (data_old!=data_new):
skipped=3
# print(data_old)
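    # preamble phase: 27 consecutive 0 bits mark an established connection; after that every
    # 8 received bits are assembled MSB-first into one byte and printed as a character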
if(fl==1):
if(data_old==0):
cntr+=1
if(data_old==1):
cntr=0
if(cntr==27):
# print("Connection Established")
cntr=0
fl=0
continue
if(fl==0):
cx+=1
su=int(data_old)+su*2
if(cx==8):
print(chr(su), end='')
cx=0
su=0
|
eclubiitk/Li-Fi-E-Club
|
Old Codes/non-queue implementation/Receiver.py
|
Receiver.py
|
py
| 910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22938524534
|
import cv2
import numpy as np
from model import Model
import math as m
import time
import logging as log
class headPoseEstimation():
def __init__(self, MODEL_PATH, DEVICE):
self.model_loaded = Model(MODEL_PATH, DEVICE)
self.model_loaded.get_unsupported_layer()
self.model_name = self.model_loaded.get_model_name()
self.initial_w = None
self.initial_h = None
self.frame = None
self.image_input_shape = self.model_loaded.get_input_shape()
def input_blobs(self):
return self.model_loaded.get_input_blob()
def output_blobs(self):
return self.model_loaded.get_output_blob()
def set_params(self, frame, initial_w, initial_h):
self.frame = frame
self.initial_w = initial_w
self.initial_h = initial_h
def get_inference_outputs(self):
t0 = time.perf_counter()
t_count = 0
inputs_model = self.input_blobs()
prepro_img_face = self.preprocess_frame(self.frame)
inputs_to_feed = {inputs_model[0]:prepro_img_face}
t_start = time.perf_counter()
head_pose_angles = self.inference(inputs_to_feed)
t_end = time.perf_counter()
t_count += 1
log.info("model {} is processed with {:0.2f} requests/sec ({:0.2} sec per request)".format(self.model_name, 1 / (t_end - t_start), t_end - t_start))
return head_pose_angles
def preprocess_frame(self, frame):
resize_frame = cv2.resize(frame, (self.image_input_shape[0][3], self.image_input_shape[0][2]), interpolation=cv2.INTER_AREA)
resize_frame = resize_frame.transpose((2,0,1))
resize_frame = resize_frame.reshape(1, *resize_frame.shape)
return resize_frame
def inference(self, input_data):
return self.model_loaded.get_infer_output(input_data)
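# Hypothetical usage sketch (model path, device and input image are assumptions, not part of this module):
# estimator = headPoseEstimation("models/head-pose-estimation.xml", "CPU")
# frame = cv2.imread("face_crop.png")
# estimator.set_params(frame, frame.shape[1], frame.shape[0])
# angles = estimator.get_inference_outputs()  # head pose angles as returned by the loaded model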
|
SamyTahar/Computer-Pointer-Controller
|
src/headposeestimation.py
|
headposeestimation.py
|
py
| 1,885 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30031301327
|
import json
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib
import matplotlib.pyplot as plt
def read_details(pd_details):
"""[summary]
Args:
pd_details ([type]): [description]
Returns:
[type]: [description]
"""
with open(pd_details) as f:
data = json.load(f)
return data
def update_annot(ind, nodelist, pos, data, annot, G):
"""[summary]
Args:
ind ([type]): [description]
nodelist ([type]): [description]
pos ([type]): [description]
data ([type]): [description]
annot ([type]): [description]
G ([type]): [description]
"""
node_idx = ind["ind"][0]
node = list(nodelist)[node_idx]
xy = pos[node]
annot.xy = xy
node_attr = {"ID": node}
node_attr.update(G.nodes[node])
all_details = data[node]
patient_string = "Patient: {} , {}, {}".format(
"Ramesh", all_details["pBgrp"], all_details["pAge"]
)
donor_string = "Donor: {} , {}, {}".format(
"arun", all_details["dBgrp"], all_details["dAge"]
)
text = "\n".join([patient_string, donor_string])
annot.set_text(text)
return
def hover(
event, annot, nodes1, nodes2, nodes3, nodes4, top_nodes, rest, pos, data, fig, ax, G
):
"""[summary]
Args:
event ([type]): [description]
annot ([type]): [description]
nodes1 ([type]): [description]
nodes2 ([type]): [description]
nodes3 ([type]): [description]
nodes4 ([type]): [description]
top_nodes ([type]): [description]
rest ([type]): [description]
pos ([type]): [description]
data ([type]): [description]
fig ([type]): [description]
ax ([type]): [description]
G ([type]): [description]
"""
vis = annot.get_visible()
if event.inaxes == ax:
if nodes1 is not None:
cont1, ind1 = nodes1.contains(event)
cont2, ind2 = nodes2.contains(event)
else:
cont1, cont2 = False, False
if nodes3 is not None:
cont3, ind3 = nodes3.contains(event)
cont4, ind4 = nodes4.contains(event)
else:
cont3, cont4 = False, False
if cont1:
update_annot(ind1, top_nodes, pos, data, annot, G)
annot.set_visible(True)
fig.canvas.draw_idle()
elif cont2:
update_annot(ind2, top_nodes, pos, data, annot, G)
annot.set_visible(True)
fig.canvas.draw_idle()
elif cont3:
update_annot(ind3, rest, pos, data, annot, G)
annot.set_visible(True)
fig.canvas.draw_idle()
elif cont4:
update_annot(ind4, rest, pos, data, annot, G)
annot.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
def hover_graph(G, cycles, solution_values, weight, pd_details):
"""
G : networkx graph object with all nodes, but only solution edges
cycles : list -> all possible cycles in G
solution : list -> 1 if corresponding cycle is chosen for final solution else 0
weight : dict -> keys: edges, values: edgeweights
pd_details : string -> path to JSON file (dump) with patient donor details
"""
fig, ax = plt.subplots()
pos = graphviz_layout(G)
data = read_details(pd_details)
rest = []
two_cycle_nodes_top = {}
two_cycle_nodes_bottom = {}
top_edges = []
bottom_edges = []
colour1 = "orange"
colour2 = "purple"
for i, cycle in enumerate(cycles):
if len(cycle) == 3 and solution_values[i] == 1:
### selects chosen 2 cycles and colours the top and bottom halves of the two nodes in an opposite
### manner to signify corresponding PD pairs
two_cycle_nodes_top[cycle[0]] = colour1
two_cycle_nodes_bottom[cycle[0]] = colour2
two_cycle_nodes_top[cycle[1]] = colour2
two_cycle_nodes_bottom[cycle[1]] = colour1
top_edges.append((cycle[0], cycle[1]))
bottom_edges.append((cycle[1], cycle[0]))
pos = graphviz_layout(G)
# drawing two cycle nodes
top_nodes, top_colours = two_cycle_nodes_top.keys(), two_cycle_nodes_top.values()
bottom_nodes, bottom_colours = (
two_cycle_nodes_bottom.keys(),
two_cycle_nodes_bottom.values(),
)
# nodes other than those part of two cycles, including ones that are not part of any solution cycle
rest = [n for n in G.nodes() if n not in top_nodes]
""" nodes1 : top half of two cycle nodes
nodes2 : bottom half of two cycle nodes
nodes3 : top half of remaining nodes
nodes4 : bottom half of remaining nodes
"""
nodes1 = nx.draw_networkx_nodes(
G,
pos,
nodelist=top_nodes,
node_color=top_colours,
node_size=600,
node_shape=matplotlib.markers.MarkerStyle(marker="o", fillstyle="top"),
label="P",
)
nodes2 = nx.draw_networkx_nodes(
G,
pos,
nodelist=bottom_nodes,
node_color=bottom_colours,
node_size=600,
node_shape=matplotlib.markers.MarkerStyle(marker="o", fillstyle="bottom"),
label="D",
)
# drawing remaining nodes
nodes3 = nx.draw_networkx_nodes(
G,
pos,
nodelist=rest,
label="P",
node_color=colour1,
node_size=600,
node_shape=matplotlib.markers.MarkerStyle(marker="o", fillstyle="top"),
)
nodes4 = nx.draw_networkx_nodes(
G,
pos,
nodelist=rest,
node_color=colour2,
node_size=600,
node_shape=matplotlib.markers.MarkerStyle(marker="o", fillstyle="bottom"),
)
"""
Networkx by default draws straight arcs and places edge labels on the middle of those arcs.
However, we draw curved arcs but edge labels still remain at their default position (midpoint of NodeA and NodeB) {inside the cycle}
Thus we need to offset this by supplying new positions. To maintain consistency across all scales of X and Y axis,
and positions of nodes we take the offset as 0.3 times difference between x-coordinates of the two nodes between which
the edge is drawn. Different offsets are required for top edge and bottom edge of two cycles. For three cycles, the default
placement causes no issue.
"""
pos_higher, pos_lower = {}, {}
# calculating offset
if not top_edges:
y_off = 20
else:
a, b = top_edges[0]
y_off = 0.3 * abs(pos[a][0] - pos[b][0])
for k, v in pos.items():
pos_higher[k] = (v[0], v[1] + y_off)
for k, v in pos.items():
pos_lower[k] = (v[0], v[1] - y_off)
"""
w_top : edge weights of top edges of two cycles
w_bottom : edge weights of bottom edges of two cycles
w_rest : edge weights of remaining edges which can be placed in their default location
"""
w_top = {e: str(weight[e]) for e in weight if (e in top_edges and e in G.edges())}
w_bottom = {
e: str(weight[e]) for e in weight if (e in bottom_edges and e in G.edges())
}
w_rest = {
e: str(weight[e])
for e in weight
if (e in G.edges() and e not in top_edges and e not in bottom_edges)
}
### Drawing edge labels
nx.draw_networkx_edges(
G, pos, edgelist=G.edges(), connectionstyle="arc3,rad=0.2", arrowsize=20
)
nx.draw_networkx_edge_labels(
G, pos_higher, edge_labels=w_top, label_pos=0.5, verticalalignment="top"
)
nx.draw_networkx_edge_labels(
G, pos_lower, edge_labels=w_bottom, label_pos=0.5, verticalalignment="bottom"
)
nx.draw_networkx_edge_labels(G, pos, edge_labels=w_rest, label_pos=0.5)
# =================== HOVERING =========================
### setting annotation style
annot = ax.annotate(
"",
xy=(0, 0),
xytext=(20, 20),
textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"),
)
annot.set_visible(False)
idx_to_node_dict = {idx: node for idx, node in enumerate(G.nodes)}
fig.canvas.mpl_connect(
"motion_notify_event",
lambda event: hover(
event,
annot,
nodes1,
nodes2,
nodes3,
nodes4,
top_nodes,
rest,
pos,
data,
fig,
ax,
G,
),
)
    # save before showing: once the plt.show() window is closed the figure is gone,
    # so calling savefig afterwards would write an empty file
    plt.savefig("./result/output.svg", format="svg")
    plt.show()
|
siv2r/kidney-exchange
|
global_match/hovering.py
|
hovering.py
|
py
| 8,711 |
python
|
en
|
code
| 45 |
github-code
|
6
|
39276815188
|
import csv
class Item:
pay_rate=0.8# pay rate after 20% discount
all=[]
    def __init__(self,name:str,price:float,quantity=0):# specify data types for incoming input
        # validate incoming input before it gets assigned to instance attributes
        assert price>=0, f"price {price} must not be negative"
        assert quantity>=0, f"quantity {quantity} must not be negative"
        # assignment of self attributes
print(f"the object is created {name}")
self.name=name
self.price=price
self.quantity=quantity
Item.all.append(self)
def calculate_total_price(self):
return self.price*self.quantity
def apply_discount(self):
self.price=self.price*self.pay_rate
    def __repr__(self): # magic method that returns an unambiguous string representation of the instance
return f"item('{self.name}',{self.price},{self.quantity})"
@classmethod
def instantiate_from_csv(cls):
with open('codesnippets/items.csv','r') as f:
reader=csv.DictReader(f)
items=list(reader)
for item in items:
print(item)
Item(
name=item.get('name'),
price=float(item.get('price')),
quantity=int(item.get('quantity')),
)
print(Item.all)
# print name attribute of all instances
for instance in Item.all:
print(instance.name)
Item.instantiate_from_csv()
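# For reference, 'codesnippets/items.csv' is assumed to look roughly like this
# (illustrative contents, not taken from the repository):
# name,price,quantity
# phone,100,5
# laptop,1000,3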
|
menkaraghunathbhapkar/menu--oops-practise
|
class_methods/main1.py
|
main1.py
|
py
| 1,437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31102897914
|
# Draw random numbers and sum the even ones
from random import randint
from time import sleep
def sortearLista(lista):
print('Sorteando 5 valores para a lista: ', end ='')
for cont in range(0, 5):
n = randint(1, 10)
lista.append(n)
print(f'{n}', end = '', flush = True)
sleep(0.3)
print(' Pronto!!!')
def somaPares(lista):
soma = 0
for val in lista:
if val % 2 == 0:
soma += val
print(f'Somando os valores pares de {lista}, temos {soma}')
# Main program
num = list()
sortearLista(num)
somaPares(num)
|
gslmota/Programs-PYTHON
|
Exercícios/Mundo 3/ex091.py
|
ex091.py
|
py
| 569 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
40684104940
|
#!/usr/bin/python3
"""
function that queries the Reddit API
and returns the number of subscribers
"""
import requests
def number_of_subscribers(subreddit):
"""initializate"""
if (type(subreddit) is not str):
return(0)
url_api = ("https://www.reddit.com/r/{}/about.json".format(subreddit))
headers = {'user-agent': 'safari:holberton/0.1.0'}
response = requests.get(url_api, headers=headers)
    if response.status_code != 200:
return(0)
return(response.json().get("data").get("subscribers"))
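# Example usage (subreddit name is illustrative):
# print(number_of_subscribers("programming"))  # prints the subscriber count, or 0 on error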
|
manosakpujiha/alx-system_engineering-devops
|
0x16-api_advanced/0-subs.py
|
0-subs.py
|
py
| 539 |
python
|
en
|
code
| 3 |
github-code
|
6
|
32763077140
|
myfile = open('myfile.txt')
print(myfile.read())
# can only read once; after read() the file pointer sits at the end of the file
myfile.seek(0)
mycontent = myfile.read()
print(mycontent)
#returns a list of lines
myfile.readlines()
#file locations
#need full file path
#pwd
#best practice to close it
myfile.close()
#or
with open('myfile.txt') as my_new_file:
contents = my_new_file.read()
print(contents)
#can read or write
# modes: 'a' append, 'r' read, 'w' write (overwrites), 'r+' read and write, 'w+' write and read (overwrites existing file)
#with open('myfile.txt', mode='w') as myfile:
with open('mynewfile.txt', mode='r') as f:
print (f.read())
with open('mynewfile.txt', mode='a') as f:
f.write('four on fourth')
with open('mynewfile.txt', mode='r') as f:
print (f.read())
with open('dadasf.txt', mode='w') as f:
f.write('i created this file')
with open('dadasf.txt', mode='r') as f:
print(f.read())
|
stephen-engler/python_files_io
|
files_io.py
|
files_io.py
|
py
| 846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24270720132
|
import random
import os
import glob
import cv2
import numpy as np
import json
from detectron2.structures import BoxMode
import itertools
import sys
# import some common detectron2 utilities
import pdb
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import torch
class_list = ['cone','duckie','duckiebot']
"""Now, let's fine-tune a coco-pretrained R50-FPN Mask R-CNN model on the balloon dataset. It takes ~6 minutes to train 300 iterations on Colab's K80 GPU."""
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
cfg = get_cfg()
cfg.merge_from_file("/network/home/bhattdha/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
class_list = ['cone','duckie','duckiebot']
# write a function that loads the dataset into detectron2's standard format
def get_duckietown_dicts(root_dir):
annotation_file = root_dir + 'annotations/final_anns.json'
frame_path = root_dir + 'final_frames/frames/'
with open(annotation_file) as f:
data = json.load(f)
record = {}
dataset_dicts = []
class_label = {}
## giving labels to the classes
for idx,class_val in enumerate(class_list):
class_label[class_val] = idx
for name in data.keys():
# print(name)
image_name = frame_path + name
record = {}
height, width = cv2.imread(image_name).shape[:2]
record["file_name"] = image_name
record["height"] = height
record["width"] = width
objs = []
for annotation in data[name]:
ob_list = []
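            # source bbox is [x, y, width, height]; convert it to absolute [x1, y1, x2, y2] (BoxMode.XYXY_ABS)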
obj_ann = {
"bbox": [annotation['bbox'][0], annotation['bbox'][1], annotation['bbox'][0] + annotation['bbox'][2], annotation['bbox'][1] + annotation['bbox'][3]],
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": annotation['cat_id'] - 1,
"iscrowd": 0
}
objs.append(obj_ann)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
from detectron2.data import DatasetCatalog, MetadataCatalog
root_dir = '/network/tmp1/bhattdha/duckietown_dataset/'
for d in ["train", "test"]:
DatasetCatalog.register("duckietown/" + d, lambda d=d: get_duckietown_dicts(root_dir))
MetadataCatalog.get('duckietown/' + d).set(thing_classes=class_list)
duckietown_metadata = MetadataCatalog.get('duckietown/train')
cfg_load = torch.load('/network/tmp1/bhattdha/duckietown_dataset/probabilistic_duckietown_OD/probabilistic_duckietown_OD_cfg.final')
##loading the config used at train time
cfg = cfg_load['cfg']
# import pdb; pdb.set_trace()
# cfg.DATASETS.TEST = () # no metrics implemented for this dataset
cfg.DATASETS.TEST = ('coco_2017_val',) # no metrics implemented for this dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(class_list) # (duckietown)
cfg.OUTPUT_DIR = '/network/tmp1/bhattdha/duckietown_dataset/probabilistic_duckietown_OD/'
# cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(class_list) # (kitti)
"""Now, we perform inference with the trained model on the kitti dataset. First, let's create a predictor using the model we just trained:"""
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_0014999.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set the testing threshold for this model
# cfg.DATASETS.TEST = ("kitti/test", )
predictor = DefaultPredictor(cfg)
"""Then, we randomly select several samples to visualize the prediction results."""
from detectron2.utils.visualizer import ColorMode
# im = cv2.imread('test.png')
# outputs = predictor(im)
# v = Visualizer(im[:, :, ::-1],
# metadata=duckietown_metadata,
# scale=1.0,
# instance_mode=ColorMode.IMAGE
# )
# v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
# cv2.imwrite("test_out.png", v.get_image()[:, :, ::-1])
# import pdb; pdb.set_trace()
# import time
# inf_time = []
# # If the input is the camera, pass 0 instead of the video file name
# cap = cv2.VideoCapture('/network/home/bhattdha/manfred_vid.mov')
# frame_width = int(cap.get(3))
# frame_height = int(cap.get(4))
# out = cv2.VideoWriter('/network/home/bhattdha/output_prob.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 20, (frame_width,frame_height))
# while(cap.isOpened()):
# ret, frame = cap.read()
# st_time = time.time()
# outputs = predictor(frame)
# end_time = time.time() - st_time
# inf_time.append(time.time() - st_time)
# # pdb.set_trace()
# v = Visualizer(frame[:, :, ::-1],
# metadata=duckietown_metadata,
# scale=1.0,
# instance_mode=ColorMode.IMAGE
# )
# # out.write(frame)
# v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
# print("Tot time is: ", end_time)
# # print(type(v))
# # import ipdb; ipdb.set_trace()
# out.write(v.get_image()[:, :, ::-1])
# # When everything done, release the video capture and video write objects
# cap.release()
# out.release()
# print("Inference time: ", np.mean(np.array(inf_time)))
# dataset_dicts = get_kitti_dicts("/network/tmp1/bhattdha/kitti_dataset", 'test')
image_names = glob.glob("/network/tmp1/bhattdha/duckietown_dataset/final_frames/test/*.png")
for idx, im_name in enumerate(image_names):
print(idx, im_name)
im = cv2.imread(im_name)
outputs = predictor(im)
# pdb.set_trace()
v = Visualizer(im[:, :, ::-1],
metadata=duckietown_metadata,
scale=1.0,
instance_mode=ColorMode.IMAGE
)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
print("saving images")
print(type(v))
cv2.imwrite("/network/tmp1/bhattdha/duckietown_dataset/probabilistic_duckietown_OD/test_outputs/" + str(idx).zfill(5) + '.png', v.get_image()[:, :, ::-1])
# cv2_imshow(v.get_image()[:, :, ::-1])
|
dhaivat1729/detectron2_CL
|
experiments/test_duckietown_detectron.py
|
test_duckietown_detectron.py
|
py
| 6,268 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43987570086
|
from os.path import join
import numpy as np
from graycart.GraycartWafer import evaluate_wafer_flow
from graycart.utils import plotting
# ----------------------------------------------------------------------------------------------------------------------
# INPUTS
"""
Some important notes:
1. On "Design Labels" or 'design_lbls':
* if a wafer only has a single design, then features will be identified by 'a1', 'c2', etc...
* if a wafer has multiple designs, the 'design_lbls' get attached to the feature labels, 'a1_LrO' or 'a1_erf5'
2. On "Target Labels" or 'target_lbls':
* the string 'target-profile_' is inserted before the design label, or 'design_lbl'. The target label doesn't
really serve a purpose at this point. The 'design file' (x, y, r, l) and 'target profile' (r, z) should be
combined.
3. On design spacing and design locations:
* 'design_spacing' is primarily used to filter multiple peaks in a single scan.
* 'design_locs' isn't really used at this point.
* 'design_spacing' should be removed as an input and calculated using 'design_locs'.
"""
"""
Etch Rates:
smOOth.V2:
SPR220-7: (no hard bake) vertical etch rate = 600 nm/min; lateral etch rate = 0 nm/s (calculated using w17)
(Laser Monitor) 3.5 wavelengths --> 1 wavelength ~= 190 nm
Si: vertical etch rate = 0 nm/min;
SF6+O2.V6:
SPR220-7: (no hard bake) vertical etch rate = 260 nm/min (calculated using w16)
Si: (no hard bake) vertical etch rate = 1950 nm/min
wafer 16:
1. post-dev PR thickness (b1, silicon-to-PR-surface) = 6.5 microns
2. post-etch PR thickness (b1, silicon-to-PR-surface) = 2.25 microns --> 260 nm/min
post-etch Si depth (b1, trench-to-PR-interface) = 32 microns --> 1.95 um/min
SF6:
SPR220-7: (no hard bake) vertical etch rate = 10 nm/s; lateral etch rate = 0 nm/s (calculated using w17)
Si: vertical etch rate = 0 nm/s;
"""
# SHARED
# target feature
target_radius = 1920 # microns
plot_width_rel_target_radius = 1.2 # plot radius = target_radius * plot_width_rel_target_radius
target_depth_profile = 50
# data processing
evaluate_signal_processing = False # True
lambda_peak_rel_height = lambda x: min([0.95 + x / 100, 0.9875])
z_standoff_measure = -0.125
z_standoff_design = 1
thickness_PR = 7.5
thickness_PR_budget = 1.5
# WAFER
for wid in [18]: # np.arange(12, 18)
if wid == 19:
# DESIGN
design_lbls = ['erf5']
target_lbls = ['erf5']
design_ids = [0]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in np.arange(-1, 2)]
# field exposure matrix
dose_lbls = ['a', 'b', 'c'] # , 'c', 'd', 'e'
focus_lbls = [1]
fem_dxdy = [0e3, 0e3]
# data processing
perform_rolling_on = False # [[3, 'b1', 25]]
features_of_interest = ['a1', 'b1', 'c1']
target_radius = 2050 # microns
target_depth_profile = 45
elif wid == 18:
# DESIGN
design_lbls = ['erf5']
target_lbls = ['erf5']
design_ids = [0]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in np.arange(-1, 2)]
# field exposure matrix
dose_lbls = ['a', 'b', 'c'] # , 'c', 'd', 'e'
focus_lbls = [1]
fem_dxdy = [0e3, 0e3]
# data processing
perform_rolling_on = False # [[3, 'b1', 25]]
features_of_interest = ['a1', 'b1', 'c1']
target_radius = 2050 # microns
elif wid == 17:
# DESIGN
design_lbls = ['erf5_LrO']
target_lbls = ['erf5_LrO']
design_ids = [0]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in np.arange(-2, 3)]
# field exposure matrix
dose_lbls = ['a', 'b', 'c', 'd', 'e'] # , 'c', 'd', 'e'
focus_lbls = [1]
fem_dxdy = [0e3, 0e3]
# data processing
perform_rolling_on = False # [[3, 'b1', 25]] # False
features_of_interest = ['d1'] # ['b1', 'b2', 'c1', 'c2', 'd1', 'd2']
lambda_peak_rel_height = lambda x: min([0.95 + x / 100, 0.9875])
target_radius = 2100 # microns
plot_width_rel_target_radius = 1.25 # plot radius = target_radius * plot_width_rel_target_radius
elif wid == 16:
# DESIGN
design_lbls = ['erf5_LrO']
target_lbls = ['erf5_LrO']
design_ids = [0]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in np.arange(-2, 3)]
# field exposure matrix
dose_lbls = ['a', 'b', 'c'] # , 'c', 'd', 'e'
focus_lbls = [1]
fem_dxdy = [0e3, 0e3]
# data processing
perform_rolling_on = False # [[3, 'b1', 25]] # False
features_of_interest = ['a1', 'b1', 'c1'] # ['b1', 'b2', 'c1', 'c2', 'd1', 'd2']
# misc
thickness_PR_budget = 1.65
elif wid == 15:
# DESIGN
design_lbls = ['erf5_LrO']
target_lbls = ['erf5_LrO']
design_ids = [0]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in np.arange(-2, 3)]
# field exposure matrix
dose_lbls = ['c'] # , 'c', 'd', 'e'
focus_lbls = [1, 2]
fem_dxdy = [25e3, 0e3]
# data processing
perform_rolling_on = False # [[3, 'b1', 25]] # False
features_of_interest = ['c1', 'c2'] # ['b1', 'b2', 'c1', 'c2', 'd1', 'd2']
elif wid == 14:
# DESIGN
design_lbls = ['erf5_LrO']
target_lbls = ['erf5_LrO']
design_ids = [0]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in np.arange(-2, 3)]
# field exposure matrix
dose_lbls = ['a', 'b'] # , 'c', 'd', 'e'
focus_lbls = [1]
fem_dxdy = [0e3, 35e3]
# data processing
perform_rolling_on = [[3, 'b1', 25]] # False
features_of_interest = ['a1', 'b1'] # ['b1', 'b2', 'c1', 'c2', 'd1', 'd2']
elif wid == 13:
# DESIGN
design_lbls = ['erf5_LrO']
target_lbls = ['erf5_LrO']
design_ids = [0]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in np.arange(-2, 3)]
# field exposure matrix
dose_lbls = ['a', 'b', 'c', 'd', 'e'] # , 'c', 'd', 'e'
focus_lbls = [1, 2]
fem_dxdy = [5e3, 5e3]
# data processing
perform_rolling_on = False # [[3, 'b1', 25]] # False
features_of_interest = ['c1', 'c2']
elif wid == 12:
# DESIGN
design_lbls = ['erf5_LrO']
target_lbls = ['erf5_LrO']
design_ids = [0]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in np.arange(-2, 3)]
# field exposure matrix
dose_lbls = ['a', 'b', 'c', 'd', 'e']
focus_lbls = [1, 2, 3]
dose, dose_step = 350, 0
focus, focus_step = -25, 25
fem_dxdy = [15e3, 5e3]
# data processing
perform_rolling_on = [[3, 'c1', 25], [3, 'c2', 15], [3, 'c3', 15]]
features_of_interest = ['c{}'.format(i + 1) for i in range(3)]
elif wid == 11:
# DESIGN
design_lbls = ['erf3', 'Lr']
target_lbls = [None, None]
design_ids = [0, 1]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in [-0.5, 0.5]]
# field exposure matrix
dose_lbls = ['a', 'b'] # 'a', 'b', 'c', 'd', 'e'
focus_lbls = [1]
fem_dxdy = [10e3, 10e3]
# data processing
perform_rolling_on = False # [[3, 'b1', 25]] # False
features_of_interest = ['a1_erf3', 'b1_erf3', 'a1_Lr', 'b1_Lr'] # ['b1', 'b2', 'c1', 'c2', 'd1', 'd2']
elif wid == 10:
# design
design_lbls = ['erf3', 'Lr']
target_lbls = [None, None]
design_ids = [0, 1]
design_spacing = 5e3
design_locs = [[0, n * design_spacing] for n in [-0.5, 0.5]]
# field exposure matrix
dose_lbls = ['a', 'b'] # 'a', 'b', 'c', 'd', 'e'
focus_lbls = [1]
fem_dxdy = [10e3, 10e3]
# data processing
perform_rolling_on = [[3, 'b1_erf3', 50]] # False
features_of_interest = ['a1_erf3', 'b1_erf3', 'a1_Lr', 'b1_Lr'] # ['b1', 'b2', 'c1', 'c2', 'd1', 'd2']
else:
continue
# raise ValueError()
# SHARED
base_path = '/Users/mackenzie/Desktop/Zipper/Fabrication/Wafer{}'.format(wid)
fn_pflow = 'process-flow_w{}.xlsx'.format(wid)
path_results = 'results'
profilometry_tool = 'KLATencor-P7'
# results
save_type = '.png'
step_develop = 3
save_all_results = False
# ------------------------------------------------------------------------------------------------------------------
wfr = evaluate_wafer_flow(wid, base_path, fn_pflow, path_results, profilometry_tool,
design_lbls, target_lbls, design_locs, design_ids,
design_spacing, dose_lbls, focus_lbls, fem_dxdy,
target_radius=target_radius,
plot_width_rel_target_radius=plot_width_rel_target_radius,
peak_rel_height=lambda_peak_rel_height,
save_all_results=save_all_results,
perform_rolling_on=perform_rolling_on,
evaluate_signal_processing=evaluate_signal_processing,
)
# grade target accuracy
wfr.grade_profile_accuracy(step=max(wfr.list_steps), target_radius=target_radius, target_depth=target_depth_profile)
wfr.backout_process_to_achieve_target(target_radius=target_radius,
target_depth=target_depth_profile,
thickness_PR=thickness_PR,
thickness_PR_budget=thickness_PR_budget,
save_fig=save_all_results)
wfr.characterize_exposure_dose_depth_relationship(z_standoff=z_standoff_measure,
plot_figs=save_all_results,
save_type=save_type,
)
wfr.merge_exposure_doses_to_process_depths(export=save_all_results)
wfr.plot_all_exposure_dose_to_depth(step=step_develop)
wfr.compare_exposure_functions()
wfr.correct_grayscale_design_profile(z_standoff=z_standoff_design,
plot_figs=save_all_results,
save_type=save_type,
)
# ---
# ---
# plots
if save_all_results:
wfr.plot_feature_evolution(px='r', py='z', save_fig=save_all_results)
# compare target to features
wfr.compare_target_to_feature_evolution(px='r', py='z', save_fig=save_all_results)
# plot exposure profile
for foi in features_of_interest:
gpf = wfr.features[foi]
plotting.plot_exposure_profile(gcf=gpf, path_save=join(wfr.path_results, 'figs'), save_type=save_type)
# plot feature profile overlaid with exposure profile
step = max(wfr.list_steps)
for did in wfr.dids:
plotting.plot_overlay_feature_and_exposure_profiles(gcw=wfr, step=step, did=did,
path_save=join(wfr.path_results, 'figs'),
save_type=save_type,
)
# ---
print("test_flow.py completed without errors.")
|
sean-mackenzie/grayscale-cartographer
|
tests/run_flow.py
|
run_flow.py
|
py
| 11,973 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70994878268
|
# -*- coding: utf-8 -*-
import PySide2.QtWidgets as qtwidgets
import PySide2.QtCore as qtcore
import PySide2.QtGui as qtgui
import PySide2.QtNetwork as qtnetwork
import os.path
import signal
import socket
class HButtonBar(qtwidgets.QWidget):
layout=qtwidgets.QHBoxLayout
def __init__(self,def_list):
qtwidgets.QWidget.__init__(self)
b_layout=self.layout()
for label,callback in def_list:
button = qtwidgets.QPushButton(label)
button.clicked.connect(callback)
b_layout.addWidget(button)
self.setLayout(b_layout)
class VButtonBar(HButtonBar):
layout=qtwidgets.QVBoxLayout
class OpenFileWidget(qtwidgets.QWidget):
def __init__(self):
qtwidgets.QWidget.__init__(self)
self.field=qtwidgets.QLineEdit()
button=qtwidgets.QPushButton("Browse...")
layout = qtwidgets.QHBoxLayout()
layout.addWidget(self.field,stretch=1)
layout.addWidget(button,stretch=0)
self.setLayout(layout)
button.pressed.connect(self._open)
def text(self):
return self.field.text()
def setText(self,txt):
self.field.setText(txt)
def blockTextSignals(self,flag):
self.field.blockSignals(flag)
def _open(self):
dialog = qtwidgets.QFileDialog(self)
dialog.setFileMode(qtwidgets.QFileDialog.ExistingFile)
dialog.setAcceptMode(qtwidgets.QFileDialog.AcceptOpen)
old=self.field.text()
if not old:
dialog.setDirectory(".")
else:
dialog.setDirectory(os.path.dirname(old))
dialog.selectFile(old)
if dialog.exec_():
fnames = dialog.selectedFiles()
self.field.setText(fnames[0])
class SaveFileWidget(OpenFileWidget):
def _open(self):
dialog = qtwidgets.QFileDialog(self)
dialog.setFileMode(qtwidgets.QFileDialog.AnyFile)
dialog.setAcceptMode(qtwidgets.QFileDialog.AcceptSave)
old=self.field.text()
if not old:
dialog.setDirectory(".")
else:
dialog.setDirectory(os.path.dirname(old))
dialog.selectFile(old)
if dialog.exec_():
fnames = dialog.selectedFiles()
self.field.setText(fnames[0])
class OpenDirWidget(OpenFileWidget):
def _open(self):
dialog = qtwidgets.QFileDialog(self)
dialog.setFileMode(qtwidgets.QFileDialog.Directory)
dialog.setAcceptMode(qtwidgets.QFileDialog.AcceptOpen)
dialog.setOptions(qtwidgets.QFileDialog.ShowDirsOnly)
old=self.field.text()
if not old:
dialog.setDirectory(".")
else:
dialog.setDirectory(os.path.dirname(old))
dialog.selectFile(old)
if dialog.exec_():
fnames = dialog.selectedFiles()
self.field.setText(fnames[0])
class SignalWakeupHandler(qtnetwork.QAbstractSocket):
def __init__(self, parent=None):
super().__init__(qtnetwork.QAbstractSocket.UdpSocket, parent)
self.old_fd = None
# Create a socket pair
self.wsock, self.rsock = socket.socketpair(type=socket.SOCK_DGRAM)
# Let Qt listen on the one end
self.setSocketDescriptor(self.rsock.fileno())
# And let Python write on the other end
self.wsock.setblocking(False)
self.old_fd = signal.set_wakeup_fd(self.wsock.fileno())
# First Python code executed gets any exception from
# the signal handler, so add a dummy handler first
self.readyRead.connect(lambda : None)
# Second handler does the real handling
self.readyRead.connect(self._readSignal)
def __del__(self):
# Restore any old handler on deletion
if self.old_fd is not None and signal and signal.set_wakeup_fd:
signal.set_wakeup_fd(self.old_fd)
def _readSignal(self):
# Read the written byte.
        # Note: readyRead is blocked from occurring again until readData()
# was called, so call it, even if you don't need the value.
data = self.readData(1)
# Emit a Qt signal for convenience
self.signalReceived.emit(data[0])
signalReceived = qtcore.Signal(int)
class FormDialog(qtwidgets.QDialog):
def _font(self,style,size):
font_db = qtgui.QFontDatabase()
family="Raleway"
font=font_db.font(family,style,size)
return font
def __init__(self,window,title,form,*args,**kwargs):
super().__init__(window,*args,**kwargs)
self.setWindowTitle(title)
flags = qtwidgets.QDialogButtonBox.Ok | qtwidgets.QDialogButtonBox.Cancel
button_box = qtwidgets.QDialogButtonBox(flags)
button_box.accepted.connect(self.accept)
button_box.rejected.connect(self.reject)
for w in button_box.findChildren(qtwidgets.QWidget):
w.setFont(self._font("Medium",10))
f_widget=qtwidgets.QWidget()
self._form=form
f_widget.setLayout(self._form)
for w in f_widget.findChildren(qtwidgets.QWidget):
w.setFont(self._font("Medium",10))
v_layout = qtwidgets.QVBoxLayout()
v_layout.addWidget(f_widget)
v_layout.addWidget(button_box)
self.setLayout(v_layout)
def get_data(self):
print("dialog")
ret=self.exec_()
data=list(self._form.get_data())
data.append(ret==self.Accepted)
return tuple(data)
class AwesomeToolBar(qtwidgets.QToolBar):
def _font(self,family,style,size):
font_db = qtgui.QFontDatabase()
family="Font Awesome 5 "+family
font=font_db.font(family,style,size)
return font
def __init__(self,parent): #icon,tooltip,size=8,style="Solid",family="Free"):
qtwidgets.QToolBar.__init__(self,parent)
def addAction(self,icon,tooltip,size=8,style="Solid",family="Free"):
action=qtwidgets.QToolBar.addAction(self,icon)
action.setToolTip(tooltip)
action.setFont(self._font(family,style,size))
return action
class AddRootProxyModel(qtcore.QIdentityProxyModel):
root="==root=="
def data(self, index, role):
parent=index.parent()
if parent.isValid():
return qtcore.QIdentityProxyModel.data(self,index,role)
row=index.row()
if row==0:
if role not in [ qtcore.Qt.DisplayRole, qtcore.Qt.EditRole]:
ret=qtcore.QIdentityProxyModel.data(self,index,role)
print(role,ret)
return ret
return "----"
sibling=index.sibling(row-1,index.column())
return qtcore.QIdentityProxyModel.data(self,sibling,role)
def flags(self,index):
if not index.parent().isValid():
if index.row()==0:
return qtcore.Qt.ItemIsEnabled | qtcore.Qt.ItemIsSelectable | qtcore.Qt.ItemNeverHasChildren
return qtcore.Qt.ItemIsEnabled | qtcore.Qt.ItemIsSelectable
def rowCount(self,index):
if index.isValid():
if index.parent().isValid():
return qtcore.QIdentityProxyModel.rowCount(self,index)
if index.row()==0: return 0
return qtcore.QIdentityProxyModel.rowCount(self,index)
return 1+qtcore.QIdentityProxyModel.rowCount(self)
def index(self,row,column,parent=qtcore.QModelIndex()):
if parent.isValid():
return qtcore.QIdentityProxyModel.index(self,row,column,parent)
if row==0:
ret=self.createIndex(0,column,self.root)
return ret
old=qtcore.QIdentityProxyModel.index(self,row-1,column,parent)
return self.createIndex(row,column,old.internalPointer())
def parent(self,index):
if not index.isValid(): return qtcore.QModelIndex()
obj=index.internalPointer()
if obj==self.root: return qtcore.QModelIndex()
return qtcore.QIdentityProxyModel.parent(self,index)
def mapToSource(self,proxyIndex):
new_index=qtcore.QIdentityProxyModel.mapToSource(self,proxyIndex)
if new_index.internalPointer()==self.root:
return qtcore.QModelIndex()
return new_index
def mapFromSource(self,sourceIndex):
new_index=qtcore.QIdentityProxyModel.mapFromSource(self,sourceIndex)
if new_index.parent().isValid(): return new_index
return self.createIndex(1+new_index.row(),new_index.column(),
new_index.internalPointer())
# def mapFromSource(self, sourceIndex):
# if not sourceIndex.isValid(): return qtcore.QModelIndex()
# parent=sourceIndex.parent()
# if parent.isValid():
# return self.createIndex(sourceIndex.row(),
# sourceIndex.column(),
# sourceIndex.internalPointer())
# return self.createIndex(1+sourceIndex.row(),
# sourceIndex.column(),
# sourceIndex.internalPointer())
# def mapToSource(self, proxyIndex):
# if not proxyIndex.isValid(): return qtcore.QModelIndex()
# parent=proxyIndex.parent()
# if parent.isValid:
# return qtcore.QIdentityProxyModel.mapToSource(self,proxyIndex)
# obj=proxyIndex.internalPointer()
# if obj==self.root: return qtcore.QModelIndex()
# return self.sourceModel().createIndex(proxyIndex.row()-1,
# proxyIndex.column(),
# obj)
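# Hypothetical usage sketch (application bootstrap is an assumption, not part of this module):
# app = qtwidgets.QApplication([])
# picker = OpenFileWidget()
# picker.show()
# app.exec_()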
|
chiara-paci/djvueditor
|
lib/python/djvuedlib/widgets.py
|
widgets.py
|
py
| 9,581 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2696828667
|
from collections import Counter
from trava.ext.boosting_eval.boosting_logic import CommonBoostingEvalLogic
from trava.ext.boosting_eval.eval_steps import EvalFitSteps
from trava.fit_predictor import FitPredictConfig, FitPredictConfigUpdateStep, FitPredictorSteps
from trava.split.result import SplitResult
from trava.tracker import Tracker
class _GroupConfigUpdateStep(FitPredictConfigUpdateStep):
def __init__(self, group_col_name: str):
self._group_col_name = group_col_name
def fit_split_data(self, raw_split_data: SplitResult, config: FitPredictConfig, tracker: Tracker) -> SplitResult:
X_valid = None
if raw_split_data.X_valid is not None:
X_valid = raw_split_data.X_valid.drop(self._group_col_name, axis=1)
result = SplitResult(
X_train=raw_split_data.X_train.drop(self._group_col_name, axis=1),
y_train=raw_split_data.y_train,
X_test=raw_split_data.X_test.drop(self._group_col_name, axis=1),
y_test=raw_split_data.y_test,
X_valid=X_valid,
y_valid=raw_split_data.y_valid,
)
return result
def fit_params(
self, fit_params: dict, fit_split_data: SplitResult, config: FitPredictConfig, tracker: Tracker
) -> dict:
raw_split_data = config.raw_split_data
assert raw_split_data
train_counted_groups = self._counted_groups(X=raw_split_data.X_train)
fit_params["group"] = train_counted_groups
return fit_params
def _counted_groups(self, X):
# Group sizes in order of first appearance (Counter preserves insertion order),
# which is the format boosting libraries expect for their group fit parameter;
# rows are assumed to be contiguous within each group.
train_groups = X[self._group_col_name].values
counted_groups = list(Counter(train_groups).values())
return counted_groups
class _GroupEvalConfigUpdateStep(_GroupConfigUpdateStep):
def __init__(self, group_col_name: str):
super().__init__(group_col_name=group_col_name)
def fit_params(
self, fit_params: dict, fit_split_data: SplitResult, config: FitPredictConfig, tracker: Tracker
) -> dict:
fit_params = super().fit_params(
fit_params=fit_params, fit_split_data=fit_split_data, config=config, tracker=tracker
)
raw_split_data = config.raw_split_data
assert raw_split_data
assert raw_split_data.X_valid is not None, "X_valid set must be present to run evaluation"
eval_counted_groups = self._counted_groups(X=raw_split_data.X_valid)
fit_params["eval_group"] = [fit_params["group"], eval_counted_groups]
return fit_params
def _counted_groups(self, X):
train_groups = X[self._group_col_name].values
counted_groups = list(Counter(train_groups).values())
return counted_groups
class GroupFitSteps(FitPredictorSteps):
"""
Simple extension for group-based problems (e.g. ranking)
that provides the group parameter needed for training a model.
Init parameters
----------
group_col_name: str
Which column is used to store groups
"""
def __init__(self, group_col_name: str):
group_config_step = _GroupConfigUpdateStep(group_col_name=group_col_name)
super().__init__(config_steps=[group_config_step])
class GroupEvalFitSteps(EvalFitSteps):
"""
Same as GroupFitSteps, but also adds some modifications to support evaluation.
Init parameters
----------
eval_logic: Eval
Contains logic of how to perform evaluation on the model.
group_col_name: str
Which column is used to store groups
"""
def __init__(self, eval_logic: CommonBoostingEvalLogic, group_col_name: str):
group_eval_config_step = _GroupEvalConfigUpdateStep(group_col_name=group_col_name)
super().__init__(eval_logic=eval_logic)
self.config_steps.insert(0, group_eval_config_step)
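# Added usage sketch: constructing the step sets defined above. The column name
# "query_id" is a hypothetical example; any column that encodes the ranking groups works.
# GroupEvalFitSteps additionally needs a CommonBoostingEvalLogic instance (not shown here).
if __name__ == "__main__":
    demo_steps = GroupFitSteps(group_col_name="query_id")
    print(demo_steps.config_steps)  # one _GroupConfigUpdateStep, consumed by a FitPredictor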
repo_name: ityutin/trava | sub_path: trava/ext/grouped/group_steps.py | file_name: group_steps.py | file_ext: py | file_size_in_byte: 3781 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6

seq_id: 33874326793
# 507/206 Homework 6 Part 2
import requests
from bs4 import BeautifulSoup
#### Part 2 ####
print('\n*********** PART 2 ***********')
print('Michigan Daily -- MOST READ\n')
### Your Part 2 solution goes here
html = requests.get('https://www.michigandaily.com/').text
soup = BeautifulSoup(html, 'html.parser')
# searching_div = soup.find('div', attrs = {"class":"panel-pane pane-mostread"})
searching_div= soup.find('div', attrs = {'class': "view view-most-read view-id-most_read view-display-id-panel_pane_1 view-dom-id-99658157999dd0ac5aa62c2b284dd266"})
# print(searching_div)
mr = searching_div.select("ol li")
for li in mr:
print(li.text)
# print(mr)
# for a in mr:
# abstract = a.select("ol")
# print(abstract)
# for li in abstract:
# print(li)
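# Added sketch (hypothetical selector, not verified against the current michigandaily.com
# markup): the auto-generated "view-dom-id-..." class used above changes between page
# renders, so matching on the stable class prefix is usually more robust.
import re
robust_div = soup.find('div', class_=re.compile(r'view-most-read|pane-mostread'))
if robust_div is not None:
    most_read = [li.get_text(strip=True) for li in robust_div.select('ol li')]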
repo_name: xckou/SI507-HW06-xckou | sub_path: hw6_part2.py | file_name: hw6_part2.py | file_ext: py | file_size_in_byte: 762 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 23552963573
#########################################################################
# File Name: getKmerFromVCF_REF.py
# Author: yanbo
# mail: [email protected]
# Created Time: Thu 09 May 2019 10:45:06 AEST
#########################################################################
#!/usr/bin/env python
import collections
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import re
import sys
import tools
import read
def write_pair_kmer(outFile, kmers):
sortedKmers = sorted(kmers)
with open(outFile, "w") as f:
for (kmer1, kmer2, pos) in sortedKmers:
#f.write("%s %s %s %s %s %s %s %s\n" % (ele[0], ele[1], ele[2], ele[3], ele[4], ele[5], tools.reverse(ele[0]), tools.reverse(ele[1]) ) )
f.write("%s %s %s\n" % (kmer1, kmer2, pos) )
def get_snp_pair_kmer(vcfFilename):
snps = read.read_vcf(vcfFilename)
kmerFilename="chr" + sys.argv[1] + ".snp.real." + sys.argv[2] + "mer"
kmers = []
for key in snps:
#assert seq[key-1] == snps[key][0] or seq[key-1] == snps[key][1]
assert seq[key-1] == snps[key][0]
h1 = seq[key-int(k/2)-1 : key-1] + snps[key][0] + seq[key : key+int(k/2) ] # 0
h2 = seq[key-int(k/2)-1 : key-1] + snps[key][1] + seq[key : key+int(k/2) ] # 1
if h1.count('N') > 0 or h2.count('N') > 0:
continue
'''
new_h1 = tools.reverse(h1) # 0
new_h2 = tools.reverse(h2) # 1
min_h= min(h1,h2)
min_newh = min(new_h1, new_h2)
ID = snps[key][2]
if min_h < min_newh:
if h1 < h2:
kmers.append( (h1, h2, key, ID, 0, 1) )
else:
kmers.append( (h2, h1, key, ID, 1, 0) )
else:
if new_h1 < new_h2:
kmers.append( (new_h1, new_h2, key, ID, 0, 1) )
else:
kmers.append( (new_h2, new_h1, key, ID, 1, 0) )
'''
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
kmers.append( (smallerH1, smallerH2, key) )
write_pair_kmer(kmerFilename, kmers)
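# Added worked example (illustrative numbers only): with k=5 and a 1-based SNP position
# key=100, int(k/2)=2, so h1 = seq[97:99] + ref_allele + seq[100:102] -- two bases of
# flanking sequence on each side of the variant, giving a k-mer of length 5; h2 is the
# same window with the alternate allele substituted.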
def get_indel_pair_kmer(vcfFilename):
indels = read.read_vcf(vcfFilename)
kmerFilename="chr" + sys.argv[1] + ".indel.real." + sys.argv[2] + "mer"
kmers = []
indel_length1_cnt = 0
for key in indels:
s1, s2, ID = indels[key]
lenS1, lenS2 = len(s1), len(s2)
if lenS1 + lenS2 > 3:
continue
indel_length1_cnt += 1
assert lenS1 + lenS2 >= 2
if len(s1) == 1 and len(s2) == 2:
assert seq[key-1] == s1
assert s2[0] != s2[1]
#while s2[1] == seq[key-1]: # delete content is s2[1]
#key = key-1 # delete happen at "AAA" region, always think delete first poisition
h1 = seq[key-int(k/2) : key+int(k/2)] # k-1
h2 = seq[key-int(k/2) : key-1] + s2 + seq[key : key+int(k/2)] # 1 # len: k
assert len(h1) == k-1 and len(h2) == k
h1, h2 = h2, h1 # h1 always is longer one
initialH1 = h1
if h1.count('N') > 0 or h2.count('N') > 0:
continue
#print key, "11", h1
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
kmers.append( (smallerH1, smallerH2, key) )
# delete happen at multipe "AAAA" region, more pair kmer happen
l = len(h1)
mid = l//2  # integer division: l/2 is a float in Python 3 and would break indexing below
i=1
while mid+i<l and initialH1[mid+i] == initialH1[mid]:
h1 = seq[key-int(k/2)+i : key+int(k/2)+i] # move right i
h2 = seq[key-int(k/2)+i : key-1] + s2 + seq[key : key+int(k/2)+i] # move right i
h1, h2 = h2, h1 # h1 always is longer one
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "aa"
kmers.append( (smallerH1, smallerH2, key) )
i+=1
i=1
while mid-i>=0 and initialH1[mid-i] == initialH1[mid]:
h1 = seq[key-int(k/2)-i : key+int(k/2)-i] # move left i
h2 = seq[key-int(k/2)-i : key-1] + s2 + seq[key : key+int(k/2)-i] # move right i
h1, h2 = h2, h1 # h1 always is longer one
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "bb"
kmers.append( (smallerH1, smallerH2, key) )
i+=1
''' # for test can grouth-truth can always keep min strand delete first
if h1 > tools.reverse(h1):
print "aa"
print h1, h2
print tools.reverse(h1), tools.reverse(h2)
while s2[1] == seq[key]:
key+=1
h1 = seq[key-int(k/2) : key+int(k/2)] # k-1
h2 = seq[key-int(k/2) : key] + s2[1] + seq[key : key+int(k/2)] # 1 # len: k
h1, h2 = h2, h1
print h1, h2
print tools.reverse(h1), tools.reverse(h2)
'''
elif len(s1) == 2 and len(s2) == 1:
assert seq[key-1:key+1] == s1
assert s1[0] != s1[1]
h1 = seq[key-int(k/2) : key+int(k/2)+1] # k
h2 = seq[key-int(k/2) : key] + seq[key+1 : key+int(k/2)+1] # 1 # len: k-1
assert len(h1) == k and len(h2) == k-1
initialH1 = h1
if h1.count('N') > 0 or h2.count('N') > 0:
continue
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "22"
kmers.append( (smallerH1, smallerH2, key) )
l = len(h1)
mid = l//2  # integer division: l/2 is a float in Python 3 and would break indexing below
i=1
while mid+i < l and initialH1[mid+i] == initialH1[mid]:
h1 = seq[key-int(k/2)+i : key+int(k/2)+1+i] # k
h2 = seq[key-int(k/2)+i : key] + seq[key+1 : key+int(k/2)+1+i] # 1 # len: k-1
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "cc"
kmers.append( (smallerH1, smallerH2, key) )
i+=1
i=1
while mid-i >= 0 and initialH1[mid-i] == initialH1[mid]:
h1 = seq[key-int(k/2)-i : key+int(k/2)+1-i] # k
h2 = seq[key-int(k/2)-i : key] + seq[key+1 : key+int(k/2)+1-i] # 1 # len: k-1
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "dd"
kmers.append( (smallerH1, smallerH2, key) )
i+=1
print ("there are ", indel_length1_cnt, "indels, create ", len(kmers), "indel pair kmer")
write_pair_kmer(kmerFilename, kmers)
'''
allFile = "chr" + sys.argv[1] + ".all." + sys.argv[2] + "mer"
foutAll = open(allFile, "w")
for i in range(seqLen-21):
mer = seq[i:i+k]
if mer.count('N') > 0:
continue
Rmer = tools.reverse(mer)
if Rmer < mer:
mer = Rmer
foutAll.write("%s %s\n" % (mer, i))
foutAll.close()
'''
# this simulate data is based on hg18
refFilename="/home/yulin/bio/Data/reference/NCBI36_hg18/chr22.fa"
snpVCFFile="/home/yulin/bio/VariationCalling/data/NA12878/VCF/NA12878_hg18_snp_VCFs/chr22.vcf"
indelVCFFile="/home/yulin/bio/VariationCalling/data/NA12878/VCF/NA12878_hg18_indel_VCFs/chr22.vcf"
# this illumina data align to hg19
#print ("input chrID kmer-size")
#refFilename ="/home/yulin/bio/Data/reference/GRCh37_hg19/chr" + sys.argv[1] + ".fa"
#vcfFilename ="/home/yulin/software/HapCUT2/reproduce_hapcut2_paper/run_hapcut2_fosmid/data/NA12878_hg19_VCFs/chr" + sys.argv[1] + ".phased.vcf"
record = SeqIO.read(open(refFilename), "fasta")
print (record.id)
seq = str(record.seq).upper()
seqLen = len(seq)
k=int(sys.argv[2])
get_snp_pair_kmer(snpVCFFile)
get_indel_pair_kmer(indelVCFFile)
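# Added sketch (assumption): tools.reverse / tools.get_smaller_pair_kmer are not included
# in this snippet. Judging from how they are used above (and from the commented-out SNP
# branch), they appear to pick the strand whose kmers are lexicographically smaller and
# return the chosen pair in ascending order. A minimal stand-in consistent with that usage:
_COMPLEMENT = str.maketrans("ACGTN", "TGCAN")

def _reverse_complement(kmer):
    # reverse complement of an upper-case DNA string
    return kmer.translate(_COMPLEMENT)[::-1]

def _get_smaller_pair_kmer(h1, h2):
    # choose the forward or reverse-complement strand by the smaller of its two kmers,
    # then order the chosen pair lexicographically (mirrors the commented-out logic above)
    r1, r2 = _reverse_complement(h1), _reverse_complement(h2)
    if min(h1, h2) <= min(r1, r2):
        return (h1, h2) if h1 < h2 else (h2, h1)
    return (r1, r2) if r1 < r2 else (r2, r1)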
repo_name: yanboANU/VariationCalling | sub_path: libprism/evaluate/getKmerFromVCF_REF.py | file_name: getKmerFromVCF_REF.py | file_ext: py | file_size_in_byte: 7794 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6