| column | type | range / values |
|---|---|---|
| seq_id | string | lengths 7-11 |
| text | string | lengths 156-1.7M |
| repo_name | string | lengths 7-125 |
| sub_path | string | lengths 4-132 |
| file_name | string | lengths 4-77 |
| file_ext | string | 6 classes |
| file_size_in_byte | int64 | 156-1.7M |
| program_lang | string | 1 class |
| lang | string | 38 classes |
| doc_type | string | 1 class |
| stars | int64 | 0-24.2k (nullable ⌀) |
| dataset | string | 1 class |
| pt | string | 1 class |
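Below is a minimal sketch of how one row of this dump could be represented in code, assuming only the column types listed above (the dataclass name and field comments are illustrative, not part of the dataset):

from dataclasses import dataclass
from typing import Optional

@dataclass
class CodeRecord:
    seq_id: str                 # 7-11 characters
    text: str                   # full file contents, 156 bytes to 1.7M
    repo_name: str
    sub_path: str
    file_name: str
    file_ext: str               # one of 6 values (e.g. "py")
    file_size_in_byte: int
    program_lang: str           # single value in this dump ("python")
    lang: str                   # one of 38 natural-language codes
    doc_type: str               # single value ("code")
    stars: Optional[int]        # 0 to 24.2k, may be missing
    dataset: str                # single value ("github-code")
    pt: str                     # single value ("6" in the rows below)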
| seq_id: 34829962744 |
import sys
import random
from time import sleep
class Game:
def __init__(self):
self.wins = 0
self.loses = 0
self.RPS = ['Rock', 'Paper', 'Scissors']
self.confirm = ['Yes', 'Y', 'Okay']
def _game(self):
player_rps = input("Please select either rock, paper or scissors: ")
player_clean = player_rps.capitalize()
def validate(choice):
return choice in self.RPS
rps_cpu = (random.choice(self.RPS))
if validate(player_clean) is False:
sleep(1)
sys.exit("Hey! You should have chosen either rock, paper or scissors.")
def win_condition(a, b):
if (a, b) in (("Rock", "Scissors"), ("Paper", "Rock"), ("Scissors", "Paper")):
return True
def choices():
sleep(1)
print("Player chooses - " + player_clean)
sleep(1)
print("CPU chooses - " + rps_cpu)
choices()
if rps_cpu != player_clean:
if win_condition(player_clean, rps_cpu):
sleep(1)
print("Player wins!")
self.wins += 1
self._score()
else:
sleep(1)
print("CPU wins")
self.loses += 1
self._score()
else:
sleep(1)
print("Tie!")
self._score()
def _score(self):
print("Wins: " + str(self.wins) + " Loses: " + str(self.loses))
play_again = input("Do you want to play again? ")
if play_again.capitalize() in self.confirm:
self._game()
else:
print("Final score is - " + "Wins: " + str(self.wins) + " Loses: " + str(self.loses))
print("Thanks for playing!")
sleep(5)
def play(self):
print("Welcome to the Rock Paper Scissors application!")
sleep(1)
user_input = input("Would you like to play, rock, paper, scissors? ")
if user_input.capitalize() in self.confirm:
sleep(1)
self._game()
else:
print("Okay, have a nice day.")
sleep(5)
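# Hedged usage sketch (the excerpt above defines the class but never runs it;
# this entry point is an assumption, not part of the original file):
if __name__ == "__main__":
    Game().play()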
| repo_name: James-Rocker/pygames_portfolio | sub_path: games/rock_paper_scissors.py | file_name: rock_paper_scissors.py | file_ext: py | file_size_in_byte: 2,260 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 21210874790 |
'''
Practica 4
Write a program that asks the user for a whole number and prints on screen
a triangle like the following (input number 5):
1
3 1
5 3 1
7 5 3 1
9 7 5 3 1
Author: Francisco Zamora Saldaña
Programación avanzada 2MM3
'''
print("Triangle of odd numbers")
while True:
user = input("Give me a positive whole number:\n")
try:
n = int(user)
except ValueError:
print("It must be a positive whole number!\n")
else:
if n > 0:
break
elif n == 0:
print("Zero is the middle of the number line, it does not work here")
else:
print("It must be a positive whole number!")
triangulo=[]
for i in range(1,n+1):
fila = []
for j in range(0,i):
fila.insert(0,2*j+1)
filastr=[str(a) for a in fila]
print(' '.join(filastr))
triangulo.append(fila)
| repo_name: FranciscoZS/PythonCode | sub_path: practica4.py | file_name: practica4.py | file_ext: py | file_size_in_byte: 954 | program_lang: python | lang: es | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 8779553577 |
import requests
import logging
from bs4 import BeautifulSoup
logger = logging.getLogger()
logger.setLevel(level=logging.INFO)
class WikiWorker():
def __init__(self) -> None:
self._url = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
@staticmethod
def _extract_company_symbol(page_html):
soup = BeautifulSoup(page_html,'html.parser')
table = soup.find(id='constituents')
table_rows = table.find_all('tr')
for table_row in table_rows[1:]:
symbol = table_row.find('td').text.strip('\n')
yield symbol
def get_sp_500_companies(self):
response = requests.get(url=self._url)
if response.status_code != 200:
logger.warning('Not able to find companies!')
return []
yield from self._extract_company_symbol(response.text)
if __name__ == '__main__':
wiki = WikiWorker()
counter = 0
for symbol in wiki.get_sp_500_companies():
print(symbol)
counter += 1
if counter > 5:
break
| repo_name: atula28os/Multithreads | sub_path: workers/WikiWorker.py | file_name: WikiWorker.py | file_ext: py | file_size_in_byte: 1,125 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 22852840076 |
import collections

class Solution(object):
def groupStrings(self, strings):
"""
:type strings: List[str]
:rtype: List[List[str]]
"""
groups = collections.defaultdict(list)
for s in strings:
# Key each string by the letter offsets from its first character (mod 26),
# so all shifted versions of the same pattern map to the same key.
groups[tuple(map(lambda x: (ord(x) - ord(s[0])) % 26, s))] += s,
return map(sorted, groups.values())
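# Hedged usage sketch (example input/output are assumptions, not from the original file):
# print(list(Solution().groupStrings(["abc", "bcd", "acef", "xyz", "az", "ba", "a", "z"])))
# expected groups (outer order may vary): [["abc", "bcd", "xyz"], ["acef"], ["a", "z"], ["az", "ba"]]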
| repo_name: yuweishi/LeetCode | sub_path: Algorithms/Group Shifted Strings/solution.py | file_name: solution.py | file_ext: py | file_size_in_byte: 344 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 32581486001 |
import pickle
import pennylane as qml
from pennylane import numpy as np
from math import pi
from ChemModel import translator, quantum_net
from Arguments import Arguments
# load molecular datasets (OH: 12 qubits)
# OHdatasets = qml.data.load("qchem", molname="OH", basis="STO-3G", bondlength=0.9)
# OHdata = OHdatasets[0]
# hamiltonian = OHdata.hamiltonian
# print(OHdata.molecule)
# print("molecular dataset used: {}".format(OHdata))
def chemistry(design):
np.random.seed(42)
args = Arguments()
symbols = ["O", "H"]
coordinates = np.array([0.0, 0.0, 0.0, 0.45, -0.1525, -0.8454])
# Building the molecular hamiltonian
hamiltonian, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates, charge=1)
dev = qml.device("lightning.qubit", wires=args.n_qubits)
@qml.qnode(dev, diff_method="adjoint")
def cost_fn(theta):
quantum_net(theta, design)
return qml.expval(hamiltonian)
print(hamiltonian)
energy = []
for i in range(5):
q_params = 2 * pi * np.random.rand(design['layer_repe'] * args.n_qubits * 2)
opt = qml.GradientDescentOptimizer(stepsize=0.4)
for n in range(50):
q_params, prev_energy = opt.step_and_cost(cost_fn, q_params)
print(f"--- Step: {n}, Energy: {cost_fn(q_params):.8f}")
energy.append(cost_fn(q_params))
metrics = np.mean(energy)
report = {'energy': metrics}
print(metrics)
return report
if __name__ == '__main__':
# with open('data/chemistry_dataset', 'rb') as json_data:
# data = pickle.load(json_data)
net = [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 4, 3, 4, 3, 1, 0, 1, 2, 3, 4, 5]
design = translator(net)
report = chemistry(design)
| repo_name: katiexu/QC_Contest_test | sub_path: chemistryOH.py | file_name: chemistryOH.py | file_ext: py | file_size_in_byte: 1,791 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 9506994070 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import lxml
import lxml.html.clean
import requests
import wikipedia as wp
from transliterate import translit
def get_html_from_text(raw_html):
# clean_args = {
# "javascript": True, # strip javascript
# "page_structure": False, # leave page structure alone
# "style": True # remove CSS styling
# }
# clean_html = lxml.html.clean.Cleaner(**clean_args).clean_html(raw_html)
html = lxml.html.fromstring(raw_html)
return html
def get_element_by_selector(b, selector):
select_result = list(b.cssselect(selector))
if len(select_result) == 0:
return None
return select_result[0]
def get_info_from_block(b):
d = dict()
p = get_element_by_selector(b, 'p[itemprop="description"]')
d['short'] = '\n'.join(t for t in p.itertext()) if p is not None else None
# short = '\n'.join(t for t in p.itertext())
label_select = get_element_by_selector(b, 'h3 > a')
d['label'] = label_select.text if label_select is not None else None
lat_select = get_element_by_selector(b, 'meta[itemprop="latitude"]')
d['lat'] = lat_select.attrib['content'] if lat_select is not None else None
long_select = get_element_by_selector(b, 'meta[itemprop="longitude"]')
d['long'] = long_select.attrib['content'] if long_select is not None else None
return d
# print(d)
def get_infos():
response = requests.get('https://autotravel.ru/excite.php/1055/1')
raw_html = response.text
# BLOCK_SELECTOR = 'div[itemtype="http://schema.org/Place"]'
BLOCK_SELECTOR = 'div[class="col-md-12 col-xs-12"] > div'
html = get_html_from_text(raw_html)
blocks = html.cssselect(BLOCK_SELECTOR)
infos = list(filter(lambda d: d['long'] is not None and d['lat'] is not None, map(get_info_from_block, blocks)))
return infos
def check_label(d, key):
return type(d[key]) == str and d[key].find('\n') == -1
def search(query, lang):
wp.set_lang(lang)
wp_search = wp.search(query)
if len(wp_search) == 0:
return None
return wp_search[0]
def get_page(d):
label = d['label']
ru_label = d['label_ru']
exception = wp.exceptions.WikipediaException
def try_different(suffix=''):
try:
p = wp.page(ru_label + suffix)
except exception:
try:
p = wp.page(label + suffix)
except exception:
p = None
return p
p = try_different()
if p is None:
p = try_different(' (Санкт-Петербург)')
return p
class ExtractorError(RuntimeError):
def __init__(self, message):
super(ExtractorError, self).__init__(message)
self.message = message
OUTPUT_DIRECTORY = "sights/"
if __name__ == '__main__':
infos = get_infos()
bad_records = []
for i, d in enumerate(infos):
label = d['label']
try:
# en_search = search(label, 'en')
# if en_search is None:
en_search = translit(label, reversed=True)
d['label_en'] = en_search
assert check_label(d, 'label_en')
ru_search = search(label, 'ru')
if ru_search is None:
raise ExtractorError('ru_search')
d['label_ru'] = ru_search
assert check_label(d, 'label_ru')
p = get_page(d)
if p is None:
raise ExtractorError('get_page')
if d['short'] is None:
print(i, label, "does not have a short description from the site")
d['short'] = p.summary
d['long_description'] = p.summary
try:
d['lat'] = float(p.coordinates[0])
d['long'] = float(p.coordinates[1])
except KeyError:
pass
d['url'] = p.url
d['name'] = ''.join(filter(lambda c: c.isalnum() or c == '_', d['label_en'].replace(' ', '_')))
if d['name'].startswith('List'):
print(d)
f = open(OUTPUT_DIRECTORY + d['name'] + '.sight', 'w')
f.write('\n'.join([
d['label'],
d['label_ru'],
d['label_en'],
str(d['lat']) + ' ' + str(d['long']),
d['url'],
'===',
d['short'],
'===',
d['long_description'],
]))
f.close()
except ExtractorError as e:
print(i, label, e.message)
bad_records.append(d)
f = open('bad_records.txt', 'w')
f.write('\n'.join([str(record) for record in bad_records]))
f.close()
| repo_name: OSLL/adfmp18-PiterSights | sub_path: crawler/site_extractor.py | file_name: site_extractor.py | file_ext: py | file_size_in_byte: 4,706 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 70497736189 |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 20:15:51 2021
@author: blgnm
"""
import random

import george
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pywt
import sklearn
from scipy.optimize import minimize
from scipy.optimize import curve_fit
from astropy.stats import biweight_location
from imblearn.over_sampling import SMOTE, BorderlineSMOTE
from imblearn.pipeline import Pipeline as imbpipeline
from imblearn.ensemble import BalancedRandomForestClassifier
from sklearn import datasets, metrics, model_selection, svm
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.model_selection import (cross_val_score, cross_validate, cross_val_predict,
StratifiedKFold, RepeatedStratifiedKFold, GridSearchCV)
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.preprocessing import MinMaxScaler
from antares_client.search import search, get_by_ztf_object_id
from antares_client._api.models import Locus
from tqdm import tqdm
class ZTFData:
def __init__(self,ZTF_id = None):
self.ZTF_id = ZTF_id
def get_id(self):
return self.ZTF_id
def get_raw_data(self):
for i in self.ZTF_id:
locus = get_by_ztf_object_id(i)
return locus.lightcurve
def get_lightcurve(self):
"""
Returns
-------
Light Curve of all ztf objects stored within ZTFData()
"""
for i in self.ZTF_id:
plt.figure(num=None, figsize=(12, 5), dpi=100)
locus = get_by_ztf_object_id(i)
lc = locus.lightcurve
lc['alert_type'] = lc['alert_id'].apply(lambda x: x[:x.index(':')])
for (pb, at), df in lc.groupby(['ant_passband', 'alert_type']):
is_candidate = at == 'ztf_candidate'
plt.errorbar(
x=df['ant_mjd'],
y=df['ant_mag'] if is_candidate else df['ant_maglim'],
yerr=df['ant_magerr'],
# uplims=(at!='ztf_candidate'),
label=pb + ' ' + at[4:],
color=pb,
fmt=('o' if is_candidate else '^') + pb.lower(),
alpha=1 if is_candidate else 0.3)
plt.plot(df['ant_mjd'], df['ant_mag'])
plt.title(str(i))
plt.xlabel('MJD')
plt.ylabel('Magnitude')
plt.legend()
plt.gca().invert_yaxis()
plt.show()
def Data(self, labels = None):
"""
Parameters
----------
labels : Pandas Series, optional
Classification labels, if applicable (I don't remember if that part works tbh). The default is None.
Returns
-------
Pandas data frame of processed event information for all provided ZTF ID's.
"""
Supernovadf = pd.DataFrame(columns = ["mjd", "band", "magnitude", "error", "event","class"])
#plt.title(locus.properties['ztf_object_id'])
loop = tqdm(total = len(self.ZTF_id), position =0, leave = False)
for i in self.ZTF_id:
locus = get_by_ztf_object_id(i)
try:
Data = locus.lightcurve
except Exception:
pass
Data_frame1 = pd.DataFrame.from_dict(Data[Data['ant_passband']=='g'])
Data_frame2 = pd.DataFrame.from_dict(Data[Data['ant_passband']=='R'])
Data_frame1['ant_mag'] = Data_frame1['ant_mag'].replace(np.nan, 0)
Data_frame2['ant_mag'] = Data_frame2['ant_mag'].replace(np.nan, 0)
Data_frame1 = Data_frame1[Data_frame1.ant_mag > 0]
Data_frame2 = Data_frame2[Data_frame2.ant_mag > 0]
MJD1 = Data_frame1['ant_mjd']
MJD2 = Data_frame2['ant_mjd']
MagnitudeG = Data_frame1['ant_mag']
MagnitudeR = Data_frame2['ant_mag']
MJD1 = MJD1 - (MJD1.min() - 1)
MJD2 = MJD2 - (MJD2.min() - 1)
GBand = pd.DataFrame(columns = ["mjd", "band", "magnitude", "error", "event"])
GBand["mjd"] = Data_frame1["ant_mjd"]
GBand["band"] = pd.Series(np.zeros([len(MagnitudeG)]))
GBand["magnitude"] = MagnitudeG
GBand['band'] = GBand['band'].replace(np.nan, 0)
GBand['error'] = Data_frame1["ant_magerr"]
RBand = pd.DataFrame(columns = ["mjd", "band", "magnitude", "error", "event"])
RBand["mjd"] = Data_frame2["ant_mjd"]
RBand["band"] = pd.Series(np.ones([len(MagnitudeR)]))
RBand["magnitude"] = MagnitudeR
RBand['band'] = RBand['band'].replace(np.nan, 1)
RBand['error'] = Data_frame2['ant_magerr']
num = np.zeros(len(RBand))
num1 = np.zeros(len(GBand))
GBand['event'] = num1
RBand['event'] = num
GBand['class'] = num1
RBand['class'] = num
GBand['event'] = GBand['event'].replace([0], [str(i)])
RBand['event'] = RBand['event'].replace([0], [str(i)])
Band = pd.concat([GBand, RBand], axis = 0, ).reset_index(drop=True)
Supernovadf = pd.concat([Supernovadf, Band], axis = 0).reset_index(drop=True)
loop.set_description("Fetching Data...".format(len(i)))
loop.update(1)
loop.close()
return(Supernovadf)
def GaussianRegression(self, data = None, classification = True, DateRange = None, n_samples = 100):
"""
Parameters
----------
data : Pandas Data Frame, optional
Pandas Data Frame with info from Data(); if None, it will pull data based off stored ZTF ID's.
The default is None.
classification : Boolean, optional
Set to True when building a training set (I always keep it True personally, not sure if it works otherwise).
The default is True.
DateRange : Integer, optional
How many days you want the classifier to look at. The default is None.
n_samples : Integer, optional
The number of samples GP Regression takes from the data. The default is 100.
Returns
-------
Pandas Data Frame
Pandas Data Frame of GP Regression Data.
"""
def Multi_Band_GP(x_range, x, y, y_err, dim, n_samples = False, sampling = False):
""" Considers cross corrolations between multiple bands as dims, prone to holding the order of the bands too rigidly """
""" Will optimize for 'best' parameters when given no parameters """
""" x = mjd, y and y_err = measurment, dim and dim_err = wavelength in nm """
length_scale = 20
signal_to_noises = (np.abs(y) / np.sqrt(np.power(y_err,2) + (1e-2 * np.max(y))**2))
scale = np.abs(y[signal_to_noises.argmax()])
kernel = ((0.5 * scale)**2 * george.kernels.Matern32Kernel([length_scale**2, 6000**2], ndim=2))
kernel.freeze_parameter('k2:metric:log_M_1_1')
kernel.freeze_parameter('k1:log_constant') #Fixed Scale
x_data = np.vstack([x, dim]).T
gp = george.GP(kernel, mean = biweight_location(y))
guess_parameters = gp.get_parameter_vector()
gp.compute(x_data, y_err)
x_pred = np.linspace(x.min(), x.max(), n_samples)
x_pred = np.vstack([x, dim]).T
pred, pred_var = gp.predict(y, x_pred, return_var=True)
# bounds = [(0, np.log(1000**2))]
def neg_ln_like(p):
gp.set_parameter_vector(p)
return -gp.log_likelihood(y)
def grad_neg_ln_like(p):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(y)
result = minimize(
neg_ln_like,
gp.get_parameter_vector(),
jac=grad_neg_ln_like,
# bounds=bounds
)
if result.success:
gp.set_parameter_vector(result.x)
else:
gp.set_parameter_vector(guess_parameters)
gp.set_parameter_vector(result.x)
# print(kernel.get_parameter_vector(True))
#print("\nFinal ln-likelihood: {0:.2f}".format(gp.log_likelihood(y)))
if n_samples != False:
x_pred = np.vstack([np.array(list(np.linspace(x_range.min(), x_range.max(), n_samples))*np.unique(dim).size),
np.array(np.sort(list(np.unique(dim))*n_samples))]).T
# x_pred = np.vstack([np.array(list(np.linspace(x_range.min(), x_range.max(), n_samples))*6),
# np.array(np.sort([357, 476, 621, 754, 870, 1004]*n_samples))]).T
pred, pred_var = gp.predict(y, x_pred, return_var=True)
output = [x_pred[:,0], pred, np.sqrt(pred_var), x_pred[:,1], []]
return output
elif sampling != False:
x_pred = np.vstack([np.array(sampling[0]),
np.array(sampling[1])]).T
pred, pred_var = gp.predict(y, x_pred, return_var=True)
output = [x_pred[:,0], pred, np.sqrt(pred_var), x_pred[:,1], []]
return output
def band_to_color(inp):
labels = [357, 476, 621, 754, 870, 1004]
# labels = [0,1,2,3,4,5]
labels_2=['green', 'red', 'goldenrod', 'blue', 'pink', 'grey']
outlst = []
for x in inp:
out = labels.index(int(x))
out = labels_2[out]
outlst.append(out)
return outlst
def band_to_wvlg(inp):
labels = [0,1,2,3,4,5]
labels_2=[357.0, 476.7, 621.5, 754.5, 870.75, 1004.0]
outlst = []
for x in inp:
out = labels.index(int(x))
out = labels_2[out]
outlst.append(out)
return outlst
def expfun(x, a, b):
return np.multiply(np.exp(np.multiply(x, b)), a)
def randomoff(inp, off = 0.25):
outlist = []
for i in inp:
value = random.random()
outlist += [i+value*off]
return outlist
def Spectra_Model():
return 0
if data is None:
sf = self.Data()
else:
sf = data
Gaus = pd.DataFrame()
pd.options.mode.chained_assignment = None # default='warn'
SN_uqlst = sf.event.unique()
loop = tqdm(total = len(SN_uqlst), position =0, leave = False)
for i in SN_uqlst:
SNdf = sf[sf['event']==i]
SNdf['mjd'] = SNdf['mjd'] - (SNdf['mjd'].min() -1)
if DateRange is not None:
SNdf = SNdf[SNdf['mjd'] < DateRange]
b = SNdf['band'].unique() == np.array([0.0, 1.0])
if b[0] != True or b[1] != True:
continue
mjdrange = np.asarray([min(SNdf['mjd'].tolist()),max(SNdf['mjd'].tolist())])
D = Multi_Band_GP(x_range = mjdrange, x=SNdf['mjd'].to_numpy(),
y=SNdf['magnitude'].to_numpy(), y_err=SNdf['error'].to_numpy(),
dim=band_to_wvlg(SNdf['band'].to_numpy()),
n_samples= n_samples)
GaussianFitted = pd.DataFrame()
GaussianFitted['mjd'] = D[0]
GaussianFitted['magnitude'] = D[1]
GaussianFitted['error'] = D[2]
GaussianFitted['band'] = D[3]
y = pd.Series(data=np.zeros(1000)).astype(int)
y = y.replace(0,i)
GaussianFitted['event'] = y
if classification == True:
x = pd.Series(data = np.zeros(1000)).astype(int)
x = x.replace(0, SNdf['class'].unique()[0])
GaussianFitted['class'] = x
Gaus = pd.concat([Gaus, GaussianFitted])
loop.set_description("Computing GPR...".format(len(i)))
loop.update(1)
loop.close()
return Gaus
def Gpr_Graph(self, DateRange = None ,n_samples = 100):
def band_to_color(inp):
labels = [357, 476, 621, 754, 870, 1004]
# labels = [0,1,2,3,4,5]
labels_2=['green', 'red', 'goldenrod', 'blue', 'pink', 'grey']
outlst = []
for x in inp:
out = labels.index(int(x))
out = labels_2[out]
outlst.append(out)
return outlst
Gaussian = self.GaussianRegression(DateRange = DateRange,n_samples = n_samples)
for i in Gaussian['event'].unique():
plt.errorbar(Gaussian[Gaussian['event']==i]['mjd'],Gaussian[Gaussian['event']==i]['magnitude'],Gaussian[Gaussian['event']==i]['error'], c=band_to_color(Gaussian[Gaussian['event']==i]['band']), alpha = 0.2, ls = 'None')
#plt.errorbar(x = SNdf['mjd'].to_numpy(), y = SNdf['magnitude'].to_numpy(), yerr = SNdf['error'].to_numpy(), ls = 'None')
plt.xlabel("MJD")
plt.ylabel("Magnitude")
plt.title(i)
plt.gca().invert_yaxis()
plt.show()
plt.close()
def Wavelet(self, Data = None, WaveletType = 'sym2', classification = True, Date = None, length = 150):
"""
Parameters
----------
Note: This version processes both bands separately, see Wavelet2() for multiband processing
Data : Pandas Data Frame, optional
Pandas DataFrame processed by Data(). The default is None.
WaveletType : Str, optional
Type of Wavelet transformation to be used. The default is 'sym2'.
classification : Boolean, optional
Set to True when building a training set (I always keep it True personally, not sure if it works otherwise).
The default is True.
Date : Integer, optional
How many days you want the classifier to look at. The default is None.
length : Integer, optional
Set maximum event length; all events longer than set length are filtered out. The default is 150.
Returns
-------
Function_Parameters : Pandas DataFrame
Event Information such as ZTF ID and classification.
Coefficients : Numpy Array
List of Wavelet transformation Coefficients.
"""
from tqdm import tqdm
Function_Parameters = pd.DataFrame()
Coefficients = list()
if Data is None:
Data = self.Data()
Gaussian = self.GaussianRegression(data = Data, DateRange = Date)
Data_uqlst = Data['event'].unique()
loop = tqdm(total = len(Data['event'].unique()), position =0, leave = False)
for i in Data_uqlst:
b = Data[(Data['event']==i)]['band'].unique() == np.array([0.0, 1.0])
if b[0] != True or b[1] != True:
continue
if max(Data[Data['event']==i]['mjd'])-min(Data[Data['event']==i]['mjd']) > length:
#print(len(Data[Data['event']==i]['mjd']))
continue
else:
GaussianFitted = Gaussian[Gaussian['event']==i]
Uqe_Bands = GaussianFitted['band'].unique()
for UBand in Uqe_Bands:
if classification == True:
Class = GaussianFitted[GaussianFitted['band']==UBand]['class']
x = GaussianFitted[GaussianFitted['band']==UBand]['mjd'].astype(float)
y = GaussianFitted[GaussianFitted['band']==UBand]['magnitude'].astype(float)
y_err = GaussianFitted[GaussianFitted['band']==UBand]['error'].astype(float)
signal = y.values.squeeze()
ca = np.array(pywt.swt(np.array(signal), WaveletType, level = 2, axis = 0))
npoints=len(ca[0, 0, :])
coefficients =ca.reshape(2*2, npoints)
Features = pd.DataFrame(data = {'band': [UBand], 'event': str(i),
'delta':y.values.max()-y.values.min(), 'variance':y.var(),
'duration': max(Data[Data['event']==i]['mjd'])-min(Data[Data['event']==i]['mjd'])})
if classification == True:
Features['class'] = Class.unique()[0]
Coefficients.append(coefficients.flatten())
Function_Parameters = pd.concat([Function_Parameters, Features], axis =0 )
Function_Parameters = Function_Parameters.reset_index(drop=True)
loop.set_description("Computing Wavelet Transform...".format(len(i)))
loop.update(1)
loop.close()
Function_Parameters['class'] = Function_Parameters['class'].replace(['SN Ia', 'SN II', 'SN Ib/c', 'SLSN'], [0,1,2,3])
#Function_Parameters['class'] = Function_Parameters['class'].replace(['SN Ia', 'SN II', 'SN IIn', 'SN IIP', 'SN Ia-91T', 'SLSN-I', 'SLSN-II', 'SN Ic', 'SN Ib', 'SN Ic-BL', 'SN IIb', 'SN Ia-pec', 'SN Ibn', 'SN Ia-91bg'], [0,1,2,3,4,5,6,7,8,9, 10,11,12,13])
return Function_Parameters, Coefficients
def Wavelet2(self, Data = None, WaveletType = 'sym2', classification = True, Date = None, length = 150):
"""
Parameters
----------
Note: This version processes both bands together, see Wavelet() for separate band processing
Data : Pandas Data Frame, optional
Pandas DataFrame processed by Data(). The default is None.
WaveletType : Str, optional
Type of Wavelet transformation to be used. The default is 'sym2'.
classification : Boolean, optional
Set to True when building a training set (I always keep it True personally, not sure if it works otherwise).
The default is True.
Date : Integer, optional
How many days you want the classifier to look at. The default is None.
length : Integer, optional
Set maximum event length; all events longer than set length are filtered out. The default is 150.
Returns
-------
Function_Parameters : Pandas DataFrame
Event Information such as ZTF ID and classification.
Coefficients : Numpy Array
List of Wavelet transformation Coefficients.
"""
from tqdm import tqdm
Function_Parameters = pd.DataFrame()
Coefficients = list()
if Data is None:
Data = self.Data()
Gaussian = self.GaussianRegression(data = Data, DateRange = Date)
Data_uqlst = Data['event'].unique()
loop = tqdm(total = len(Data['event'].unique()), position =0, leave = False)
for i in Data_uqlst:
b = Data[(Data['event']==i)]['band'].unique() == np.array([0.0, 1.0])
if b[0] != True or b[1] != True:
continue
if max(Data[Data['event']==i]['mjd'])-min(Data[Data['event']==i]['mjd']) > length:
#print(len(Data[Data['event']==i]['mjd']))
continue
GaussianFitted = Gaussian[Gaussian['event']==i]
if classification == True:
Class = GaussianFitted['class']
x = GaussianFitted['mjd'].astype(float)
y = GaussianFitted['magnitude'].astype(float)
y_err = GaussianFitted['error'].astype(float)
signal = y.values.squeeze()
if len(signal) == 0:
continue
from scipy import integrate
Area = integrate.simpson(y, x)
ca = np.array(pywt.swt(np.array(signal), WaveletType, level = 2, axis = 0))
npoints=len(ca[0, 0, :])
coefficients =ca.reshape(2*2, npoints)
Features = pd.DataFrame(data = {'event': str(i),
'delta':y.values.max()-y.values.min(), 'variance':y.var(),
'duration': max(Data[Data['event']==i]['mjd'])-min(Data[Data['event']==i]['mjd']),
'area':Area}, index=[0])
if classification == True:
Features['class'] = Class.unique()[0]
Coefficients.append(coefficients.flatten())
Function_Parameters = pd.concat([Function_Parameters, Features], axis =0 )
Function_Parameters = Function_Parameters.reset_index(drop=True)
loop.set_description("Computing Wavelet Transform...".format(len(i)))
loop.update(1)
loop.close()
Function_Parameters['class'] = Function_Parameters['class'].replace(['SN Ia', 'SN II', 'SN Ib/c', 'SLSN'], [0,1,2,3])
#Function_Parameters['class'] = Function_Parameters['class'].replace(['SN Ia', 'SN II', 'SN IIn', 'SN IIP', 'SN Ia-91T', 'SLSN-I', 'SLSN-II', 'SN Ic', 'SN Ib', 'SN Ic-BL', 'SN IIb', 'SN Ia-pec', 'SN Ibn', 'SN Ia-91bg'], [0,1,2,3,4,5,6,7,8,9, 10,11,12,13])
return Function_Parameters, Coefficients
def DimensionalityReduction2(self, Coefficients =None, labels=None, smot = False, n = 20, Trainset = True):
"""
Parameters
----------
Use this version if you used Wavelet2() (Multiband processing)
Coefficients : Pandas Data Frame, optional
Provide your own wavelet coefficients. The default is None.
labels : Pandas Data Frame, optional
Provide your own labels. The default is None.
smot : Boolean, optional
Choose whether or not to use SMOTE. The default is False.
n : Integer, optional
Output Dimension. The default is 20.
Trainset : Boolean, optional
Specify if this is the training set or if its unlabeled data. The default is True.
Returns
-------
Pandas Data Frame
Pandas Data Frame of PCA reduced Wavelet Coefficients.
Function
If Trainset = True, returns PCA() to use on unlabeled data.
"""
if Coefficients is not None:
labels = labels
Coefficients = Coefficients
else:
labels, Coefficients = self.Wavelet()
Coefficients = pd.concat([pd.DataFrame(data=labels),pd.DataFrame(data=Coefficients)],axis=1)
Coeff = Coefficients.iloc[:,6:]
pca = PCA(n_components = n, whiten = True)
if smot == True:
sm = SMOTE()
Coeff, labels= sm.fit_resample(Coeff, Coefficients['class'].ravel())
print(Coeff)
final = pca.fit_transform((Coeff))
#RBand2, GBand2 = pd.DataFrame(data = {'Rdelta': RBand['delta'], 'Rvariance': RBand['variance']}), pd.DataFrame(data = {'Gdelta':GBand['delta'], 'Gvariance': GBand['variance']})
if smot == True:
events =pd.DataFrame(data = {'class': labels}).reset_index(drop=True)
if smot == False:
events =pd.DataFrame(data = {'event': Coefficients['event'], 'class': Coefficients['class']}).reset_index(drop=True)
if Trainset == True:
return pd.concat([events, pd.DataFrame(final)],axis=1), pca
if Trainset == False:
return pd.concat([events, pd.DataFrame(data = Coeff).reset_index(drop=True)],axis=1)
def DimensionalityReduction(self, Coefficients =None, labels=None, smot = False, n = 20, Trainset = True):
"""
Parameters
----------
Use this version if you used Wavelet() (One band at a time processing)
Coefficients : Pandas Data Frame, optional
Provide your own wavelet coefficients. The default is None.
labels : Pandas Data Frame, optional
Provide your own labels. The default is None.
smot : Boolean, optional
Choose whether or not to use SMOTE. The default is False.
n : Integer, optional
Output Dimension. The default is 20.
Trainset : Boolean, optional
Specify if this is the training set or if its unlabeled data. The default is True.
Returns
-------
Pandas Data Frame
Pandas Data Frame of PCA reduced Wavelet Coefficients.
Function
If Trainset = True, returns PCA() to use on unlabeled data.
"""
if Coefficients is not None:
labels = labels
Coefficients = Coefficients
else:
labels, Coefficients = self.Wavelet()
Coefficients = pd.concat([pd.DataFrame(data=labels),pd.DataFrame(data=Coefficients)],axis=1)
GBand, RBand = Coefficients[Coefficients['band']==357.0].reset_index(drop=True), Coefficients[Coefficients['band']==476.7].reset_index(drop=True)
print(RBand)
pca = PCA(n_components = n, whiten = True)
RBand = pd.concat([RBand.iloc[:,6:].reset_index(drop=True),GBand.iloc[:,6:].reset_index(drop=True)],axis=1, ignore_index=True)
if smot == True:
sm = SMOTE()
RBand, labels= sm.fit_resample(RBand, GBand['class'].ravel())
final = pca.fit_transform((RBand))
#RBand2, GBand2 = pd.DataFrame(data = {'Rdelta': RBand['delta'], 'Rvariance': RBand['variance']}), pd.DataFrame(data = {'Gdelta':GBand['delta'], 'Gvariance': GBand['variance']})
if smot == True:
events =pd.DataFrame(data = {'class': labels}).reset_index(drop=True)
if smot == False:
events =pd.DataFrame(data = {'event': GBand['event'], 'class': GBand['class']}).reset_index(drop=True)
if Trainset == True:
return pd.concat([events, pd.DataFrame(final)],axis=1), pca
if Trainset == False:
return pd.concat([events, pd.DataFrame(data = RBand).reset_index(drop=True)],axis=1)
def SupernovaTrainer(self, Train = None, y = None, evaluate = True, **kwargs):
"""
Parameters
----------
Trains for Supernova vs Bogus Classification
Train : Pandas Data Frame, optional
Provide your own wavelet coefficients. The default is None.
y : Pandas Data Frame, optional
Provide data labels. The default is None.
**kwargs : Classifier arguments
Input arguments for classifier.
Returns
-------
Function
Trained Classifier.
"""
if Train is None:
Data = self.DimensionalityReduction()
Train = Data.iloc[:,2:].reset_index(drop=True)
y = Data['class'].reset_index(drop=True)
svc = RandomForestClassifier(random_state=0, n_estimators = 30, min_samples_split = 6)
if kwargs:
classifier=AdaBoostClassifier(**kwargs)
else:
classifier=AdaBoostClassifier(base_estimator=svc,n_estimators=30, learning_rate =4)
if evaluate == True:
pipeline = imbpipeline(steps = [['classifier', classifier]])
stratified_kfold = StratifiedKFold(n_splits=3, shuffle=True)
print(cross_validate(pipeline, np.array(Train),np.array(y).ravel(),scoring = 'accuracy'))
y_pred = cross_val_predict(pipeline, np.array(Train),np.array(y), cv = stratified_kfold)
conf_mat = confusion_matrix(y, y_pred)
plot_confusion_matrix1(conf_mat, ['SN', 'Bogus'], cmap = 'Blues')
return classifier.fit(Train, y)
def SupernovaTypeClassifierTrainer(self, Train, y, evaluate = True , smot = True,
Ada = True, KNN = False,roc = True, Rand = False,
grid = False, n = 1, fold = 3,n_components = 20,
metric = 'accuracy', param_grid = None,**kwargs):
"""
Parameters
----------
Trains for Supernova Type Classification
Train : Pandas Data Frame, optional
Provide your own wavelet coefficients. The default is None.
y : Pandas Data Frame, optional
Provide data labels. The default is None.
evaluate : Boolean, optional
Choose whether or not to show model performance. The default is True.
Ada : Boolean, optional
Choose to use AdaBoost with Random Forest base estimators. The default is True.
KNN : Boolean, optional
Choose to use K-nearest neighbors. The default is False.
**kwargs : TYPE
DESCRIPTION.
Raises
------
Exception
If you set both KNN and Ada to false, raises an error.
Returns
-------
Function
Trained Classifier.
"""
if Train is not None:
TrainingData, u = pd.concat([pd.DataFrame(data=y),pd.DataFrame(data=Train)],axis=1).reset_index(drop=True), y
#else *** Remember to make this load in default training data
svc = RandomForestClassifier(n_estimators = 30, min_samples_split = 6)
TrainingData = TrainingData.sample(frac = 1).reset_index(drop=True)
if kwargs:
if Ada ==True:
classifier = AdaBoostClassifier(**kwargs)
if KNN == True:
classifier = KNeighborsClassifier(**kwargs)
if Rand == True:
#classifier = RandomForestClassifier(**kwargs)
classifier = BalancedRandomForestClassifier(**kwargs)
else:
classifier=AdaBoostClassifier(base_estimator=svc,n_estimators=30, learning_rate =2)
#classifier = KNeighborsClassifier(n_neighbors=1500)
if evaluate == True:
if smot == True:
pipeline = imbpipeline(steps = [['scale',MinMaxScaler()],['smote', SMOTE()],['classifier', classifier]])
if smot == False:
pipeline = imbpipeline(steps = [['scale',MinMaxScaler()],['classifier', classifier]])
stratified_kfold = StratifiedKFold(n_splits=fold, shuffle=True)
repeatstratified_kfold = RepeatedStratifiedKFold(n_splits=fold, n_repeats=n)
cross = cross_validate(pipeline, np.array(TrainingData.iloc[:,1:]),np.array(TrainingData.iloc[:,0]),scoring = metric, cv = repeatstratified_kfold, n_jobs = -1)
print(f'The mean {metric} over {fold} fold stratified crossvalidation repeated {n} times is {np.mean(cross["test_score"])}, with a standard deviation of {np.std(cross["test_score"])}')
y_pred = cross_val_predict(pipeline, np.array(TrainingData.iloc[:,1:]),np.array(TrainingData.iloc[:,0]), cv = stratified_kfold, n_jobs = -1)
#conf_mat = confusion_matrix(y, y_pred)
conf_mat = confusion_matrix(TrainingData.iloc[:,0], y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=conf_mat)
disp.plot(cmap = 'Blues')
if grid == True:
clf = GridSearchCV(pipeline, param_grid, n_jobs = -1, cv = stratified_kfold, scoring = 'f1_micro', verbose = 1)
clf.fit(TrainingData.iloc[:,1:], TrainingData.iloc[:,0])
plot_confusion_matrix1(conf_mat, ['Type 1a','Type 2', 'Type 1b/c', 'SLSN'], cmap = 'Blues')
Classifier = pipeline.fit(TrainingData.iloc[:,1:], TrainingData.iloc[:,0])
if grid == False:
return Classifier
if grid == True:
return clf
def plot_confusion_matrix1(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize=True):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citation
---------
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
import matplotlib.pyplot as plt
import numpy as np
import itertools
accuracy = np.trace(cm) / np.sum(cm).astype('float')
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="black" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="black" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
plt.show()
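# Hedged end-to-end sketch of how the class above could be driven (the ZTF IDs are
# placeholders and ANTARES access is assumed; not part of the original file):
# ztf = ZTFData(ZTF_id=["ZTF21aaaaaaa", "ZTF21aaaaaab"])
# raw = ztf.Data()                                              # fetch light curves
# features, coeffs = ztf.Wavelet2(Data=raw)                     # GP regression + wavelet features
# train, pca = ztf.DimensionalityReduction2(Coefficients=coeffs, labels=features)
# clf = ztf.SupernovaTypeClassifierTrainer(Train=train.iloc[:, 2:], y=train['class'])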
| repo_name: Feliconut/PurduePHYS324-LSST | sub_path: SupernovaClassification.py | file_name: SupernovaClassification.py | file_ext: py | file_size_in_byte: 36,879 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 7923689727 |
import os
from multiprocessing.dummy import Pool as ThreadPool
# Set your opensmile Extracter and path here
exe_opensmile = './opensmile/bin/SMILExtract.exe'
path_config = './opensmile/config/prosody/prosodyShs.conf'
# Set your data path and output path here
data_path = "./wav/"
save_path = './features/' # output folder
# Extractor set-ups
opensmile_options = '-C ' + path_config
def feature_extract(fn):
infilename = data_path + fn
outfilename = save_path + fn[:-4] + ".csv"
opensmile_call = exe_opensmile + ' ' + opensmile_options + \
' -I ' + infilename + ' -csvoutput ' + outfilename + \
' -start 60 -end 600'
print(opensmile_call)
os.system(opensmile_call)
for root, dirs, files in os.walk(data_path):
for name in files:
feature_extract(name)
# print(files)
# pool = ThreadPool()
# pool.map(feature_extract, files)
# pool.close()
# pool.join()
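# Hedged sketch of the threaded variant hinted at by the commented-out pool code above
# (worker count and usage are assumptions, not part of the original file):
# pool = ThreadPool(4)
# for root, dirs, files in os.walk(data_path):
#     pool.map(feature_extract, files)
# pool.close()
# pool.join()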
| repo_name: Saxon-Huang/EECS498_presentation_rating_system | sub_path: extractProsody.py | file_name: extractProsody.py | file_ext: py | file_size_in_byte: 927 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 27411122966 |
# original paper setting: -/77.21/87.85
# add char-cnn 200d: 57.30/77.64/87.75
# 1 layer highway body->subject<->comment: 56/76/86
# 1 layer unshared tanh body->subject<->comment: 57.51/77.18/86.41
import tensorflow as tf
# inpa,inpb (b,m,d) maska,maskb (b,m,1)
def ps_pb_interaction(ps, pb, ps_mask, pb_mask, keep_prob, scope):
"""
Question Condensing part
:param ps:
:param pb:
:param ps_mask:
:param pb_mask:
:param keep_prob:
:param scope:
:return:
"""
with tf.variable_scope(scope):
b, m, n, d = tf.shape(ps)[0], tf.shape(ps)[1], tf.shape(pb)[1], ps.get_shape().as_list()[2]
attn_mask = tf.expand_dims(ps_mask * tf.reshape(pb_mask, [b, 1, n]), -1) # (b,m,n,1) (b,1,n --->b,m,n--->b,m,n,1)
head = tf.tile(tf.expand_dims(ps, 2), [1, 1, n, 1]) # (b,m,1,d)
tail = tf.tile(tf.expand_dims(pb, 1), [1, m, 1, 1]) # (b,1,n,d)
parallel = head * tf.reduce_sum(head * tail, -1, True)/(tf.reduce_sum(head * head, -1, True)+1e-5) # Eq. (1)
orthogonal = tail - parallel # Eq. (2)
base = parallel if scope == 'parallel' else orthogonal
interaction = dense(base, d, scope='interaction') # (b,m,n,d)
logits = 10.0*tf.tanh((interaction)/10.0) + (1 - attn_mask) * (-1e30) # Eq. (3)
attn_score = tf.nn.softmax(logits, 2) * attn_mask # Eq. (4)
attn_result = tf.reduce_sum(attn_score * tail, 2) # (b,m,d) # Eq. (5)
fusion_gate = dense(tf.concat([ps, attn_result], -1), d, tf.sigmoid, scope='fusion_gate')*ps_mask # Eq. (6)
return (fusion_gate*ps + (1-fusion_gate)*attn_result) * ps_mask # Eq. (7)
# inpa,inpb (b,m,d) maska,maskb (b,m,1)
def pq_interaction(ps, qt, ps_mask, qt_mask, keep_prob, scope):
"""
Question-answer interaction part; returns Satt and Catt
:param ps: Srep
:param qt: Crep
:param ps_mask:
:param qt_mask:
:param keep_prob:
:param scope:
:return:
"""
with tf.variable_scope(scope):
b, m, n, d = tf.shape(ps)[0], tf.shape(ps)[1], tf.shape(qt)[1], ps.get_shape().as_list()[2]
attn_mask = tf.expand_dims(ps_mask*tf.reshape(qt_mask,[b,1,n]), -1) # (b,m,n,1)
head = tf.tile(tf.expand_dims(ps, 2), [1,1,n,1]) # (b,m,1,d)
tail = tf.tile(tf.expand_dims(qt, 1), [1,m,1,1]) # (b,1,n,d)
interaction = dense(tf.concat([head, tail], -1), d, scope='interaction') # (b,m,n,d) Eq. (9)
#interaction = tf.reduce_sum(head*tail, -1, True)
#interaction = tf.reduce_sum(dense(head, d, scope='interaction')*tail, -1, True)
logits = 5.0*tf.tanh((interaction)/5.0) + (1 - attn_mask) * (-1e30) # Eq. (10)
atta_score = tf.nn.softmax(logits, axis=2) * attn_mask # Eq. (11), attention weights
atta_result = tf.reduce_sum(atta_score * tail, axis=2) # (b,m,d) # Eq. (11) ---> S_ai
attb_score = tf.nn.softmax(logits, 1) * attn_mask # Eq. (12), attention weights
attb_result = tf.reduce_sum(attb_score * head, 1) # (b,n,d) Eq. (12) ---> C_ai
cata = tf.concat([ps, atta_result], -1) * ps_mask # S_att
catb = tf.concat([qt, attb_result], -1) * qt_mask # C_att
return cata, catb # Satt and Catt
def source2token(rep_tensor, rep_mask, keep_prob, scope):
"""
:param rep_tensor: Satt
:param rep_mask:
:param keep_prob:
:param scope:
:return:
"""
with tf.variable_scope(scope):
ivec = rep_tensor.get_shape().as_list()[2]
map1 = dense(rep_tensor, ivec, tf.nn.elu, keep_prob, 'map1')*rep_mask # (b,n,d) # Eq. (13)
map2 = dense(rep_tensor, ivec, tf.identity, keep_prob, 'map2')*rep_mask # (b,n,d)
map2_masked = map2 + (1-rep_mask) * (-1e30)
soft = tf.nn.softmax(map2_masked, 1)*rep_mask # bs,sl,vec
return tf.reduce_sum(soft * rep_tensor, 1) # bs, vec # Eq. (14)
def dense(inp, out_size, activation=tf.identity, keep_prob=1.0, scope=None, need_bias=True):
with tf.variable_scope(scope):
inp_shape = [inp.get_shape().as_list()[i] or tf.shape(inp)[i] for i in range(len(inp.get_shape().as_list()))] # static shape where known, dynamic otherwise
input = tf.nn.dropout(tf.reshape(inp, [-1, inp_shape[-1]]), keep_prob)
W = tf.get_variable('W', shape=[input.get_shape()[-1],out_size],dtype=tf.float32)
b = tf.get_variable('b', shape=[out_size], dtype=tf.float32, initializer=tf.zeros_initializer()) if need_bias else 0
return activation(tf.reshape(tf.matmul(input, W) + b, inp_shape[:-1] + [out_size]))
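# Hedged shape sketch (assumed TF1-style usage; tensor names are placeholders, not from the original file):
# ps : (batch, m, d) subject tokens, pb/qt : (batch, n, d) body/comment tokens,
# ps_mask : (batch, m, 1) and pb_mask/qt_mask : (batch, n, 1) 0/1 float masks.
# fused = ps_pb_interaction(ps, pb, ps_mask, pb_mask, keep_prob=0.8, scope='parallel')
# s_att, c_att = pq_interaction(fused, qt, ps_mask, qt_mask, keep_prob=0.8, scope='pq')
# s_rep = source2token(s_att, ps_mask, keep_prob=0.8, scope='s2t')   # (batch, 2d)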
| repo_name: helloworld729/QCN | sub_path: utils.py | file_name: utils.py | file_ext: py | file_size_in_byte: 5,047 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 44092813645 |
from gym_compete_rllib import create_env
from ray.tune.registry import ENV_CREATOR, _global_registry
def test_create_env():
env_creator = _global_registry.get(ENV_CREATOR, "multicomp")
env_config = {'with_video': False,
"SingleAgentToMultiAgent": False,
"env_name": "multicomp/YouShallNotPassHumans-v0"}
env = env_creator(env_config)
assert env.n_policies == 2
assert env.observation_space.shape == (380,)
assert env.action_space.shape == (17,)
assert env.player_names == ['player_1', 'player_2']
def episode(env):
obs = env.reset()
def check_obs(obs, error_on_empty=True):
assert isinstance(obs, dict)
if error_on_empty:
assert set(obs.keys()) == set(env.player_names), f"{obs.keys()} {env.player_names}"
assert all([o.shape == env.observation_space.shape for o in obs.values()])
check_obs(obs)
while True:
actions = {p: env.action_space.sample() for p in env.player_names}
obs, reward, done, info = env.step(actions)
check_obs(obs, error_on_empty=False)
if done['__all__']:
break
for _ in range(10):
episode(env)
if __name__ == '__main__':
test_create_env()
| repo_name: HumanCompatibleAI/better-adversarial-defenses | sub_path: gym_compete_rllib/test_load_rllib_env.py | file_name: test_load_rllib_env.py | file_ext: py | file_size_in_byte: 1,332 | program_lang: python | lang: en | doc_type: code | stars: 11 | dataset: github-code | pt: 6 |

| seq_id: 580269488 |
import json, requests, pytest
from pydantic import BaseModel, field_validator
from unittest.mock import Mock, MagicMock
class Location:
def __init__(self, longitudecls, latitudecls):
self._longitude = longitudecls
self._latitude = latitudecls
def get_weather(self):
weather_data = requests.get(
f'https://fcc-weather-api.glitch.me/api/current?lat={self._latitude}&lon={self._longitude}')
data = json.loads(weather_data.text)
try:
if weather_data.status_code != 200:
raise Exception(f'Error request, status code : {weather_data.status_code}')
try:
feels_like_temp = data['main'].get('feels_like', 0)
except (KeyError, TypeError):
feels_like_temp = 0
dict_for_weather = {
'temperature': {
'temp': data['main']['temp'],
'feels_like': feels_like_temp,
'temp_min': data['main']['temp_min'],
'temp_max': data['main']['temp_max']
},
'pressure': data['main']['pressure'],
'description': data['weather'][0]['description'],
'name': data['name']
}
except Exception as e:
return None
return WeatherPydantic(**dict_for_weather)
class TemperaturePydantic(BaseModel):
temp: float
feels_like: float
temp_min: float
temp_max: float
@field_validator('temp')
def validate_temp(cls, temp: int):
return round(temp * 1.8 + 32, 3)
class WeatherPydantic(BaseModel):
temperature: TemperaturePydantic
pressure: float
description: str
name: str
# longitude = int(input('Please enter longitude: '))
# latitude = int(input('Please enter latitude: '))
loc = Location(50, 38)
weather = loc.get_weather()
print(weather)
def test_get_weather(mocker):
mocker.patch.object(
requests,
'get',
return_value=Mock(
status_code=200,
text=json.dumps(
{
"coord": {
"lon": 50,
"lat": 28
}, "weather": [
{
"id": 800,
"main": "Clear",
"description": "clear sky",
}
],
"base": "stations",
"main": {
"temp": 33.27,
"feels_like": 0.0,
"temp_min": 33.27,
"temp_max": 33.27,
"pressure": 1001,
"humidity": 72,
"sea_level": 1001,
"grnd_level": 1001
},
"visibility": 10000,
"wind": {
"speed": 3.25,
"deg": 258,
"gust": 3.6
},
"clouds": {
"all": 1
},
"dt": 1691623796,
"sys": {
"country": "SA",
"sunrise": 1691633230,
"sunset": 1691681029},
"timezone": 12600,
"id": 109435,
"name": "Jubail",
"cod": 200
}
)
)
)
test_loc = Location(50, 28)
actual = test_loc.get_weather()
expected = WeatherPydantic(
temperature=TemperaturePydantic.model_construct(
temp=round(33.27*1.8 + 32, 3),
feels_like=0.00,
temp_min=33.27,
temp_max=33.27
),
pressure=1001,
description='clear sky',
name='Jubail',
)
assert actual == expected
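# Note (workflow assumption, not part of the original file): the `mocker` fixture used by
# test_get_weather is provided by the pytest-mock plugin, so the test would typically be run as:
#   pip install pytest pytest-mock
#   pytest weather_HW.py::test_get_weather -q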
| repo_name: MrDumper/Roma | sub_path: weather_HW.py | file_name: weather_HW.py | file_ext: py | file_size_in_byte: 4,128 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 24126442093 |
import gc
import sys
import wx
from weakref import ref
from testutil import check_collected
foo = 0
success = 0
def test_callafter_leak():
def func():
global foo
foo = 42
wr = ref(func)
wx.CallAfter(func)
del func
# make sure that func runs
wx.GetApp().Yield()
assert wr() is None, gc.get_referrers(gc.get_referrers(wr())[0])
assert foo == 42
global success
success = success + 1
def main():
a = wx.PySimpleApp()
N = 100
for x in xrange(N):
wx.CallAfter(test_callafter_leak)
wx.CallAfter(gc.collect)
for x in xrange(N):
wx.CallAfter(test_callafter_leak)
wx.CallAfter(a.ExitMainLoop)
a.MainLoop()
global success
assert success == N*2
if __name__ == '__main__':
main()
| repo_name: ifwe/wxpy | sub_path: src/tests/test_callafter.py | file_name: test_callafter.py | file_ext: py | file_size_in_byte: 804 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 855722734 |
#!/usr/bin/env python
# Demonstrate how to use the vtkBoxWidget to control volume rendering
# within the interior of the widget.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Load a volume, use the widget to control what's volume
# rendered. Basically the idea is that the vtkBoxWidget provides a box
# which clips the volume rendering.
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.GetOutput().SetOrigin(0.0, 0.0, 0.0)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT+ "/Data/headsq/quarter")
v16.SetImageRange(1, 93)
v16.SetDataSpacing(3.2, 3.2, 1.5)
tfun = vtk.vtkPiecewiseFunction()
tfun.AddPoint(70.0, 0.0)
tfun.AddPoint(599.0, 0)
tfun.AddPoint(600.0, 0)
tfun.AddPoint(1195.0, 0)
tfun.AddPoint(1200, .2)
tfun.AddPoint(1300, .3)
tfun.AddPoint(2000, .3)
tfun.AddPoint(4095.0, 1.0)
ctfun = vtk.vtkColorTransferFunction()
ctfun.AddRGBPoint(0.0, 0.5, 0.0, 0.0)
ctfun.AddRGBPoint(600.0, 1.0, 0.5, 0.5)
ctfun.AddRGBPoint(1280.0, 0.9, 0.2, 0.3)
ctfun.AddRGBPoint(1960.0, 0.81, 0.27, 0.1)
ctfun.AddRGBPoint(4095.0, 0.5, 0.5, 0.5)
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetInputConnection(v16.GetOutputPort())
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(ctfun)
volumeProperty.SetScalarOpacity(tfun)
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
newvol = vtk.vtkVolume()
newvol.SetMapper(volumeMapper)
newvol.SetProperty(volumeProperty)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(v16.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# The SetInteractor method is how 3D widgets are associated with the
# render window interactor. Internally, SetInteractor sets up a bunch
# of callbacks using the Command/Observer mechanism (AddObserver()).
boxWidget = vtk.vtkBoxWidget()
boxWidget.SetInteractor(iren)
boxWidget.SetPlaceFactor(1.0)
# Add the actors to the renderer, set the background and size
ren.AddActor(outlineActor)
ren.AddVolume(newvol)
ren.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
# When interaction starts, the requested frame rate is increased.
def StartInteraction(obj, event):
global renWin
renWin.SetDesiredUpdateRate(10)
# When interaction ends, the requested frame rate is decreased to
# normal levels. This causes a full resolution render to occur.
def EndInteraction(obj, event):
global renWin
renWin.SetDesiredUpdateRate(0.001)
# The implicit function vtkPlanes is used in conjunction with the
# volume ray cast mapper to limit which portion of the volume is
# volume rendered.
planes = vtk.vtkPlanes()
def ClipVolumeRender(obj, event):
global planes, volumeMapper
obj.GetPlanes(planes)
volumeMapper.SetClippingPlanes(planes)
# Place the interactor initially. The output of the reader is used to
# place the box widget.
boxWidget.SetInput(v16.GetOutput())
boxWidget.PlaceWidget()
boxWidget.InsideOutOn()
boxWidget.AddObserver("StartInteractionEvent", StartInteraction)
boxWidget.AddObserver("InteractionEvent", ClipVolumeRender)
boxWidget.AddObserver("EndInteractionEvent", EndInteraction)
outlineProperty = boxWidget.GetOutlineProperty()
outlineProperty.SetRepresentationToWireframe()
outlineProperty.SetAmbient(1.0)
outlineProperty.SetAmbientColor(1, 1, 1)
outlineProperty.SetLineWidth(3)
selectedOutlineProperty = boxWidget.GetSelectedOutlineProperty()
selectedOutlineProperty.SetRepresentationToWireframe()
selectedOutlineProperty.SetAmbient(1.0)
selectedOutlineProperty.SetAmbientColor(1, 0, 0)
selectedOutlineProperty.SetLineWidth(3)
iren.Initialize()
renWin.Render()
iren.Start()
| repo_name: VisTrails/VisTrails | sub_path: examples/vtk_examples/GUI/VolumeRenderWithBoxWidget.py | file_name: VolumeRenderWithBoxWidget.py | file_ext: py | file_size_in_byte: 4,067 | program_lang: python | lang: en | doc_type: code | stars: 100 | dataset: github-code | pt: 6 |

| seq_id: 25574251157 |
"""
Name:Hrishikesh Moholkar
file:wordData.py
This file is the main file for
all other working files
"""
from rit_lib import*
class YearCount(struct):
"""
slots for class:
year: integer representing particular year
count:an integer representing count for that year
return:none
"""
_slots=((int,"year"),(int,"count"))
class WordTrend(struct):
"""
slots for class:
word: string representing word
trend:an integer representing count for that year
return:none
"""
_slots=((str,"word"),(float,"trend"))
def readWordFile(filename):
"""
this function reads the file and returns dictionary
containing keys as words and values as list of yearcount objects.
:param filename: input file
:return:dictionary
"""
words=dict()
list1=[]
for line in open("data"+ '/' + filename):
if "," not in line:
word=line.strip()
words[word]=""
else:
x1= [append(line)]
if words.get(word)!= "":
words[word] = words.get(word) + x1;
else:
words[word] = x1;
return (words)
def append(value):
"""
this helper function returns yearcount object
and appends to list
:param value:
:return:yearcount object
"""
temp=[]
temp=value.split(",")
x=YearCount(int(temp[0]),int(temp[1]))
return x
def totalOccurrences(word,words):
"""
this function counts the total occurrences of
a word
:param word:desired word entered
:param words:dictionary containing keys
:return:total number of times that word has appeared in print
"""
count2=0
if word in words:
x=words[word]
for i in range(0,len(x)):
count1=x[i].count
count2=count2+count1
else:
print("no key")
return count2
if __name__ == "__main__" :
"""
standalone execution
"""
filename=input("enter word file:")
word=input("enter word:")
words=readWordFile(filename)
#print(words)
v=totalOccurrences(word,words)
print("total occurences of",word,":",v)
| repo_name: hnm6500/csci141---python | sub_path: hnm6500/wordData.py | file_name: wordData.py | file_ext: py | file_size_in_byte: 2,117 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 20825309063 |
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
def get_feature_matrix(N = 55):
#initialize the feature vector with zeros.
x_vec = np.zeros((N,3))
x = []
for i in range (N):
im = Image.open("images/image_{number}.jpg".format(number=i+1))
width, height = im.size
rgb_im = im.convert('RGB')
red = []
green = []
blue = []
for y in range (height):
for x in range (width):
pixel = rgb_im.getpixel((x,y))
red.append(pixel[0])
green.append(pixel[1])
blue.append(pixel[2])
x_vec[i] = np.mean(red), np.mean(green), np.mean(blue)
return x_vec;
def get_labels(N=55):
y = np.zeros((N,1))
#raise NotImplementedError()
for i in range (0,20):
y[i] = 1
for i in range (21, N):
y[i] = 0
return y
def sigmoid_func(z):
sigmoid = 1/(1+np.exp(-z))
return sigmoid
def gradient(X,y,w):
grad = []
grad = np.transpose(np.dot(np.transpose(X), -(y-sigmoid_func(np.dot(X, np.transpose(w))))))/len(X)
#raise NotImplementedError()
return grad
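# For reference (derivation note, not part of the original file): writing h = sigmoid(X w^T),
# the expression above computes the averaged logistic-loss gradient
#     grad = (1/N) * (h - y)^T X
# which matches w's 1xd shape; gradient descent then updates w <- w - step_size * grad.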
def logisticRegression_func(X,y,step_size, K):
N = X.shape[0]
d = X.shape[1]
# Initialize w as 1xd array.
w = np.zeros((1,d))
loss = float('inf')
loss_list = []
for i in range(K):
# logistic loss (cross-entropy) for the current weights
cost = (1/N)*(np.dot(np.transpose(-y), np.log(sigmoid_func(np.dot(X, np.transpose(w))))) - np.dot(np.transpose(1-y), np.log(1-sigmoid_func(np.dot(X, np.transpose(w))))))
# gradient descent update uses the gradient, not the stored loss values
w = np.subtract(w, step_size*gradient(X, y, w))
loss_list.append(cost[0])
#raise NotImplementedError()
return loss_list, w
""" Predict Output """
def predict_output(X,w):
y = []
value = sigmoid_func(np.dot(X, np.transpose(w)))
for i in range (len(value)):
if value[i] >= 0.5:
y.append(1)
else:
y.append(0)
return y
y = get_labels()
X = get_feature_matrix()
# Full Vector
# Let s label : Grass = 1 , Soil = 0, Tiles = 0
assert X.shape == (55,3)
#axes = Visualize_data(X,y)
step_size = 1e-5
num_iter = 3000
e_list, w_opt = logisticRegression_func(X,y,step_size,num_iter)
print ('The optimal weight vector is:', w_opt)
y_hat = predict_output(X,w_opt)
def visualize_error(X, y, step_sizes, best = None, num_iter = 2000):
plt.figure(figsize=(12, 4))
fig, axes = plt.subplots(1, 2,figsize=(12, 4))
for step in step_sizes:
loss_list, w_opt = logisticRegression_func(X, y, step, num_iter)
#raise NotImplementedError()
n = len(loss_list) # Size of list remains the same.
x_axes = np.linspace(0,n,n,endpoint=False)
axes[0].plot(x_axes, loss_list, label=step)
axes[0].set_xlabel('Number of Iterations')
axes[0].set_ylabel('Loss Function')
axes[0].legend()
axes[0].set_title(r'$\bf{Figure\ 4.}$Converge of GD')
for step in step_sizes:
### STUDENT TASK ###
# Plot Error against Step Size.
# Now mark the best converge in red. Use value from best as a correct step size.
loss_list, w_opt = logisticRegression_func(X, y, step, num_iter)
# YOUR CODE HERE
#raise NotImplementedError()
n = len(loss_list) # Size of list remains the same.
x_axes = np.linspace(0,n,n,endpoint=False)
if step == best:
axes[1].plot(x_axes, loss_list, label=step, color="red")
else:
axes[1].plot(x_axes, loss_list, label=step, color="blue")
axes[1].set_xlabel('Number of Iterations')
axes[1].set_ylabel('Loss Function')
axes[1].legend()
axes[1].set_title(r'$\bf{Figure\ 5.}$Converge of GD')
plt.tight_layout()
return best, axes
### STUDENT TASK ###
# Change best=None into step size from the list that provides the fastest converge. e.g best=1
res0_1, axes = visualize_error(X/255, y, best=None, step_sizes=[0.1,0.5,1,5,10,16])
# YOUR CODE HERE
#raise NotImplementedError()
| repo_name: laurivoipio/MLBP | sub_path: Round3 - Classification/ML3_2.py | file_name: ML3_2.py | file_ext: py | file_size_in_byte: 4,065 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

| seq_id: 15536903566 |
import pandas as pd
from sklearn import model_selection
import numpy as np
from sklearn import datasets
def create_k_folds():
# csv with image id, image location and image label.
df = pd.read_csv("train.csv")
# create a new column called kfold and fill it with -1
df["kfold"] = -1
# the next step is to randomize the rows of the data
df = df.sample(frac=1).reset_index(drop=True)
# initiate the kfold class from model_selection module
kf = model_selection.KFold(n_splits=5)
# fill the new kfold column
for fold, (trn_, val_) in enumerate(kf.split(X=df)):
df.loc[val_, "kfold"] = fold
# save the new csv with kfold column
df.to_csv("train_folds.csv", index=False)
def create_stratified_k_folds():
# csv with image id, image location and image label.
df = pd.read_csv("train.csv")
# create a new column called kfold and fill it with -1
df["kfold"] = -1
# the next step is to randomize the rows of the data
df = df.sample(frac=1).reset_index(drop=True)
# fetch targets
y = df.target.values
# initiate the kfold class from model_selection module
kf = model_selection.StratifiedKFold(n_splits=5)
# fill the new kfold column
for fold, (trn_, val_) in enumerate(kf.split(X=df, y=y)):
df.loc[val_, "kfold"] = fold
# save the new csv with kfold column
df.to_csv("train_folds.csv", index=False)
def create_stratified_k_fold_for_regression(data):
# create a new column called kfold and fill it with -1
data["kfold"] = -1
# the next step is to randomize the rows of the data
data = data.sample(frac=1).reset_index(drop=True)
    # calculate the number of bins by Sturges' rule
    # I take the floor of the value, you can also just round it.
    num_bins = int(np.floor(1 + np.log2(len(data))))
    # bin targets
data.loc[:, "bins"] = pd.cut(data["target"], bins=num_bins, labels=False)
# initiate the kfold class from model_selection module
kf = model_selection.StratifiedKFold(n_splits=5)
# fill the new kfold column note that, instead of targets, we use bins!
for f, (t_, v_) in enumerate(kf.split(X=data, y=data.bins.values)):
data.loc[v_, "kfold"] = f
# drop the bins column
data = data.drop("bins", axis=1)
# return data frame with folds
return data
if __name__ == '__main__':
# We create a sample dataset with 1500 samples and 100 features and 1 target
X, y = datasets.make_regression(n_samples=1500, n_features=100, n_targets=1)
# create a data frame out of our numpy arrays
df = pd.DataFrame(X, columns=[f"f_{i}" for i in range(X.shape[1])])
df.loc[:, "target"] = y
# create folds
df = create_stratified_k_fold_for_regression(data=df)
a = 0
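    # --- Hedged sketch (not in the original file): a quick per-fold sanity
    # check, printing row counts and target statistics for each stratified fold.
    for fold in sorted(df["kfold"].unique()):
        part = df[df["kfold"] == fold]
        print(f"fold {fold}: {len(part)} rows, target mean = {part['target'].mean():.3f}")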
|
Vuong02011996/Book-Approaching-any-machine-learning-problem
|
B_Cross validation/cross-validation.py
|
cross-validation.py
|
py
| 2,781 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71493783548
|
import numpy as np
def minimum_bbox(surface_points:np.ndarray):
'''
The minimum bounding box of a given point cloud
Parameters
----------
surface_points : np.ndarray(Ngamma,d)
The positions of points in a point cloud in 'd' dimensions
Returns
----------
domain : np.ndarray(d,2)
The lower and upper bounds along each dimension
'''
lower = np.min(surface_points,axis=0)
upper = np.max(surface_points,axis=0)
domain = np.stack((lower, upper),axis=1)
return domain
def enlarged_bbox(surface_points:np.ndarray, percent:float=10.):
'''
    Calculate an enlarged bounding box of a given point cloud
Parameters
----------
surface_points : np.ndarray(Ngamma,d)
The positions of points in a point cloud in 'd' dimensions
percent : float
The percentage to increase the bounding box from the minimum bounding box
Returns
----------
domain : np.ndarray(d,2)
The lower and upper bounds along each dimension
'''
lower = np.min(surface_points,axis=0)
upper = np.max(surface_points,axis=0)
border = percent/100*(upper-lower)/2
domain = np.stack((lower-border, upper+border),axis=1)
return domain
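# --- Hedged usage sketch (not part of the original module): compare the tight
# and 10%-enlarged bounding boxes of a random 2-D point cloud.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    pts = rng.uniform(-1.0, 1.0, size=(100, 2))
    print("minimum bbox:\n", minimum_bbox(pts))
    print("enlarged bbox:\n", enlarged_bbox(pts, percent=10.))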
|
LSDOlab/lsdo_genie
|
lsdo_genie/utils/bounding_boxes.py
|
bounding_boxes.py
|
py
| 1,233 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30823175530
|
from django.urls import path
from payment import views
app_name = 'payment'
urlpatterns = [
    # path() uses route converters, not regular expressions, so the old
    # url()-style '$' anchors and regex groups are replaced here.
    path('canceled/', views.payment_canceled, name='canceled'),
    path('done/', views.payment_done, name='done'),
    path('<int:id>/process', views.payment_process, name='process')
]
|
studiosemicolon/onlineshop
|
payment/urls.py
|
urls.py
|
py
| 287 |
python
|
en
|
code
| 23 |
github-code
|
6
|
8201001826
|
def fight(posseser, intruder):
    # Both contestants see the other's display, place bids, and each pays the
    # smaller of the two bids in health; the higher bidder wins the contest.
    id = intruder.int_display()
pbid = posseser.pos_bid(id)
pd = posseser.pos_display()
ibid = intruder.int_bid(pd)
# print(round(pd,1), round(id,1), round(pbid,1), round(ibid,1), sep='\t\t\t')
posseser.base_health -= min(pbid,ibid)
posseser.health = min(posseser.health, posseser.base_health)
intruder.base_health -= min(pbid,ibid)
intruder.health = min(intruder.health, intruder.base_health)
if pbid > ibid:
return posseser, intruder, pbid, ibid
else:
return intruder, posseser, pbid, ibid
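# --- Hedged sketch (not in the original file): minimal stand-in agents so
# fight() can be exercised on its own. The real agent classes live elsewhere in
# the project; these stubs only provide the methods and fields fight() uses.
if __name__ == '__main__':
    class _StubAgent:
        def __init__(self, health, bid):
            self.base_health = health
            self.health = health
            self._bid = bid
        def int_display(self):   return self._bid   # what the intruder shows
        def pos_display(self):   return self._bid   # what the possessor shows
        def pos_bid(self, seen): return self._bid   # possessor's bid, given the display
        def int_bid(self, seen): return self._bid   # intruder's bid, given the display

    winner, loser, pbid, ibid = fight(_StubAgent(10, 3), _StubAgent(10, 2))
    print(winner.health, loser.health, pbid, ibid)  # both pay min(pbid, ibid) = 2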
|
rswofxd/evolutionary-game-theory-sim
|
fight.py
|
fight.py
|
py
| 579 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17260440247
|
"""
Module that can parse chess notation for individual moves. Mostly to debug
things and/or introduce chess states without having to wire up the entire
camera setup on a physical board.
Note that we're using standard Algebraic Notation:
https://en.wikipedia.org/wiki/Algebraic_notation_(chess)
Maybe we move on to FEN https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation
to start from boards?
BUGS:
- Doesn't handle pawn promotions
- Doesn't handle disambiguations (when two knights can reach the same place)
"""
import re
from enum import Enum
from collections import namedtuple
# Data definitions. Currently don't allow for draws.
Piece = Enum('Piece', 'Pawn Rook Knight Bishop Queen King')
Action = Enum('Action', 'Move Capture CastleKingside CastleQueenside PawnPromotion')
Modifier = Enum('Modifier', 'Check CheckMate')
Col = Enum('Col', 'A B C D E F G H')
Row = Enum('Row', 'One Two Three Four Five Six Seven Eight')
Position = namedtuple('Position', 'row col')
Move = namedtuple('Move', 'piece action position modifiers')
# Black could be None in the case of a white Checkmate
Turn = namedtuple('Turn', 'white black')
LINE_REGEX = re.compile(r'(?:\d+\.\s+)\s*(\S+)(?:\s+(\S+)\s*)?$')
POSITION_PATTERN = '([a-h])(1|2|3|4|5|6|7|8)'
POSITION_REGEX = re.compile(POSITION_PATTERN)
PIECE_MAP = {
'B': Piece.Bishop,
'R': Piece.Rook,
'Q': Piece.Queen,
'K': Piece.King,
'N': Piece.Knight
}
COL_MAP = {'a': Col.A, 'b': Col.B, 'c': Col.C, 'd': Col.D, 'e': Col.E, 'f': Col.F, 'g': Col.G, 'h': Col.H}
ROW_MAP = {
'1': Row.One,
'2': Row.Two,
'3': Row.Three,
'4': Row.Four,
'5': Row.Five,
'6': Row.Six,
'7': Row.Seven,
'8': Row.Eight
}
ACTION_MAP = {
'x': Action.Capture,
'O-O': Action.CastleKingside,
'O-O-O': Action.CastleQueenside,
'=': Action.PawnPromotion
}
def parse_file(filename):
with open(filename) as f:
lines = f.readlines()
return [parse_line(line.rstrip('\n')) for line in lines]
def parse_line(line):
components = LINE_REGEX.match(line)
white_move = _parse_move(components.group(1))
black_move = None
black_move_spec = components.group(2)
if black_move_spec:
black_move = _parse_move(black_move_spec)
return Turn(white=white_move, black=black_move)
def _parse_move(move):
if re.match('O-O-O', move):
return Move(piece=None, action=Action.CastleQueenside, position=None, modifiers=[])
elif re.match('O-O', move):
return Move(piece=None, action=Action.CastleKingside, position=None, modifiers=[])
piece = _get_piece(move)
action = _get_action(move)
position = _get_position(move)
modifiers = _get_modifiers(move)
return Move(piece=piece, action=action, position=position, modifiers=modifiers)
def _get_piece(move):
"""
    The piece is relatively easy to determine: it's either a pawn, or directly
    determined by its first letter. Gets _a little_ weird when pawns capture,
    so we default to that if the first character isn't a recognized one.
"""
match = re.search('^' + POSITION_PATTERN, move)
if match:
return Piece.Pawn
else:
return PIECE_MAP.get(move[0], Piece.Pawn)
def _get_action(move):
    for action in ACTION_MAP:
if re.search(action, move):
return ACTION_MAP[action]
return Action.Move
def _get_position(move):
"""
The position is pretty easily determined by one of the "acceptable letters" followed by
an acceptable number.
"""
match = POSITION_REGEX.search(move)
return Position(col=COL_MAP[match.group(1)], row=ROW_MAP[match.group(2)])
def _get_modifiers(move):
modifiers = []
    if re.search(r'\+', move):
modifiers.append(Modifier.Check)
elif re.search('#', move):
modifiers.append(Modifier.CheckMate)
return modifiers
def test_data():
return [
Turn(white=Move(piece=Piece.Pawn,
action=Action.Move,
position=Position(col=Col.E, row=Row.Four),
modifiers=[]),
black=Move(piece=Piece.Pawn,
action=Action.Move,
position=Position(col=Col.E, row=Row.Five),
modifiers=[])),
]
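# --- Hedged usage sketch (not part of the original module): parse one line of
# algebraic notation and inspect the resulting Turn namedtuple.
if __name__ == '__main__':
    turn = parse_line('1. e4 e5')
    print(turn.white)                    # e.g. Move(piece=Piece.Pawn, action=Action.Move, ...)
    print(parse_line('2. Nf3 Nc6').black)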
|
stay-whimsical/screamchess
|
src/chess/parser.py
|
parser.py
|
py
| 4,334 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71779936188
|
from student import *
# Management system class
class Lms(object):
    # list used to store the student records
def __init__(self):
self.student_list = []
def bt(self):
print('学号|姓名|性别|年龄|联系电话|身份证号码')
    # Feature self-check --------------------------------------------------------------
def inspect(self):
try:
print(self.student_list)
except:
print('录入功能错误')
else:
print('读取学员数据功能测试成功')
    # Main menu loop --------------------------------------------------------------
def run(self):
        # load student data
self.load_student()
while True:
            # show the menu
self.show_menu()
            # read the menu choice
menu_num = int(input('\n请输入功能序号:'))
            # dispatch on the choice
            if menu_num == 1:  # query student info
                self.search_student()
            elif menu_num == 2:  # add a student
                self.add_student()
            elif menu_num == 3:  # list all students
                self.show_student()
            elif menu_num == 0:  # exit
                self.exit_student()
                break
            elif menu_num == 9:  # system self-check
                self.inspect()  # test that student data can be read
break
    # Print the system menu --------------------------------------------------------------
    def show_menu(self):  # main menu
print('LMS v1.0'.center(54, '-'))
print('1、查询学员信息'.center(50, ))
print('2、新增学员信息'.center(50, ))
print('3、遍历学员信息'.center(50, ))
print('0、退出管理系统'.center(50, ))
    # Load student data --------------------------------------------------------------
def load_student(self):
try:
f = open('student.data', 'r')
except:
f = open('student.data', 'w')
else:
str_data = f.read()
dick_data = eval(str_data)
self.student_list = [Student(i['name'], i['id_num'], i['tel'], i['stu_id']) for i in dick_data]
f.close()
    # Add a student --------------------------------------------------------------
def add_student(self):
global add_sut_id
id_num = input('请输入身份证号码:')
        # check that the ID number has 18 digits
if len(id_num) != 18:
print('\n***您输入的身份证号码不正确,请重新输入!***\n\n')
self.add_student()
else:
            # check whether the ID number already exists
for i in self.student_list:
if i.id_num == id_num:
print('\n***注意:身份证号存在冲突****\n冲突学员信息:')
self.bt()
print(i)
print('\n已为您返回主菜单\n\n')
self.run()
name = input('请输入学员姓名:')
tel = input('请输入手机号码:')
            # build a Student object (student.py module) and append it to the list
student = Student(name, id_num, tel)
self.student_list.append(student)
            # confirm that the entered information is correct:
for i in self.student_list:
if i.id_num == id_num:
self.bt()
print(i)
add_sut_id = i.stu_id
break
a = input('请检查录入信息\n\n确认无误 Y\n修改录入 r\n返回主菜单 N\n请输入:')
if a == 'Y' or a == 'y':
self.save_data()
print('***学员信息已保存***')
self.run()
elif a == 'R' or a == 'r':
self.del_data(add_sut_id)
print('请重新录入信息:\n')
self.add_student()
elif a == 'N' or a == 'n':
self.run()
    # Save module: write the student list to the student.data file ---------------
    def save_data(self):
        f = open('student.data', 'w')
        list_data = [i.__dict__ for i in self.student_list]
        str_data = str(list_data)
        print(str_data)
        f.write(str_data)
        f.close()
    # Delete module ------------------------------------------
    def del_data(self, value1):
        for i in self.student_list:
            if i.stu_id == value1:
                # remove the record from the list ('del i' only dropped the
                # local loop variable and left the list unchanged)
                self.student_list.remove(i)
                break
# _______________________________________________________
# # 查询功能
# def search_student(self): # 1查询功能页面
# while True:
# print('1、学号查询')
# print('2、姓名查询')
# print('0、退出查询')
# mo = int(input('请输入查询方式:'))
# if mo == 1:
# self.id_search()
# elif mo == 2:
# self.name_search()
# elif mo == 0:
# self.exit_search()
# break
#
# def student_data(self): # 启动系统读取全部学员数据文件,存到内存中,如果没有数据文件默认创建空文件,读取后关闭文件;
# try:
# f = open('student.data', 'r')
# except:
# f = open('student.data', 'w')
# else:
# data = f.read()
# new_list = eval(data)
# self.student_list = [Student(i['stu_id'], i['name'], i['gender'], i['tel'], i['id_num']) for i in new_list]
#
# def aa(self): # 测试学员储存信息函数
# print(self.student_list)
#
# # 框架程序入口
# def run(self): # 系统主页框架
# # 欢迎页面
# print('LMS v1.0'.center(54, '-'))
# # 加载学员信息
# # self.load_student()
#
# while True:
# # 显示菜单
# self.show_menu()
# # 输入选项
# menu_num = int(input('\n请输入功能序号:'))
# # 判断功能
# if menu_num == 1: # 查询
# self.search_student()
# elif menu_num == 2: # 新增
# self.add_student()
# elif menu_num == 5: # 遍历
# self.show_student()
# elif menu_num == 0: # 退出
# # self.exit_student()
# self.student_data()
# break
#
# # 系统菜单
# def show_menu(self): # 系统主页
# print('1、查询学员信息'.center(50, ))
# print('2、新增学员信息'.center(50, ))
# print('3、遍历学员信息'.center(50, ))
# print('0、退出管理系统'.center(50, ))
#
# # 查询功能
# def search_student(self): # 1查询功能页面
# while True:
# print('1、学号查询')
# print('2、姓名查询')
# print('0、退出查询')
# mo = int(input('请输入查询方式:'))
# if mo == 1:
# self.id_search()
# elif mo == 2:
# self.name_search()
# elif mo == 0:
# self.exit_search()
# break
#
# def id_search(self): # 1.1学号查询
# id_student = int(input('请输入查询学员的学号:'))
# print('学号|姓名|性别|年龄|联系电话|身份证号码')
# for i in self.student_list:
# if i.id == id_student:
# print(i)
# break
# else:
# print('学号不正确!')
# self.search_student()
# num = int(input('1、重新查找\n2、修改学员信息\n3、删除学员信息\n0、返回主菜单'))
# if num == 1:
# self.id_search()
# elif num == 2:
# self.modify_student()
# elif num == 3:
# self.del_student()
# elif num == 0:
# self.run()
#
# def name_search(self): # 1.2姓名查询
# name_student = input('请输入查询学员的姓名:')
# print('学号|姓名|性别|年龄|联系电话|身份证号码')
# for i in self.student_list:
# if i.name == name_student:
# print(i)
# num = int(input('1、重新查找\n2、修改学员信息\n3、删除学员信息\n0、返回主菜单'))
# if num == 1:
# self.name_search()
# elif num == 2:
# self.modify_student()
# elif num == 3:
# self.del_student()
# elif num == 0:
# self.run()
#
# def exit_modify(self): # 1.3退出查询
# self.run()
#
# def modify_student(self): # 1.1.1修改信息
# modify_id = int(input('请输入修改学员学号:'))
# for i in self.student_list:
# if i.id == modify_id:
# break
# print('学号|姓名|性别|年龄|联系电话|身份证号码')
# print(i)
# print('请输入修改信息数字编号:')
# print('1.姓名;2.性别;3.年龄;4.身份证号码;5.手机号码。')
# modify_num = int(input('请输入编号:'))
# new_modify = input('请输入修改内容:')
# if modify_num == 1:
# i.id = new_modify
# print(i)
# elif modify_num == 2:
# pass
# elif modify_num == 3:
# pass
# elif modify_num == 4:
# pass
# elif modify_num == 5:
# pass
#
# def del_student(self): # 删除学员信息
# del_id = input('请输入删除学员的学号:')
# print('学号|姓名|性别|年龄|联系电话|身份证号码')
# for i in self.student_list:
# if i.id == del_id:
# print(i)
# break
# else:
# print('您输入的学号有误!')
#
# x = input('输入Y确认删除,输入N返回主菜单')
# if x == 'Y' or 'y':
# self.student_list.remove(i)
# print('学员已删除!')
# elif x == 'N' or 'n':
# print('已取消!')
#
#
#
# # 数据刷新模块
# def save_data(self):
# new_list = [i.__dict__ for i in self.student_list]
# f = open('student.data', 'w')
# f.write(str(new_list))
# f.close()
#
# # 修改功能
# # 查询功能
# # 退出功能
#
#
# if __name__ == '__main__':
# ams = Lms()
# ams.aa()
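# --- Hedged sketch (not in the original file): the on-disk format used by
# save_data()/load_student() is simply str() of a list of __dict__ snapshots,
# read back with eval(). A tiny stand-alone round trip with a throwaway class:
if __name__ == '__main__':
    class _Rec:
        def __init__(self, name, tel):
            self.name, self.tel = name, tel

    _text = str([r.__dict__ for r in [_Rec('Alice', '123'), _Rec('Bob', '456')]])
    _loaded = eval(_text)            # list of dicts, as load_student() expects
    assert _loaded[0]['name'] == 'Alice'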
|
goodsimba/LMS
|
managerSystem.py
|
managerSystem.py
|
py
| 10,483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35351271287
|
'''
started on 2022/06/13
end on 2022/xx/xx
@author zelo2
'''
import torch
import torch.nn as nn
class LightGCN(nn.Module):
def __init__(self, n_user, n_item, norm_adj, device, args):
super(LightGCN, self).__init__()
self.device = device
self.n_user = n_user
self.n_item = n_item
self.norm_adj = norm_adj
self.embed_size = args.embed_size
self.batch_size = args.batch_size
self.layer_num = args.layer_num
self.reg_value = eval(args.reg)[0]
self.embeding_dict = self.init_weight()
self.sp_norm_adj = self.convert_coo_matirix_2_sp_tensor(self.norm_adj).to(self.device)
def init_weight(self):
'''Embedding with xavier initialization'''
initializer = nn.init.xavier_uniform_
embedding_dict = nn.ParameterDict({
'user_embed': nn.Parameter(initializer(torch.empty(self.n_user,
self.embed_size))),
'item_embed': nn.Parameter(initializer(torch.empty(self.n_item,
self.embed_size)))
})
# self.user_embedding = nn.Embedding(self.n_user, self.embed_size)
# self.item_embedding = nn.Embedding(self.n_user, self.embed_size)
# nn.init.xavier_uniform_(self.user_embedding.weight)
# nn.init.xavier_uniform_(self.item_embedding.weight)
return embedding_dict
def convert_coo_matirix_2_sp_tensor(self, X):
coo = X.tocoo()
# coo matrix--((data, (row, column)), shape)
        # data: the stored non-zero values; row and column give each value's position
i = torch.LongTensor([coo.row, coo.col]) # [row, column]
v = torch.from_numpy(coo.data).float() # data
return torch.sparse.FloatTensor(i, v, coo.shape)
def sparse_dropout(self, x, rate, noise_shape):
save_probability = 1 - rate
        # torch.rand samples uniformly from [0, 1], so after adding it the
        # probability that an entry exceeds 1 equals 1 - node_dropout_rate
        save_probability += torch.rand(noise_shape)
        dropout_mask = torch.floor(save_probability).type(torch.bool)
i = x._indices()
v = x._values()
i = i[:, dropout_mask]
v = v[dropout_mask]
out = torch.sparse.FloatTensor(i, v, x.shape)
        return out * (1. / (1 - rate))  # rescale the surviving entries after dropping some nodes
# return out
def forward(self, user, pos_item, neg_item, drop_flag=False):
A = self.sp_norm_adj
embedding_matrix = torch.cat([self.embeding_dict['user_embed'], self.embeding_dict['item_embed']]
, 0) # [M+N, embedding_size]
embedding_matrix = embedding_matrix.to(self.device)
all_embeddings = [embedding_matrix]
for k in range(self.layer_num):
# Graph Convolution operation without self connection
embedding_matrix = torch.sparse.mm(A, embedding_matrix)
# Normalization
# norm_embeddings = F.normalize(embedding_matrix, p=2, dim=1) # normalize each row
all_embeddings += [embedding_matrix]
all_embeddings = torch.stack(all_embeddings, dim=1)
all_embeddings = torch.mean(all_embeddings, dim=1)
user_embeddings = all_embeddings[:self.n_user, :]
item_embeddings = all_embeddings[self.n_user:, :]
user_embeddings = user_embeddings[user, :]
pos_item_embeddings = item_embeddings[pos_item, :]
neg_item_embeddings = item_embeddings[neg_item, :]
return user_embeddings, pos_item_embeddings, neg_item_embeddings # [batch_size, embed_size * layer_num] * 3
def bpr_loss(self, users, pos_items, neg_items):
'''
:param users: user embeddings [batch_size, embed_size * layer_num]
:param pos_items: positive item embeddings
:param neg_items: negative item embeddings
:return: Bayesian Personalized Ranking loss (BPR loss)
'''
pos_inner_product = torch.mul(users, pos_items)
neg_inner_product = torch.mul(users, neg_items)
pos_inner_product = torch.sum(pos_inner_product, axis=1) # sum each row [batch_size]
neg_inner_product = torch.sum(neg_inner_product, axis=1)
loss_value = nn.LogSigmoid()(pos_inner_product - neg_inner_product)
loss_value = -1 * torch.mean(loss_value)
        # L2 regularizer: squared L2 norms (sum of squared elements) of the embeddings
l2_value = torch.norm(users, p=2) ** 2 + torch.norm(pos_items, p=2) ** 2 + torch.norm(neg_items, p=2) ** 2
l2_value /= 2
# for k in range(self.layer_num):
# l2_value += torch.norm(self.weight_dict['W1_layer%d' % k], p=2) ** 2
# l2_value += torch.norm(self.weight_dict['b1_layer%d' % k], p=2) ** 2
# l2_value /= (2 + self.layer_num * 2)
l2_value = self.reg_value * l2_value / self.batch_size
return loss_value + l2_value
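# --- Hedged sketch (not part of the original file): instantiate LightGCN on a
# toy 2-user / 3-item graph to check tensor shapes. The args namespace below
# simply mirrors the fields the constructor reads (embed_size, batch_size,
# layer_num, reg); the random adjacency stands in for a real normalized one.
if __name__ == '__main__':
    import scipy.sparse as sp
    from types import SimpleNamespace

    n_user, n_item = 2, 3
    adj = sp.random(n_user + n_item, n_user + n_item, density=0.5, format='coo')
    args = SimpleNamespace(embed_size=8, batch_size=2, layer_num=2, reg='[1e-5]')
    model = LightGCN(n_user, n_item, adj, torch.device('cpu'), args)
    u, pos, neg = model(torch.tensor([0, 1]), torch.tensor([0, 2]), torch.tensor([1, 1]))
    print(u.shape, pos.shape, neg.shape)   # each torch.Size([2, 8])
    print(model.bpr_loss(u, pos, neg))     # scalar BPR + L2 loss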
|
zelo2/NGCF
|
LightGCN/lightGCN_model.py
|
lightGCN_model.py
|
py
| 4,989 |
python
|
en
|
code
| 2 |
github-code
|
6
|
25240115497
|
N = int(input())
trilha = 0
subida = 0
for i in range (N):
valores = input()
valores = valores.split()
M = int(valores[0])
contador1 = 0
contador2 = 0
altura = int(valores[1])
for j in range(1, M+1):
alturanova = int(valores[j])
if alturanova > altura:
contador1 = alturanova - altura + contador1
altura = alturanova
altura = int(valores[M])
for k in range(M,0,-1):
alturanova = int(valores[k])
if alturanova > altura:
contador2 = alturanova - altura + contador2
altura = alturanova
contador1 = min(contador1, contador2)
if i == 0:
subida = contador1
trilha = i+1
else:
if contador1<subida:
subida = contador1
trilha = i+1
print(trilha)
|
MateusFerreiraMachado/Programas_Python
|
Trilha.py
|
Trilha.py
|
py
| 842 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
28235938842
|
# Find a small graph that reproduces the bug.
# If the pattern has size N, first test every graph on N vertices
# (2^(N*(N-1)/2) of them), then every graph on N+1 vertices, and so on,
# until a mismatch between the two programs is found.
# The original programs print too much, so first modify them to write their
# answers to 1.out and 2.out; everything else can go to stdout (this script
# does no redirection).
# To use this script, set N, bin1 and bin2, and build one correct binary and
# one buggy binary.
# Our data loader rejects vertices of degree 0, so many generated graphs are
# invalid; seeing "vertex number error!" is normal.
import os
N = 5
bin1 = "../build/bin/gpu_graph"
res1 = "1.out"
bin2 = "../build/bin/gpu_graph_correct"
res2 = "2.out"
while (True):
print("N = " + str(N))
print("range = ", range(2 ** int(N * (N - 1) / 2)))
for i in range(2 ** int(N * (N - 1) / 2)):
x = i
e_num = 0
while (x != 0):
e_num += x & 1
x >>= 1
f = open("graph.out", "w")
f.write(str(N) + " " + str(e_num) + "\n")
x = i
for j in range(1, N):
for k in range(j + 1, N + 1):
if (x & 1 == 1):
f.write(str(j) + " " + str(k) + "\n")
x >>= 1
f.close()
os.system("srun -N 1 " + bin1 + " Patents graph.out")
os.system("srun -N 1 " + bin2 + " Patents graph.out")
if os.path.exists(res1) and os.path.exists(res2) and os.system("diff -w -q " + res1 + " " + res2) != 0:
break
print("i = " + str(i) + " Correct.")
N += 1
|
sth1997/GraphSet
|
scripts/small_graph_check.py
|
small_graph_check.py
|
py
| 1,612 |
python
|
zh
|
code
| 5 |
github-code
|
6
|
26705804968
|
from random import randint
class TokenGenerator:
def __init__(self, path):
        self.f = open(path, 'r')
        self.list = []
        self.size = 0
        for line in self.f:
            self.list.append(line.replace("\n", ""))
            self.size = self.size + 1
    def close(self):
        # close the handle opened in __init__ (previously 'f' was only a local
        # variable there, so calling close() raised a NameError)
        self.f.close()
def get(self):
word = ""
for i in range(0,3):
word = word+self.list[randint(0, self.size-1)]
return word
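# --- Hedged usage sketch (not in the original file): build a throwaway word
# list, then draw a token made of three random words from it.
if __name__ == '__main__':
    with open("demo_words.txt", "w") as demo:
        demo.write("alpha\nbravo\ncharlie\ndelta\n")
    tg = TokenGenerator("demo_words.txt")
    print(tg.get())   # e.g. "bravodeltacharlie"
    tg.close()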
|
blin4444/pcrs
|
server_side/words.py
|
words.py
|
py
| 366 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19875967712
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import time
import gc
import random
import sklearn
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate, train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, normalize
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
from sklearn import tree
from sklearn.metrics import plot_roc_curve
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from kneed import KneeLocator
import seaborn as sb
from sklearn.metrics import silhouette_score
from sklearn import manifold
from sklearn.mixture import GaussianMixture
class Data():
def dataAllocation(self, path):
# Separate out the x_data and y_data and return each
# args: string path for .csv file
# return: pandas dataframe, pandas series
# -------------------------------
# ADD CODE HERE
df = pd.read_csv(path)
xcols = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8']
ycol = ['y']
x_data = df[xcols]
y_data = df[ycol]
# print(y_data[y_data.y == 1].shape[0])
# print(df.shape[0])
# -------------------------------
return x_data, y_data.values.ravel()
def processed_data_Allocation(self, path):
# Read the processed dataset
# -------------------------------
df = pd.read_csv(path)
xcols = ["age","education","default","housing","loan","contact","month","day_of_week","campaign","previous","poutcome","emp.var.rate","cons.price.idx","cons.conf.idx","euribor3m","nr.employed","job_blue-collar","job_entrepreneur","job_housemaid","job_management","job_retired","job_self-employed","job_services","job_student","job_technician","job_unemployed","marital_married","marital_single"]
ycol = ['y']
x_data = df[xcols]
y_data = df[ycol]
return x_data, y_data.values.ravel()
def trainSets(self, x_data, y_data):
        # Split 80% of the data into training and 20% into test sets. Call them x_train, x_test, y_train and y_test.
        # Use the train_test_split method in sklearn with the parameter 'shuffle' set to True and the 'random_state' set to 614.
# args: pandas dataframe, pandas dataframe
# return: pandas dataframe, pandas dataframe, pandas series, pandas series
# -------------------------------
# ADD CODE HERE
x_train, x_test, y_train, y_test = train_test_split(
x_data, y_data, test_size=0.2, shuffle=True, random_state=614)
# -------------------------------
return x_train, x_test, y_train, y_test
def dataPreProcess(self, x_train, x_test):
# Pre-process the data to standardize it, otherwise the grid search will take much longer.
# args: pandas dataframe, pandas dataframe
# return: pandas dataframe, pandas dataframe
# -------------------------------
# ADD CODE HERE
scaler = StandardScaler()
scaler.fit(x_train)
scaled_x_train = scaler.transform(x_train)
scaled_x_test = scaler.transform(x_test)
# -------------------------------
return scaled_x_train, scaled_x_test
fig, axs = plt.subplots(2, 3)
##################### Diabetes data #############################
dataset = Data()
data = 'data/pima-indians-diabetes.csv'
x_data, y_data = dataset.dataAllocation(data)
x_train, x_test, y_train, y_test = dataset.trainSets(x_data, y_data)
x_train_scaled, x_test_scaled = dataset.dataPreProcess(x_train, x_test)
# KM with DS1
kmeans_kwargs = {'init': 'random', 'n_init':10, 'max_iter':100, 'random_state':42, 'algorithm':'full',}
start = time.time()
kmeans = KMeans(n_clusters=4, **kmeans_kwargs)
label = kmeans.fit(x_train_scaled).labels_
end = time.time()
print("KM with DS1 time:", end-start)
tsne_transform = manifold.TSNE(n_components=2, perplexity=100, init='pca', random_state=42)
feature2D_DS1 = tsne_transform.fit_transform(x_train_scaled)
axs[0, 0].scatter(feature2D_DS1[:,0], feature2D_DS1[:,1], c=label, cmap=plt.cm.Spectral, s=5)
axs[0, 0].set_title('Clusters for DS1 with KM')
# EM with DS1
em_kwargs = {'covariance_type': 'full', 'n_init':10, 'max_iter':100, 'random_state':42, 'init_params':'kmeans'}
start = time.time()
em = GaussianMixture(n_components=5, **em_kwargs)
label = em.fit(x_train_scaled).predict(x_train_scaled)
end = time.time()
print("EM with DS1 time:", end-start)
tsne_transform = manifold.TSNE(n_components=2, perplexity=100, init='pca', random_state=42)
feature2D_DS1 = tsne_transform.fit_transform(x_train_scaled)
axs[0, 1].scatter(feature2D_DS1[:,0], feature2D_DS1[:,1], c=label, cmap=plt.cm.Spectral, s=5)
axs[0, 1].set_title('Clusters for DS1 with EM')
axs[0, 2].scatter(feature2D_DS1[:,0], feature2D_DS1[:,1], c=y_train, cmap=plt.cm.Spectral, s=5)
axs[0, 2].set_title('Clusters for DS1 - True Label')
dataset = Data()
data = 'data/bank_marketing.csv'
x_data, y_data = dataset.processed_data_Allocation(data)
x_train, x_test, y_train, y_test = dataset.trainSets(x_data, y_data)
x_train_scaled, x_test_scaled = dataset.dataPreProcess(x_train, x_test)
# KM with DS2
kmeans_kwargs = {'init': 'random', 'n_init':10, 'max_iter':100, 'random_state':42, 'algorithm':'full',}
start = time.time()
kmeans = KMeans(n_clusters=6, **kmeans_kwargs)
label = kmeans.fit(x_train_scaled).labels_
end = time.time()
print("KM with DS2 time:", end-start)
tsne_transform = manifold.TSNE(n_components=2, perplexity=100, init='pca', random_state=42)
feature2D_DS2 = tsne_transform.fit_transform(x_train_scaled)
axs[1, 0].scatter(feature2D_DS2[:,0], feature2D_DS2[:,1], c=label, cmap=plt.cm.Spectral, s=5)
axs[1, 0].set_title('Clusters for DS2 with KM')
# EM with DS2
em_kwargs = {'covariance_type': 'full', 'n_init':10, 'max_iter':100, 'random_state':42, 'init_params':'kmeans'}
start = time.time()
em = GaussianMixture(n_components=5, **em_kwargs)
label = em.fit(x_train_scaled).predict(x_train_scaled)
end = time.time()
print("EM with DS2 time:", end-start)
tsne_transform = manifold.TSNE(n_components=2, perplexity=100, init='pca', random_state=42)
feature2D_DS2 = tsne_transform.fit_transform(x_train_scaled)
axs[1, 1].scatter(feature2D_DS2[:,0], feature2D_DS2[:,1], c=label, cmap=plt.cm.Spectral, s=5)
axs[1, 1].set_title('Clusters for DS2 with EM')
axs[1, 2].scatter(feature2D_DS2[:,0], feature2D_DS2[:,1], c=y_train, cmap=plt.cm.Spectral, s=5)
axs[1, 2].set_title('Clusters for DS2 - True Label')
fig.tight_layout()
plt.show()
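# --- Hedged sketch (not part of the original script): one way the cluster
# counts hard-coded above could be chosen, using the silhouette_score that is
# already imported. Assumes x_train_scaled from the last dataset is in scope.
def _pick_k_by_silhouette(features, k_range=range(2, 9)):
    scores = {}
    for k in k_range:
        labels = KMeans(n_clusters=k, n_init=10, random_state=42).fit_predict(features)
        scores[k] = silhouette_score(features, labels)
    return max(scores, key=scores.get), scores

# best_k, all_scores = _pick_k_by_silhouette(x_train_scaled)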
|
RuizeHu/Gatech_CS_7641_UnsupervisedLearning
|
code/Clusters_Plot.py
|
Clusters_Plot.py
|
py
| 6,940 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35463005885
|
from collections import namedtuple, Counter
import warnings
from math import sqrt
import numpy as np
from scipy.stats import special_ortho_group
import pytest
import kwant
from ... import lattice
from ...builder import HoppingKind, Builder, Site
from ...system import NoSymmetry
from .. import gauge
## Utilities
square_lattice = lattice.square(norbs=1, name='square')
honeycomb_lattice = lattice.honeycomb(norbs=1, name='honeycomb')
cubic_lattice = lattice.cubic(norbs=1, name='cubic')
def rectangle(W, L):
return (
lambda s: 0 <= s.pos[0] < L and 0 <= s.pos[1] < W,
(L/2, W/2)
)
def ring(r_inner, r_outer):
return (
lambda s: r_inner <= np.linalg.norm(s.pos) <= r_outer,
((r_inner + r_outer) / 2, 0)
)
def wedge(W):
return (
lambda s: (0 <= s.pos[0] < W) and (0 <= s.pos[1] <= s.pos[0]),
(0, 0)
)
def half_ring(r_inner, r_outer):
in_ring, _ = ring(r_inner, r_outer)
return (
lambda s: s.pos[0] <= 0 and in_ring(s),
(-(r_inner + r_outer) / 2, 0)
)
def cuboid(a, b, c):
return (
lambda s: 0 <= s.pos[0] < a and 0 <= s.pos[1] < b and 0 <= s.pos[2] < c,
(a/2, b/2, c/2)
)
def hypercube(dim, W):
return (
lambda s: all(0 <= x < W for x in s.pos),
(W / 2,) * dim
)
def circle(r):
return (
lambda s: np.linalg.norm(s.pos) < r,
(0, 0)
)
def ball(dim, r):
return (
lambda s: np.linalg.norm(s.pos) < r,
(0,) * dim
)
def model(lat, neighbors):
syst = Builder(lattice.TranslationalSymmetry(*lat.prim_vecs))
if hasattr(lat, 'sublattices'):
for l in lat.sublattices:
zv = (0,) * len(l.prim_vecs)
syst[l(*zv)] = None
else:
        zv = (0,) * len(lat.prim_vecs)
syst[lat(*zv)] = None
for r in range(neighbors):
syst[lat.neighbors(r + 1)] = None
return syst
def check_loop_kind(loop_kind):
(_, first_fam_a, prev_fam_b), *rest = loop_kind
for (_, fam_a, fam_b) in rest:
if prev_fam_b != fam_a:
raise ValueError('Invalid loop kind: does not close')
prev_fam_b = fam_b
# loop closes
net_delta = np.sum([hk.delta for hk in loop_kind])
if first_fam_a != fam_b or np.any(net_delta != 0):
raise ValueError('Invalid loop kind: does not close')
def available_loops(syst, loop_kind):
def maybe_loop(site):
loop = [site]
a = site
for delta, family_a, family_b in loop_kind:
b = Site(family_b, a.tag + delta, True)
if family_a != a.family or (a, b) not in syst:
return None
loop.append(b)
a = b
return loop
check_loop_kind(loop_kind)
return list(filter(None, map(maybe_loop, syst.sites())))
def loop_to_links(loop):
return list(zip(loop, loop[1:]))
def no_symmetry(lat, neighbors):
return NoSymmetry()
def translational_symmetry(lat, neighbors):
return lattice.TranslationalSymmetry(int((neighbors + 1)/2) * lat.prim_vecs[0])
## Tests
# Tests that phase around a loop is equal to the flux through the loop.
# First we define the loops that we want to test, for various lattices.
# If a system does not support a particular kind of loop, they will simply
# not be generated.
Loop = namedtuple('Loop', ('path', 'flux'))
square_loops = [([HoppingKind(d, square_lattice) for d in l.path], l.flux)
for l in [
# 1st nearest neighbors
Loop(path=[(1, 0), (0, 1), (-1, 0), (0, -1)], flux=1),
# 2nd nearest neighbors
Loop(path=[(1, 0), (0, 1), (-1, -1)], flux=0.5),
Loop(path=[(1, 0), (-1, 1), (0, -1)], flux=0.5),
# 3rd nearest neighbors
Loop(path=[(2, 0), (0, 1), (-2, 0), (0, -1)], flux=2),
Loop(path=[(2, 0), (-1, 1), (-1, 0), (0, -1)], flux=1.5),
]]
a, b = honeycomb_lattice.sublattices
honeycomb_loops = [([HoppingKind(d, a, b) for *d, a, b in l.path], l.flux)
for l in [
# 1st nearest neighbors
Loop(path=[(0, 0, a, b), (-1, 1, b, a), (0, -1, a, b), (0, 0, b, a),
(1, -1, a, b), (0, 1, b, a)],
flux=sqrt(3)/2),
# 2nd nearest neighbors
Loop(path=[(-1, 1, a, a), (0, -1, a, a), (1, 0, a, a)],
flux=sqrt(3)/4),
Loop(path=[(-1, 0, b, b), (1, -1, b, b), (0, 1, b, b)],
flux=sqrt(3)/4),
]]
cubic_loops = [([HoppingKind(d, cubic_lattice) for d in l.path], l.flux)
for l in [
# 1st nearest neighbors
Loop(path=[(1, 0, 0), (0, 1, 0), (-1, 0, 0), (0, -1, 0)], flux=1),
Loop(path=[(0, 1, 0), (0, 0, 1), (0, -1, 0), (0, 0, -1)], flux=0),
Loop(path=[(1, 0, 0), (0, 0, 1), (-1, 0, 0), (0, 0, -1)], flux=0),
# 2nd nearest neighbors
Loop(path=[(1, 0, 0), (-1, 1, 0), (0, -1, 0)], flux=0.5),
Loop(path=[(1, 0, 0), (0, 1, 0), (-1, -1, 0)], flux=0.5),
Loop(path=[(1, 0, 0), (-1, 0, 1), (0, 0, -1)], flux=0),
Loop(path=[(1, 0, 0), (0, 0, 1), (-1, 0, -1)], flux=0),
Loop(path=[(0, 1, 0), (0, -1, 1), (0, 0, -1)], flux=0),
Loop(path=[(0, 1, 0), (0, 0, 1), (0, -1, -1)], flux=0),
# 3rd nearest neighbors
Loop(path=[(1, 1, 1), (0, 0, -1), (-1, -1, 0)], flux=0),
Loop(path=[(1, 1, 1), (-1, 0, -1), (0, -1, 0)], flux=0.5),
]]
square = (square_lattice, square_loops)
honeycomb = (honeycomb_lattice, honeycomb_loops)
cubic = (cubic_lattice, cubic_loops)
def _test_phase_loops(syst, phases, loops):
for loop_kind, loop_flux in loops:
for loop in available_loops(syst, loop_kind):
loop_phase = np.prod([phases(a, b) for a, b in loop_to_links(loop)])
expected_loop_phase = np.exp(1j * np.pi * loop_flux)
assert np.isclose(loop_phase, expected_loop_phase)
@pytest.mark.parametrize("neighbors", [1, 2, 3])
@pytest.mark.parametrize("symmetry", [no_symmetry, translational_symmetry],
ids=['finite', 'infinite'])
@pytest.mark.parametrize("lattice, loops", [square, honeycomb, cubic],
ids=['square', 'honeycomb', 'cubic'])
def test_phases(lattice, neighbors, symmetry, loops):
"""Check that the phases around common loops are equal to the flux, for
finite and infinite systems with uniform magnetic field.
"""
W = 4
dim = len(lattice.prim_vecs)
field = np.array([0, 0, 1]) if dim == 3 else 1
syst = Builder(symmetry(lattice, neighbors))
syst.fill(model(lattice, neighbors), *hypercube(dim, W))
this_gauge = gauge.magnetic_gauge(syst.finalized())
phases = this_gauge(field)
_test_phase_loops(syst, phases, loops)
@pytest.mark.parametrize("neighbors", [1, 2, 3])
@pytest.mark.parametrize("lat, loops", [square, honeycomb],
ids=['square', 'honeycomb'])
def test_phases_composite(neighbors, lat, loops):
"""Check that the phases around common loops are equal to the flux, for
composite systems with uniform magnetic field.
"""
W = 4
dim = len(lat.prim_vecs)
field = np.array([0, 0, 1]) if dim == 3 else 1
lead = Builder(lattice.TranslationalSymmetry(-lat.prim_vecs[0]))
lead.fill(model(lat, neighbors), *hypercube(dim, W))
# Case where extra sites are added and fields are same in
# scattering region and lead.
syst = Builder()
syst.fill(model(lat, neighbors), *ball(dim, W + 1))
extra_sites = syst.attach_lead(lead)
assert extra_sites # make sure we're testing the edge case with added sites
this_gauge = gauge.magnetic_gauge(syst.finalized())
# same field in scattering region and lead
phases, lead_phases = this_gauge(field, field)
# When extra sites are added to the central region we need to select
# the correct phase function.
def combined_phases(a, b):
if a in extra_sites or b in extra_sites:
return lead_phases(a, b)
else:
return phases(a, b)
_test_phase_loops(syst, combined_phases, loops)
_test_phase_loops(lead, lead_phases, loops)
@pytest.mark.parametrize("neighbors", [1, 2])
def test_overlapping_interfaces(neighbors):
"""Test composite systems with overlapping lead interfaces."""
lat = square_lattice
def _make_syst(edge, W=5):
syst = Builder()
syst.fill(model(lat, neighbors), *rectangle(W, W))
leadx = Builder(lattice.TranslationalSymmetry((-1, 0)))
leadx[(lat(0, j) for j in range(edge, W - edge))] = None
for n in range(1, neighbors + 1):
leadx[lat.neighbors(n)] = None
leady = Builder(lattice.TranslationalSymmetry((0, -1)))
leady[(lat(i, 0) for i in range(edge, W - edge))] = None
for n in range(1, neighbors + 1):
leady[lat.neighbors(n)] = None
assert not syst.attach_lead(leadx) # sanity check; no sites added
assert not syst.attach_lead(leady) # sanity check; no sites added
return syst, leadx, leady
# edge == 0: lead interfaces overlap
# edge == 1: lead interfaces partition scattering region
# into 2 disconnected components
for edge in (0, 1):
syst, leadx, leady = _make_syst(edge)
this_gauge = gauge.magnetic_gauge(syst.finalized())
phases, leadx_phases, leady_phases = this_gauge(1, 1, 1)
_test_phase_loops(syst, phases, square_loops)
_test_phase_loops(leadx, leadx_phases, square_loops)
_test_phase_loops(leady, leady_phases, square_loops)
def _make_square_syst(sym, neighbors=1):
lat = square_lattice
syst = Builder(sym)
syst[(lat(i, j) for i in (0, 1) for j in (0, 1))] = None
for n in range(1, neighbors + 1):
syst[lat.neighbors(n)] = None
return syst
def test_unfixable_gauge():
"""Check error is raised when we cannot fix the gauge."""
leadx = _make_square_syst(lattice.TranslationalSymmetry((-1, 0)))
leady = _make_square_syst(lattice.TranslationalSymmetry((0, -1)))
# 1x2 line with 2 leads
syst = _make_square_syst(NoSymmetry())
del syst[[square_lattice(1, 0), square_lattice(1, 1)]]
syst.attach_lead(leadx)
syst.attach_lead(leadx.reversed())
with pytest.raises(ValueError):
gauge.magnetic_gauge(syst.finalized())
# 2x2 square with leads attached from all 4 sides,
# and nearest neighbor hoppings
syst = _make_square_syst(NoSymmetry())
# Until we add the last lead we have enough gauge freedom
# to specify independent fields in the scattering region
# and each of the leads. We check that no extra sites are
# added as a sanity check.
assert not syst.attach_lead(leadx)
gauge.magnetic_gauge(syst.finalized())
assert not syst.attach_lead(leady)
gauge.magnetic_gauge(syst.finalized())
assert not syst.attach_lead(leadx.reversed())
gauge.magnetic_gauge(syst.finalized())
# Adding the last lead removes our gauge freedom.
assert not syst.attach_lead(leady.reversed())
with pytest.raises(ValueError):
gauge.magnetic_gauge(syst.finalized())
# 2x2 square with 2 leads, but 4rd nearest neighbor hoppings
syst = _make_square_syst(NoSymmetry())
del syst[(square_lattice(1, 0), square_lattice(1, 1))]
leadx = _make_square_syst(lattice.TranslationalSymmetry((-1, 0)))
leadx[square_lattice.neighbors(4)] = None
for lead in (leadx, leadx.reversed()):
syst.attach_lead(lead)
with pytest.raises(ValueError):
gauge.magnetic_gauge(syst.finalized())
def _test_disconnected(syst):
with pytest.raises(ValueError) as excinfo:
gauge.magnetic_gauge(syst.finalized())
assert 'unit cell not connected' in str(excinfo.value)
def test_invalid_lead():
"""Check error is raised when a lead unit cell is not connected
within the unit cell itself.
In order for the algorithm to work we need to be able to close
loops within the lead. However we only add a single lead unit
cell, so not all paths can be closed, even if the lead is
connected.
"""
lat = square_lattice
lead = _make_square_syst(lattice.TranslationalSymmetry((-1, 0)),
neighbors=0)
# Truly disconnected system
# Ignore warnings to suppress Kwant's complaint about disconnected lead
with warnings.catch_warnings():
warnings.simplefilter('ignore')
_test_disconnected(lead)
# 2 disconnected chains (diagonal)
lead[(lat(0, 0), lat(1, 1))] = None
lead[(lat(0, 1), lat(1, 0))] = None
_test_disconnected(lead)
lead = _make_square_syst(lattice.TranslationalSymmetry((-1, 0)),
neighbors=0)
# 2 disconnected chains (horizontal)
lead[(lat(0, 0), lat(1, 0))] = None
lead[(lat(0, 1), lat(1, 1))] = None
_test_disconnected(lead)
# System has loops, but need 3 unit cells
# to express them.
lead[(lat(0, 0), lat(1, 1))] = None
lead[(lat(0, 1), lat(1, 0))] = None
_test_disconnected(lead)
# Test internal parts of magnetic_gauge
@pytest.mark.parametrize("shape",
[rectangle(5, 5), circle(4),
half_ring(5, 10)],
ids=['rectangle', 'circle', 'half-ring']
)
@pytest.mark.parametrize("lattice", [square_lattice, honeycomb_lattice],
ids=['square', 'honeycomb'])
@pytest.mark.parametrize("neighbors", [1, 2, 3])
def test_minimal_cycle_basis(lattice, neighbors, shape):
"""Check that for lattice models on genus 0 shapes, nearly
all loops have the same (minimal) length. This is not an
equality, as there may be weird loops on the edges.
"""
syst = Builder()
syst.fill(model(lattice, neighbors), *shape)
syst = syst.finalized()
loops = gauge._loops_in_finite(syst)
loop_counts = Counter(map(len, loops))
min_loop = min(loop_counts)
# arbitrarily allow 1% of slightly longer loops;
# we don't make stronger guarantees about the quality
# of our loop basis
assert loop_counts[min_loop] / len(loops) > 0.99, loop_counts
def random_loop(n, max_radius=10, planar=False):
"""Return a loop of 'n' points.
The loop is in the x-y plane if 'planar is False', otherwise
each point is given a random perturbation in the z direction
"""
theta = np.sort(2 * np.pi * np.random.rand(n))
r = max_radius * np.random.rand(n)
if planar:
z = np.zeros((n,))
else:
z = 2 * (max_radius / 5) * (np.random.rand(n) - 1)
return np.array([r * np.cos(theta), r * np.sin(theta), z]).transpose()
def test_constant_surface_integral():
field_direction = np.random.rand(3)
field_direction /= np.linalg.norm(field_direction)
loop = random_loop(7)
integral = gauge._surface_integral
I = integral(lambda r: field_direction, loop)
assert np.isclose(I, integral(field_direction, loop))
assert np.isclose(I, integral(lambda r: field_direction, loop, average=True))
def circular_field(r_vec):
return np.array([r_vec[1], -r_vec[0], 0])
def test_invariant_surface_integral():
"""Surface integral should be identical if we apply a random
rotation to loop and vector field.
"""
integral = gauge._surface_integral
# loop with random orientation
orig_loop = loop = random_loop(7)
I = integral(circular_field, loop)
for _ in range(4):
rot = special_ortho_group.rvs(3)
loop = orig_loop @ rot.transpose()
assert np.isclose(I, integral(lambda r: rot @ circular_field(rot.transpose() @ r), loop))
@pytest.fixture
def system_and_gauge():
def hopping(a, b, peierls):
return -1 * peierls(a, b)
syst = Builder()
syst[(square_lattice(i, j) for i in range(3) for j in range(10))] = 4
syst[square_lattice.neighbors()] = hopping
lead = Builder(lattice.TranslationalSymmetry((-1, 0)))
lead[(square_lattice(0, j) for j in range(10))] = 4
lead[square_lattice.neighbors()] = hopping
syst.attach_lead(lead.substituted(peierls='peierls_left'))
syst.attach_lead(lead.reversed().substituted(peierls='peierls_right'))
syst = syst.finalized()
magnetic_gauge = gauge.magnetic_gauge(syst)
return syst, magnetic_gauge
@pytest.mark.parametrize('B',[0, 0.1, lambda r: 0.1 * np.exp(-r[1]**2)])
def test_uniform_magnetic_field(system_and_gauge, B):
syst, gauge = system_and_gauge
peierls, peierls_left, peierls_right = gauge(B, B, B)
params = dict(peierls=peierls, peierls_left=peierls_left,
peierls_right=peierls_right)
s = kwant.smatrix(syst, energy=0.6, params=params)
t = s.submatrix(1, 0)
assert t.shape > (0, 0) # sanity check
assert np.allclose(np.abs(t)**2, np.eye(*t.shape))
def test_phase_sign(system_and_gauge):
syst, gauge = system_and_gauge
peierls, peierls_left, peierls_right = gauge(0.1, 0.1, 0.1)
params = dict(peierls=peierls, peierls_left=peierls_left,
peierls_right=peierls_right)
cut = [(square_lattice(1, j), square_lattice(0, j))
for j in range(10)]
J = kwant.operator.Current(syst, where=cut)
J = J.bind(params=params)
psi = kwant.wave_function(syst, energy=0.6, params=params)(0)[0]
# Electrons incident from the left travel along the *top*
# edge of the Hall bar in the presence of a magnetic field
# out of the plane
j = J(psi)
j_bottom = sum(j[0:5])
j_top = sum(j[5:10])
assert np.isclose(j_top + j_bottom, 1) # sanity check
assert j_top > 0.9
|
kwant-project/kwant
|
kwant/physics/tests/test_gauge.py
|
test_gauge.py
|
py
| 17,404 |
python
|
en
|
code
| 76 |
github-code
|
6
|
33522558184
|
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.core.mail import send_mail
User = get_user_model()
def wish_birthday():
today = timezone.now().date()
user_list = User.objects.filter(birthday__day=today.day, birthday__month=today.month)
for item in user_list:
subject = 'Birthday Wish!'
body = 'Hi {},\n Happy Birthday!!!'.format(item.username)
send_mail(subject, body, '[email protected]', [item.email])
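# --- Hedged sketch (not in the original file): if this function is scheduled
# with django-crontab, the settings entry would look roughly like this
# (the schedule string and dotted path are assumptions):
#
# CRONJOBS = [
#     ('0 8 * * *', 'user.cron.wish_birthday'),   # every day at 08:00
# ]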
|
napitsakun/backend_task
|
user/cron.py
|
cron.py
|
py
| 497 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26051387150
|
with open("26data/26-J1.txt", "r") as f:
data = list(map(int, f.readlines()))[1:]
res = 0
for n in range(len(data)):
for k in range(n + 1, len(data)):
if data[n] + data[k] == 100:
res += 1
del data[k]
break
print(res)
# 3845 - CORRECT
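# --- Hedged note (not in the original file): the loops greedily pair numbers
# that sum to 100, deleting each matched partner so it is used at most once;
# `res` counts the pairs formed across the input list.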
|
Woolfer0097/UGE_IT
|
26 task/26.py
|
26.py
|
py
| 291 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2710846372
|
# just a seperate file for handling the logging
# of sanic to use with logging
from sanic.log import DefaultFilter
import sys
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'accessFilter': {
'()': DefaultFilter,
'param': [0, 10, 20]
},
'errorFilter': {
'()': DefaultFilter,
'param': [30, 40, 50]
}
},
'formatters': {
'simple': {
'format': '%(asctime)s - (%(name)s)[%(levelname)s]: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'access': {
'format': '%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: ' +
'%(request)s %(message)s %(status)d %(byte)d',
'datefmt': '%Y-%m-%d %H:%M:%S'
}
},
'handlers': {
'internalFile': {
'class': 'logging.FileHandler',
'filters': ['accessFilter'],
'formatter': 'simple',
'filename': "temp/clickinternal.log"
},
'accessFile': {
'class': 'logging.FileHandler',
'filters': ['accessFilter'],
'formatter': 'access',
'filename': "temp/clickaccess.log"
},
'errorFile': {
'class': 'logging.FileHandler',
'filters': ['errorFilter'],
'formatter': 'simple',
'filename': "temp/clickerr.log"
},
'internal': {
'class': 'logging.StreamHandler',
'filters': ['accessFilter'],
'formatter': 'simple',
'stream': sys.stderr
},
'accessStream': {
'class': 'logging.StreamHandler',
'filters': ['accessFilter'],
'formatter': 'access',
'stream': sys.stderr
},
'errorStream': {
'class': 'logging.StreamHandler',
'filters': ['errorFilter'],
'formatter': 'simple',
'stream': sys.stderr
}
},
'loggers': {
'sanic': {
'level': 'DEBUG',
'handlers': ['internal','errorStream','internalFile', 'errorFile']
},
'network': {
'level': 'DEBUG',
'handlers': ['accessStream','errorStream','accessFile', 'errorFile']
}
}
}
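# --- Hedged usage sketch (not in the original file): with the (older) Sanic
# versions this config targets, the dict above is typically passed to the app
# constructor; exact keyword support depends on the Sanic release.
#
# from sanic import Sanic
# app = Sanic(__name__, log_config=LOGGING)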
|
AggressivelyMeows/Ping
|
logging_config.py
|
logging_config.py
|
py
| 2,044 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72650125307
|
def inputWitdhlong( ) :
wi = float( input('กว้าง : ') )
lo = float( input('ยาว : ') )
return wi, lo
def inputBaseHigh( ) :
ba = float( input('ฐาน : ') )
hi = float( input('สูง : ') )
return ba, hi
def calAndShowAreaSquare( ba, hi ) :
area = ba * hi / 2
print(f'สามเหลี่ยมฐาน {ba} สูง {hi} มีพื้นที่ {area}')
wi, lo = inputWitdhlong( )
calAndShowAreaSquare(wi, lo)
print('--------------------------')
ba, hi = inputBaseHigh( )
calAndShowAreaSquare(ba, hi)
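# --- Hedged note (not in the original file): despite its name,
# calAndShowAreaSquare computes a triangle's area (base * height / 2), so both
# calls above report triangle areas — the first using the width/length inputs.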
|
HowToPlayMeow/pythonproject04
|
py5.py
|
py5.py
|
py
| 567 |
python
|
th
|
code
| 0 |
github-code
|
6
|
12485496681
|
# Create your views here.
id = int()
iditemcourant = int()
#-*- coding: utf-8 -*-
from django.http import HttpResponse
from django.shortcuts import render , redirect
from utilisateur.forms import *
from utilisateur.models import *
#import time
from django.core.urlresolvers import reverse
def utilisateur(request):
if request.method == 'POST':
form = formuser(request.POST)
if form.is_valid():
global id
id = int(form.cleaned_data['idUtilisateur'])
listeid = list()
for i in utilisateurtable.objects.all():
listeid.append(i.id)
if id in listeid:
return redirect("index")#ici lien vers acceuil du site
else:
return redirect("authentification")#ici lien vers acceuil du site
else:
form = formuser()
#return render(request, 'index.html', locals())
return render(request, 'index2.html', locals())
def index(request):
return render(request, 'index.html', {})
def genereid(request):
#iduser = str(time.time())
#list = iduser.split('.')
#iduser = "".join(list)
#iduser = 'votre nouveau id est :' + iduser
connecte = utilisateurtable.objects.create()
connecte.id = str(connecte.id)
return HttpResponse(connecte.id)
def listitemmath(request):
return render(request, 'listitemmath.html', locals())
#################################################################
def equation_du_premier_degre(request):
a = pedagogic_item.objects.get(id = iditemcourant)
exo = reverse(a.link_to_exo)
return render(request, 'eq1.html', locals())
def equation_du_second(request):
a = pedagogic_item.objects.get(id = iditemcourant)
exo = reverse(a.link_to_exo)
return render(request, 'eq2.html', locals())
def derivee(request):
a = pedagogic_item.objects.get(id = iditemcourant)
exo = reverse(a.link_to_exo)
return render(request, 'derivee.html', locals())
def primitive(request):
a = pedagogic_item.objects.get(id = iditemcourant)
exo = reverse(a.link_to_exo)
return render(request, 'primitive.html', locals())
def equation_differentiel(request):
a = pedagogic_item.objects.get(id = iditemcourant)
exo = reverse(a.link_to_exo)
return render(request, 'eqdiff.html', locals())
def equation_du_premier_degreexo(request ):
if request.method == 'POST':
form = formexo(request.POST)
if form.is_valid():
oui = form.cleaned_data['oui']
non = form.cleaned_data['non']
reussite = bool()
if oui is True and non is False:
reussite = True
else:
reussite = False
            if reussite:  # the exercise was passed, which means an item has just been completed
                # load the user's record from the database
                utilisateur = utilisateurtable.objects.get(id = id)
                # shift: we keep 4 items because 4 nodes form a path of 3 edges to reward
utilisateur.id_ante_ante_penultimate_item = utilisateur.id_ante_penultimate_item
utilisateur.id_ante_penultimate_item = utilisateur.id_penultimate_item
utilisateur.id_penultimate_item = utilisateur.id_last_item
utilisateur.id_last_item = iditemcourant
utilisateur.save()
                # make sure utilisateur.id_penultimate_item is not None
if utilisateur.id_penultimate_item != None :
last_link = link_to_next_possible_pi.objects.get(iditemfk=utilisateur.id_penultimate_item, iditemnextfk=utilisateur.id_last_item)
if last_link.d_a_pos_ph != None:
var = float(last_link.d_a_pos_ph)
var += 3
last_link.d_a_pos_ph = str(var)
last_link.save()
else:
var = 3.0
last_link.d_a_pos_ph = str(var)
last_link.save()
if utilisateur.id_ante_penultimate_item != None :
penultimate_link = link_to_next_possible_pi.objects.get(iditemfk=utilisateur.id_ante_penultimate_item, iditemnextfk=utilisateur.id_penultimate_item)
if penultimate_link.d_a_pos_ph != None:
var = float(penultimate_link.d_a_pos_ph)
var += 2
penultimate_link.d_a_pos_ph = str(var)
penultimate_link.save()
else:
var = 2.0
penultimate_link.d_a_pos_ph = str(var)
penultimate_link.save()
if utilisateur.id_ante_ante_penultimate_item != None and utilisateur.id_penultimate_item != None :
ante_penultimate_link = link_to_next_possible_pi.objects.get(iditemfk=utilisateur.id_ante_ante_penultimate_item, iditemnextfk=utilisateur.id_ante_penultimate_item)
if ante_penultimate_link.d_a_pos_ph != None:
var = float(ante_penultimate_link.d_a_pos_ph)
var += 1
ante_penultimate_link.d_a_pos_ph = str(var)
ante_penultimate_link.save()
else:
var = 1.0
ante_penultimate_link.d_a_pos_ph = str(var)
ante_penultimate_link.save()
return redirect("proposelien")
            else:  # i.e. the exercise was not passed
                # load the user's record from the database
                utilisateur = utilisateurtable.objects.get(id = id)
                # shift: we keep 4 items because 4 nodes form a path of 3 edges to reward
utilisateur.id_ante_ante_penultimate_item = utilisateur.id_ante_penultimate_item
utilisateur.id_ante_penultimate_item = utilisateur.id_penultimate_item
utilisateur.id_penultimate_item = utilisateur.id_last_item
utilisateur.id_last_item = iditemcourant
utilisateur.save()
                # make sure utilisateur.id_penultimate_item is not None
if utilisateur.id_penultimate_item != None :
last_link = link_to_next_possible_pi.objects.get(iditemfk=utilisateur.id_penultimate_item, iditemnextfk=utilisateur.id_last_item)
if last_link.d_a_neg_ph != None:
var = float(last_link.d_a_neg_ph)
var -= 3
last_link.d_a_neg_ph = str(var)
last_link.save()
else:
var = -3.0
last_link.d_a_neg_ph = str(var)
last_link.save()
if utilisateur.id_ante_penultimate_item != None :
penultimate_link = link_to_next_possible_pi.objects.get(iditemfk=utilisateur.id_ante_penultimate_item, iditemnextfk=utilisateur.id_penultimate_item)
if penultimate_link.d_a_neg_ph != None:
var = float(penultimate_link.d_a_neg_ph)
var -= 2
penultimate_link.d_a_neg_ph = str(var)
penultimate_link.save()
else:
var = -2.0
penultimate_link.d_a_neg_ph = str(var)
penultimate_link.save()
if utilisateur.id_ante_ante_penultimate_item != None and utilisateur.id_penultimate_item != None :
ante_penultimate_link = link_to_next_possible_pi.objects.get(iditemfk=utilisateur.id_ante_ante_penultimate_item, iditemnextfk=utilisateur.id_ante_penultimate_item)
if ante_penultimate_link.d_a_neg_ph != None:
var = float(ante_penultimate_link.d_a_neg_ph)
var -= 1
ante_penultimate_link.d_a_neg_ph = str(var)
ante_penultimate_link.save()
else:
var = -1.0
ante_penultimate_link.d_a_neg_ph = str(var)
ante_penultimate_link.save()
return redirect("proposelien")
else:
form = formexo()
return render(request, 'exoeq1.html', locals())
def viewitem(request , iditem):
    # The user has just clicked an item; we check whether a path already exists
    # between the last visited item and the current one.
    # template handling
a = pedagogic_item.objects.get(id = iditem)
lien = reverse(a.link_to_courses)
global iditemcourant
    iditemcourant = iditem  # this becomes the new current item
    # check whether a path exists between the previous item and the new one
    utilisateur = utilisateurtable.objects.get(id = id)
    # a path needs at least one previously visited node
    if utilisateur.id_last_item != None :  # if it is None, this is the user's first item
precedent = utilisateur.id_last_item
suivant = iditemcourant
        # if the link does not exist yet, it is created here
i , boole = link_to_next_possible_pi.objects.get_or_create(iditemfk=precedent, iditemnextfk= suivant)
        # return HttpResponse(boole)  # success
        listepossiblite = link_to_next_possible_pi.objects.filter(iditemfk = precedent)  # links leaving the previous item
for chemin in listepossiblite:
            if chemin.d_a_pos_ph != None:  # we do not decrease
var = float(chemin.d_a_pos_ph)
var -= 1
chemin.d_a_pos_ph = str(var)
chemin.save()
else:
chemin.d_a_pos_ph = '-1'
chemin.save()
return render(request, 'viewitem.html', locals())
def proposelien(request):
    # link-sorting algorithm
utilisateur = utilisateurtable.objects.get(id = id)
    liste = link_to_next_possible_pi.objects.filter(iditemfk = utilisateur.id_last_item)  # candidate links from the last visited item
nombrelien = len(liste)
if nombrelien == 1:
lienlist = pedagogic_item.objects.filter(id = liste[0].iditemnextfk)
nom_cour = lienlist[0].link_to_courses
lien = reverse(nom_cour)
return render(request, 'proposelien.html', locals())
|
3SCS/hackaton-130813
|
hommilliere/utilisateur/views.py
|
views.py
|
py
| 8,734 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
30358149781
|
import contextlib
import datetime
import unittest
from traits.api import Date, HasTraits, List
from traitsui.api import DateEditor, View, Item
from traitsui.editors.date_editor import CellFormat
from traitsui.tests._tools import (
BaseTestMixin,
create_ui,
requires_toolkit,
reraise_exceptions,
ToolkitName,
)
class Foo(HasTraits):
dates = List(Date)
single_date = Date()
def single_select_custom_view():
view = View(
Item(
name="single_date",
style="custom",
editor=DateEditor(multi_select=False),
)
)
return view
def multi_select_custom_view():
view = View(
Item(
name="dates", style="custom", editor=DateEditor(multi_select=True)
)
)
return view
def multi_select_selected_color_view():
view = View(
Item(
name="dates",
style="custom",
editor=DateEditor(
multi_select=True,
selected_style=CellFormat(bold=True, bgcolor=(128, 10, 0)),
),
)
)
return view
@requires_toolkit([ToolkitName.qt])
class TestDateEditorCustomQt(BaseTestMixin, unittest.TestCase):
def setUp(self):
BaseTestMixin.setUp(self)
def tearDown(self):
BaseTestMixin.tearDown(self)
def test_single_select_qt(self):
with self.launch_editor(single_select_custom_view) as (foo, editor):
date = datetime.date(2018, 2, 3)
self.click_date_on_editor(editor, date)
self.assertEqual(foo.single_date, date)
def test_multi_select_dates_on_editor(self):
with self.launch_editor(multi_select_custom_view) as (foo, editor):
dates = [datetime.date(2018, 2, 3), datetime.date(2018, 2, 1)]
for date in dates:
self.click_date_on_editor(editor, date)
for date in dates:
self.check_select_status(
editor=editor, date=date, selected=True
)
self.assertEqual(foo.dates, sorted(dates))
def test_multi_select_qt_styles_reset(self):
with self.launch_editor(multi_select_custom_view) as (foo, editor):
date = datetime.date(2018, 2, 1)
self.click_date_on_editor(editor, date)
self.check_select_status(editor=editor, date=date, selected=True)
self.click_date_on_editor(editor, date)
self.check_select_status(editor=editor, date=date, selected=False)
def test_multi_select_qt_set_model_dates(self):
# Test setting the dates from the model object.
with self.launch_editor(multi_select_custom_view) as (foo, editor):
foo.dates = [datetime.date(2010, 1, 2), datetime.date(2010, 2, 1)]
for date in foo.dates:
self.check_select_status(
editor=editor, date=date, selected=True
)
def test_custom_selected_color(self):
view_factory = multi_select_selected_color_view
with self.launch_editor(view_factory) as (foo, editor):
date = datetime.date(2011, 3, 4)
foo.dates = [date]
self.check_date_bgcolor(editor, date, (128, 10, 0))
# --------------------
# Helper methods
# --------------------
@contextlib.contextmanager
def launch_editor(self, view_factory):
foo = Foo()
with create_ui(foo, dict(view=view_factory())) as ui:
(editor,) = ui._editors
yield foo, editor
def check_select_status(self, editor, date, selected):
from pyface.qt import QtCore, QtGui
qdate = QtCore.QDate(date.year, date.month, date.day)
textformat = editor.control.dateTextFormat(qdate)
if selected:
self.assertEqual(
textformat.fontWeight(),
QtGui.QFont.Weight.Bold,
"{!r} is not selected.".format(date),
)
self.check_date_bgcolor(editor, date, (0, 128, 0))
else:
self.assertEqual(
textformat.fontWeight(),
QtGui.QFont.Weight.Normal,
"{!r} is not unselected.".format(date),
)
self.assertEqual(
textformat.background().style(),
QtCore.Qt.BrushStyle.NoBrush,
"Expected brush to have been reset.",
)
self.check_date_bgcolor(editor, date, (0, 0, 0))
def click_date_on_editor(self, editor, date):
from pyface.qt import QtCore
# QCalendarWidget.setSelectedDate modifies internal state
# instead of triggering the click signal.
# So we call update_object directly
editor.update_object(QtCore.QDate(date.year, date.month, date.day))
def check_date_bgcolor(self, editor, date, expected):
from pyface.qt import QtCore
qdate = QtCore.QDate(date.year, date.month, date.day)
textformat = editor.control.dateTextFormat(qdate)
color = textformat.background().color()
actual = (color.red(), color.green(), color.blue())
self.assertEqual(
actual,
expected,
"Expected color: {!r}. Got color: {!r}".format(expected, actual),
)
# Run this test case against wx too once enthought/traitsui#752 is fixed.
@requires_toolkit([ToolkitName.qt])
class TestDateEditorInitDispose(unittest.TestCase):
"""Test the init and dispose of date editor."""
def check_init_and_dispose(self, view):
with reraise_exceptions(), create_ui(Foo(), dict(view=view)):
pass
def test_simple_date_editor(self):
view = View(
Item(
name="single_date",
style="simple",
)
)
self.check_init_and_dispose(view)
def test_custom_date_editor(self):
view = View(
Item(
name="single_date",
style="custom",
)
)
self.check_init_and_dispose(view)
|
enthought/traitsui
|
traitsui/tests/editors/test_date_editor.py
|
test_date_editor.py
|
py
| 6,064 |
python
|
en
|
code
| 290 |
github-code
|
6
|
9489616666
|
import healsparse as hs
import healpy as hp
import numpy as np
from optparse import OptionParser
def main():
usage = "%prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("--input_file",type="string",dest="infilename",help="Input file",default='/pool/cosmo01_data1/des/y6_sp_maps/mangle_maps/SPmaps/band_g/y6a2_g_o.4096_t.32768_AIRMASS.MAX_EQU.fits.fz')
parser.add_option("--nside_coverage",type="int",dest="nside_coverage",help="nside coverage value",default=32)
parser.add_option("--nside_out",type="int",dest="nside_out",help="nside of output file",default=4096)
parser.add_option("--nest",action="store_true",dest="isnest",help="Toggle NEST to True",default=False)
parser.add_option("--healpix",action="store_true",dest="ishealpix",help="Toggle healpix format to True",default=False)
parser.add_option("--mask",action="store_true",dest="applymask",help="Toggle mask application to True",default=False)
parser.add_option("--input_file_maskname",type="string",dest="maskname",help="Mask file",default='/pool/cosmo01_data1/des/y6_sp_maps/official_v1/outliers_analysis/bao_mask/jointmasks/Y6LSSBAO_V2_MASK_WITHDEPTH_up_to_22.5_jointmask_0.01percent_sb_mean_0.5max_val_neb_mean_gcs_bit64.fits.gz')
parser.add_option("--output_healsparse",action="store_true",dest="ouths",help="Toggle healsparse output",default=False)
parser.add_option("--output_dir",type="string",dest="outdir",help="Output directory",default='./')
parser.add_option("--output_file_mapname",type="string",dest="mapname",help="Output file map name",default='MAPNAME')
parser.add_option("--output_file_statname",type="string",dest="statname",help="Output file stat name",default='STATNAME')
parser.add_option("--output_file_bandname",type="string",dest="bandname",help="Output file band name",default=None)
(options, args) = parser.parse_args()
print('Selected NEST',options.isnest)
print('nside',options.nside_out)
#read a map
print('Reading map',options.infilename)
print('Selected input format as Healpix',options.ishealpix)
if options.applymask:
print('Applying mask',options.maskname)
if options.ishealpix:
inmap = hp.read_map(options.infilename, nest = options.isnest)
if options.applymask:
maskmap = hp.read_map(options.maskname,nest=False,partial=True)
mask = np.where(maskmap == hp.UNSEEN) #masked out regions assumed to be assigned UNSEEN
inmap[mask[0]] = hp.UNSEEN
else:
inmap = hs.HealSparseMap.read(options.infilename, options.nside_coverage)
#build string with file name
if options.isnest:
nestring = 'NEST'
else:
nestring = 'RING'
if options.bandname is None:
outfilename_noext = '_'.join(('y6',options.mapname,options.statname,str(options.nside_out),nestring))
else:
outfilename_noext = '_'.join(('y6',options.mapname,options.statname,options.bandname,str(options.nside_out),nestring))
if options.ouths:
extension = '.hs'
else:
extension = '.fits'
outfilename = options.outdir + outfilename_noext + extension
#write maps
print('Writing map',outfilename)
if options.ishealpix: #input is healpix
if options.ouths: #output in healsparse format
conv_map = hs.HealSparseMap(nside_coverage=options.nside_coverage, healpix_map=inmap)
conv_map.write(outfilename, clobber=True)
else: #output in healpix
if options.isnest:
order = 'NESTED'
else:
order = 'RING'
lores_map = hp.ud_grade(inmap, options.nside_out, order_in = order, order_out = order)
hp.write_map(outfilename, lores_map, nest = options.isnest, overwrite=True)
else: #input is healsparse
if options.ouths: #output in healsparse format
inmap.write(outfilename, clobber=True)
else: #output in healpix
conv_map = inmap.generate_healpix_map(nside=options.nside_out, reduction='mean', nest = options.isnest)
hp.write_map(outfilename, conv_map, nest = options.isnest, overwrite=True)
if __name__ == "__main__":
main()
|
nsevilla/utilities
|
converthshp.py
|
converthshp.py
|
py
| 4,218 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15271938454
|
"""
Creating keyspaces in Cassandra:
CREATE KEYSPACE my_keyspace WITH replication = {'class':'SimpleStrategy', 'replication_factor':1};
"""
from faker import Faker
faker = Faker()
def get_registered_user():
return faker.name()+"$"+faker.address()+"$"+faker.year()
"""return {
"name": faker.name(),
"address":faker.address(),
"created_at": faker.year()
}"""
if __name__ == "__main__":
print(get_registered_user())
|
imnikhilanand/Real-Time-ETL-with-Kafka-Spark-Cassandra
|
src/produce_data/generate_data.py
|
generate_data.py
|
py
| 431 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18308321752
|
from unittest import TestCase
from sqlite3 import IntegrityError
import os
from shutil import copytree, rmtree
from random import randint, random
import uuid
from tempfile import gettempdir
from shapely.geometry import Point
import shapely.wkb
from aequilibrae.project import Project
from ...data import siouxfalls_project
class TestNode(TestCase):
def setUp(self) -> None:
os.environ["PATH"] = os.path.join(gettempdir(), "temp_data") + ";" + os.environ["PATH"]
self.proj_dir = os.path.join(gettempdir(), uuid.uuid4().hex)
copytree(siouxfalls_project, self.proj_dir)
self.project = Project()
self.project.open(self.proj_dir)
self.network = self.project.network
self.curr = self.project.conn.cursor()
def tearDown(self) -> None:
self.curr.close()
self.project.close()
try:
rmtree(self.proj_dir)
except Exception as e:
print(f"Failed to remove at {e.args}")
def test_save_and_assignment(self):
nodes = self.network.nodes
nd = randint(1, 24)
node = nodes.get(nd)
with self.assertRaises(AttributeError):
node.modes = "abc"
with self.assertRaises(AttributeError):
node.link_types = "default"
with self.assertRaises(AttributeError):
node.node_id = 2
with self.assertRaises(ValueError):
node.is_centroid = 2
node.is_centroid = 0
self.assertEqual(0, node.is_centroid, "Assignment of is_centroid did not work")
x = node.geometry.x + random()
y = node.geometry.y + random()
node.geometry = Point([x, y])
node.save()
self.curr.execute("Select is_centroid, asBinary(geometry) from nodes where node_id=?;", [nd])
flag, wkb = self.curr.fetchone()
self.assertEqual(flag, 0, "Saving of is_centroid failed")
geo = shapely.wkb.loads(wkb)
self.assertEqual(geo.x, x, "Geometry X saved wrong")
self.assertEqual(geo.y, y, "Geometry Y saved wrong")
self.curr.execute("Select asBinary(geometry) from links where a_node=?;", [nd])
wkb = self.curr.fetchone()[0]
geo2 = shapely.wkb.loads(wkb)
self.assertEqual(geo2.xy[0][0], x, "Saving node geometry broke underlying network")
self.assertEqual(geo2.xy[1][0], y, "Saving node geometry broke underlying network")
def test_data_fields(self):
nodes = self.network.nodes
node1 = nodes.get(randint(1, 24))
node2 = nodes.get(randint(1, 24))
self.assertEqual(node1.data_fields(), node2.data_fields(), "Different nodes have different data fields")
fields = sorted(node1.data_fields())
self.curr.execute("pragma table_info(nodes)")
dt = self.curr.fetchall()
actual_fields = sorted([x[1] for x in dt if x[1] != "ogc_fid"])
self.assertEqual(fields, actual_fields, "Node has unexpected set of fields")
def test_renumber(self):
nodes = self.network.nodes
node = nodes.get(randint(2, 24))
x = node.geometry.x
y = node.geometry.y
with self.assertRaises(IntegrityError):
node.renumber(1)
num = randint(25, 2000)
node.renumber(num)
self.curr.execute("Select asBinary(geometry) from nodes where node_id=?;", [num])
wkb = self.curr.fetchone()[0]
geo = shapely.wkb.loads(wkb)
self.assertEqual(geo.x, x, "Renumbering failed")
self.assertEqual(geo.y, y, "Renumbering failed")
|
AequilibraE/aequilibrae
|
tests/aequilibrae/project/test_node.py
|
test_node.py
|
py
| 3,560 |
python
|
en
|
code
| 140 |
github-code
|
6
|
69868060029
|
# Find the index of the first occurrence of a value in a list
# list.index(obj)
# Parameters
# obj -- the object to look up.
# Return value: the index of the object; raises a ValueError if the object is not found.
Array = [1, 2, 3, 4, 5]
print(Array.index(2)) # 1
Array2 = [
{
"name": "Tom",
"age": 18
},
{
"name": "Yang",
"age": 22
}
]
print(Array2.index({
"name": "Yang",
"age": 22
})) # 1
# objects can be matched as well
print(Array.index(6)) # ValueError: 6 is not in list
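
# Added illustration: index() raises ValueError when the value is missing, so a
# membership test (or try/except) is the usual way to look up safely.
value = 6
if value in Array:
    print(Array.index(value))
else:
    print(value, "is not in the list")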
|
yangbaoxi/dataProcessing
|
python/List(数组)/查找元素/index.py
|
index.py
|
py
| 576 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
30176892204
|
#!/usr/bin/env python
#===============================================================================
# objdump2vmh.py
#===============================================================================
#
# -h --help Display this message
# -v --verbose Verbose mode
# -f --file Objdump file to parse
#
# Author : Ji Kim
# Date : April 13, 2011
#
import optparse
import fileinput
import sys
import re
#-------------------------------------------------------------------------------
# Command line processing
#-------------------------------------------------------------------------------
class OptionParserWithCustomError(optparse.OptionParser):
def error( self, msg = "" ):
if ( msg ): print("\n ERROR: %s" % msg)
print("")
for line in fileinput.input(sys.argv[0]):
if ( not re.match( "#", line ) ): sys.exit(msg != "")
if ((fileinput.lineno() == 3) or (fileinput.lineno() > 4)):
print( re.sub( "^#", "", line.rstrip("\n") ) )
def parse_cmdline():
p = OptionParserWithCustomError( add_help_option=False )
p.add_option( "-v", "--verbose", action="store_true", dest="verbose" )
p.add_option( "-h", "--help", action="store_true", dest="help" )
p.add_option( "-f", "--file", action="store", type="string", dest="file" )
(opts,args) = p.parse_args()
    if ( opts.help == True ): p.error()
if args: p.error("found extra positional arguments")
return opts
#-------------------------------------------------------------------------------
# Main
#-------------------------------------------------------------------------------
def main():
opts = parse_cmdline()
# Open vmh file for writing
file_out = open( opts.file[0:opts.file.find( "." )] + ".vmh", "w" )
try:
file_out.write( "\n" )
# Open objdump file for reading
file_in = open( opts.file, "r" )
try:
in_block = False
# Iterate through lines in the file
for line in file_in:
# Parse the line into a list of words
split_line = line.split()
# Check if line is the beginning of an instruction block
if ( line.find( ">:" ) >= 0 ):
in_block = True
# Convert block virtual address to physical address
block_addr = hex( int( split_line[0], 16 ) )[2:]
# Name of current block
block_name = split_line[1][:-1]
# Number of spaces between address and comment, offset from left edge = 10
space_amt = ( 10 - ( len( block_addr ) + 1 ) )
# Construct vmh line to write
buffer = "@" + block_addr + ( " " * space_amt ) + "// " + block_name + "\n"
# Write to vmh file
file_out.write( buffer )
# Check if line is within an instruction block
elif ( in_block ):
# Unset in_block if there's a break
if ( line == "\n" ):
in_block = False
file_out.write( "\n" )
else:
# Parse instruction fields
inst_bits = split_line[1]
PC_raw = split_line[0][:-1]
zero_amt = 8 - len( PC_raw )
PC = ( "0" * zero_amt ) + PC_raw
# Iterate through bytes in current data word
bytes = range(4)
bytes.reverse()
for i in bytes:
# Construct vmh line to write
buffer = inst_bits[i*2:i*2+2]
if ( i == 3 ):
buffer = buffer + " // " + PC + "\n"
else:
buffer = buffer + "\n"
# Write line to file
file_out.write( buffer )
finally:
file_in.close()
finally:
file_out.close()
main()
|
cornell-brg/pydgin
|
ubmark-nosyscalls/scripts/objdump2vmh.py
|
objdump2vmh.py
|
py
| 3,669 |
python
|
en
|
code
| 159 |
github-code
|
6
|
72128887548
|
from OpenGL.GL import *
from common import get_namekey
import numpy as np
import pyglet
# #---badway
# vaoidx = VAO( {0:3,1:2},
# #np.array([0,0,0, 0,0, 0.5,0,0, 1,0, 0.5,0.5,0, 1,1, 0,0.5,0, 0,1, ]).astype('float32'),
# #np.array([0,0,0, 0,0, 1,0,0, 1,0, 1,1,0, 1,1, 0,1,0, 0,1, ]).astype('float32'),
# np.array([ [0,0,0, 0,0], [1,0,0, 1,0], [1,1,0, 1,1], [0,1,0, 0,1] ]).astype('float32'),
# np.array([0,1,2,0,2,3,]).astype('uint')
# )
#hard to parse; we take this way instead.
# vaoidx = VAO(
# {
# 'position' : [ 0,0,0, 1,0,0, 1,1,0, 0,1,0,],
# 'uv' : [ 0,0, 1,0, 1,1, 0,1 ],
# },
# indices = [0,1,2,0,2,3,]
# )#name
#hard to parse.
# vaoidx = VAO(
# position= [ 0,0,0, 1,0,0, 1,1,0, 0,1,0,],
# uv = [ 0,0, 1,0, 1,1, 0,1 ],
# indices = [0,1,2,0,2,3,]
# )#name
vao_attrs={
'position' : np.array([ 0,0,0, 1,0,0, 1,1,0, 0,1,0,]).astype('float32'),
'uv' : np.array([ 0,0, 1,0, 1,1, 0,1 ]).astype('float32'),
}
vao_indices = np.array([0,1,2,0,2,3,]).astype('uint')
# for i,nparr in vao_attrs.items():
# a = len(nparr)
# print(a)
class VAO:
"""indexed actually. hope we not use vao_notindexed."""
last = -1
#---namedict ver 0.2
namedict = {}
@classmethod
def get(cls, name):
if not 'default' in cls.namedict:
cls.default()
return cls.namedict.get(name)
@classmethod
def set(cls, name: str, item) -> str:
name = get_namekey(cls.namedict,name)
cls.namedict[name]=item
return name
@classmethod
def default(cls):
cls(vao_attrs,vao_indices,name='default')
def __repr__(self):
return f"vao name:{self.name}"
def __init__(self, attrdict,indices, name='vao'):
"""we need attrdict{'position':ndarr(f32)} !"""
#attrlist=[]
#for attrname, nparr in attrdict.items():
# attrlist.append(nparr)
#vertices = np.concatenate( attrlist ).astype('float32')
assert list(attrdict.keys())[0]=='position'
vert_count = len(attrdict['position'])//3
#attr_size_dict = {0:3,1:2}
attr_size_dict = {}
attridx = -1
for attrname, nparr in attrdict.items():
attridx+=1
attrN = len(nparr)//vert_count
attr_size_dict[attridx] = attrN
stride = sum(attr_size_dict.values())#5, of 3+2
vertices = []
for idx, nparr in enumerate(attrdict.values()):
vertlen = attr_size_dict[idx]
verted = nparr.reshape(-1,vertlen)
vertices.append(verted)
vertices = np.hstack(vertices).flatten().astype('float32')
#---old way
#vertices = np.array([ [0,0,0, 0,0], [1,0,0, 1,0], [1,1,0, 1,1], [0,1,0, 0,1] ]).astype('float32')
#indices = np.array([0,1,2,0,2,3,]).astype('uint')
#attr_size_dict = {0:3,1:2}
#stride = sum(attr_size_dict.values())#5, of 3+2
#pos 12, indices,,
#vert_count = len(indices)
datatype = GL_FLOAT
normalized = GL_FALSE #GL_TRUE
fsize = np.float32(0.0).nbytes #to ensure namespace-safe.
VAO = glGenVertexArrays(1) # create a VA. if 3, 3of VA got. #errs if no window.
VBO = glGenBuffers(1) #it's buffer, for data of vao.fine.
EBO = glGenBuffers(1) #indexed, so EBO also. yeah.
glBindVertexArray(VAO) #gpu bind VAO
glBindBuffer(GL_ARRAY_BUFFER, VBO) #gpu bind VBO in VAO
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)
pre_offset = 0
for attr_index, size in attr_size_dict.items():
if pre_offset==0:
offset = None
offset = ctypes.c_void_p(0)#maybe it seems works.
glVertexAttribPointer(attr_index, size, datatype, normalized, stride * fsize, offset)
glEnableVertexAttribArray(attr_index)
pre_offset = size
else:
offset = ctypes.c_void_p( pre_offset *fsize)
glVertexAttribPointer(attr_index, size, datatype, normalized, stride * fsize, offset)
glEnableVertexAttribArray(attr_index)
pre_offset +=size
self.ID = VAO
self.ID_VBO = VBO
self.ID_EBO = EBO
self.stride = stride
self.points = len(indices)
self.vertices = vertices
self.name = self.__class__.set(name,self)
def update_position(self,position):
vertices = self.vertices
#assume 0,1,2 is posxyz.
vertices[0::self.stride] = position[0::3]
vertices[1::self.stride] = position[1::3]
vertices[2::self.stride] = position[2::3]
VAO = self.ID
VBO = self.ID_VBO
glBindVertexArray(VAO)
glBindBuffer(GL_ARRAY_BUFFER, VBO) #gpu bind VBO in VAO
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
#GL_STREAM_DRAW for little change, if you want someday..
self.vertices = vertices
def update(self,vertices):
"""requires same shape kinds.."""
VAO = self.ID
VBO = self.ID_VBO
glBindVertexArray(VAO)
glBindBuffer(GL_ARRAY_BUFFER, VBO) #gpu bind VBO in VAO
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
#GL_STREAM_DRAW for little change, if you want someday..
#self.points = len(vertices)//self.stride #donno why do this..
def update_indices(self,vertices, indices):
"""hope we not use this.."""
VAO = self.ID
VBO = self.ID_VBO
EBO = self.ID_EBO
glBindVertexArray(VAO)
glBindBuffer(GL_ARRAY_BUFFER, VBO) #gpu bind VBO in VAO
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)
self.points = len(indices)#now we can really change it..
def bind(self):
if VAO.last != self.ID:
glBindVertexArray(self.ID)
VAO.last = self.ID
def unbind(self):
glBindVertexArray(0)
VAO.last = -1
def draw(self, MODE = 'triangles'):
"""requires bind first. it just draw command of VAO bound gpu."""
#simple mode changeable draw. we not prefer partial draw which is slow.
draw_dict = {'points':GL_POINTS,
'lines':GL_LINE_STRIP,
'triangles':GL_TRIANGLES,
}
MODE = draw_dict[MODE]
glDrawElements(MODE, self.points, GL_UNSIGNED_INT, None)
if __name__ == "__main__":
window = pyglet.window.Window()
VAO.default()
a = VAO.get('default')
print(a)
|
liltmagicbox/3dkatsu
|
objects/vao_123123.py
|
vao_123123.py
|
py
| 6,982 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25129650646
|
import numpy as np
import keras
from keras.datasets import mnist
class Dataset:
def __init__(self, path, local):
"""
Initialize the MNIST dataset.
Parameters path and local are only included to fit the interface of Dataset
:param path: Ignored
:param local: Ignored
"""
(x, y), (_, _) = mnist.load_data()
# Configure input
x = (x.astype(np.float32) - 127.5) / 127.5
x = np.expand_dims(x, axis=3)
x_padding = np.zeros((x.shape[0], 64, 64, 1)) - 1
x_padding[:, :28, :28, :] = x
x = x_padding
y = keras.utils.np_utils.to_categorical(y, 10)
self.x = x
self.y = y
print('Loaded dataset')
print('X:', self.x.shape)
print('Y:', self.y.shape)
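
# Minimal usage sketch (assumes Keras can download/load MNIST locally; the
# path and local arguments are ignored by this class, so placeholders are fine).
if __name__ == "__main__":
    dataset = Dataset(path=None, local=None)
    print(dataset.x.shape)  # expected: (60000, 64, 64, 1)
    print(dataset.y.shape)  # expected: (60000, 10)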
|
jessica-dl/2XB3-ML-Training
|
trainer/mnist_dataset.py
|
mnist_dataset.py
|
py
| 804 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27834182031
|
class Monitors:
name = 'Samsung'
matrix = 'VA'
res = 'WQHD'
freq = 60
class Headphones:
name = 'Sony'
sensitivity = 108
micro = True
monitor_1 = Monitors()
monitor_2 = Monitors()
monitor_2.freq = 144
monitor_3 = Monitors()
monitor_3.freq = 70
monitor_4 = Monitors()
headphone_1 = Headphones()
headphone_1.micro = False
headphone_2 = Headphones()
headphone_3 = Headphones()
print(monitor_1.freq, monitor_2.freq, monitor_3.freq, monitor_4.freq)
print(headphone_1.micro, headphone_2.micro, headphone_3.micro)
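
# Added check (illustrative): monitor_2.freq = 144 creates an instance
# attribute that shadows the class attribute; the class attributes themselves
# are unchanged.
print(Monitors.freq)     # still 60
print(Headphones.micro)  # still True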
|
MikhailRyskin/test1
|
test242_2.py
|
test242_2.py
|
py
| 542 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35334534179
|
'''
Write a function that reads a series of numbers between 1 and 20 from the
keyboard and stores them in a list. If a value outside that range is entered,
the program shows an error message and asks for a new number. Entering -1 ends
the input. The function takes no parameters and returns the loaded list (or an
empty list, if the user entered nothing) as its return value.
'''
def cargar_lista(lista, num):
    while(num != -1):
        if(num < 1 or num > 20):
            print("Please enter a valid value between 1 and 20")
        else:
            lista.append(num)
        num = int(input("Enter a number between 1 and 20 (-1 to finish): "))
    return lista

lista = []
num = int(input("Enter a number between 1 and 20 (-1 to finish): "))
print(cargar_lista(lista, num))
input()
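
# Added sketch matching the stated contract (the statement says the function
# takes no parameters and returns the loaded list); the name below is new and
# only illustrative.
def cargar_lista_sin_parametros():
    valores = []
    entrada = int(input("Enter a number between 1 and 20 (-1 to finish): "))
    while entrada != -1:
        if 1 <= entrada <= 20:
            valores.append(entrada)
        else:
            print("Please enter a valid value between 1 and 20")
        entrada = int(input("Enter a number between 1 and 20 (-1 to finish): "))
    return valores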
|
agsosa96/Fundamentos-De-La-Programacion
|
Trabajo Practico N°7/Ejercicio_1.py
|
Ejercicio_1.py
|
py
| 830 |
python
|
es
|
code
| 0 |
github-code
|
6
|
31232927085
|
import logging
import requests
from io import BytesIO
from django.core.management import BaseCommand
from places.models import Place, PlaceImage
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Load information about places with media files'
def add_arguments(self, parser):
parser.add_argument('resource_url', type=str)
def handle(self, *args, **options):
resource_url = options['resource_url']
logger.info(f'START LOADING DATA FROM RESOURCE {resource_url}')
try:
response = requests.get(resource_url)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
logger.error(f'UNABLE TO LOAD DATA FROM RESOURCE {resource_url}, details: {e}')
return
place_data = response.json()
place, created = Place.objects.get_or_create(
title=place_data['title'],
defaults={
'short_title': place_data['title'],
'short_description': place_data['description_short'],
'long_description': place_data['description_long'],
'lng': place_data['coordinates']['lng'],
'lat': place_data['coordinates']['lat'],
'place_id': place_data['title'],
}
)
if created:
for i, img_url in enumerate(place_data['imgs'], start=1):
try:
img_response = requests.get(img_url)
img_response.raise_for_status()
except requests.exceptions.HTTPError as e:
                    logger.error(f'UNABLE TO SAVE IMAGE FROM RESOURCE {img_url}, details: {e}')
continue
img = BytesIO(img_response.content)
place_image, img_created = PlaceImage.objects.get_or_create(
place=place,
position=i
)
place_image.image.save(f'place-{place.id}-img-{i}', img, save=True)
action = 'CREATED' if created else 'UPDATED'
logger.info(f'{action} PLACE {place}')
logger.info(f'END LOADING DATA FROM RESOURCE {resource_url}')
|
vitaliy-pavlenko/where_to_go
|
places/management/commands/load_place.py
|
load_place.py
|
py
| 2,202 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75331159226
|
import sys
import ops
import iopc
TARBALL_FILE="drbd-utils-8.9.10.tar.gz"
TARBALL_DIR="drbd-utils-8.9.10"
INSTALL_DIR="drbd-utils-bin"
pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
def set_global(args):
global pkg_path
global output_dir
global tarball_pkg
global install_dir
global install_tmp_dir
global tarball_dir
global cc_host
pkg_path = args["pkg_path"]
output_dir = args["output_path"]
tarball_pkg = ops.path_join(pkg_path, TARBALL_FILE)
install_dir = ops.path_join(output_dir, INSTALL_DIR)
install_tmp_dir = ops.path_join(output_dir, INSTALL_DIR + "-tmp")
tarball_dir = ops.path_join(output_dir, TARBALL_DIR)
cc_host_str = ops.getEnv("CROSS_COMPILE")
cc_host = cc_host_str[:len(cc_host_str) - 1]
def MAIN_ENV(args):
set_global(args)
ops.exportEnv(ops.setEnv("CC", ops.getEnv("CROSS_COMPILE") + "gcc"))
ops.exportEnv(ops.setEnv("CXX", ops.getEnv("CROSS_COMPILE") + "g++"))
ops.exportEnv(ops.setEnv("CROSS", ops.getEnv("CROSS_COMPILE")))
ops.exportEnv(ops.setEnv("DESTDIR", install_tmp_dir))
return False
def MAIN_EXTRACT(args):
set_global(args)
ops.unTarGz(tarball_pkg, output_dir)
#ops.copyto(ops.path_join(pkg_path, "finit.conf"), output_dir)
return True
def MAIN_PATCH(args, patch_group_name):
set_global(args)
for patch in iopc.get_patch_list(pkg_path, patch_group_name):
if iopc.apply_patch(tarball_dir, patch):
continue
else:
sys.exit(1)
return True
def MAIN_CONFIGURE(args):
set_global(args)
extra_conf = []
extra_conf.append("--with-distro=generic")
extra_conf.append("--without-manual")
extra_conf.append("--without-udev")
extra_conf.append("--without-83support")
extra_conf.append("--with-initscripttype=sysv")
#extra_conf.append("--prefix=" + install_dir)
extra_conf.append("--host=" + cc_host)
iopc.configure(tarball_dir, extra_conf)
return True
def MAIN_BUILD(args):
set_global(args)
ops.mkdir(install_dir)
ops.mkdir(install_tmp_dir)
iopc.make(tarball_dir)
iopc.make_install(tarball_dir)
return False
def MAIN_INSTALL(args):
set_global(args)
ops.mkdir(install_dir)
ops.copyto(ops.path_join(install_tmp_dir, "lib"), install_dir)
ops.mkdir(ops.path_join(install_dir, "sbin"))
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/sbin/drbdadm"), ops.path_join(install_dir, "/sbin"))
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/sbin/drbdsetup"), ops.path_join(install_dir, "/sbin"))
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/sbin/drbdmeta"), ops.path_join(install_dir, "/sbin"))
ops.mkdir(ops.path_join(install_dir, "usr/"))
ops.rm_file(ops.path_join(install_dir, "var"))
ops.ln(install_dir, "/var", ops.path_join(install_dir, "var"))
iopc.installBin(args["pkg_name"], ops.path_join(install_dir, "lib/."), "lib")
iopc.installBin(args["pkg_name"], ops.path_join(install_dir, "sbin/."), "sbin")
iopc.installBin(args["pkg_name"], ops.path_join(install_dir, "var"), "usr/local")
#iopc.installBin(args["pkg_name"], ops.path_join(output_dir, "finit.conf"), "etc")
return False
def MAIN_SDKENV(args):
set_global(args)
return False
def MAIN_CLEAN_BUILD(args):
set_global(args)
return False
def MAIN(args):
set_global(args)
|
YuanYuLin/drbd-utils
|
Package/CONFIG.py
|
CONFIG.py
|
py
| 3,434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75226773628
|
import pymongo
from data_handlers import import_from_csv
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
core_db = myclient["core"]
indices_col = core_db["indices"]
historical_data_col = core_db["historical_data"]
instruments_file = "C:\\Users\\Prathiksha\\Documents\\Prashanth\\Trading\\instruments_nse.csv"
market_cap_file = "C:\\Users\\Prathiksha\\Documents\\Prashanth\\Trading\\MCAP31032021_0.csv"
mis_data_file = "C:\\Users\\Prathiksha\\Documents\\Prashanth\\Trading\\Zerodha - Intraday margins - EQ- MIS_CO leverages.csv"
instruments_df = import_from_csv(instruments_file)
market_cap_df = import_from_csv(market_cap_file)
mis_df = import_from_csv(mis_data_file)
print(mis_df.columns)
def insert_to_db(df, db_col):
indices = df.columns
for ind in df.index:
input_dict = {}
for index in indices:
input_dict[index] = str(df[index][ind])
db_col.insert_one(input_dict)
def update_to_db(df, db_col, indices, field):
for ind in df.index:
for i in range(0, len(indices)):
try:
db_col.update_one({"tradingsymbol":df[indices[i]][ind]},{"$set":{field:"true"}})
except Exception as e:
print(e)
# try:
# db_col.update_one({"tradingsymbol":df[indices[i]][ind]+"-BE"},{"$set":{"rank":df['Sr. No.'][ind], "market_cap":df['market_cap'][ind]}})
# except Exception as e:
# print(e)
update_to_db(mis_df, indices_col, ['Symbol'], "mis_status")
#insert_to_db(instruments_df, indices_col)
|
prashanth470/trading
|
source/data/db_operations.py
|
db_operations.py
|
py
| 1,622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4086684487
|
import warnings
from functools import partial
from multiprocessing import Pool
import pandas as pd
import textacy
import textacy.preprocessing
import textacy.representations
import textacy.tm
from tqdm import tqdm
tqdm.pandas()
warnings.simplefilter(action="ignore", category=FutureWarning)
preproc = textacy.preprocessing.make_pipeline(
textacy.preprocessing.normalize.unicode,
textacy.preprocessing.normalize.bullet_points,
textacy.preprocessing.normalize.quotation_marks,
textacy.preprocessing.normalize.whitespace,
textacy.preprocessing.normalize.hyphenated_words,
textacy.preprocessing.remove.brackets,
textacy.preprocessing.replace.currency_symbols,
textacy.preprocessing.remove.html_tags,
)
def from_dict_to_frame(indexed_dict):
data = {k: [v] for k, v in indexed_dict.items()}
df = pd.DataFrame.from_dict(data).T
df.columns = ["text"]
df = df.explode("text")
return df
def extract_terms_df(
data,
text_var,
index_var,
ngs=True,
ents=True,
ncs=False,
sample_size=100000,
drop_emoji=True,
ngrams=(2, 2),
remove_punctuation=True,
include_pos=["NOUN"],
include_types=["PERSON", "ORG"],
language="en_core_web_sm",
):
load_lang = textacy.load_spacy_lang(language, disable=())
def extract_terms(
tuple, # (index, text)
ngs=True,
ents=True,
ncs=False,
ngrams=(2, 2),
drop_emoji=True,
remove_punctuation=False,
include_pos=["NOUN", "PROPN", "ADJ"],
include_types=["PERSON", "ORG"],
):
index = tuple[0]
text = tuple[1]
prepro_text = preproc(str(text))
if drop_emoji == True:
prepro_text = textacy.preprocessing.replace.emojis(prepro_text, repl="")
if remove_punctuation == True:
prepro_text = textacy.preprocessing.remove.punctuation(prepro_text)
doc = textacy.make_spacy_doc(prepro_text, lang=load_lang)
terms = []
if ngs:
ngrams_terms = list(
textacy.extract.terms(
doc,
ngs=partial(
textacy.extract.ngrams,
n=ngrams,
filter_punct=True,
filter_stops=True,
include_pos=include_pos,
),
dedupe=False,
)
)
terms.append(ngrams_terms)
if ents:
ents_terms = list(
textacy.extract.terms(
doc,
ents=partial(textacy.extract.entities, include_types=include_types),
dedupe=False,
)
)
terms.append(ents_terms)
if ncs:
ncs_terms = list(
textacy.extract.terms(
doc,
ncs=partial(textacy.extract.noun_chunks, drop_determiners=True),
dedupe=False,
)
)
noun_chunks = [x for x in ncs_terms if len(x) >= 3]
terms.append(noun_chunks)
final = [item for sublist in terms for item in sublist]
final = list(set(final))
df = [
(term.text, term.lemma_.lower(), term.label_, term.__len__())
for term in final
]
df = pd.DataFrame(df, columns=["text", "lemma", "ent", "ngrams"])
df["text_index"] = index
return df
"""
This function extracts terms from a column in a DataFrame. It can extract in a multiprocessing way
It outputs a dataframe with the list of terms and a table with the indexed terms
"""
data = data[data[text_var].notna()]
data = data.sample(min(sample_size, len(data)))
sentences = data[text_var].to_list()
indexes = data[index_var].to_list()
inputs = [(x, y) for x, y in zip(indexes, sentences)]
# else:
res = list(
tqdm(
map(
partial(
extract_terms,
ngs=ngs,
ents=ents,
ncs=ncs,
drop_emoji=drop_emoji,
remove_punctuation=remove_punctuation,
ngrams=ngrams,
include_pos=include_pos,
include_types=include_types,
),
inputs,
),
total=len(inputs),
)
)
final_res = pd.concat([x for x in res])
terms = (
final_res.groupby(["text", "lemma", "ent", "ngrams"])
.agg(count_terms=("text_index", "count"))
.reset_index()
)
# duplicates to get rid of
terms = terms.sort_values(["text", "ent"]).reset_index(drop=True)
terms = terms.drop_duplicates(["text"], keep="first")
terms = terms.sort_values("count_terms", ascending=False)
terms = terms.rename(columns={"text": "terms_indexed"})
terms = terms.set_index("terms_indexed")
terms_indexed = final_res[["text", "text_index"]].drop_duplicates()
terms_indexed = terms_indexed.rename(columns={"text_index": index_var})
terms_indexed = terms_indexed.groupby(index_var)["text"].apply(list)
terms_indexed = terms_indexed.reset_index()
terms_indexed = terms_indexed.rename(columns={"text": "terms_indexed"})
terms_indexed = terms_indexed.set_index(index_var)
return terms, terms_indexed
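
# Illustrative usage sketch (column names "doc_id" and "text" are placeholders;
# the spaCy model passed via `language` must be installed for this to run).
if __name__ == "__main__":
    sample = pd.DataFrame(
        {"doc_id": [1, 2], "text": ["Apple designs new chips.", "Banks raise interest rates."]}
    )
    terms, terms_indexed = extract_terms_df(sample, text_var="text", index_var="doc_id", sample_size=10)
    print(terms.head())
    print(terms_indexed.head())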
|
charlesdedampierre/BunkaTopics
|
bunkatopics/functions/extract_terms.py
|
extract_terms.py
|
py
| 5,458 |
python
|
en
|
code
| 35 |
github-code
|
6
|
32341699285
|
import json
import numpy as np
import pandas as pd
import tensorflow as tf
class BalancedGroupSoftmax:
def __init__(self,
dataset_json,
category_map,
empty_class_id,
selected_locations=None,
n_groups=4,
sl_max_groups=[0, 10, 100, 1000, 2**100],
beta_others=8.0):
self.n_groups = n_groups
self.sl_max_groups = sl_max_groups
self.beta_others = beta_others
self.category_map = category_map
self.empty_class_id = empty_class_id
self.label2binlabel = {}
self.groups_counts = [1] * (n_groups + 1)
self.predict_tables = []
dataset_df = self._load_dataset(dataset_json, selected_locations)
self._generate_binlabel_idx(dataset_df)
self._generate_predict_tables()
def _load_dataset(self, dataset_json, selected_locations):
with tf.io.gfile.GFile(dataset_json, 'r') as json_file:
json_data = json.load(json_file)
images = pd.DataFrame(json_data['images'])
annotations = pd.DataFrame(json_data['annotations'])
images = pd.merge(images,
annotations[["image_id", "category_id"]],
how='left',
left_on='id',
right_on='image_id')
if selected_locations is not None:
images = images[images.location.isin(selected_locations)]
images = images.copy()
return images
def _get_group(self, instances_count):
for group, group_max in enumerate(self.sl_max_groups):
if instances_count < group_max:
return group
return 0
def _generate_binlabel_idx(self, dataset_df):
categories = list(range(self.category_map.get_num_classes()))
#group 0 is only bg/fg
self.groups_counts[0] = 2
empty_class = self.category_map.category_to_index(self.empty_class_id)
self.label2binlabel[empty_class] = [1] + [0] * (self.n_groups)
categories.remove(empty_class)
#nonempty categories
for categ in categories:
categ_id = self.category_map.index_to_category(categ)
instances_count = len(dataset_df[dataset_df.category_id == categ_id])
group_id = self._get_group(instances_count)
binlabel = [0] * (self.n_groups + 1)
binlabel[group_id] = self.groups_counts[group_id]
self.groups_counts[group_id] += 1
self.label2binlabel[categ] = binlabel
def _generate_predict_tables(self):
for i in range(self.n_groups + 1):
self.predict_tables.append(
np.zeros(shape=(self.groups_counts[i],
self.category_map.get_num_classes())))
for label, binlabel in self.label2binlabel.items():
group = np.asarray(binlabel).argmax()
self.predict_tables[group][binlabel[group]][label] = 1.0
def create_classif_header(self, head_features):
outputs = []
for group_count in self.groups_counts:
output = tf.keras.layers.Dense(group_count,
activation='softmax')(head_features)
outputs.append(output)
return outputs
def _create_map_layer(self, inputs, n_inputs, n_outputs, weights):
map_layer = tf.keras.layers.Dense(n_outputs, use_bias=False)
map_layer(tf.convert_to_tensor(np.ones((1, n_inputs)), dtype=tf.float32))
map_layer.set_weights([weights])
return map_layer(inputs)
def create_prediction_model(self, trained_model):
fg_prob_map = np.array([np.ones(self.category_map.get_num_classes()),
np.zeros(self.category_map.get_num_classes())])
fg_prob = self._create_map_layer(trained_model.outputs[0],
self.groups_counts[0],
self.category_map.get_num_classes(),
fg_prob_map)
mapped_predictions = []
for output, group_size, predict_tbl in zip(trained_model.outputs,
self.groups_counts,
self.predict_tables):
layer_map = self._create_map_layer(output,
group_size,
self.category_map.get_num_classes(),
predict_tbl)
mapped_predictions.append(layer_map)
scaled_mapped_predictions = [mapped_predictions[0]]
for map_pred in mapped_predictions[1:]:
scaled_map_pred = tf.keras.layers.Multiply()([map_pred, fg_prob])
scaled_mapped_predictions.append(scaled_map_pred)
preds = tf.keras.layers.Add()(scaled_mapped_predictions)
model = tf.keras.models.Model(inputs=trained_model.inputs, outputs=preds)
return model
def process_label(self, label):
def _get_idx_label(label):
categ_id = self.category_map.category_to_index(label.numpy())
binlabels = self.label2binlabel[categ_id]
binlabels_one_hot = []
for idx, binlabel in enumerate(binlabels):
one_hot = np.zeros(self.groups_counts[idx])
one_hot[binlabel] = 1
binlabels_one_hot.append(one_hot)
return binlabels_one_hot
labels = tf.py_function(func=_get_idx_label,
inp=[label],
Tout=([tf.float32]*(self.n_groups+1)))
labels = [tf.ensure_shape(label, shape=(self.groups_counts[i],))
for i, label in enumerate(labels)]
return tuple(labels)
def generate_balancing_mask(self, labels):
batch_size = tf.shape(labels[0])[0]
masks = []
#for the bg/fg group we use all instances
mask0 = tf.ones(shape=(batch_size,))
masks.append(mask0)
def _get_max(labels, batch_size):
labels = labels.numpy()
others = labels[:,0]
fg = 1.0 - others
fg_num = np.sum(fg)
if fg_num == 0:
return np.zeros(batch_size)
others_num = batch_size - fg_num
others_sample_num = int(fg_num * self.beta_others)
if others_sample_num > others_num:
return np.ones(batch_size)
else:
sample_idx = np.random.choice(others.nonzero()[0],
(others_sample_num, ), replace=False)
fg[sample_idx] = 1.0
return fg
for i in range(1, self.n_groups + 1):
mask = tf.py_function(func=_get_max,
inp=[labels[i], batch_size],
Tout=tf.float32)
masks.append(mask)
return tuple(masks)
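
# Grouping sketch (illustrative, mirrors _get_group with the default bounds;
# standalone so it needs no dataset or category map):
def _example_group(count, bounds=(0, 10, 100, 1000, 2 ** 100)):
    for group, group_max in enumerate(bounds):
        if count < group_max:
            return group
    return 0
# _example_group(7) -> 1, _example_group(250) -> 3, _example_group(5000) -> 4;
# group 0 is reserved for the binary empty/non-empty (bg/fg) head.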
|
alcunha/iwildcam2021ufam
|
classification/bags.py
|
bags.py
|
py
| 6,432 |
python
|
en
|
code
| 11 |
github-code
|
6
|
26580948730
|
def caminho_hamiltoniano(grafo, size, ponto, path=[]):
if ponto not in set(path):
path.append(ponto)
if len(path) == size:
return path
todos_candidatos = []
for prox_ponto in grafo.get(ponto, []):
res_path = [i for i in path]
candidatos = caminho_hamiltoniano(grafo, size, prox_ponto, res_path)
if candidatos is not None:
todos_candidatos.extend(candidatos)
else:
pass
return todos_candidatos
else:
return None
if __name__ == '__main__':
    ponto = 1
    size = 4
    grafo = {1: [2, 3], 2: [1, 3, 4], 3: [1, 2, 4], 4: [2, 3]}
    grafo2 = {1:[2,4], 2:[1,3], 3:[2,4], 4:[1,3]}
    grafo3 = {0:[1,2], 1:[3,4], 2:[0,5,6], 3:[1], 4:[1], 5:[2], 6:[2]}
    path = caminho_hamiltoniano(grafo, size, ponto)
    lista_candidados = []
    for j in range(size):
        while j*size < len(path):
            start = int(j*size)
            end = int((j+1)*size)
            lista_candidados.append(path[start:end])
            break
    print('Candidates for Hamiltonian cycles: %s' % (lista_candidados))
    if len(lista_candidados) == 0:
        print('There is no Hamiltonian path, so there is no Hamiltonian cycle')
    else:
        for candidato in lista_candidados:
            if ponto in grafo[candidato[-1]]:
                print('The path %s is a Hamiltonian cycle' % (candidato))
            else:
                print('The path %s is not a Hamiltonian cycle' % (candidato))
|
LeandroGelain/PersonalGit
|
2018-2019/Aulas_Algoritmos_avançados/TrabalhoGrafos/hamiltoniano.py
|
hamiltoniano.py
|
py
| 1,538 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
71345484349
|
import logging
from typing import List
from DAL.ItemDAL.ItemDALInterface import ItemDALInterface
from Database.DBConnection import DBConnection
from Entities.Item import Item
class ItemDALImplementation(ItemDALInterface):
def create_item(self, item: Item) -> Item:
logging.info("Beginning DAL method create item with item: " + str(item))
sql = "INSERT INTO Designs.Item (item_name) VALUES (%s) RETURNING item_id;"
connection = DBConnection.db_connection()
cursor = connection.cursor()
cursor.execute(sql, (item.item_name,))
item.item_id = cursor.fetchone()[0]
cursor.close()
connection.commit()
connection.close()
logging.info("Finishing DAL method create item with item: " + str(item))
return item
def get_item(self, item_id: int) -> Item:
logging.info("Beginning DAL method get item with item ID: " + str(item_id))
sql = "SELECT * FROM Designs.Item WHERE item_id=%s;"
connection = DBConnection.db_connection()
cursor = connection.cursor()
cursor.execute(sql, (item_id,))
item_info = cursor.fetchone()
cursor.close()
connection.close()
if item_info is None:
item = Item(0, "")
logging.info("Finishing DAL method get item, item not found")
return item
else:
item = Item(*item_info)
logging.info("Finishing DAL method get item with item: " + str(item.convert_to_dictionary()))
return item
def get_all_items(self) -> List[Item]:
logging.info("Beginning DAL method get all items")
sql = "SELECT * FROM Designs.Item;"
connection = DBConnection.db_connection()
cursor = connection.cursor()
cursor.execute(sql)
item_records = cursor.fetchall()
cursor.close()
connection.close()
item_list = []
for item in item_records:
item = Item(*item)
item_list.append(item)
logging.info("Finishing DAL method get all items with items: " + str(item.convert_to_dictionary()))
return item_list
def update_item(self, item: Item) -> bool:
logging.info("Beginning DAL method update item with item: " + str(item.convert_to_dictionary()))
sql = "Update Designs.Item SET item_name=%s WHERE item_id=%s;"
connection = DBConnection.db_connection()
cursor = connection.cursor()
cursor.execute(sql, (item.item_name, item.item_id))
cursor.close()
connection.commit()
connection.close()
logging.info("Finishing DAL method update item")
return True
def delete_item(self, item_id: int) -> bool:
logging.info("Beginning DAL method delete item with item ID: " + str(item_id))
sql = "DELETE FROM Designs.Item WHERE item_id=%s;"
connection = DBConnection.db_connection()
cursor = connection.cursor()
cursor.execute(sql, (item_id,))
cursor.close()
connection.commit()
connection.close()
logging.info("Finishing DAL method delete item")
return True
|
dmerc12/143Designs
|
back-end/DAL/ItemDAL/ItemDALImplementation.py
|
ItemDALImplementation.py
|
py
| 3,158 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71401520189
|
def getval(A, i, j, L, H):
if (i < 0 or i >= L or j < 0 or j >= H):
return 0
else:
return A[i][j]
def find_max_block(A, r, c, L, H, size, cntarr, maxsize):
if (r >= L or c >= H):
return
cntarr[r][c] = True
size += 1
if (size > maxsize):
maxsize = size
directions = [[-1,0], [-1,-1], [0,-1], [1,-1], [1,0], [1,1], [0,1], [-1,1]]
for direction in directions:
new_i = r + direction[0]
new_j = c + direction[1]
val = getval(A, new_i, new_j, L, H)
if (val > 0 and cntarr[new_i][new_j] is False):
maxsize = find_max_block(A, new_i, new_j, L, H, size, cntarr, maxsize)
cntarr[r][c] = False
return maxsize
def get_max_ones(A, rmax, colmax):
maxsize = 0
size = 0
cntarr = [[False for j in range(0,colmax)] for i in range(0,rmax) ]
for i in range(0, rmax):
for j in range(0, colmax):
if (A[i][j] == 1):
                maxsize = find_max_block(A, i, j, rmax, colmax, size, cntarr, maxsize)
return maxsize
if __name__ == '__main__':
A = [
[1, 1, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 1, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 1]
]
L = len(A)
H = len(A[0])
print(get_max_ones(A, L, H))
|
azaazato/algorithm_study_python
|
chapter2/find_connected.py
|
find_connected.py
|
py
| 1,304 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70929449148
|
# Import modules
import copy
import math
ERROR_MARGIN = 0.01
def build_Best_Graph(n_dict, c_dict): # main function
# n_dict = Nodes Dict
# c_dict = Connections Dict
# f_nid = First Node Id
# o_nid_list = Ordered Nodes Ids List
solutions_list = []
for f_nid in n_dict: # for each node in node_list
print('Creating list of node ' + str(f_nid))
o_nid_list = ordered_Node_List(f_nid, c_dict, n_dict)
nodes_list = [n_dict[nid] for nid in o_nid_list] # list of noodes with coordinates
nodes_list[0]['coor'] = coor_0() # add the first node with coordinates
nodes_list[1]['coor'] = coor_1(nodes_list[0], nodes_list[1]) # add the second node with coordinates
for index in range(2, len(nodes_list)):
print('Evaluating node ' + str(index))
nodes_list[index]['coor'] = get_Coordinates(nodes_list[index], nodes_list[:index], c_dict)
solutions_list.append(nodes_list)
print('Evaluating lists')
best_solution = min(solutions_list, key=lambda nodes_list: solution_Stress(nodes_list, c_dict)) # get list of nodes with lowest stress
return best_solution
def ordered_Node_List(f_nid, c_dict, n_dict): # Sort Nodes Ids
# nid_l_list = Nodes Ids Left List
# s_nid_dict = Score Nodes Ids-To-List Dict
o_nid_list = [f_nid]
nid_l_list = list(n_dict.keys())
nid_l_list.remove(f_nid)
while len(nid_l_list) > 0:
print('Done in ' +str(len(nid_l_list)))
s_nid_dict = {}
for nid in nid_l_list:
s_nid_dict[nid] = score_Node_To_List(nid, o_nid_list, c_dict, n_dict)
max_nid = max(s_nid_dict.keys(), key=lambda nid: s_nid_dict[nid])
o_nid_list.append(max_nid)
nid_l_list.remove(max_nid)
return o_nid_list
def score_Node_To_List(nid1, o_nid_list, c_dict, n_dict):
score_sum = 0
for nid2 in o_nid_list:
score_sum += get_Score(c_dict[nid1][nid2], n_dict[nid1]['lenght'], n_dict[nid2]['lenght'])
return score_sum
def get_Score(n_connections, lenght_1, lenght_2):
score = n_connections / (lenght_1*lenght_2)
return score
def coor_0(): # first node coordinate
return (0, 0)
def coor_1(node_1, node_2): # second node coordinate
magnitude_in_X = node_1['radius'] + node_2['radius']
return ((magnitude_in_X), 0)
def get_Coordinates(node, prior_nodes_list, c_dict):
stress_list = []
for prior_pair in pairs_Nodes(prior_nodes_list):
p_n1 = prior_pair[0]
p_n2 = prior_pair[1]
c_1 = {'x': p_n1['coor'][0], 'y': p_n1['coor'][1], 'r': p_n1['radius'] + node['radius'] + ERROR_MARGIN}
c_2 = {'x': p_n2['coor'][0], 'y': p_n2['coor'][1], 'r': p_n2['radius'] + node['radius'] + ERROR_MARGIN}
if do_Overlaps(c_1, c_2): # If circles overlap
#print('Calculate intersection')
int_coordinates = coor_Inter(c_1, c_2) # intersection coordinates
for coordinate in int_coordinates:
c_3 = {'x': coordinate[0], 'y': coordinate[1], 'r': node['radius']}
#print('Calculate overlaping wiht any')
if not overlaps_Any(c_3, prior_nodes_list): # If nodes don't overlap
#print('Node Stress')
stress = node_Stress(node['id'], c_3['x'], c_3['y'], prior_nodes_list, c_dict)
stress_list.append({'coor': coordinate, 'stress': stress})
else:
pass
else:
pass
best_coordinate = min(stress_list, key=lambda coordinate: coordinate['stress'])['coor']
return best_coordinate
def overlaps_Any(circle, nodes_list):
overlaps = False
index = 0
while not overlaps and index < len(nodes_list):
#print('iteration ' + str(index))
#print('Calculate individual overlaping')
new_circle = {'x': nodes_list[index]['coor'][0], 'y': nodes_list[index]['coor'][1], 'r': nodes_list[index]['radius']}
overlaps = do_Overlaps(circle, new_circle)
index += 1
return overlaps
def pairs_Nodes(nodes_list): # order-independent combinaton without repetition
pairs_list = []
for n1 in nodes_list:
for n2 in nodes_list:
if n1['id'] > n2['id']:
pairs_list.append((n1, n2))
return pairs_list
def do_Overlaps(c_1, c_2): # if overlaps return true
x1 = c_1['x']
x2 = c_2['x']
y1 = c_1['y']
y2 = c_2['y']
r1 = c_1['r']
r2 = c_2['r']
#print(c_1)
#print(c_2)
circles_distance = math.sqrt((x2-x1)**2 + (y2-y1)**2)
sum_radius = r1 + r2
overlaps = sum_radius > circles_distance
#print(overlaps)
return overlaps
def coor_Inter(c_1, c_2): # intersection points between circles
# reference: https://math.stackexchange.com/questions/256100/how-can-i-find-the-points-at-which-two-circles-intersect
# https://gist.github.com/jupdike/bfe5eb23d1c395d8a0a1a4ddd94882ac
x1 = c_1['x']
x2 = c_2['x']
y1 = c_1['y']
y2 = c_2['y']
r1 = c_1['r']
r2 = c_2['r']
R2 = (x2-x1)**2 + (y2-y1)**2
R4 = R2**2
r2r2 = r1**2 - r2**2
a = r2r2 / (2 * R2)
c = math.sqrt(2 * (r1**2 + r2**2) / R2 - (r2r2) / R4 - 1)
fx = (x1+x2) / 2 + a * (x2 - x1)
gx = c * (y2 - y1) / 2
ix1 = fx + gx
ix2 = fx - gx
fy = (y1+y2) / 2 + a * (y2 - y1)
gy = c * (x1 - x2) / 2
iy1 = fy + gy
iy2 = fy - gy
    int_coordinates = [(ix1, iy1), (ix2, iy2)]
return int_coordinates
def node_Stress(nid, x1, y1, nodes_list, c_dict): # stress of a node against the graph
stress = 0
for p_n in nodes_list:
p_nid = p_n['id']
if nid != p_nid:
x2 = p_n['coor'][0]
y2 = p_n['coor'][1]
R2 = (x2-x1)**2 + (y2-y1)**2
n_edges = c_dict[nid][p_nid]
stress += n_edges*R2
return stress
def solution_Stress(nodes_list, c_dict): # total stress of the graph
    solution_stress = 0
    for node in nodes_list:
        node_stress = node_Stress(node['id'], node['coor'][0], node['coor'][1], nodes_list, c_dict)
        solution_stress += node_stress
    # every pair is counted twice (once from each endpoint), so halve the total
    solution_stress = solution_stress / 2
    return solution_stress
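
# Verification sketch (illustrative): every point returned by coor_Inter should
# lie on both circles, up to floating-point error.
if __name__ == '__main__':
    c_a = {'x': 0.0, 'y': 0.0, 'r': 2.0}
    c_b = {'x': 3.0, 'y': 0.0, 'r': 2.0}
    for px, py in coor_Inter(c_a, c_b):
        d_a = math.sqrt((px - c_a['x']) ** 2 + (py - c_a['y']) ** 2)
        d_b = math.sqrt((px - c_b['x']) ** 2 + (py - c_b['y']) ** 2)
        print(round(d_a, 6), round(d_b, 6))  # both values should be 2.0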
|
jpbascur/SciMacro-noGUI
|
My_Module/My_chart.py
|
My_chart.py
|
py
| 6,133 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27228829856
|
"""This module implements classes and independent functions related to feature extraction
module of our work.
To be specific, this module helps identify handful of best features out of humongous number
of features; created from raw data """
import numpy as np
import pandas as pd
from namedlist import namedlist
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit as split
from sklearn.ensemble import BaggingRegressor
# from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.filterwarnings('ignore')
def rmse(labels, predictions):
"""
This function returns the root mean squared error.
Note: If predictions is an integer, we interpret it as a constant prediction.
"""
""" If predictions is an integer, we make it a array to comply with sklearn API """
if isinstance(predictions, int):
# create an array same as labels and fill it with constant prediction
predictions = np.full(labels.shape, predictions)
""" mean_squared_error is an error metric; imported from sklearn. """
mse = mean_squared_error(labels, predictions)
return np.sqrt(mse)
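
# Worked example (kept as a comment so importing this module stays side-effect
# free): a constant prediction is broadcast to the label shape, so
# rmse(np.array([3, 4]), 0) == np.sqrt((3**2 + 4**2) / 2) ≈ 3.5355.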
def train_test_split(data, test_ratio = 0.5, n_splits=10, best_split = True):
    """
    Split the data into two folds using stratified sampling, with sizes determined by test_ratio.
    The strata are built from quartiles of the target variable, i.e., sales_volume.
    If best_split is True, the split with the smallest difference between the target means of the
    two folds is returned; otherwise the last generated split is returned as-is.
    The number of candidate splits is determined by n_splits.
    """
    # Object for stratified sampling
    split_obj = split(n_splits=n_splits, test_size=test_ratio, random_state=180)
    # Discretizing the target volume to guide stratified sampling
    data['categories'] = pd.qcut(data['sales_volume'], 4, labels=["low", "low mid", "high mid", "high"])
    # the best split is the one that yields the least difference in mean sales_volume of both folds
    least_diff_in_means = None
    best_split_idx = None, None
    # Looping over each split
    for idx_train, idx_test in split_obj.split(data, data['categories']):
        train = data.iloc[idx_train]
        test = data.iloc[idx_test]
        diff_in_means = abs(train.sales_volume.mean() - test.sales_volume.mean())
        # Update the best split if best_split=True and either this is the first
        # split or it improves on the best one found so far.
        if best_split and ((least_diff_in_means is None) or (least_diff_in_means > diff_in_means)):
            least_diff_in_means = diff_in_means
            best_split_idx = idx_train, idx_test
    if best_split_idx[0] is None:
        best_split_idx = idx_train, idx_test
    del data['categories']
    idx_train, idx_test = best_split_idx
    train = data.iloc[idx_train]
    test = data.iloc[idx_test]
    return train, test
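
# Illustrative usage (kept as a comment so importing this module has no side
# effects; the column names are placeholders, only 'sales_volume' is required):
#     df = pd.DataFrame({'sales_volume': np.random.gamma(2.0, 100.0, size=200),
#                        'feature_a': np.random.randn(200)})
#     fold_1, fold_2 = train_test_split(df, test_ratio=0.5, n_splits=10)
#     print(fold_1.sales_volume.mean(), fold_2.sales_volume.mean())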
class IterVarModel:
""" This class iteratively find best features one by one iteratively; starting from no features """
"""
At a particular iteration, all the candidate features are evaluated and the features that
yields the best 2-fold cross validation performance are added to the model (best features).
Number of best features to extract is determined by max_features_to_extract.
If none of the features improves performance beyond already obtained in the previous
iteration, The feature search process stops even before finding max_features_to_extract features.
"""
"""
This class maintains two folds for performance evaluation and comparison.
"""
"Train fold 1 and evaluate fold 2 and call it performance over fold 2"
"Train fold 2 and evaluate fold 1 and call it performance over fold 1"
"Note that a specified model is used for all kind of training, testing purposes."
class RMSEFolds:
""" A nested class that we define to maintain and compare RMSE results over both folds """
def __init__(self, rmse_1, rmse_2):
self.fold_1 = rmse_1 # RMSE over fold 1
self.fold_2 = rmse_2 # RMSE over fold 2
def __lt__(self,other):
"""
__lt__ is a Special method that can define < operator on class instances.
We define RMSE1 < RMSE2 if and only if the RMSE1 is strictly lower than RMSE2
in both the folds.
"""
# defining < condition.
# Condition 1 - RMSE_1 < RMSE_2 if results of both folds in RMSE_1 are less than that in RMSE_2
# cond_1 = (self.fold_1 < other.fold_1) and (self.fold_2 < other.fold_2)
# Condition 2 - RMSE_1 < RMSE_2 if the sum of rmse in both folds of RMSE_1 is less than that in RMSE_2
# cond = (self.fold_1 ** 2 + self.fold_2 ** 2) < (other.fold_1 ** 2 + other.fold_2 ** 2)
cond = (self.fold_1 < other.fold_1) and (self.fold_2 < other.fold_2)
# RMSE_1 < RMSE_2 if either condition is true
return cond# _1 or cond_2
# Special method that gets run on object instantiation.
def __init__(self, data, model, max_features_to_extract):
# data over which we create folds and extract best features.
self.data = data
# maximum feautres to extract
self.max_features_to_extract = max_features_to_extract
# model to be used in feature evaluations
self.model = model
# input columns are all the columns in the dataframe data except the target
self.input_variables = [col for col in self.data.columns if col not in ['sales_volume']]
# maintaining data for the folds. This attribute holds data related to folds.
self.folds = None
# Maintains a list of useful features
self.extracted_features = []
# Stops the feature extraction process if it becomes True
self.stop_feature_extraction = False
# create 2 folds out of all the data. Basically, split data into folds and also create additional variables.
self.create_folds()
def standardize_folds_inputs(self):
""" Standardize inputs in fold 1 from parameters obtained in fold 2 and
Standardize inputs in fold 2 from parameters obtained in fold 1
Logic: test data cannot know her own mean and variance;
hence has to be standardize with training set parameters
"""
fold_1_X = self.folds[1].input
fold_2_X = self.folds[2].input
# get parameters from fold 1 and standardize and update fold 2
model = StandardScaler() # standard scalar
model.fit(fold_1_X) # get parameters from inputs in fold 1
self.folds[2]._update(input=pd.DataFrame(model.transform(fold_2_X), columns=fold_1_X.columns)) # transform inputs in fold 2
# get parameters from fold 2 and standardize and update fold 1
model = StandardScaler() # standard scalar
model.fit(fold_2_X) # get parameters from inputs in fold 2
self.folds[1]._update(input=pd.DataFrame(model.transform(fold_1_X), columns=fold_1_X.columns)) # transform inputs in fold 1
def add_data_in_folds(self):
data = self.data
# We use stratified sampling to split the data; see function train_test_split() for details
fold_1_data, fold_2_data = train_test_split(data, test_ratio = 0.5, n_splits=10, best_split = True)
# inputs
input_features = self.input_variables
## Now we add inputs and outputs to each fold.
# update inputs
self.folds[1]._update(input=fold_1_data[input_features])
self.folds[2]._update(input=fold_2_data[input_features])
# update outputs
self.folds[1]._update(output=fold_1_data['sales_volume'])
self.folds[2]._update(output=fold_2_data['sales_volume'])
def create_folds(self):
"""
This function uses stratified sampling to split data into two equal-sized folds
and maintains these folds using class attribute of folds.
We use namedlist; one for each fold to hold its data
"""
"""
namedlist is a factory function for creating mutable collections of list items;
it is similar to python's list but enables us to name each component and access using
dot notation.
"""
Fold = namedlist('Fold', 'input output rmse')
"""
class attribute folds is a dictionary with 2 keys;
key=1, refers to namedlist that holds data related to fold 1
key=2, refers to namedlist that holds data related to fold 2
"""
self.folds = dict()
for i in [1,2]:
self.folds[i] = Fold(input=None, output=None,rmse=None)
# add inputs and outputs to the folds by intelligently splitting data; see class method add_data_in_folds()
self.add_data_in_folds()
# Standardize inputs in the folds for better ML performance; see class method standardize_folds_inputs()
self.standardize_folds_inputs()
"""
Now after having inputs and outputs in both folds, we update RMSE.
As of now, we have not extracted any feature.
        Hence, we consider a base model, i.e., one that predicts the mean of its training target.
"""
# predictions of base model over fold 1 is a constant; mean of target variable in fold 2
# predictions of base model over fold 2 is a constant; mean of target variable in fold 1
# updating RMSE based on this logic.
self.folds[1]._update(rmse=rmse(np.abs(self.folds[1].output - self.folds[2].output.mean()), 0))
self.folds[2]._update(rmse=rmse(np.abs(self.folds[2].output - self.folds[1].output.mean()), 0))
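        # Worked example (editor's note): if fold 2's target mean is 100 and fold 1's
        # targets are [90, 110], the base-model RMSE for fold 1 is
        # sqrt(((90 - 100)**2 + (110 - 100)**2) / 2) = 10. Assuming rmse(a, b) computes
        # sqrt(mean((a - b)**2)), the call rmse(np.abs(y - mean), 0) above is exactly that quantity.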
def eval_fold(self, eval_fold_number, features):
"""
This function evaluates a fold specified by eval_fold_number based on features
and returns RMSE
"""
"fold 1 is evaluated by training over fold 2 and evaluating over fold 1"
train_fold = 1 if eval_fold_number == 2 else 2
test_fold = 2 if eval_fold_number == 2 else 1
model = self.model
# training data from train_fold
X, Y = self.folds[train_fold].input[features], self.folds[train_fold].output
# learning
model.fit(X, Y)
# test data
test_X, test_Y = self.folds[test_fold].input[features], self.folds[test_fold].output
# prediction
test_predict = model.predict(test_X)
# evaluate predictions and compute rmse
tmp_rmse = rmse(test_Y, test_predict)
return tmp_rmse
def is_new_feature_good(self, features):
"""
This function evaluates fold 1 and fold 2 with features
and determines if features leads to better performance
compared to extracted best features.
"""
# class method eval_fold() is used to evaluate a fold and returns RMSE. see eval_fold()
rmse_1 = self.eval_fold(1, features)
rmse_2 = self.eval_fold(2, features)
# Construct an RMSE object comprising RMSEs of folds resulted from current features.
RMSE = self.RMSEFolds(rmse_1, rmse_2)
## Construct an RMSE object comprising RMSEs of folds resulted best features obtained so far
RMSE_current = self.RMSEFolds(self.folds[1].rmse, self.folds[2].rmse)
result = False
"""if RMSE is better than RMSE_current, then set of variables in features are better
than ones in best features extracted so far
"""
if RMSE < RMSE_current:
# do advanced analysis on residuals.
result = True
return result, RMSE
def add_var(self):
"""
This method search for the variable; if such a variable exists that when
added can improve performance
"""
#
# We define best_RMSE to be RMSE reached in previous iteration.
best_RMSE = self.RMSEFolds(self.folds[1].rmse, self.folds[2].rmse)
# Initially a None, best_var indicates the candidate variable that can be included.
best_var = None # maintain the best variable found in this iteration
# Looping over the candidate variables
for col in self.input_variables:
# candidate variable should not be already in the best extracted features.
if col not in self.extracted_features:
# make a temporary list of features by adding candidate feature to the existing best features.
tmp_features = self.extracted_features + [col]
"""
Evaluate the goodness of candidate feature;
see class method is_new_feature_good() for further details.
is_good=True indicates that the candidate variable can improve the performance.
"""
is_good, RMSE = self.is_new_feature_good(tmp_features)
# Update the best_var if is_good=True and RMSE is better than best RMSE so far.
if (is_good and (RMSE < best_RMSE)):
best_RMSE = RMSE
best_var = col
# If we find a variable that can improve performance.
if best_var is not None:
print('adding_variable: {}'.format(best_var))
self.extracted_features.append(best_var)
# Update rmse and residuals
self.folds[1]._update(rmse=best_RMSE.fold_1)
self.folds[2]._update(rmse=best_RMSE.fold_2)
# If we cannot find a variable that can improve performance.
else:
# Turning this to True stops feature extraction.
self.stop_feature_extraction = True
# print('new features cannot be added')
def extract_features(self):
"""
This function runs feature extraction routines until either
the maximum allowed features is reached or when none of the
variables can improve the performance by getting added.
"""
for _ in range(self.max_features_to_extract):
# running until stop_feature_extraction=False
if not self.stop_feature_extraction:
# add_var() is a function that looks for best variable to add; see method add_var() for details.
self.add_var()
return self.extracted_features
def extract_model_features(model, df, max_features=5):
"""
This function extracts features in the dataframe df based on specified model;
this function initializes instance of class IterVarModel with specified model
and the number of features to extract.
It uses class method extract_features() to extract best features and returns
these features
"""
# class instance.
tmp = IterVarModel(df, model, max_features)
return tmp.extract_features()
def bagging_feature_extraction(model, df, max_features):
"""
This function firstly defines a bagging model with specified base model
and uses this bagging model to extract features using function extract_model_features()
Bagging helps shortlisting best features and avoids unstable features.
base model is subsequently used over the extracted features to extract features.
"""
# Bagging model with each estimator utilizing 60% of the data; adding some randomness.
bagging_model = BaggingRegressor(base_estimator=model, max_samples=0.6, random_state=25)
# Extract bagging features
print('-'*18)
print('Bagging Features')
print('-'*18)
bagging_features = extract_model_features(bagging_model, df, max_features=15) # 15
# Modify the dataframe to include only the bagging features.
features_to_retain = bagging_features + ['sales_volume']
df = df[features_to_retain]
# feature extraction from the remaining features using base model
print('-'*18)
print('Final Features')
print('-'*18)
base_model_features = extract_model_features(model, df, max_features=max_features)
return base_model_features
def feature_extraction(models, df, max_features = 5, precede_bagging=False):
"""
This function uses multiple base models; stored in dictionary models and runs feature extract for each.
It may or may not include bagging based on precede_bagging.
The record for each model is stored in a namedlist.
The function returns a list of namedlists; each holding a record for a model.
"""
# A named list to maintain data for each model
"""
namedlist is a factory function for creating mutable collections of list items;
it is similar to python's list but enables us to name each component and access using
dot notation.
"""
Model = namedlist('Model', 'name sklearn_form extracted_features')
# a list of model features is created in which each component correspond to a model.
# list to hold namedlists.
models_features = []
# iterate over each model in models
for model_name, model in models.items():
print('\n')
print('*'*18)
print(model_name)
print('*'*18)
# If the features list should be made smaller with bagging
if precede_bagging:
tmp_features = bagging_feature_extraction(model, df, max_features)
else:
tmp_features = extract_model_features(model, df, max_features=max_features)
# creating namedlist to store records for the model.
tmp_model = Model(name=model_name, sklearn_form=model, extracted_features=tmp_features)
# adding record to the list
models_features.append(tmp_model)
return models_features
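# Editor's illustrative sketch (not part of the original pipeline): one way feature_extraction()
# might be invoked. The model names, the Ridge/GradientBoostingRegressor choices and the
# assumption that df carries a 'sales_volume' target column plus candidate inputs are
# illustrative only.
def _example_feature_selection(df):
    from sklearn.linear_model import Ridge
    from sklearn.ensemble import GradientBoostingRegressor
    models = {
        'ridge': Ridge(alpha=1.0),
        'gbm': GradientBoostingRegressor(n_estimators=100, random_state=25),
    }
    return feature_extraction(models, df, max_features=5, precede_bagging=True)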
if __name__ == '__main__':
print('This file is not run as a module')
|
waqasbukhari/optimal_pos_placement
|
best_feature_extraction.py
|
best_feature_extraction.py
|
py
| 18,314 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30063350104
|
# import tenTo8
import encdec_8b10b
import veri
# encdec_8b10b.trans10to8(ten)
def negate(Str):
X = Str.replace('0','x')
X = X.replace('1','0')
X = X.replace('x','1')
return X
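# The ten-bit patterns below appear to be 8b/10b control (comma) characters -
# K28.0, K28.1, K28.3, K28.4, K28.5 and K28.7 - in their negative running-disparity
# form (_m); for the K28.x codes the positive-disparity form (_p) is the bitwise
# complement, hence negate().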
K280_m = '0011110100'
K280_p = negate(K280_m)
K281_m = '0011111001'
K281_p = negate(K281_m)
K283_m = '0011110011'
K283_p = negate(K283_m)
K284_m = '0011110010'
K284_p = negate(K284_m)
K285_m = '0011111010'
K285_p = negate(K285_m)
K287_m = '0011111000'
K287_p = negate(K287_m)
import logs
class lvdsMonitor(logs.driverClass):
def __init__(self,Path,Monitors):
logs.driverClass.__init__(self,Path,Monitors)
self.Pin0 = 'lvds0'
self.Pin1 = 'lvds1'
self.LVDS0 = ''
self.LVDS1 = ''
self.state0 = 'idle'
self.state1 = 'idle'
self.CRC = []
self.Ones0 = 0
self.Ones1 = 0
self.txc_0 = -1
self.txd_0 = -1
self.txc_1 = -1
self.txd_1 = -1
self.Stream = [[],[]]
self.Chars0,self.Chars1 = [],[]
self.syncState = 'idle'
self.dins = []
self.State = 'idle'
self.Frame = []
self.FrameId = 0
self.Bytes = 0
self.Indx = 0
self.keep = False
def run(self):
# self.run1()
self.run0()
def run1(self):
if veri.peek('tb.dut.dig_top.usat_tx.enable')!='1': return
cnt = logs.peek('tb.dut.dig_top.usat_tx.serializer.cnt')
if cnt!=11: return
din0 = veri.peek('tb.dut.dig_top.usat_tx.serializer.data_in_0')
din1 = veri.peek('tb.dut.dig_top.usat_tx.serializer.data_in_1')
Chr0 = encdec_8b10b.trans10to8(din0)
Chr1 = encdec_8b10b.trans10to8(din1)
Chr0 = nice(Chr0)
Chr1 = nice(Chr1)
self.Stream[0].append(Chr0)
self.Stream[1].append(Chr1)
self.dins.append(('%03x'%(logs.intx(din1)),'%03x'%(logs.intx(din0))))
if len(self.dins)>8: self.dins.pop(0)
if len(self.Stream[0])>8: self.Stream[0].pop(0)
if len(self.Stream[1])>8: self.Stream[1].pop(0)
# if len(self.Stream[0])==8:
# logs.log_info('STREAM side0 %d %s %s'%(len(self.Stream[0]),self.Stream[0],self.Stream[1]))
# logs.log_info('STREAM side1 %d %s %s'%(len(self.Stream[0]),self.Stream[0],self.Stream[1]))
if syncAll(self.Stream[0]) and syncAll(self.Stream[1]):
self.syncState = 'idle'
if self.syncState == 'idle':
if sync8(self.Stream[0]) and sync8(self.Stream[1]):
self.syncState = 'sync8'
self.Stream[0]=[]
self.Stream[1]=[]
elif self.syncState == 'sync8':
if len(self.Stream[0])==8:
logs.log_info('STREAM8 %s %s'%(self.Stream[1],self.Stream[0]))
# logs.log_info('STREAM8 X %s'%str(self.dins))
self.Stream[0]=[]
self.Stream[1]=[]
txc_0 = logs.peek('tb.dut.dig_top.usat_tx.txc_0')
txd_0 = logs.peek('tb.dut.dig_top.usat_tx.txd_0')
txc_1 = logs.peek('tb.dut.dig_top.usat_tx.txc_1')
txd_1 = logs.peek('tb.dut.dig_top.usat_tx.txd_1')
# logs.log_info('side0 %s %s %s %02x side1 %s %s %s %02x'%(din0,Chr0,self.txc_0,self.txd_0,din1,Chr1,self.txc_1,self.txd_1))
self.txc_0 = txc_0
self.txd_0 = txd_0
self.txc_1 = txc_1
self.txd_1 = txd_1
def run0(self):
lvds0 = self.peekbin(self.Pin0)
lvds1 = self.peekbin(self.Pin1)
self.run_lvds(lvds0,lvds1)
def run_lvds(self,lvds0,lvds1):
if lvds0 == '1': self.Ones0 += 1
if lvds0 == '0': self.Ones0 -= 1
if lvds1 == '1': self.Ones1 += 1
if lvds1 == '0': self.Ones1 -= 1
self.LVDS0 += lvds0
if len(self.LVDS0)>80: self.LVDS0 = self.LVDS0[-80:]
self.LVDS1 += lvds1
if len(self.LVDS1)>80: self.LVDS1 = self.LVDS1[-80:]
if self.state0 == 'idle':
if len(self.LVDS0)>60:
if idleChar(self.LVDS0[:10]) and idleChar(self.LVDS0[10:20]) and idleChar(self.LVDS0[20:30]):
if idleChar(self.LVDS0[30:40]) and idleChar(self.LVDS0[40:50]) and idleChar(self.LVDS0[50:60]):
self.state0 = 'sync8'
self.LVDS0 = self.LVDS0[60:]
elif self.state0 == 'sync8':
if len(self.LVDS0)>=10:
Chr0 = encdec_8b10b.trans10to8(self.LVDS0[:10])
Chr0 = nice(Chr0)
self.Chars0.append(Chr0)
logs.log_info('CHARS0 %s %s'%(str(self.Chars0),str(self.LVDS0)),2)
self.LVDS0 = self.LVDS0[10:]
if self.state1 == 'idle':
if len(self.LVDS1)>60:
if idleChar(self.LVDS1[:10]) and idleChar(self.LVDS1[10:20]) and idleChar(self.LVDS1[20:30]):
if idleChar(self.LVDS1[30:40]) and idleChar(self.LVDS1[40:50]) and idleChar(self.LVDS1[50:60]):
self.state1 = 'sync8'
self.LVDS1 = self.LVDS1[60:]
elif self.state1 == 'sync8':
if len(self.LVDS1)>=10:
Chr1 = encdec_8b10b.trans10to8(self.LVDS1[:10])
Chr1 = nice(Chr1)
self.Chars1.append(Chr1)
logs.log_info('CHARS1 %s %s'%(str(self.Chars1),str(self.LVDS1)),2)
self.LVDS1 = self.LVDS1[10:]
if self.State == 'idle':
# if self.keep:
# logs.log_info('CCCC 1=%s 0=%s'%(self.Chars1,self.Chars0))
if len(self.Chars1)>=10:
if (self.Chars1[0] in ['K285','K283','K280'])and(self.Chars1[1] == '00')and(self.Chars1[2]=='K281'):
if (self.Chars0[0] in ['K285','K283','K280'])and(self.Chars0[1] == '00'):
self.State = 'inframe'
self.Frame = self.Chars1[2:10]+ self.Chars0[2:10]
self.Chars1 = self.Chars1[10:]
self.Chars0 = self.Chars0[10:]
logs.log_info('FRAMEStart1 %s'%str(self.Frame))
elif len(self.Chars1)==8:
if (self.Chars1[0]=='K281'):
self.State = 'inframe'
self.Frame = self.Chars1[:8]+ self.Chars0[:8]
self.Chars1 = self.Chars1[8:]
self.Chars0 = self.Chars0[8:]
logs.log_info('FRAMEStart2 %s'%str(self.Frame))
elif self.State == 'inframe':
if len(self.Chars1)>=8:
self.Frame += self.Chars1[:8]+ self.Chars0[:8]
self.Chars1 = self.Chars1[8:]
self.Chars0 = self.Chars0[8:]
if 'K287' in self.Frame:
FRM = self.Frame[:self.Frame.index('K287')]
FRM = FRM[self.Frame.index('K281'):]
self.Frame = self.Frame[self.Frame.index('K287'):]
Len = len(FRM)-8
self.Bytes += Len
logs.log_info('FRAME #%d bytes=%d / %d : %s'%(self.FrameId,Len,self.Bytes,FRM))
if Len>100:
self.assembleData2(FRM[1:-1])
self.FrameId += 1
self.State = 'idle'
self.keep =True
if len(self.Chars1)>10: self.Chars1.pop(0)
if len(self.Chars0)>10: self.Chars0.pop(0)
def assembleData2(self,Bytes):
        while Bytes and Bytes[-1] == 'K284': Bytes.pop(-1)  # guard against an empty list
state = 0
while Bytes!=[]:
P0 = self.sngl(Bytes.pop(0))
P1 = self.sngl(Bytes.pop(0))
state = self.runPair(P0,P1,state)
def sngl(self,X):
try:
return int(X,16)
except:
return X
def assembleData(self,Bytes):
        while Bytes and Bytes[-1] == 'K284': Bytes.pop(-1)  # guard against an empty list
state = 0
while Bytes!=[]:
Seq0 = Bytes[:8]
Seq1 = Bytes[8:16]
logs.log_info('SEQ 1=%s 0=%s'%(Seq0,Seq1))
while Seq1!=[]:
P1 = int(Seq1.pop(0),16)
P0 = int(Seq0.pop(0),16)
state = self.runPair(P0,P1,state)
Bytes = Bytes[16:]
def report__(self,St):
Exp1,Exp0,Indx = extract(self.Pix1,self.Pix0)
Lefts = len(list(PIXELS[3].keys()))
if Indx<0:
logs.log_wrong('#%d %s %d (left %d) self.Pix1 = %x self.Pix0 = %x (E1=%x E0=%x)'%(self.Indx,St,Indx,Lefts,self.Pix1,self.Pix0,Exp1,Exp0))
else:
logs.log_info('#%d %s %d self.Pix1 = %x self.Pix0 = %x (E1=%x E0=%x)'%(self.Indx,St,Indx,self.Pix1,self.Pix0,Exp1,Exp0))
self.Indx += 1
if self.Indx>2368:
for Key in PIXELS[3]:
logs.log_info('leftovers ind=%d pix %04x %04x'%(PIXELS[3][Key],Key[0],Key[1]))
def report(self,St):
Gr1e = gray_encode(self.Pix1)
Gr1d = gray_decode(self.Pix1)
Gr0e = gray_encode(self.Pix0)
Gr0d = gray_decode(self.Pix0)
logs.log_info('REPORT %s %04x %04x en(%04x %04x)'%(St,self.Pix0,self.Pix1,Gr0e,Gr1e))
def runPair(self,P0,P1,state):
if state==0:
self.Pix1 = (P1>>6)& 3
self.Pix0 = ((P1&0x3f)<<8)+P0
state = 1
elif state==1:
self.Pix1 = self.Pix1 + (P0<<2) + ((P1 & 0xf)<<10)
self.report('st1')
self.Pix0 = (P1>>4)&0xf
state = 2
elif state==2:
self.Pix0 = self.Pix0 + (P0<<4) + ((P1 & 3)<<12)
self.Pix1 = P1>>2
state = 3
elif state==3:
self.Pix1 = self.Pix1 + (P0<<6)
self.report('st3')
self.Pix0 = P1
state = 4
elif state==4:
self.Pix0 = self.Pix0 + ((P0 & 0x3f)<<8)
self.Pix1 = ((P0 >>6) & 0x3) + (P1<<2)
state = 5
elif state==5:
self.Pix1 = self.Pix1 + ((P0 & 0xf)<<10)
self.report('st5')
self.Pix0 = (P1<<4)+ ((P0>>4) & 0xf)
state = 6
elif state==6:
self.Pix1 = (P1<<6)+(P0>>2)
self.Pix0 = self.Pix0 + ((P0&3)<<12)
self.report('st6')
state = 0
return state
def trace(self,LVDS,state,Side):
if len(LVDS)<10:
return state,LVDS
if state=='idle':
LVDS = self.idles(LVDS)
if LVDS:
return 'sync0',LVDS
return False
elif state=='sync0':
Chr = encdec_8b10b.trans10to8(LVDS[5:10]+LVDS[0:5])
self.Stream[Side].append(Chr)
if len(self.Stream[Side])>8:
self.Stream[Side].pop(0)
# if len(self.Stream[Side])==8:
logs.log_info('STREAM %s %d %s'%(Side,len(self.Stream[Side]),self.Stream[Side]))
elif state=='sync':
Chr = encdec_8b10b.trans10to8(LVDS[5:10]+LVDS[0:5])
logs.log_info('%s sync %s %s'%(Side,Chr,state),2)
Str = self.cutIt(LVDS,[K281_m,K281_p])
if Str:
return 'sof',Str
else:
Str = self.cutIt(LVDS,[K280_m,K280_p,K283_m,K283_p,K285_m,K285_p])
if not Str:
What = encdec_8b10b.trans10to8(LVDS[5:10]+LVDS[0:5])
logs.log_info('token? %s %s '%(LVDS[:10],What),2)
return 'sync',LVDS[10:]
else:
return 'sync',Str
elif state=='sof':
Str = self.cutIt(LVDS,[K287_m,K287_p])
if Str:
return 'eof',Str
Chr = LVDS[:10]
What = encdec_8b10b.trans10to8(LVDS[5:10]+LVDS[0:5])
logs.log_info('header %s %s'%(Chr,What),3)
return 'sof',LVDS[10:]
elif state=='eof':
Chr = LVDS[:10]
Str = LVDS[10:]
logs.log_info('reserved %s'%(Chr),3)
return 'res',LVDS
elif state=='res':
Chr = LVDS[:10]
Str = LVDS[10:]
logs.log_info('crc %s'%(Chr),3)
self.CRC.append(Chr)
if (len(self.CRC)==4):
return 'crc',Str
return 'res',LVDS
elif state=='crc':
Chr = LVDS[:10]
Str = LVDS[10:]
logs.log_info('token %s'%(Chr),3)
return 'sync',Str
def idles(self,LVDS):
Str = self.cutIt(LVDS,[K280_m,K280_p,K283_m,K283_p,K285_m,K285_p])
if not Str: return False
Str = Str[10:]
Str = self.cutIt(Str,[K280_m,K280_p,K283_m,K283_p,K285_m,K285_p])
if not Str: return False
Str = Str[10:]
Str = self.cutIt(Str,[K280_m,K280_p,K283_m,K283_p,K285_m,K285_p])
if not Str: return False
Str = Str[10:]
return Str
def cutIt(self,Str,Chars):
for Char in Chars:
Chrx = Char
if ((Chrx) in Str) and (Str.index(Chrx)<10):
Str = Str[Str.index(Chrx):]
return Str
return False
def extract(Pix0,Pix1):
Key = (Pix0,Pix1)
if Key in PIXELS[2]:
Ind = PIXELS[2].index(Key)
Exp0,Exp1 = PIXELS[2][Ind]
if Key in PIXELS[3]: PIXELS[3].pop(Key)
return Exp1,Exp0,Ind
return (0x9999,0x9999,-9999)
def nice(Char):
if type(Char) is tuple: return str(Char)
if Char[0] in '01': return '%02x'%int(Char,2)
return Char
def sync8(Seq):
if len(Seq)<8: return False
if (Seq[7][0]=='K'): return False
for II in range(7):
if (Seq[II][0]!='K'): return False
return True
def syncAll(Seq):
if len(Seq)<8: return False
for II in range(8):
if (Seq[II][0]!='K'): return False
return True
def idleChar(Str):
return Str in [K280_m,K280_p,K283_m,K283_p,K285_m,K285_p]
def gray_encode(n):
return n ^ n >> 1
def gray_decode(n):
m = n >> 1
while m:
n ^= m
m >>= 1
return n
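# Editor's note: a minimal self-check sketch (not part of the original monitor) for the
# Gray-code helpers above. gray_decode() must invert gray_encode(), and consecutive Gray
# codes differ in exactly one bit.
def _gray_code_self_check(limit=256):
    for n in range(limit):
        g = gray_encode(n)
        assert gray_decode(g) == n, 'gray_decode must invert gray_encode'
        if n:
            # consecutive codes differ in exactly one bit position
            assert bin(g ^ gray_encode(n - 1)).count('1') == 1
    return True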
|
greenblat/vlsistuff
|
verification_libs3/lvdsMonitor.py
|
lvdsMonitor.py
|
py
| 14,045 |
python
|
en
|
code
| 41 |
github-code
|
6
|
9626855200
|
import pandas as pd
import numpy as np
import os
import cv2
import json
from sklearn.model_selection import train_test_split
from trainer import Trainer
from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error
from collections import Counter
# read the images in the same order specified by df["id"]
def read(corpus_path, df):
file_paths = [os.path.join(corpus_path, name) for name in df["id"]]
data = []
for file_path in file_paths:
image = cv2.imread(file_path)
data.append(image)
data = np.asarray(data, dtype=np.float32)
# collect the labels as well (if exist)
labels = None
if "label" in df:
labels = np.asarray(df["label"], dtype=np.uint8)
print("#### Histogram: {}".format(Counter(labels)))
return data, labels
# plot images per group (to better understand the data)
def plot_per_group(data, labels, target_label):
import matplotlib.pyplot as plt
i, curr_idx = 0, 0
plt.figure(figsize=(10, 10))
while (i < len(data) and curr_idx < 9):
image = (data[i] * 255).astype("uint8")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
label = labels[i]
if label == target_label:
plt.subplot(3, 3, curr_idx + 1)
plt.imshow(image)
plt.title(label)
plt.axis("off")
curr_idx += 1
i += 1
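# Editor's illustrative sketch (not shipped with the project): the rough shape of the
# config_file.json that the __main__ block below expects, inferred from the keys it reads.
# Every value here is a placeholder assumption, not a recommended setting.
_EXAMPLE_CONFIG = {
    "device": 0,
    "training_path": "runs/",
    "type": "train",                       # or "cross_validation"
    "task_type": "regression",             # or "classification"
    "model_type": "cnn",                   # placeholder name
    "loss_name": "mse",                    # placeholder name
    "learning_rate": 1e-3,
    "learning_rate_decay": 0.95,
    "weight_decay": 1e-4,
    "enable_batchnorm": True,
    "batch_size": 32,
    "epochs": 100,
    "early_stopping_args": {"patience": 10},
    "checkpoint_args": {"filepath": "best_model.h5"},
}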
if __name__ == "__main__":
with open('config_file.json') as json_file:
config_file = json.load(json_file)
os.environ["CUDA_VISIBLE_DEVICES"] = str(config_file["device"])
PROJECT_PATH = os.path.join(config_file["training_path"], "{}/".format(config_file["device"]))
if os.path.exists(PROJECT_PATH):
raise Exception("{} already exists. Try another path!".format(PROJECT_PATH))
os.makedirs(PROJECT_PATH, exist_ok=True)
config_file["checkpoint_args"]["filepath"] = os.path.join(PROJECT_PATH, config_file["checkpoint_args"]["filepath"])
with open(os.path.join(PROJECT_PATH, "config_file.json"), 'w+') as f:
json.dump(config_file, f, indent=4)
df_train = pd.read_csv("corpus/train.csv")
df_test = pd.read_csv("corpus/test.csv")
all_train_data, all_train_labels = read(corpus_path="corpus/train/", df=df_train) # <--- collect all the images from the training dataset
df_train = df_train.sample(frac=1).reset_index(drop=True) # <--- Shuffle the training data. Because of the augmentation step, we cannot shuffle during the `fit` call.
df_train, df_validation = train_test_split(df_train, test_size=0.2) # <--- split into train + validation
train_data, train_labels = read(corpus_path="corpus/train/", df=df_train) # <--- load the corresponding train images
validation_data, validation_labels = read(corpus_path="corpus/train/", df=df_validation) # <--- load the corresponding validation images
test_data, _ = read(corpus_path="corpus/test/", df=df_test) # <--- load the corresponding test images
print("##### [Train]: Initial: {} -> {}".format(len(df_train), train_data.shape))
print("##### [Validation]: Initial: {} -> {}".format(len(df_validation), validation_data.shape))
print("##### [Test]: Initial: {} -> {}".format(len(df_test), test_data.shape))
# for label in range(1, 6):
# plot_per_group(data=train_data, labels=train_labels, target_label=label)
# instantiate a trainer object
trainer = Trainer(all_train_data=all_train_data,
all_train_labels=all_train_labels,
train_data=train_data,
train_labels=train_labels,
validation_data=validation_data,
validation_labels=validation_labels,
loss_name=config_file["loss_name"],
model_type=config_file["model_type"],
learning_rate_decay=config_file["learning_rate_decay"],
enable_batchnorm=config_file["enable_batchnorm"],
batch_size=config_file["batch_size"],
epochs=config_file["epochs"],
early_stopping_args=config_file["early_stopping_args"],
checkpoint_args=config_file["checkpoint_args"],
task_type=config_file["task_type"],
learning_rate=config_file["learning_rate"],
weight_decay=config_file["weight_decay"],
num_classes=5)
# Cross Validation case
if config_file["type"] == "cross_validation":
mae_list, mse_list, accuracy_list = trainer.cross_validation(epochs=75)
history_cross_validation = {}
history_cross_validation["mae_list"] = str(mae_list)
history_cross_validation["mse_list"] = str(mse_list)
history_cross_validation["accuracy_list"] = str(accuracy_list)
with open(os.path.join(PROJECT_PATH, "history_cross_validation.json"), 'w+') as f:
json.dump(history_cross_validation, f, indent=4)
# Training case
elif config_file["type"] == "train":
history = trainer.train()
for key in history:
history[key] = str(history[key])
train_predicted_labels, train_predictions = trainer.get_predictions(train_data)
validation_predicted_labels, validation_predictions = trainer.get_predictions(validation_data)
test_predicted_labels, test_predictions = trainer.get_predictions(test_data)
# Accuracy score for train + validation
accuracy_train = accuracy_score(train_labels, train_predicted_labels)
accuracy_validation = accuracy_score(validation_labels, validation_predicted_labels)
print("#### [Train] Accuracy for the best model: {}".format(accuracy_train))
print("#### [Validation] Accuracy for the best model: {}".format(accuracy_validation))
history["train_accuracy"] = str(accuracy_train)
history["validation_accuracy"] = str(accuracy_validation)
# For regression, add MAE and MSE too
if config_file["task_type"] == "regression":
mae_train = mean_absolute_error(train_labels, train_predictions)
mae_validation = mean_absolute_error(validation_labels, validation_predictions)
mse_train = mean_squared_error(train_labels, train_predictions)
mse_validation = mean_squared_error(validation_labels, validation_predictions)
history["train_mae"] = str(mae_train)
history["validation_mae"] = str(mae_validation)
history["train_mse"] = str(mse_train)
history["validation_mse"] = str(mse_validation)
print("#### [Train] Mae for the best model: {}".format(mae_train))
print("#### [Validation] Mae for the best model: {}".format(mae_validation))
print("#### [Train] Mse for the best model: {}".format(mse_train))
print("#### [Validation] Mse for the best model: {}".format(mse_validation))
df = pd.DataFrame.from_dict({"id": df_test["id"], "label": list(test_predicted_labels)})
df.to_csv(os.path.join(PROJECT_PATH, "final_predictions.csv"), index=False)
df_train["prediction"] = list(train_predictions) # <--- add predictions
df_validation["prediction"] = list(validation_predictions) # <--- add predictions
df_test["prediction"] = list(test_predictions) # <--- add predictions
# merge back train + validation
merged_train_df = pd.concat([df_train, df_validation], ignore_index=True) # <--- merge train + validation predictions
merged_train_df = merged_train_df.sort_values(by=["id"]) # <--- sort by id
merged_train_df.to_csv(os.path.join(PROJECT_PATH, "train_validation_predictions.csv"), index=False)
df_test.to_csv(os.path.join(PROJECT_PATH, "test_predictions.csv"), index=False)
with open(os.path.join(PROJECT_PATH, "history.json"), 'w+') as f:
json.dump(history, f, indent=4)
print(df)
|
SebastianCojocariu/Detect-targets-in-radar-signals
|
src/main.py
|
main.py
|
py
| 7,992 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29456763062
|
import abc
import os.path
from oslo_config import cfg
from oslo_log import log
import requests
import requests.certs
import six
from atrope import exception
from atrope import ovf
from atrope import paths
from atrope import utils
opts = [
cfg.StrOpt('download_ca_file',
default=paths.state_path_def('atrope-ca-bundle.pem'),
help='Atrope will build a CA bundle for verifying the '
'HTTP servers when it is downloading the image, '
'concatenating the default OS CA bundle and the '
'CAs present in the $ca_path directory. This '
'is done as there may be certificates signed by '
'CAs that are trusted by the provider, but untrusted '
'by the default bundle and we need to trust both.'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.import_opt("ca_path", "atrope.smime")
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseImage(object):
@abc.abstractmethod
def __init__(self, image_info):
self.uri = None
self.sha512 = None
self.identifier = None
self.location = None
self.verified = False
@abc.abstractmethod
def download(self, dest):
"""Download the image.
        :param dest: destination directory.
"""
def get_file(self, mode="rb"):
"""Return a File object containing the downloaded file."""
return open(self.location, mode)
def get_kernel(self):
raise NotImplementedError()
def get_ramdisk(self):
raise NotImplementedError()
def get_disk(self):
"""Return the format and a 'ro' File-like object containing the disk.
Images can be stored in containers like OVA, this method will return a
tuple (format, fd) being 'format' a string containing the image disk
format and 'fd' File-like object containing the original image disk as
extracted from the container.
We assume that containers only store one image disk. We scan the file
in reverse order, as OVF specification states that files can be
appended so as to update the OVF file.
"""
if self.format.lower() != "ova":
return self.format, self.get_file()
ovf_file = ovf.get_ovf(self.location)
fmt, disk_filename = ovf.get_disk_name(ovf_file)
disk_fd = ovf.extract_file(self.location, disk_filename)
return fmt, disk_fd
def verify_checksum(self, location=None):
"""Verify the image's checksum."""
LOG.info("Image '%s' present in '%s', verifying checksum",
self.identifier, location)
location = location or self.location
if location is None:
raise exception.ImageNotFoundOnDisk(location=location)
sha512 = utils.get_file_checksum(location)
if sha512.hexdigest() != self.sha512:
raise exception.ImageVerificationFailed(
id=self.identifier,
expected=self.sha512,
obtained=sha512.hexdigest()
)
LOG.info("Image '%s' present in '%s', checksum OK",
self.identifier, location)
self.verified = True
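# Editor's illustrative sketch: what a SHA-512 file digest computation looks like with the
# standard library. verify_checksum() above delegates to utils.get_file_checksum(), whose
# implementation is not shown here; this sketch is only assumed to be equivalent.
def _sha512_hexdigest(path, chunk_size=65536):
    import hashlib
    digest = hashlib.sha512()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()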
class HepixImage(BaseImage):
field_map = {
"ad:group": "group",
"ad:mpuri": "mpuri",
"ad:user:fullname": "user_fullname",
"ad:user:guid": "user_guid",
"ad:user:uri": "user_uri",
"dc:description": "description",
"dc:identifier": "identifier",
"dc:title": "title",
"hv:hypervisor": "hypervisor",
"hv:format": "format",
"hv:size": "size",
"hv:uri": "uri",
"hv:version": "version",
"sl:arch": "arch",
"sl:checksum:sha512": "sha512",
"sl:comments": "comments",
"sl:os": "os",
"sl:osname": "osname",
"sl:osversion": "osversion",
}
required_fields = field_map.keys()
def __init__(self, image_info):
super(HepixImage, self).__init__(image_info)
image_dict = image_info.get("hv:image", {})
utils.ensure_ca_bundle(CONF.download_ca_file,
[requests.certs.where()],
CONF.ca_path)
for i in self.required_fields:
value = image_dict.get(i, None)
if value is None:
reason = "Invalid image definition, missing '%s'" % i
raise exception.InvalidImageList(reason=reason)
attr = self.field_map.get(i)
setattr(self, attr, value)
# add everything from hepix as 'extra', so it can be queried in glance
self.appliance_attributes = image_dict
def _download(self, location):
LOG.info("Downloading image '%s' from '%s' into '%s'",
self.identifier, self.uri, location)
with open(location, 'wb') as f:
try:
response = requests.get(self.uri, stream=True,
verify=CONF.download_ca_file)
except Exception as e:
LOG.error(e)
raise exception.ImageDownloadFailed(code=e.errno,
reason=e)
if not response.ok:
LOG.error("Cannot download image: (%s) %s",
response.status_code, response.reason)
raise exception.ImageDownloadFailed(code=response.status_code,
reason=response.reason)
for block in response.iter_content(1024):
if block:
f.write(block)
f.flush()
try:
self.verify_checksum(location=location)
except exception.ImageVerificationFailed as e:
LOG.error(e)
raise
else:
LOG.info("Image '%s' stored as '%s'",
self.identifier, location)
def download(self, basedir):
# The image has been already downloaded in this execution.
if self.location is not None:
raise exception.ImageAlreadyDownloaded(location=self.location)
location = os.path.join(basedir, self.identifier)
if not os.path.exists(location):
self._download(location)
else:
# Image exists, is it checksum valid?
try:
self.verify_checksum(location=location)
except exception.ImageVerificationFailed:
LOG.warning("Image '%s' present in '%s' is not valid, "
"downloading again",
self.identifier, location)
self._download(location)
self.location = location
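# Editor's illustrative sketch (not part of atrope): the minimal shape of the image_info
# mapping HepixImage expects, i.e. an "hv:image" dict containing every key listed in
# HepixImage.field_map. All values below are made-up placeholders.
_EXAMPLE_IMAGE_INFO = {
    "hv:image": {
        "ad:group": "example.vo",
        "ad:mpuri": "https://appdb.example.org/image/1",
        "ad:user:fullname": "Jane Doe",
        "ad:user:guid": "00000000-0000-0000-0000-000000000000",
        "ad:user:uri": "https://example.org/users/jane",
        "dc:description": "Example appliance",
        "dc:identifier": "image-0001",
        "dc:title": "Example Image",
        "hv:hypervisor": "kvm",
        "hv:format": "OVA",
        "hv:size": 123456789,
        "hv:uri": "https://example.org/images/example.ova",
        "hv:version": "1.0",
        "sl:arch": "x86_64",
        "sl:checksum:sha512": "<sha512 hex digest of the image file>",
        "sl:comments": "",
        "sl:os": "Linux",
        "sl:osname": "Example OS",
        "sl:osversion": "1.0",
    }
}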
|
alvarolopez/atrope
|
atrope/image.py
|
image.py
|
py
| 6,812 |
python
|
en
|
code
| 2 |
github-code
|
6
|
33944686501
|
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics.scorer import make_scorer
from sklearn import model_selection
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingRegressor  # GBM algorithm
from sklearn import cross_validation, metrics  # additional sklearn functions (legacy import path, pre-0.20 scikit-learn)
from sklearn.grid_search import GridSearchCV  # performing grid search (legacy import path, pre-0.20 scikit-learn)
"""
Created on Sat Nov 24 13:28:11 2018
@author: Muhammad Shahraiz Khan Niazi and Muhammad Daniyal Saqib
"""
'''
#Start:Kaggle.com
#Explanation: Plot a bar graph against SalePrice
#End '''
def barPlot(df, var):
plt.xticks(rotation =90)
sns.barplot(df.loc[:,'SalePrice'], df.loc[:, var])
plt.title('SalePrice vs ' + var)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
def colDrop(df, colName):
return df.drop(colName, axis = 1)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
'''
#Start:Kaggle.com
#Explanation: to figure out the division/scattering of the data's values
#End '''
def checkRange(df, var):
labels = df.loc[:,var].unique()
sizes = df.loc[:,var].value_counts().values
percent = 100.*sizes/sizes.sum()
labels = ['{0} - {1:1.1f} %'.format(i,j) for i,j in zip(labels, percent)]
print(labels)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
'''
#Start:Kaggle.com
#Explanation: Plot a historgram graph
#End '''
def histogram(df, var):
sns.distplot(df.loc[:, var]);
fig = plt.figure()
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
def join(df1, df2):
return pd.concat([df1, df2], axis = 1)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
'''
#Start:Kaggle.com
#Explanation: Plot a scatter graph against SalePrice
#End '''
def scatterPlot(df, var):
data = pd.concat([df.loc[:, 'SalePrice'], df.loc[:,var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0,800000));
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This method implements binary encoding on particular columns in the dataframe and returns that dataframe
def encode(ser):
return pd.get_dummies(ser)
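# Editor's note: pd.get_dummies one-hot encodes a categorical Series. For example,
# pd.get_dummies(pd.Series(['RL', 'RM', 'RL'])) returns indicator columns 'RL' and 'RM'
# with rows [1, 0], [0, 1], [1, 0].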
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function checks percentage of the number of NA values in a coloumn
def checkMissingValues(df):
return df.apply(lambda col: (col.isnull().sum()/ df.shape[0]) *100 )
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function reads the data from the file and returns the entire dataset, a list of input cols and outputCol
def readData_1(numRows = None):
    inputCols = ['MSSubClass', 'MSZoning', 'LotFrontage',
                 'LotArea', 'Street', 'Alley', 'LotShape', 'LandContour',
                 'Utilities', 'LotConfig',
                 'Neighborhood', 'Condition1', 'Condition2', 'BldgType',
                 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt',
                 'YearRemodAdd', 'RoofStyle', 'RoofMatl',
                 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea',
                 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond',
                 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2',
                 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC',
                 'CentralAir', 'Electrical', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF',
                 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'Bedroom',
                 'Kitchen', 'KitchenQual', 'TotRmsAbvGrd', 'Functional',
                 'Fireplaces', 'FireplaceQu', 'GarageType', 'GarageYrBlt',
                 'GarageFinish', 'GarageCars', 'GarageArea',
                 'GarageQual', 'GarageCond', 'PavedDrive',
                 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch',
                 '3SsnPorch', 'ScreenPorch', 'PoolArea',
                 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal',
                 'MoSold', 'YrSold', 'SaleType', 'SaleCondition']
outputCol = ['SalePrice']
trainHouseDF = pd.read_csv('Data/train.csv')
testHouseDF = pd.read_csv('Data/test.csv')
houseDF = pd.concat([trainHouseDF, testHouseDF], sort=True)
#houseDF = houseDF.sample(frac = 1, random_state = 99).reset_index(drop = True)
return houseDF, inputCols, outputCol
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function reads the data from the file and returns the two datasets -
# training and testing, a list of input cols and outputCol
def readData(numRows = None):
trainHouseDF = pd.read_csv('Data/train.csv')
testHouseDF = pd.read_csv('Data/test.csv')
outputCol = ['SalePrice']
return trainHouseDF, testHouseDF, outputCol
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
def c2(df, colName):
return df.loc[:, colName].corr(df.loc[:, 'SalePrice'])
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function finds out the correlation between the numerical columns and the SalePrice
def corrRelationForNumerical():
df, b, c = readData_1()
corr=df.corr()["SalePrice"]
print(corr[np.argsort(corr, axis=0)[::-1]])
inputCols = ['OverallQual', 'GrLivArea', 'GarageCars', 'GarageArea', 'TotalBsmtSF', '1stFlrSF',
'FullBath', 'TotRmsAbvGrd', 'YearBuilt', 'YearRemodAdd', 'GarageYrBlt', 'MasVnrArea',
'Fireplaces']
corrMatrix= df[inputCols].corr()
sns.set(font_scale=1.10)
plt.figure(figsize=(10, 10))
sns.heatmap(corrMatrix, vmax=.8, linewidths=0.01, square=True,annot=True,cmap='viridis',linecolor="white")
plt.title('Correlation between features')
'''
According to the correlation that we found with respect to SalePrice, we think:
    values that are close to or greater than 0.5 would affect SalePrice, as well as values that are less than -0.1.
    Hence, our predictors list so far is: OverallQual, GrLivArea, GarageCars, GarageArea,
    TotalBsmtSF, 1stFlrSF, FullBath, TotRmsAbvGrd, YearBuilt, YearRemodAdd, GarageYrBlt
'''
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function would preprocess the nonNumerical data and we would evaluate which column to keep as a parameter
#for the final algorithm
def dataProcessing():
df, b, c = readData()
#____________________________________________________________________________________________
#While going through the Utilities - there is only one entry that is other than AllPub (Id = 945)
#Hence dropping ID 945 and then the entire col would not affect the TargetPrice - Sale price
df = df.drop(labels = 944, axis = 0)
'''
From: Kaggle.com
Explanation: After trying different plotting techniques, such as boxPlot, scatterplot and few others,
We think this describes the neighborhood perfectly.
'''
#barPlot(df, 'Neighborhood')
df = colDrop(df, 'Neighborhood')
#End: Kaggle.com
    '''Other than a few extra spikes, the SalePrice is not really affected much by Neighborhood, as it is all between
    100000 - 200000. Shahraiz and I believe the Neighborhood really doesn't matter. Hence, we would drop this column'''
#----------------------------------------------------------------------------------------------------------
#MSZoning Attribute
#plt.xticks(rotation =90)
#plt.scatter(df.loc[:,'MSZoning'], df.loc[:, 'SalePrice'])
#plt.title('SalePrice vs MSZoning')
'''This graph clearly shows that they majority of the data is in RL; however, to confirm it further we would use Classifcation graph
to plot it'''
#labels = df.loc[:,"MSZoning"].unique()
#sizes = df.loc[:,"MSZoning"].value_counts().values
#explode=[0.1,0,0,0,0]
#parcent = 100.*sizes/sizes.sum()
#labels = ['{0} - {1:1.1f} %'.format(i,j) for i,j in zip(labels, parcent)]
#colors = ['yellowgreen', 'gold', 'lightblue', 'lightcoral','blue']
#patches = plt.pie(sizes, colors=colors,explode=explode, shadow=True,startangle=90)
#plt.title("Zoning Classification")
#plt.show()
#print(labels)
#print()
    '''Clearly a large part of the pie is yellow green which is RL - and the second most occurring
is gold which is RM. Therefore, we would keep the values that are RL and RM and remove the rows
that are any other value.'''
'''We would discreditize the data in MSZoning: 1 - RL, 0 - RM and then find the correlation'''
df = df.loc[(df.loc[:,'MSZoning'] == 'RL') | (df.loc[:,'MSZoning']=='RM') | (df.loc[:,'MSZoning']=='C')]
df.loc[:,'MSZoning'] = df.loc[:,'MSZoning'].map(lambda val: 0 if(val=='RL') else val)
df.loc[:,'MSZoning'] = df.loc[:,'MSZoning'].map(lambda val: 1 if(val=='RM') else val)
df.loc[:,'MSZoning'] = df.loc[:,'MSZoning'].map(lambda val: 2 if(val=='C') else val)
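    # Editor's note: the three map() calls above are equivalent to a single dict-based mapping,
    # e.g. df.loc[:, 'MSZoning'] = df.loc[:, 'MSZoning'].map({'RL': 0, 'RM': 1, 'C': 2})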
#print(corrRelation2Cols(df, ['MSZoning', 'SalePrice']))
'''
The correlation between SalePrice and MSZoning is 0.29556497792 - that is below the threshold of 0.5; hence, we
would not use this as well.
'''
df = colDrop(df, 'MSZoning')
#----------------------------------------------------------------------------------------------------------
#Street
#plt.xticks(rotation =90)
#plt.scatter(df.loc[:,'Street'], df.loc[:, 'SalePrice'])
#plt.title('SalePrice vs Street')
df = df.loc[(df.loc[:, 'Street'] == 'Pave')]
'''
The graph shows that majority of the values are Pave.
Therefore we would keep all the values that are Pave and get rid of the column.
'''
df = colDrop(df, 'Street')
#----------------------------------------------------------------------------------------------------------
#dropping 'Ally', MiscFeature, PoolQC because there high percentage of Na values, Uncomment the print line below to see that.
#print(checkMissingValues(df))
df = df.drop(['Alley', 'MiscFeature', 'PoolQC', 'Utilities', 'Fence', 'FireplaceQu', 'LotFrontage'], axis = 1)
#----------------------------------------------------------------------------------------------------------
# LotShape
#checkRange(df, 'LotShape')
'''['Reg - 62.2 %', 'IR1 - 34.4 %', 'IR2 - 2.6 %', 'IR3 - 0.7 %'] - the percentage of
the values show that it is all Reg and IR1. Hence we would keep all those values that
are Reg and IR1 otherwise, we would drop those rows.'''
#df = df.loc[(df.loc[:,'LotShape'] == 'Reg') | (df.loc[:,'LotShape']=='IR1') | (df.loc[:,'LotShape']=='IR2')]
#df.loc[:,'LotShape'] = df.loc[:,'LotShape'].map(lambda val: 0 if(val=='Reg') else val)
#df.loc[:,'LotShape'] = df.loc[:,'LotShape'].map(lambda val: 1 if(val=='IR1') else val)
#df.loc[:,'LotShape'] = df.loc[:,'LotShape'].map(lambda val: 2 if(val=='IR2') else val)
#scatterPlot(df, 'LotShape')
'''Now we would discreditize the data into 0,1 or 2 and find the correlation between Lot Shape
and SalePrice'''
#print(c2(df, 'LotShape'))
#Hence we would drop this too.
df = colDrop(df, 'LotShape')
#_____________________________________________________________________________________________
#This is LandContour
#checkRange(df, 'LandContour')
#df = df.loc[(df.loc[:,'LandContour'] == 'Lvl') | (df.loc[:,'LandContour'] == 'Bnk') | (df.loc[:,'LandContour'] == 'Low') | (df.loc[:,'LandContour'] == 'HLS')]
#df.loc[:,'LandContour'] = df.loc[:,'LandContour'].map(lambda val: 0 if(val=='Lvl') else val)
#df.loc[:,'LandContour'] = df.loc[:,'LandContour'].map(lambda val: 1 if(val=='Bnk') else val)
#df.loc[:,'LandContour'] = df.loc[:,'LandContour'].map(lambda val: 2 if(val=='Low') else val)
#df.loc[:,'LandContour'] = df.loc[:,'LandContour'].map(lambda val: 3 if(val=='HLS') else val)
#histogram(df, 'LandContour')
#print(c2(df, 'LandContour'))
'''Since, LandContour and LotShape basically is providing the same information - we would use only
one of it - the one with the higher correlation with SalePrice, if it exceeds 0.5'''
df = colDrop(df, 'LandContour')
#____________________________________________________________________________________________
#This is LotConfig
#checkRange(df, 'LotConfig')
df = df.loc[(df.loc[:,'LotConfig'] == 'Inside') | (df.loc[:,'LotConfig'] == 'FR2') |(df.loc[:,'LotConfig'] == 'Corner') | (df.loc[:,'LotConfig'] == 'CulDsac') ]
df.loc[:,'LotConfig'] = df.loc[:,'LotConfig'].map(lambda val: 0 if(val=='Inside') else val)
df.loc[:,'LotConfig'] = df.loc[:,'LotConfig'].map(lambda val: 1 if(val=='FR2') else val)
df.loc[:,'LotConfig'] = df.loc[:,'LotConfig'].map(lambda val: 2 if(val=='Corner') else val)
df.loc[:,'LotConfig'] = df.loc[:,'LotConfig'].map(lambda val: 3 if(val=='CulDsac') else val)
#print(c2(df, 'LotConfig'))
#Removed Landconfig as well because the correlation is very less
df = colDrop(df, 'LotConfig')
#__________________________________________________________________________________________________
#LandSlope
#checkRange(df, 'LandSlope')
df = df.loc[(df.loc[:,'LandSlope'] == 'Gtl') | (df.loc[:, 'LandSlope'] == 'Mod')]
df.loc[:, 'LandSlope'] = df.apply(lambda row: 1 if row.loc['LandSlope'] == 'Gtl' else 0, axis = 1)
#print(c2(df, 'LandSlope'))
#It shows a high percentage of Gtl values, therefore - we would just keep those and remove the others
#and this column - It also shows a very high -negative correlation - therewould we would keep this column
#_____________________________________________________________________________________________
#Condition1 Condition2
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 0 if(val=='Artery') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 1 if(val=='Feedr') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 2 if(val=='Norm') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 3 if(val=='RRNn') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 4 if(val=='PosN') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 5 if(val=='PosA') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 6 if(val=='RRNe') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 7 if(val=='RRAe') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 8 if(val=='RRAn') else val)
#print(c2(df, 'Condition1'))
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 0 if(val=='Artery') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 1 if(val=='Feedr') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 2 if(val=='Norm') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 3 if(val=='RRNn') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 4 if(val=='PosN') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 5 if(val=='PosA') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 6 if(val=='RRNe') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 7 if(val=='RRAe') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 8 if(val=='RRAn') else val)
#print(c2(df, 'Condition2'))
''' Clearly the correlation between these two coloumns and SalePrice is very low; therefore, we
would drop these two columns'''
df = colDrop(df, 'Condition1')
df = colDrop(df, 'Condition2')
#____________________________________________________________________________________________
#BldgType
#labels = df.loc[:,'BldgType'].unique()
#sizes = df.loc[:,"BldgType"].value_counts().values
#explode=[0.1,0,0,0,0]
#parcent = 100.*sizes/sizes.sum()
#labels = ['{0} - {1:1.1f} %'.format(i,j) for i,j in zip(labels, parcent)]
#print(labels)
'''1 Fam and Duplex are more than 90% of the values for this column therefore we would,
binary encode them and see there correlation with the SalePrice'''
a = encode(df.loc[:, 'BldgType'])
#df.loc[:, '1Fam'] = a.loc[:, '1Fam']
df.loc[:, 'Duplex'] = a.loc[:, 'Duplex']
#print(c2(df, '1Fam'))
#print(c2(df, 'Duplex'))
'''
1Fam is not highly correlated with the SalePrice; therefore, which can be confirmed
by there scatter plots, we won't keep that. However, we would keep the Duplex.
'''
#data = pd.concat([df.loc[:, 'SalePrice'], df.loc[:,'1Fam']], axis=1)
#data.plot.scatter(x='1Fam', y='SalePrice', ylim=(0,800000));
#data1 = pd.concat([df.loc[:, 'SalePrice'], df.loc[:,'Duplex']], axis=1)
#data1.plot.scatter(x='Duplex', y='SalePrice', ylim=(0,800000));
'''In both the cases - SalePrice, is barely affected by 1Fam or DuPlex as it is scattered all over the
place. Hence we would also not consider BldgType. '''
df = colDrop(df, 'BldgType')
#____________________________________________________________________________________________
#RoofMatl
#checkRange(df, 'RoofMatl')
''' 99.6 percent of the values are CompShg therefore we would ignore the other values and ignore
this column'''
df = df.loc[df.loc[:, 'RoofMatl'] == 'CompShg']
df = colDrop(df, 'RoofMatl')
#____________________________________________________________________________________________
#MasVnrType
#checkRange(df, 'MasVnrType')
#scatterPlot(df, 'MasVnrArea')
    df.loc[:, 'MasVnrType'] = df.loc[:, 'MasVnrType'].fillna('None')
    df.loc[:, 'MasVnrArea'] = df.loc[:, 'MasVnrArea'].fillna(0)
a = df.loc[(df.loc[:,'MasVnrType'] == 'None') & (df.loc[:, 'MasVnrArea'] == 0)]
#print(len(a))
#print(len(df[df.loc[:, 'MasVnrType'] == 'None']))
#print(len(df[df.loc[:, 'MasVnrArea'] == 0]))
    '''This shows a relationship between the two, and even from the names we can tell that we only need one of them;
    therefore, we would keep the one that is easier to use (the numeric one) and check its correlation'''
df = colDrop(df, 'MasVnrType')
#print(c2(df, 'MasVnrArea'))
    '''We would keep this because its correlation with SalePrice is 0.499, which is approximately 0.5 - within the threshold
    we are considering.'''
#____________________________________________________________________________________________
#ExteriorQuality
#checkRange(df, 'ExterQual')
    '''From this we figure out that Gd, TA, and Ex make up almost all of the data; therefore we would remove the rest'''
df = df.loc[(df.loc[:, 'ExterQual'] == 'Gd') | (df.loc[:, 'ExterQual'] == 'TA') | (df.loc[:, 'ExterQual'] == 'Ex')]
df.loc[:,'ExterQual'] = df.loc[:,'ExterQual'].map(lambda val: 1 if(val=='Gd') else val)
df.loc[:,'ExterQual'] = df.loc[:,'ExterQual'].map(lambda val: 2 if(val=='TA') else val)
df.loc[:,'ExterQual'] = df.loc[:,'ExterQual'].map(lambda val: 3 if(val=='Ex') else val)
#histogram(df, 'ExterQual')
#checkRange(df, 'ExterCond')
#print(c2(df, 'ExterQual'))
    '''Since its correlation is negative and far from zero, we would keep this. However, we would not
    use ExterCond because both columns are almost the same thing: in both cases TA and Gd make up a large
    portion of the data.'''
df = colDrop(df, 'ExterCond')
#____________________________________________________________________________________________
#Foundation
#checkRange(df, 'Foundation')
df = df.loc[(df.loc[:, 'Foundation'] == 'PConc') | (df.loc[:, 'Foundation'] == 'CBlock') | (df.loc[:, 'Foundation'] == 'CBlock')]
df.loc[:,'Foundation'] = df.loc[:,'Foundation'].map(lambda val: 1 if(val=='PConc') else val)
df.loc[:,'Foundation'] = df.loc[:,'Foundation'].map(lambda val: 2 if(val=='CBlock') else val)
df.loc[:,'Foundation'] = df.loc[:,'Foundation'].map(lambda val: 3 if(val=='CBlock') else val)
#histogram(df, 'Foundation')
#scatterPlot(df, 'Foundation')
#print(c2(df, 'Foundation'))
    '''The correlation is strongly negative - therefore we will take this into consideration as well.'''
#____________________________________________________________________________________________
#Basement Features
'''There is high correlation between basement features (BsmtFinType2, BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1);
therefore, we would only use BsmtCond - correlation shown below'''
#checkRange(df, 'BsmtQual')
#checkRange(df, 'BsmtCond')
df.loc[:, 'BsmtQual'] = df.loc[:, 'BsmtQual'].fillna('None')
df.loc[:, 'BsmtCond'] = df.loc[:, 'BsmtCond'].fillna('None')
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 1 if(val=='Gd') else val)
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 2 if(val=='TA') else val)
    df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 3 if(val== 'Ex') else val)  # Ex coded as 3, consistent with the BsmtCond mapping below
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 4 if(val== 'Fa') else val)
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 5 if(val== 'None') else val)
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 6 if(val== 'Po') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 1 if(val=='Gd') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 2 if(val=='TA') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 3 if(val== 'Ex') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 4 if(val== 'Fa') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 5 if(val== 'None') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 6 if(val== 'Po') else val)
#print(df.loc[:, 'BsmtCond'].corr(df.loc[:,'BsmtQual']))
'''This shows a high correlation between BsmtCond and BsmtQual - therefore, we would keep BsmtCond'''
df = colDrop(df, 'BsmtQual')
#____________________________________________________________________________________________
#Gas
#checkRange(df, 'Heating') #Show 99.6% GasA - therefore keeping only data that is GasA
df = df.loc[df.loc[:, 'Heating'] == 'GasA']
df = colDrop(df, 'Heating')
#checkRange(df, 'HeatingQC')
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 1 if(val=='Gd') else val)
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 2 if(val=='TA') else val)
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 3 if(val== 'Ex') else val)
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 4 if(val== 'Fa') else val)
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 5 if(val== 'Po') else val)
#print(c2(df, 'HeatingQC'))
#scatterPlot(df, 'HeatingQC')
'''The graph shows that Heating Quality definitely affects the price range towards the upper end.'''
#____________________________________________________________________________________________
#Central Air
#checkRange(df, 'CentralAir')
df.loc[:,'CentralAir'] = df.loc[:,'CentralAir'].map(lambda val: 0 if(val=='N') else val)
df.loc[:,'CentralAir'] = df.loc[:,'CentralAir'].map(lambda val: 1 if(val=='Y') else val)
#scatterPlot(df, 'CentralAir')
#histogram(df, 'CentralAir')
#print(c2(df, 'CentralAir'))
df = colDrop(df, 'CentralAir')
#checkRange(df, 'Electrical')
df = df.loc[(df.loc[:, 'Electrical'] == 'SBrkr') | (df.loc[:, 'Electrical'] == 'FuseF')] #98.8 of the total values make this up
df.loc[:,'Electrical'] = df.loc[:,'Electrical'].map(lambda val: 0 if(val=='SBrkr') else 1)
#print(df.loc[:, 'Electrical'])
#print(c2(df, 'Electrical'))
'''This is a negative number close to 0; hence, a low corr between Electricity and SalePrice.
therefore - we would not keep this.'''
df = colDrop(df, 'Electrical')
#KitchenQuality
#checkRange(df, 'KitchenQual')
''' 98% of the data is made up of Gd, TA and Ex hence we would only keep, discretize it
and then find the correlation between KitcenQual and SalePrice'''
df = df.loc[(df.loc[:, 'KitchenQual'] == 'Gd') | (df.loc[:, 'KitchenQual'] == 'Ex') | (df.loc[:, 'KitchenQual'] == 'TA')]
df.loc[:,'KitchenQual'] = df.loc[:,'KitchenQual'].map(lambda val: 0 if(val=='Ex') else val)
df.loc[:,'KitchenQual'] = df.loc[:,'KitchenQual'].map(lambda val: 1 if(val=='TA') else val)
df.loc[:,'KitchenQual'] = df.loc[:,'KitchenQual'].map(lambda val: 2 if(val=='Gd') else val)
#print(c2(df, 'KitchenQual'))
'''It really has a low negative corr with SalePrice - therefore we would not keep this.'''
df = colDrop(df, 'KitchenQual')
#function
#checkRange(df, 'Functional')
df = df.loc[(df.loc[:, 'Functional'] == 'Typ') | (df.loc[:, 'Functional'] == 'Min1') | (df.loc[:, 'Functional'] == 'Maj1')]
df.loc[:,'Functional'] = df.loc[:,'Functional'].map(lambda val: 0 if(val=='Typ') else val)
df.loc[:,'Functional'] = df.loc[:,'Functional'].map(lambda val: 1 if(val=='Min1') else val)
df.loc[:,'Functional'] = df.loc[:,'Functional'].map(lambda val: 2 if(val=='Maj1') else val)
#print(c2(df, 'Functional'))
'''This has negative corr very close to zero; therefore, we would drop it.'''
df = colDrop(df, 'Functional')
#----------------------------------------------------------------------------------------------------------
#HouseStyle
#checkRange(df, 'HouseStyle')
df = df.loc[(df.loc[:, 'HouseStyle'] == '2Story') | (df.loc[:, 'HouseStyle'] == '1Story') | (df.loc[:, 'HouseStyle'] == '1.5Unf')]
df.loc[:,'HouseStyle'] = df.loc[:,'HouseStyle'].map(lambda val: 0 if(val=='2Story') else val)
df.loc[:,'HouseStyle'] = df.loc[:,'HouseStyle'].map(lambda val: 1 if(val=='1Story') else val)
df.loc[:,'HouseStyle'] = df.loc[:,'HouseStyle'].map(lambda val: 2 if(val=='1.5Unf') else val)
#print( df.loc[:,'HouseStyle'].dtype)
'''It is a high negative correlation with SalePrice - therefore we would keep it.'''
#----------------------------------------------------------------------------------------------------------
#RoofStyle
#checkRange(df, 'RoofStyle')
'''Data represents a lot like HouseStyle; therefore, assuming a high correlation between
roofstyle and housestyle - we would drop this as well.'''
df = colDrop(df, 'RoofStyle')
#_____________________________________________________________
#Exterior1st
df = colDrop(df, 'Exterior1st')
df = colDrop(df, 'Exterior2nd')
df = colDrop(df, 'BsmtExposure')
df = colDrop(df, 'BsmtFinType1')
df = colDrop(df, 'BsmtFinType2')
df = colDrop(df, 'GarageType')
#checkRange(df, 'GarageFinish')
a = encode(df.loc[:, 'GarageFinish'])
df = join(df, a)
df = colDrop(df,'GarageFinish')
#checkRange(df, 'PavedDrive')
df = df.loc[(df.loc[:, 'PavedDrive'] == 'Y') | (df.loc[:, 'PavedDrive'] == 'N')]
df.loc[:,'PavedDrive'] = df.loc[:,'PavedDrive'].map(lambda val: 0 if(val=='N') else 1)
#print(c2(df, 'PavedDrive'))
'''Very low correlation - hence we would drop this.'''
df = colDrop(df, 'PavedDrive')
df = colDrop(df, 'GarageCond')
df = colDrop(df, 'GarageQual')
#checkRange(df, 'SaleType')
df = df.loc[(df.loc[:, 'SaleType'] == 'WD') | (df.loc[:, 'SaleType'] == 'New') | (df.loc[:, 'SaleType'] == 'COD')]
df.loc[:,'SaleType'] = df.loc[:,'SaleType'].map(lambda val: 0 if(val=='WD') else val)
df.loc[:,'SaleType'] = df.loc[:,'SaleType'].map(lambda val: 1 if(val=='New') else val)
df.loc[:,'SaleType'] = df.loc[:,'SaleType'].map(lambda val: 2 if(val=='COD') else val)
df = colDrop(df, 'SaleType')
#checkRange(df, 'SaleCondition')
df = df.loc[(df.loc[:, 'SaleCondition'] == 'Normal') | (df.loc[:, 'SaleCondition'] == 'Partial') | (df.loc[:, 'SaleCondition'] == 'Abnormal')]
df.loc[:,'SaleCondition'] = df.loc[:,'SaleCondition'].map(lambda val: 0 if(val=='Normal') else val)
df.loc[:,'SaleCondition'] = df.loc[:,'SaleCondition'].map(lambda val: 1 if(val=='Partial') else val)
df.loc[:,'SaleCondition'] = df.loc[:,'SaleCondition'].map(lambda val: 2 if(val=='Abnormal') else val)
#print(c2(df, 'SaleCondition'))
#_handling the NA values
df.loc[:,'MasVnrArea'] = df.loc[:,'MasVnrArea'].fillna(0)
#df.loc[:,'BsmtExposure'] = df.loc[:,'BsmtExposure'].fillna(df.loc[:,'BsmtExposure'].mode()[0])
#df.loc[:,'BsmtFinType1'] = df.loc[:,'BsmtFinType1'].fillna(df.loc[:,'BsmtFinType1'].mode()[0])
#df.loc[:,'BsmtFinType2'] = df.loc[:,'BsmtFinType2'].fillna(df.loc[:,'BsmtFinType2'].mode()[0])
#df.loc[:,'GarageType'] = df.loc[:,'GarageType'].fillna(df.loc[:,'GarageType'].mode()[0])
df.loc[:,'GarageYrBlt'] = df.loc[:,'GarageYrBlt'].fillna(df.loc[:,'GarageYrBlt'].mean())
#df.loc[:,'GarageFinish'] = df.loc[:,'GarageFinish'].fillna(df.loc[:,'GarageFinish'].mode()[0])
#df.loc[:,'GarageQual'] = df.loc[:,'GarageQual'].fillna(df.loc[:,'GarageQual'].mode()[0])
#df.loc[:,'GarageCond'] = df.loc[:,'GarageCond'].fillna(df.loc[:,'GarageCond'].mode()[0])
#corr=df.corr()["SalePrice"]
#corr[np.argsort(corr, axis=0)[::-1]]
#print(corr)
'''Columns that we keep because their correlation with SalePrice is relatively high:
'LotArea', 'HouseStyle', 'OverallQual', 'OverallCond',
'YearRemodAdd', 'MasVnrArea', 'ExterQual', 'Foundation', 'BsmtCond',
'TotalBsmtSF', '1stFlrSF', 'GrLivArea', 'FullBath', 'KitchenAbvGr',
'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars', 'Unf'
'''
df = colDrop(df, 'MSSubClass')
df = colDrop(df, 'Fireplaces')
df = colDrop(df, 'LandSlope')
df = colDrop(df, 'BsmtFinSF1')
df = colDrop(df, 'BsmtFinSF2')
df = colDrop(df, 'BsmtUnfSF')
df = colDrop(df, 'HeatingQC')
df = colDrop(df, '2ndFlrSF')
df = colDrop(df, 'LowQualFinSF')
df = colDrop(df, 'BsmtHalfBath')
df = colDrop(df, 'FullBath')
df = colDrop(df, 'HalfBath')
df = colDrop(df, 'BedroomAbvGr')
df = colDrop(df, 'KitchenAbvGr')
df = colDrop(df, 'TotRmsAbvGrd')
df = colDrop(df, 'GarageCars')
df = colDrop(df, 'WoodDeckSF')
df = colDrop(df, 'OpenPorchSF')
df = colDrop(df, '3SsnPorch')
df = colDrop(df, 'ScreenPorch')
df = colDrop(df, 'PoolArea')
df = colDrop(df, 'MiscVal')
df = colDrop(df, 'MoSold')
df = colDrop(df, 'YrSold')
df = colDrop(df, 'Fin')
df = colDrop(df, 'RFn')
df = colDrop(df, 'EnclosedPorch')
return df
def standardize(df, ls):
df.loc[:, ls] = (df.loc[:, ls] - df.loc[:, ls].mean())
df.loc[:, ls] = (df.loc[:, ls])/df.loc[:, ls].std()
def test10():
df = dataProcessing()
inputCols = ['Id', 'LotArea', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt',
'YearRemodAdd', 'MasVnrArea', 'ExterQual', 'Foundation', 'BsmtCond',
'TotalBsmtSF', '1stFlrSF', 'GrLivArea', 'BsmtFullBath',
'GarageYrBlt', 'GarageArea', 'SaleCondition', 'Duplex', 'Unf']
outputCol = ['SalePrice']
#print(df.dtypes)
standardize(df, inputCols + outputCol)
df1 = df.loc[:,inputCols]
outputSeries = df.loc[:,outputCol]
alg = GradientBoostingRegressor()
alg.fit(df1, outputSeries)
cvScores = model_selection.cross_val_score(alg, df1, outputSeries ,cv=10, scoring='r2')
print(cvScores.mean())
'''
alg = LogisticRegression()
df1 = df.loc[:, inputCols]
df2 = df.loc[:, outputCol]
standardize(df1, inputCols)
standardize(df2, outputCol)
#FROM:https://stackoverflow.com/questions/34165731/a-column-vector-y-was-passed-when-a-1d-array-was-expected
#Explanation: casts float to int types, since an error was raised otherwise
df2=df2.astype('int')
alg.fit(df1,df2.values.ravel())
#END: https://stackoverflow.com/questions/34165731/a-column-vector-y-was-passed-when-a-1d-array-was-expected
cvScores = model_selection.cross_val_score(alg, df1, df2 ,cv=10, scoring='accuracy')
orginal_cvScore_mean = cvScores.mean()
print(orginal_cvScore_mean)
'''
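#____________________________________________________________________________________________
#Hypothetical entry point (assumption, not shown in the original file): calling test10()
#runs the whole pipeline above -- dataProcessing() cleaning, z-score standardization via
#standardize(), then a GradientBoostingRegressor scored with 10-fold cross-validated r2.
#Left commented out in case the driver code earlier in the file already invokes it.
#test10()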
|
shahraizniazi/Regression-Simulation
|
Final_Final/Final.py
|
Final.py
|
py
| 36,277 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17484990800
|
#Integral of an implicitly defined function
from math import *
def func(x, y): #Implicitly defined function
f = exp(x**3)-(exp(y))*(x**6 - 2*(x**3)*y - 2*(x**3) + y**2 + 2*y +2)
return f
def found_y(x, eps = 1e-8):
y_left = -0.1
y_right = 0.1
while (func(x, y_left)*func(x, y_right) > 0):
y_left = y_left-0.1
y_right = y_right+0.1
y_midle = y_left + (y_right - y_left)/2
while ( (y_right-y_left) > eps*abs(y_midle) + eps):
if (func(x, y_left)*func(x, y_midle) > 0):
y_left = y_midle
y_midle = y_left + (y_right - y_left)/2
elif (func(x, y_left)*func(x, y_midle) < 0):
y_right = y_midle
y_midle = y_left + (y_right - y_left)/2
return y_midle
def integrall(x_left, x_right, eps = 1e-8):
S1 = ((found_y(x_left, eps)+found_y(x_right, eps))/2*(x_right - x_left))
step = 1
while 1:
step *= 2
h = (x_right - x_left)/step
left = x_left
S2 = (found_y(x_left,eps)+found_y(x_right,eps))/2
        for i in range(1, step): #interior nodes of the composite trapezoid rule
left += h
S2 += found_y(left,eps)
S2 = S2*h
if (abs(S2-S1) < eps*S1 + eps):
break
S1 = S2
print(step)
return S2
############ Main program ############
MIN_LEFT = 0
MAX_RIGHT = 2
eps = 1e-4
print(integrall(MIN_LEFT, MAX_RIGHT, eps))
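#Illustrative check (assumption, not in the original script): the step count is doubled
#until successive trapezoid sums agree to within eps, so tightening the tolerance should
#reproduce the same leading digits at the cost of more subdivisions.
#print(integrall(MIN_LEFT, MAX_RIGHT, eps / 10))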
|
orehovakatya/4sem
|
Вычислительные алгоритмы/lab1.integral/ВА_Задача1.py
|
ВА_Задача1.py
|
py
| 1,483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18042025196
|
# __init__.py
__version__ = "1.2.2" # Be sure to update version in setup.py as well
from difflib import SequenceMatcher
from sh3ll.command import command
from art import tprint
class IS(object):
def __init__(self, name="", font="", prefix="CLA>"):
self.name = name
self.font = font
self.prefix = prefix
self.commands = []
self.categories = []
"""
Param: -variable value
"""
def run(self):
if self.font != "":
tprint(self.name, font=self.font)
else:
tprint(self.name)
while True:
try:
line = input(self.prefix).lower()
except KeyboardInterrupt:
exit()
inputted_command = line.split()[0]
# Parse command into command and arguments
args = []
tmp = line.split()[1:]
count = 0
for arg in range(len(tmp)):
if count == 0:
if tmp[arg][0] == "'":
for arg2 in range(len(tmp[arg + 1:])):
if tmp[arg + 1 + arg2][-1] == "'":
args.append(''.join([s.strip("'") for s in tmp[arg:arg + arg2 + 2]]))
count = len(tmp[arg:arg + arg2 + 1])
else:
if tmp[arg][-1] != "'":
args.append(tmp[arg])
else:
count -= 1
cmds = [cmd.name for cmd in self.commands]
categories = [cmd.category for cmd in self.commands]
aliases = {}
for command in self.commands:
aliases[command.name] = command.aliases
if inputted_command != "help" and inputted_command != "exit" and inputted_command != "q":
if inputted_command in cmds and self.commands[cmds.index(inputted_command)].category == "":
self.commands[cmds.index(inputted_command)].execute(args)
elif inputted_command in categories:
if line.split()[1] in cmds:
if self.commands[cmds.index(line.split()[1])].category == line.split()[0]:
self.commands[cmds.index(line.split()[1])].execute(args[1:])
else:
for command in self.commands:
if line.split()[1] in command.aliases:
command.execute(args[1:])
else:
highestSimilarity = 0
mostSimilarCommand = ""
mostSimilarCommandCategory = ""
for command in self.commands:
similarity = SequenceMatcher(None, command.name, inputted_command).ratio()
if similarity > highestSimilarity:
highestSimilarity = SequenceMatcher(None, command.name, inputted_command).ratio()
mostSimilarCommand = command.name
mostSimilarCommandCategory = command.category
print(f"Command not recognized.\nDid you mean: '{mostSimilarCommandCategory} {mostSimilarCommand}'?")
else:
self.help() if inputted_command == "help" else exit()
def help(self):
print("help\tDisplays this menu")
print("exit OR q\tExits the program")
for command in self.commands:
if command.category == "":
print(f"{command.name}\t{command.help}")
print()
for category in self.categories:
if category != "":
print(f"\"{category}\" Commands:\n" + ("-" * (len(category) + 12)))
cmds = []
for command in self.commands:
if command.category == category:
cmds.append(command)
longest_name = max([len(cmd.name) for cmd in cmds])
longest_aliases = max([len(str(cmd.aliases)) for cmd in cmds])
longest_help = max([len(cmd.help) for cmd in cmds])
print("\tCommand" + (" " * (abs((len(category) + 1 + longest_name) - 7) + 4)) + "Aliases" + (
" " * (abs(longest_aliases - 7) + 4)) + "Help" + " " * (abs(longest_help - 4) + 4))
print("\t" + ("-" * 7) + (" " * (abs((len(category) + 1 + longest_name) - 7) + 4)) + ("-" * 8) + (
" " * (abs(longest_aliases - 8) + 4)) + ("-" * 4))
for command in cmds:
if abs(longest_name - len(command.name)) == 0:
print(f"\t{category} {command.name}" + (" " * (abs((len(category) + 1 + longest_name) - (
len(category) + len(command.name) + 1)) + 4)), end="")
else:
print(f"\t{category} {command.name}" + (" " * (
abs((len(category) + 1 + longest_name) - len(f"{category} {command.name}")) + 4)),
end="")
if abs(longest_aliases - len(str(command.aliases))) == 0:
print(f"{command.aliases} ", end="")
else:
print(f"{command.aliases}" + (" " * (abs(longest_aliases - len(str(command.aliases))) + 4)),
end="")
print(f"{command.help}" + (" " * abs(longest_help - len(command.help))))
print()
def command(self, name="Unknown command", aliases=[], help="No help given", category="", progress=()):
def wrap(function):
if category not in self.categories:
self.categories.append(category) # Auto register cats
self.commands.append(command(function, name=name, aliases=aliases, help=help, category=category, progress=progress))
def wrapped_function(*args):
return function(*args)
return wrapped_function
return wrap
|
HullaBrian/sh3ll
|
sh3ll/__init__.py
|
__init__.py
|
py
| 6,104 |
python
|
en
|
code
| 2 |
github-code
|
6
|
25881575517
|
#!/usr/bin/env python3
import ast
f = open("in.txt", "r").read().strip().split("\n\n")
xy = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def compare(first, second):
if type(first) is not list and type(second) is not list:
return 0 if first == second else (-1 if first < second else 1)
if type(first) is int and type(second) is list:
first = [first]
if type(first) is list and type(second) is int:
second = [second]
for firstc, secondc in zip(first, second):
r = compare(firstc, secondc)
if r != 0:
return r
return -1 if len(first) < len(second) else (0 if len(first) == len(second) else 1)
seznam = []
for x in f:
prvi = x.split("\n")[0]
drugi = x.split("\n")[1]
seznam.append((ast.literal_eval(prvi), ast.literal_eval(drugi)))
packets = []
for a, b in seznam:
packets.append(a), packets.append(b)
packets.append([[2]]), packets.append([[6]])
a = len(packets)
for i in range(0, a):
for j in range(0, a - 1):
if compare(packets[j], packets[j + 1]) > 0:
temp = packets[j + 1]
packets[j + 1] = packets[j]
packets[j] = temp
zmnozek = 1
for i in range(a):
if packets[i] == [[2]] or packets[i] == [[6]]:
zmnozek *= i + 1
print(zmnozek)
|
Anja159/Advent_of_code_2022
|
Day13/part2.py
|
part2.py
|
py
| 1,335 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9710578646
|
import time
class Timer:
def __init__(self):
self.dt = -1.0
self.et = 0.0
self.currentTime = 0.0
self.lastTime = 0.0
self.timerRunning = False
def isTimerOn(self):
if self.dt > 0:
return True
return False
def start(self):
if self.timerRunning is not True:
            self.currentTime = time.perf_counter()  # time.clock() was removed in Python 3.8
self.lastTime = self.currentTime
self.timerRunning = True
def stop(self):
self.timerRunning = False
def reset(self):
self.dt = -1.0
self.et = 0.0
self.currentTime = 0.0
self.lastTime = 0.0
self.timerRunning = False
def getDt(self):
if self.timerRunning is not True :
return 0.0
        self.currentTime = time.perf_counter()  # time.clock() was removed in Python 3.8
self.dt = self.currentTime - self.lastTime
self.lastTime = self.currentTime
self.et += self.dt
return self.dt
def getEt(self):
self.getDt()
return self.et
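
# Minimal usage sketch (not part of the original class): start the timer, wait briefly,
# then read the per-frame delta and the accumulated elapsed time.
if __name__ == '__main__':
    t = Timer()
    t.start()
    time.sleep(0.1)
    print("dt:", t.getDt())
    print("et:", t.getEt())
    t.stop()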
|
zetwhite/2019_assignments
|
ComputerGraphics/PA02/Timer.py
|
Timer.py
|
py
| 1,033 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13130316582
|
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from frontend.views import SPAView, UserConfigSPAWebService
# spa view
VIEWS_PATTERNS = [
url(regex=r'$',
view=login_required(SPAView.as_view()),
name='spa'),
]
# config endpoint
API_PATTERNS = [
url(regex=r'user-config/$',
view=login_required(UserConfigSPAWebService.as_view()),
name='user-config'),
]
urlpatterns = [
url(r'^api/', include(API_PATTERNS)),
url(r'^', include(VIEWS_PATTERNS)),
]
|
victordelval/spa-design-basics
|
frontend/urls.py
|
urls.py
|
py
| 549 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74473554749
|
import struct
import os
def reverse_string(string):
string_out =""
for i in range(len(string)-1,-1,-1):
string_out+=string[i]
return string_out
def print_hex(number):
num = number
string = ""
if num>0:
while num>=1:
ost = num%16
if ost<10:
string+=str(int(ost))
elif ost==10:
string+="A"
elif ost==11:
string+="B"
elif ost==12:
string+="C"
elif ost==13:
string+="D"
elif ost==14:
string+="E"
elif ost==15:
string+="F"
else:
print("Error")
break
num = int(num/16)
print (reverse_string(string))
return True
else:
return False
num = 1234567890
print_hex(num)
print (hex(num))
numb =hex(num)
print (numb)
|
kvintagav/learning_to_program
|
Python/num_to_hex.py
|
num_to_hex.py
|
py
| 695 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5478969307
|
# A simple MLP network structure for point clouds,
#
# Added by Jiadai Sun
import torch
import torch.nn as nn
import torch.nn.functional as F
class PointRefine(nn.Module):
def __init__(self, n_class=3,
in_fea_dim=35,
out_point_fea_dim=64):
super(PointRefine, self).__init__()
self.n_class = n_class
self.PPmodel = nn.Sequential(
nn.BatchNorm1d(in_fea_dim),
nn.Linear(in_fea_dim, 64),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.Linear(64, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 256),
nn.BatchNorm1d(256),
nn.ReLU(),
nn.Linear(256, out_point_fea_dim)
)
self.logits = nn.Sequential(
nn.Linear(out_point_fea_dim, self.n_class)
)
def forward(self, point_fea):
# the point_fea need with size (b, N, c) e.g. torch.Size([1, 121722, 35])
# process feature
# torch.Size([124668, 9]) --> torch.Size([124668, 256])
processed_point_fea = self.PPmodel(point_fea)
logits = self.logits(processed_point_fea)
point_predict = F.softmax(logits, dim=1)
return point_predict
if __name__ == '__main__':
import time
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = PointRefine()
model.train()
# t0 = time.time()
# pred = model(cloud)
# t1 = time.time()
# print(t1-t0)
total = sum([param.nelement() for param in model.parameters()])
print("Number of PointRefine parameter: %.2fM" % (total/1e6))
# Number of PointRefine parameter: 0.04M
|
haomo-ai/MotionSeg3D
|
modules/PointRefine/PointMLP.py
|
PointMLP.py
|
py
| 1,699 |
python
|
en
|
code
| 206 |
github-code
|
6
|
73130357629
|
# Multiples of 3 and 5 return 'Fizz' and 'Buzz' respectively, while multiples of both (15) return 'FizzBuzz'
def fizzBuzz():
# Solution 1
'''
for i in range(1, 16):
if i in [3, 6, 9, 12]:
i='Fizz'
if i in [5, 10]:
i='Buzz'
if i==15:
i='FizzBuzz'
print(i)
'''
# Solution 2
for ii in range(1, 16):
if ii == 15:
ii='FizzBuzz'
elif ii % 3 == 0:
ii='Fizz'
elif ii % 5 == 0:
ii='Buzz'
print(ii)
if __name__ == "__main__":
fizzBuzz()
|
Dzhud/fizzBuzz-Solution
|
fizzy.py
|
fizzy.py
|
py
| 576 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17598463073
|
from PySide import QtCore, QtGui
class UserDialog(QtGui.QDialog):
def __init__(self, cl, text, parent=None):
super(UserDialog, self).__init__(parent)
textLabel = QtGui.QLabel(str('<b>%s</b>' % text))
mainbox = QtGui.QVBoxLayout()
mainbox.addWidget(textLabel)
vbox = QtGui.QVBoxLayout()
entry = 1
for client in cl:
userLabel = QtGui.QLabel(str('%d. <b>%s</b>' % (entry, client.username())))
vbox.addWidget(userLabel)
entry += 1
widget = QtGui.QWidget()
widget.setLayout(vbox)
scrollArea = QtGui.QScrollArea()
scrollArea.setWidget(widget)
mainbox.addWidget(scrollArea)
closeButton = QtGui.QPushButton('&Close')
closeButton.clicked.connect(self.closeDialog)
mainbox.addWidget(closeButton)
self.setLayout(mainbox)
self.resize(270, 400)
self.setWindowTitle(text)
def closeDialog(self):
self.done(0)
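
# Hedged usage sketch (not part of the original module): the client objects passed to
# UserDialog are only assumed to expose a username() method, which is all the dialog reads.
if __name__ == '__main__':
    import sys

    class _FakeClient(object):
        def __init__(self, name):
            self._name = name

        def username(self):
            return self._name

    app = QtGui.QApplication(sys.argv)
    dialog = UserDialog([_FakeClient('alice'), _FakeClient('bob')], 'Connected users')
    dialog.exec_()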
|
freepax/PysideSocketSkeletons
|
UserDialog.py
|
UserDialog.py
|
py
| 1,002 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9627877293
|
from . import views
from django.urls import path, include
urlpatterns = [
path('', views.index, name="index"),
path('about/', views.about, name="about"),
path('contact/', views.contact, name="contact"),
path('services/', views.services, name="services"),
path('skill/', views.skill, name="skill"),
]
|
abrahammmmmmmm/dynamicPortfolio
|
portfolio/app1/urls.py
|
urls.py
|
py
| 329 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70400872827
|
"""
Snake game view.
"""
import sys
import pygame
from snake_controller import get_mouse_position
def play_eaten_sound():
"""
Plays a crunch sound.
"""
food_eaten_sound = pygame.mixer.Sound('sounds/snake_eat_sound.wav')
pygame.mixer.Sound.play(food_eaten_sound)
def play_click_sound():
"""
Plays a click sound
"""
click_sound = pygame.mixer.Sound('sounds/click_sound.wav')
pygame.mixer.Sound.play(click_sound)
class View():
"""
Pygame based view of a snake game.
Attributes:
_START_MENU: A png image of the start menu.
_END_MENU: A png image of the end menu.
_board: A Board instance representing the snake game to
display.
_screen: A display surface representing the window to display
the rest of the game components on.
_head_image: A png image representing the head of the snake.
_start_menu_surface: A surface representing the start menu.
_end_menu_surface: A surface representing the end menu.
_start_menu_rect: A rect representing the start menu.
_end_menu_rect: A rect representing the end menu.
"""
_START_MENU = pygame.image.load('images/start_menu.png')
_END_MENU = pygame.image.load('images/end_menu.png')
def __init__(self, board):
"""
Create a new view of a snake game.
Args:
_board: A Board instance representing the snake game to
display.
_screen: A display surface representing the window to display
the rest of the game components on.
_head_image: A png image representing the head of the snake.
_start_menu_surface: A surface representing the start menu.
_end_menu_surface: A surface representing the end menu.
_start_menu_rect: A rect representing the start menu.
_end_menu_rect: A rect representing the end menu.
"""
self._board = board
self._screen = pygame.display.set_mode((self._board.LENGTH + \
self._board.BORDER_WIDTH,self._board.HEIGHT+self._board.BORDER_WIDTH))
self._head_image = pygame.image.load('images/snake_left.png')
self._start_menu_surface = pygame.display.set_mode((self._board.LENGTH + \
self._board.BORDER_WIDTH,self._board.HEIGHT+self._board.BORDER_WIDTH))
self._end_menu_surface = pygame.display.set_mode((self._board.LENGTH + \
self._board.BORDER_WIDTH,self._board.HEIGHT+self._board.BORDER_WIDTH))
self._start_menu_rect = self._START_MENU.get_rect(center = (300,300))
self._end_menu_rect = self._END_MENU.get_rect(center = (300,300))
def get_head_image(self):
"""
Gets the correct image for the snake head.
Gets the correct orientation of the snake head based on
the snake's head position in relation to its body.
"""
head_up = pygame.image.load('images/snake_up.png')
head_down = pygame.image.load('images/snake_down.png')
head_right = pygame.image.load('images/snake_right.png')
head_left = pygame.image.load('images/snake_left.png')
# figures out what image of the head to use based on
# the relative position of the body and head
head_orientation_x = self._board.snake.coordinates[1][0] - \
self._board.snake.coordinates[0][0]
head_orientation_y = self._board.snake.coordinates[1][1] - \
self._board.snake.coordinates[0][1]
if head_orientation_x == self._board.snake.GRID_SIZE and head_orientation_y == 0:
self._head_image = head_left
elif head_orientation_x == -self._board.snake.GRID_SIZE and head_orientation_y == 0:
self._head_image = head_right
elif head_orientation_x == 0 and head_orientation_y == self._board.snake.GRID_SIZE:
self._head_image = head_up
elif head_orientation_x == 0 and head_orientation_y == -self._board.snake.GRID_SIZE:
self._head_image = head_down
def draw(self, ate_potion):
"""
Display a representation of the snake game.
"""
pygame.init()
pygame.mixer.init()
pygame.display.set_caption("Ultimate Snake Game")
icon = pygame.image.load('images/snake_icon.png')
pygame.display.set_icon(icon)
self._screen.fill('white')
self.draw_apple()
self.draw_border()
self.draw_potion()
self.draw_speed()
if ate_potion:
self.draw_invisible_snake()
elif not ate_potion:
self.draw_snake()
self.draw_menus()
self.draw_score()
pygame.display.update()
def draw_apple(self):
"""
Displays the apple item.
"""
# blit an image of an apple for the food
apple_image = pygame.image.load('images/apple.png').convert_alpha()
apple_rect = apple_image.get_rect()
apple_rect.x = self._board.food.item_location[0]
apple_rect.y = self._board.food.item_location[1]
self._screen.blit(apple_image, apple_rect)
def draw_potion(self):
"""
Displays the potion item.
"""
# blit an image of the potion for the invisibility potion
potion_image = pygame.image.load('images/potion.png').convert_alpha()
potion_rect = potion_image.get_rect()
potion_rect.x = self._board.potion.item_location[0]
potion_rect.y = self._board.potion.item_location[1]
self._screen.blit(potion_image, potion_rect)
def draw_speed(self):
"""
Draws the speed boost item.
"""
# blit an image of the speed boost for the speed boost item
lightning_image = pygame.image.load(
'images/lightning.png').convert_alpha()
lightning_rect = lightning_image.get_rect()
lightning_rect.x = self._board.speed_boost.item_location[0]
lightning_rect.y = self._board.speed_boost.item_location[1]
self._screen.blit(lightning_image, lightning_rect)
def draw_snake(self):
"""
Displays the snake head and body.
"""
# get head image and blit
self.get_head_image()
head_rect = self._head_image.get_rect()
head_rect.x = self._board.snake.coordinates[0][0]
head_rect.y = self._board.snake.coordinates[0][1]
self._screen.blit(self._head_image, head_rect)
# get surface for each snake body chunk, and blit each one
for segment in self._board.snake.coordinates[1:]:
surface = pygame.Surface((30, 30))
surface.fill(pygame.Color('blue'))
segment_rect = surface.get_rect()
segment_rect.x = segment[0]
segment_rect.y = segment[1]
self._screen.blit(surface, segment_rect)
def draw_invisible_snake(self):
"""
Displays invisible snake.
Draws the invisible snake by not displaying anything.
"""
def draw_border(self):
"""
Displays the border frame around the screen.
"""
# create frame around the game window
# top line
pygame.draw.rect(self._screen, (169, 169, 169), [
0, 0, self._board.LENGTH, self._board.BORDER_WIDTH*2])
# bottom line
pygame.draw.rect(self._screen, (169, 169, 169), [
0, self._board.HEIGHT, self._board.LENGTH, self._board.BORDER_WIDTH])
# left line
pygame.draw.rect(self._screen, (169, 169, 169), [
0, 0, self._board.BORDER_WIDTH, self._board.HEIGHT])
# right line
pygame.draw.rect(self._screen, (169, 169, 169), [
self._board.LENGTH, 0, self._board.BORDER_WIDTH, self._board.LENGTH +
self._board.BORDER_WIDTH])
def draw_score(self):
"""
Displays the score.
"""
# display score
score = str(self._board.score)
font = pygame.font.SysFont(None, 60)
score_text = font.render(f'Score: {score}', True, 'black')
self._screen.blit(score_text, (30, 10))
pygame.display.update()
def draw_start_menu(self):
"""
Displays the start menu.
"""
self._start_menu_surface.blit(self._START_MENU, self._start_menu_rect)
def draw_game_over(self):
"""
Displays the game over menu.
"""
self._end_menu_surface.blit(self._END_MENU, self._end_menu_rect)
def draw_menus(self):
"""
Draws each menu as needed.
"""
# Draws starts screen.
while self._board.start_game is False:
self.draw_start_menu()
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# Will exit start screen if mouse cursor clicks on start button.
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if self._start_menu_rect.collidepoint(get_mouse_position()):
play_click_sound()
self._board.start_game = True
# Draws game over screen.
while self._board.game_over is True:
self._screen.fill('white')
self.draw_game_over()
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# Will trigger new game if mouse cursor clicks on restart button.
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if self._end_menu_rect.collidepoint(get_mouse_position()):
play_click_sound()
self._board.new_game = True
break
|
olincollege/ultimate-snake
|
snake_view.py
|
snake_view.py
|
py
| 9,976 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17508200123
|
from typing import List
def products_div(arr: List[int]) -> List[int]:
prod = 1
for x in arr:
prod *= x
res = [prod//x for x in arr]
return res
'''
arr: [2, 3, 4, 5,]
l: [2 23 234]
r: [5 54 543]
res: [.-345, 2-45, 23-5, 234-.]
l = []
prod = 1
for i in range(len(arr)-1):
prod *= arr[i]
l.append(prod)
prod = 1
for i in range(len(arr)-1, 0, -1):
prod *= arr[i]
r.append(prod)
res = [1] * len(arr)
for i in range(len(arr) - 1):
res[i] *= r[-i-1]
res[i+1] = l[i]
return res
'''
def products(arr: List[int]) -> List[int]:
l = []
prod = 1
for i in range(len(arr)-1):
prod *= arr[i]
l.append(prod)
prod = 1
r = []
for i in range(len(arr)-1, 0, -1):
prod *= arr[i]
r.append(prod)
res = [1] * len(arr)
for i in range(len(arr) - 1):
res[i] *= r[-i-1]
res[i+1] = l[i]
return res
arr = [2, 3, 4, 5,]
print(products(arr)) # [60, 40, 30, 24]
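# Extra illustrative checks (assumption, not in the original): both variants agree on
# zero-free inputs; products() builds each entry from prefix/suffix products, avoiding division.
assert products(arr) == products_div(arr) == [60, 40, 30, 24]
assert products([1, 2, 3]) == [6, 3, 2]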
|
soji-omiwade/cs
|
dsa/before_rubrik/array_of_array_of_products.py
|
array_of_array_of_products.py
|
py
| 982 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41584655028
|
import json
import time
import requests
# List of target URLs to crawl
PAGE_URL_LIST = [
    'http://example.com/1.page',
    'http://example.com/2.page',
    'http://example.com/3.page',
]
def fetch_pages():
"""페이지의 내용을 추출합니다"""
# 처리 기록 전용 로그 파일을 append 모드로 엽니다
f_info_log = open('crawler_info.log', 'a')
# 오류 기록 전용 로그 파일을 append 모드로 엽니다
f_error_log = open('crawler_error.log', 'a')
# 추출 내용을 저장할 딕셔너리
page_contents = {}
# 터미널에 처리 시작을 출력하고, 로그 파일에도 메시지를 출력합니다.
msg = "크롤링을 시작합니다\n"
print(msg)
f_info_log.write(msg)
    for page_url in PAGE_URL_LIST:
        try:
            r = requests.get(page_url, timeout=30)
            r.raise_for_status()  # Raise an exception if the response indicates an error.
        except requests.exceptions.RequestException as e:
            # If a requests-related exception occurs,
            # print the error to the terminal and the error log.
            msg = "[ERROR] {exception}\n".format(exception=e)
            print(msg)
            f_error_log.write(msg)
            continue  # On an exception, skip this URL rather than stopping the loop.
        # If the content was fetched successfully, store it in the dictionary.
        page_contents[page_url] = r.text
        time.sleep(1)  # Space out requests to limit the load on the target site.
f_info_log.close()
f_error_log.close()
return page_contents
if __name__ == '__main__':
page_contents = fetch_pages()
f_page_contents = open('page_contents.json', 'w')
json.dump(page_contents, f_page_contents, ensure_ascii=False)
f_page_contents.close()
|
JSJeong-me/2021-K-Digital-Training
|
Web_Crawling/python-crawler/chapter_5/get_example_domain_pages.3.py
|
get_example_domain_pages.3.py
|
py
| 1,939 |
python
|
ko
|
code
| 7 |
github-code
|
6
|
33120873994
|
import json
import re
from typing import Any
from aiohttp import ClientSession
from .exceptions import FailedToParseIntialData
class Client:
"""YouTube API client."""
_session: ClientSession
@classmethod
async def new(cls, host: str = "https://www.youtube.com"):
"""Create a new YouTube client."""
self = cls()
self._session = ClientSession(base_url=host, raise_for_status=True)
return self
async def get_search_results(self, search_query: str) -> str:
"""Get YouTube search results."""
async with self._session.get(
"/results", params={"search_query": search_query}
) as response:
return await response.text()
async def close(self) -> None:
"""Close client session."""
await self._session.close()
def get_initial_data(search_results: str) -> dict[str, Any]:
"""Get YouTube initial data."""
initial_data_regex = re.compile(r"(var\ ytInitialData\ =\ )(.*);</script><script")
match = initial_data_regex.search(search_results)
if not match:
raise FailedToParseIntialData
return json.loads(match.group(2))
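

# Hedged usage sketch (not part of the original module): fetch a results page, extract
# ytInitialData, and print its top-level keys. Requires network access, and assumes the
# page layout that get_initial_data expects.
async def _example() -> None:
    client = await Client.new()
    try:
        html = await client.get_search_results("official music video")
        data = get_initial_data(html)
        print(sorted(data.keys()))
    finally:
        await client.close()


if __name__ == "__main__":
    import asyncio

    asyncio.run(_example())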
|
Flowrey/youtube-bz
|
youtube_bz/api/youtube/api.py
|
api.py
|
py
| 1,161 |
python
|
en
|
code
| 12 |
github-code
|
6
|
42641405385
|
#!/usr/bin/env python3
# Simple Script to replace cron for Docker
import argparse
import sys
from subprocess import CalledProcessError, run
from time import sleep, time
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument("interval", help="Time in seconds between runs", type=int)
args = parser.parse_args()
print("Running gem mirror every %ds" % args.interval, file=sys.stderr)
try:
while True:
start_time = time()
try:
run(['/tmp/run.sh'], check=True)
except CalledProcessError as cpe:
return cpe.returncode
run_time = time() - start_time
if run_time < args.interval:
sleep_time = args.interval - run_time
print("Sleeping for %ds" % sleep_time, file=sys.stderr)
sleep(sleep_time)
except KeyboardInterrupt:
pass
return 0
if __name__ == "__main__":
sys.exit(main())
|
osssanitizer/maloss
|
registries/rubygems/runner.py
|
runner.py
|
py
| 985 |
python
|
en
|
code
| 98 |
github-code
|
6
|
17435637259
|
import sys
import pytest
from collections import defaultdict
class UndergroundSystem:
def __init__(self):
self.user_table = defaultdict(list)
self.course_table = defaultdict(list)
def checkIn(self, id: int, stationName: str, t: int) -> None:
assert len(self.user_table[id]) == 0
self.user_table[id].append((stationName, t))
def checkOut(self, id: int, stationName: str, t: int) -> None:
assert len(self.user_table[id]) == 1
source = self.user_table[id].pop()
travel_t = t - source[1]
course = f"{source[0]}_{stationName}"
self.course_table[course].append(travel_t)
def getAverageTime(self, startStation: str, endStation: str) -> float:
course = f"{startStation}_{endStation}"
travel_ts = self.course_table[course]
return round(sum(travel_ts) / len(travel_ts), 5)
@pytest.mark.parametrize(
"action, value, expected",
[
(
[
"UndergroundSystem",
"checkIn",
"checkIn",
"checkIn",
"checkOut",
"checkOut",
"checkOut",
"getAverageTime",
"getAverageTime",
"checkIn",
"getAverageTime",
"checkOut",
"getAverageTime",
],
[
[],
[45, "Leyton", 3],
[32, "Paradise", 8],
[27, "Leyton", 10],
[45, "Waterloo", 15],
[27, "Waterloo", 20],
[32, "Cambridge", 22],
["Paradise", "Cambridge"],
["Leyton", "Waterloo"],
[10, "Leyton", 24],
["Leyton", "Waterloo"],
[10, "Waterloo", 38],
["Leyton", "Waterloo"],
],
[
None,
None,
None,
None,
None,
None,
None,
14.00000,
11.00000,
None,
11.00000,
None,
12.00000,
],
),
(
[
"UndergroundSystem",
"checkIn",
"checkOut",
"getAverageTime",
"checkIn",
"checkOut",
"getAverageTime",
"checkIn",
"checkOut",
"getAverageTime",
],
[
[],
[10, "Leyton", 3],
[10, "Paradise", 8],
["Leyton", "Paradise"],
[5, "Leyton", 10],
[5, "Paradise", 16],
["Leyton", "Paradise"],
[2, "Leyton", 21],
[2, "Paradise", 30],
["Leyton", "Paradise"],
],
[None, None, None, 5.00000, None, None, 5.50000, None, None, 6.66667],
),
],
)
def test(action, value, expected):
print()
outputs = []
obj = None
for act, values in zip(action, value):
if act == "UndergroundSystem":
obj = UndergroundSystem()
outputs.append(None)
elif act == "checkIn":
outputs.append(obj.checkIn(values[0], values[1], values[2]))
elif act == "checkOut":
outputs.append(obj.checkOut(values[0], values[1], values[2]))
elif act == "getAverageTime":
outputs.append(obj.getAverageTime(values[0], values[1]))
assert expected == outputs
if __name__ == "__main__":
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
naubull2/codingtests
|
leetcode/solved/1512_Design_Underground_System/solution.py
|
solution.py
|
py
| 3,711 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36383540075
|
import socket
import copy
import struct
import time
import board
import adafruit_bno055
import subprocess
from button import ToggleButton
from serial_read import DeviceInterface
class UDPSocket:
def __init__(self, IP_ADDR="192.168.0.198", PORT=65432):
self.IP_ADDR = "192.168.0.198"
self.APP_PORT = 65432
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def send(self, data):
self.socket.sendto(data, (self.IP_ADDR, self.APP_PORT))
def buttonToggleCallback():
print("button pushed")
if __name__ == '__main__':
# socket and toggle button
s = UDPSocket()
button = ToggleButton(17)
# interface to stm board
head_device = DeviceInterface(port="/dev/ttyACM0")
head_device.start()
# interface to handheld gyro
i2c = board.I2C()
hand_device = adafruit_bno055.BNO055_I2C(i2c)
x,y,z,x_sign,y_sign,z_sign = hand_device.axis_remap
hand_device.axis_remap = (z, x, y, x_sign, y_sign, z_sign)
seq = 0
itr = 0
yaw_ref = 0
while itr <= 1000:
data = head_device.getAngles()
yaw_ref += data[2]
itr+=1
yaw_ref /= itr
print("starting")
while True:
start = time.time()
angles_head = copy.deepcopy(head_device.getAngles())
angles_head[2] = yaw_ref - angles_head[2]
angles_hand = copy.deepcopy(hand_device.euler)
# angles_head = (head_device.getAngles())
# angles_hand = (hand_device.euler)
if None in angles_head:
continue
if (angles_hand[1] == None):
continue
msg = bytearray(struct.pack('f', angles_head[0]))
msg += bytearray(struct.pack('f', angles_head[1]))
msg += bytearray(struct.pack('f', angles_head[2]))
if button.getValue():
val = angles_hand[1]/90.
msg += bytearray(struct.pack('f', val))
else:
val = float(0.0)
msg += bytearray(struct.pack('f', val))
s.send(msg)
end = time.time()
if (end - start) >= 0.01:
print("Deadline not met")
|
NMMallick/sd-drone
|
modules/rpi_client/client.py
|
client.py
|
py
| 2,142 |
python
|
en
|
code
| 2 |
github-code
|
6
|
34529799093
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='quantile_transformer_tf',
version='1.2',
description='An implementation of QuantileTransformer in tensorflow',
long_description=long_description,
url='https://github.com/yandexdataschool/QuantileTransformerTF',
author='Nikita Kazeev',
author_email='[email protected]',
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
],
packages=find_packages(),
install_requires=['numpy>=1.13',
'tensorflow>=1.9']
)
|
yandexdataschool/QuantileTransformerTF
|
setup.py
|
setup.py
|
py
| 905 |
python
|
en
|
code
| 7 |
github-code
|
6
|
43367016676
|
###############################
# ProcessorBase.py
#
# Basic processor
###############################
import sys
import os
###############################
# Basic processor
###############################
class ProcessorBase:
def __init__(self, file):
self.file = file
@property
def description(self):
raise NotImplementedError
#
# Actions avail with processor
#
@property
def has_childs(self):
return False
def get_childs(self, callback=None):
raise NotImplementedError #return None
@property
def has_hexdump(self):
return False
def get_hexdump(self, callback=None):
raise NotImplementedError #return ""
@property
def has_details(self):
return False
def get_details(self, callback=None):
raise NotImplementedError #return ""
@property
def can_save(self):
return False
def save(self, path, callback=None):
raise NotImplementedError #return False
#
# Basic methods avail to all processors
#
def load(self, callback=None):
if callback: callback()
if not os.path.exists(self.file):
print("{}: File '{}' doesn't exist".format(self.__name__,self.file))
return None
f = open(self.file, "rb", 0)
if not f:
print("{}: Error opening file '{}'".format(self.__name__,self.file))
return None
if callback: callback()
data = f.readall()
if callback: callback()
f.close()
return data
#
# Private methods avail to all processors
#
def _create_hexdump(self, data, start=0, length=-1, linelength=16, callback=None):
dump = ""
if length <= 0: length = len(data)
#lines = int(length / linelength)
#fragment = length % linelength
#if fragment > 0:
# lines += 1
lines = int((length+linelength-1) / linelength)
l = 0
while l < lines:
if callback: callback()
p = start + (l * linelength)
addr = self._tohex(p,8)
by = " "
d = " "
i = 0
while i < linelength:
if p < (start+length):
val = data[p]
by += " " + self._tohex(val,2)
d += chr(val) if val >= 32 and val < 128 else "."
else:
by += " "
d += " "
i += 1
p += 1
if i % 4 == 0: by += " "
l += 1
#s = addr + " " + by + " " + d
#if l < lines: print(s)
#else: sys.stdout.write(s)
dump += addr + " " + by + " " + d + "\n"
return dump
def _tohex(self,val,length=2):
s = hex(val).upper()[2:]
return ("0000000000000000" + s)[-length:]
|
SillyBits/DumpMyRideUI
|
processors/ProcessorBase.py
|
ProcessorBase.py
|
py
| 2,368 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14011684325
|
import numpy as np
from tqdm import tqdm
from collections import Counter
import pandas as pd
class PMI():
def __init__(self, text, lang):
self.text = text
self.lang = lang
self.p_1, self.c_1 = self.get_unigram_probs()
self.bigram = self.get_joint_probs()
self.distant = self.get_distant_probs()
def get_unigram_probs(self):
''' compute all unigram probabilities '''
text_size = len(self.text)
counter_1 = Counter(self.text)
# p_1(w_i) = c_1(w_i)/T
p_1 = {word: count/text_size for word, count in counter_1.items()}
return p_1, counter_1
def get_joint_probs(self):
'''
calculate joint probability p(a,b)
for all neighbouring words in text
'''
counts = Counter()
# get counts of pairs of words
for i, j in zip(self.text, self.text[1:]):
counts[i, j] += 1
total = sum(counts.values())
# probability based on counts
joint_probs = {k: v/total for k, v in counts.items()}
return joint_probs
def get_distant_probs(self):
'''
calculate joint probability p(a,b) for all words
with distance at most 50 in either direction
'''
counts = Counter()
for (i,a) in tqdm(enumerate(self.text)):
if i < 50: ran, start = self.text[:i+50], 0
elif i > len(self.text) - 50: ran, start = self.text[i-50:], i-50
else: ran, start = self.text[i-50:i+50], i-50
# ran, start = self.text[:i+50], i
for (j,b) in enumerate(ran, start):
if i != j:
counts[a, b] += 1
total = sum(counts.values())
# probability based on counts
joint_probs = {k: v/total for k, v in counts.items()}
return joint_probs
def pmi(self, a, b, probs):
'''
I'(a,b) = log_2(p(a,b)/p(a)p(b)) = log_2(p(a|b)/p(a))
'''
joint_prob = probs[a, b]
p_a = self.p_1[a]
p_b = self.p_1[b]
return np.log2(joint_prob/(p_a * p_b))
def write_to_file(self, label, results):
''' save results '''
with open('pmi_results'+self.lang+'.txt', 'a') as f:
for (a, b), pmi in reversed(results):
f.write('%s\t%s\t%s\t%1.4f\n' % (label, a, b, pmi))
def run(self):
results = {}
for (i,a),(j,b) in zip(enumerate(self.text),enumerate(self.text[1:],1)):
# disregard pairs in which one or both words
# appear less than 10 times in the corpus
if self.c_1[a] >= 10 and self.c_1[b] >= 10:
results[a, b] = self.pmi(a, b, self.bigram)
sorted_results = sorted(results.items(), key=lambda x:x[1])
self.write_to_file('bigram', sorted_results)
results = {}
for (i,a) in enumerate(self.text):
if i < 50: ran, start = self.text[:i+50], 0
elif i > len(self.text) - 50: ran, start = self.text[i-50:], i-50
else: ran, start = self.text[i-50:i+50], i-50
for (j,b) in enumerate(ran, start):
if i != j:
if self.c_1[a] >= 10 and self.c_1[b] >= 10:
results[a, b] = self.pmi(a, b, self.distant)
sorted_results = sorted(results.items(), key=lambda x:x[1])
self.write_to_file('distant', sorted_results)
print(list(reversed(sorted_results[-50:])))
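# Tiny illustrative sketch (assumption, not part of the original script): on a toy corpus
# where 'new' is always followed by 'york', their bigram PMI is high because
# p('new','york') is much larger than p('new') * p('york').
#   toy = ['new', 'york', 'is', 'a', 'big', 'city'] * 20
#   toy_pmi = PMI(toy, 'TOY')
#   print(toy_pmi.pmi('new', 'york', toy_pmi.bigram))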
if __name__ == '__main__':
for lang in ['EN', 'CZ']:
with open('TEXT'+lang+'1.txt', 'r', encoding='iso-8859-2') as f:
text = [word.strip() for word in f.readlines()]
pmi = PMI(text, lang)
# prepare results file
with open('pmi_results'+lang+'.txt', 'w') as f:
f.write('WORD1\tWORD2\tPMI\n')
# run experiments
results = pmi.run()
|
awmcisaac/charles
|
winter/npfl067/hw2/best_friends.py
|
best_friends.py
|
py
| 3,872 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71184167229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pl
from mpl_toolkits.basemap import Basemap
# llcrnrlat,llcrnrlon,urcrnrlat,urcrnrlon
# are the lat/lon values of the lower left and upper right corners
# of the map.
# lat_ts is the latitude of true scale.
# resolution = 'c' means use crude resolution coastlines.
pl.clf()
m = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80, \
llcrnrlon=-180, urcrnrlon=180, lat_ts=20, resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral', lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-90., 91., 30.))
m.drawmeridians(np.arange(-180., 181., 60.))
m.drawmapboundary(fill_color='aqua')
m.drawstates()
pl.title("Mercator Projection")
pl.show()
pl.savefig('basemap.png')
|
ddboline/programming_tests
|
numpy/basemap_test.py
|
basemap_test.py
|
py
| 847 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33557423336
|
from django.conf import settings
from django.contrib.sites.models import Site
from .models import SiteSettings
class SiteSettingsMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
try:
request.wf_site = Site.objects._get_site_by_request(request)
settings.SITE_ID = request.wf_site.id
except Site.DoesNotExist:
request.wf_site = None
try:
request.sitesettings = SiteSettings.objects.get(site=request.wf_site)
except SiteSettings.DoesNotExist:
request.sitesettings = None
return self.get_response(request)
|
phildini/wordfugue
|
wordfugue/sitesettings/middleware.py
|
middleware.py
|
py
| 684 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9535093806
|
#!/usr/bin/env python3
"""
RBE/CS549 Spring 2023: Computer Vision
Author(s):
Uday Sankar ([email protected])
Mechatronics, Robotics and Automation Engineering,
Worcester Polytechnic Institute
"""
# Dependencies:
# opencv, do (pip install opencv-python)
# skimage, do (apt install python-skimage)
# termcolor, do (pip install termcolor)
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
from torch.optim import AdamW
from torchvision.datasets import CIFAR10
import cv2
import sys
import os
import numpy as np
import random
import skimage
import PIL
import os
import glob
import random
from skimage import data, exposure, img_as_float
import matplotlib.pyplot as plt
import time
from torchvision.transforms import ToTensor
import argparse
import shutil
import string
from termcolor import colored, cprint
import math as m
from tqdm.notebook import tqdm
# import Misc.ImageUtils as iu
from Network.Network import CIFAR10Model
from Misc.MiscUtils import *
from Misc.DataUtils import *
# setting the device as 'cuda'
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(f'Device being used: {device}')
# global variables for testing
ModelNumber = 1
CheckPointPath = 'Checkpoints/BasicNet/14model.ckpt' # BasiNet by default
# Don't generate pyc codes
sys.dont_write_bytecode = True
def GenerateBatch(TrainSet, TrainLabels, ImageSize, MiniBatchSize):
"""
Inputs:
TrainSet - Variable with Subfolder paths to train files
NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
TrainLabels - Labels corresponding to Train
NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
ImageSize is the Size of the Image
MiniBatchSize is the size of the MiniBatch
Outputs:
I1Batch - Batch of images
LabelBatch - Batch of one-hot encoded labels
"""
I1Batch = []
LabelBatch = []
ImageNum = 0
while ImageNum < MiniBatchSize:
# Generate random image
RandIdx = random.randint(0, len(TrainSet)-1)
ImageNum += 1
##########################################################
# Add any standardization or data augmentation here!
##########################################################
I1, Label = TrainSet[RandIdx]
# Append All Images and Mask
I1Batch.append(I1)
LabelBatch.append(torch.tensor(Label))
return torch.stack(I1Batch).to(device), torch.stack(LabelBatch).to(device)
def PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile):
"""
Prints all stats with all arguments
"""
print('Number of Epochs Training will run for ' + str(NumEpochs))
print('Factor of reduction in training data is ' + str(DivTrain))
print('Mini Batch Size ' + str(MiniBatchSize))
print('Number of Training Images ' + str(NumTrainSamples))
if LatestFile is not None:
print('Loading latest checkpoint with the name ' + LatestFile)
def TrainOperation(TrainLabels, NumTrainSamples, ImageSize, ModelNumber,
NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
DivTrain, LatestFile, TrainSet, LogsPath, TestSet, TestLabels):
"""
Inputs:
TrainLabels - Labels corresponding to Train/Test
NumTrainSamples - length(Train)
ImageSize - Size of the image
NumEpochs - Number of passes through the Train data
MiniBatchSize is the size of the MiniBatch
SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
CheckPointPath - Path to save checkpoints/model
DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of dataor for debugging code
LatestFile - Latest checkpointfile to continue training
TrainSet - The training dataset
LogsPath - Path to save Tensorboard Logs
Outputs:
Saves Trained network in CheckPointPath and Logs to LogsPath
"""
# Initialize the model
model = CIFAR10Model(ModelNumber)
model = model.to(device)
###############################################
# Fill your optimizer of choice here!
###############################################
Optimizer = AdamW(model.parameters(), lr=1e-3)
# Tensorboard
# Create a summary to monitor loss tensor
Writer = SummaryWriter(LogsPath)
# adding the graph of the model to tensorboard for visualization
Writer.add_graph(model, GenerateBatch(TrainSet, TrainLabels, ImageSize, MiniBatchSize)[0])
if LatestFile is not None:
CheckPoint = torch.load(CheckPointPath + LatestFile + '.ckpt')
# Extract only numbers from the name
StartEpoch = int(''.join(c for c in LatestFile.split('a')[0] if c.isdigit()))
model.load_state_dict(CheckPoint['model_state_dict'])
print('Loaded latest checkpoint with the name ' + LatestFile + '....')
else:
StartEpoch = 0
print('New model initialized....')
for Epochs in tqdm(range(StartEpoch, NumEpochs)):
train_acc_plot = 0
train_loss_plot = 0
NumIterationsPerEpoch = int(NumTrainSamples/MiniBatchSize/DivTrain)
for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
Batch = GenerateBatch(TrainSet, TrainLabels, ImageSize, MiniBatchSize)
# Predict output with forward pass
LossThisBatch = model.training_step(Batch)
Optimizer.zero_grad()
LossThisBatch.backward()
Optimizer.step()
# Save checkpoint every some SaveCheckPoint's iterations
if PerEpochCounter % SaveCheckPoint == 0:
# Save the Model learnt in this epoch
SaveName = CheckPointPath + str(Epochs) + 'a' + str(PerEpochCounter) + 'model.ckpt'
torch.save({'epoch': Epochs,'model_state_dict': model.state_dict(),'optimizer_state_dict': Optimizer.state_dict(),'loss': LossThisBatch}, SaveName)
print('\n' + SaveName + ' Model Saved...')
result = model.validation_step(Batch)
train_acc_plot += result['acc']
train_loss_plot += result['loss']
model.epoch_end(Epochs*NumIterationsPerEpoch + PerEpochCounter, result)
# Tensorboard
# Writer.add_scalar('LossEveryIter', result["loss"], Epochs*NumIterationsPerEpoch + PerEpochCounter)
# Writer.add_scalar('Accuracy', result["acc"], Epochs*NumIterationsPerEpoch + PerEpochCounter)
# # If you don't flush the tensorboard doesn't update until a lot of iterations!
# Writer.flush()
model.eval()
test_loss_plot = 0
test_acc_plot = 0
with torch.no_grad():
Batch_ = GenerateBatch(TestSet, TestLabels, ImageSize, MiniBatchSize)
test_result = model.validation_step(Batch_)
test_loss_plot += test_result["loss"]
test_acc_plot += test_result["acc"]
Writer.add_scalars('Training/Testing Loss', {'TrainLossPerEpoch': train_loss_plot/NumIterationsPerEpoch, 'TestLossPerEpoch': test_loss_plot}, Epochs)
Writer.add_scalars('Training/Testing Accuracy', {'TrainAccuracyPerEpoch': train_acc_plot/NumIterationsPerEpoch, 'TestAccuracyPerEpoch': test_acc_plot}, Epochs)
Writer.flush()
# Save model every epoch
SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
torch.save({'epoch': Epochs,'model_state_dict': model.state_dict(),'optimizer_state_dict': Optimizer.state_dict(),'loss': LossThisBatch}, SaveName)
print('\n' + SaveName + ' Model Saved...')
def main():
"""
Inputs:
None
Outputs:
Runs the Training and testing code based on the Flag
"""
# Parse Command Line arguments
Parser = argparse.ArgumentParser()
Parser.add_argument('--CheckPointPath', default='./Checkpoints/', help='Path to save Checkpoints, Default: ../Checkpoints/')
Parser.add_argument('--NumEpochs', type=int, default=15, help='Number of Epochs to Train for, Default:15')
Parser.add_argument('--DivTrain', type=int, default=1, help='Factor to reduce Train data by per epoch, Default:1')
Parser.add_argument('--MiniBatchSize', type=int, default=32, help='Size of the MiniBatch to use, Default:32')
Parser.add_argument('--LoadCheckPoint', type=int, default=0, help='Load Model from latest Checkpoint from CheckPointsPath?, Default:0')
Parser.add_argument('--LogsPath', default='./Logs/', help='Path to save Logs for Tensorboard, Default=Logs/')
Parser.add_argument('--ModelNumber', default=1, help='Model Type: 1-BasicNet, 2-BatchNormNet, 3-ResNet, 4-ResNeXt, 5-DenseNet, Default:1')
Args = Parser.parse_args()
NumEpochs = Args.NumEpochs
DivTrain = float(Args.DivTrain)
MiniBatchSize = Args.MiniBatchSize
LoadCheckPoint = Args.LoadCheckPoint
CheckPointPath = Args.CheckPointPath
LogsPath = Args.LogsPath
model_number_dict = {1: 'BasicNet', 2: 'BatchNormNet', 3: 'ResNet', 4: 'ResNeXt', 5: 'DenseNet'}
global ModelNumber
ModelNumber = int(Args.ModelNumber)
# ModelNumber = int(input("Select model type (1-BasicNet, 2-BatchNormNet, 3-ResNet, 4-ResNeXt, 5-Densenet): "))
# print(f"Selected Model Type: {model_number_dict[ModelNumber]}")
# # setting the checkpoint and logs path based on the model selected
CheckPointPath = CheckPointPath + model_number_dict[ModelNumber] + '/'
LogsPath = LogsPath + model_number_dict[ModelNumber] + '/'
# Default Hyperparameters
NumEpochs = 15
TrainSet = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=ToTensor())
TestSet = CIFAR10(root='data/', train=False, transform=ToTensor())
TestLabelsPath = "./TxtFiles/LabelsTest.txt"
TestLabelsPath = "./TxtFiles/LabelsTest.txt"
TestLabels = ReadLabels(TestLabelsPath)
# Setup all needed parameters including file reading
# SaveCheckPoint, ImageSize, NumTrainSamples, TrainLabels, NumClasses = SetupAll(TestLabelsPath, CheckPointPath)
SaveCheckPoint, ImageSize, NumTrainSamples, TrainLabels, NumClasses = SetupAll(TestLabelsPath, CheckPointPath)
# Find Latest Checkpoint File
if LoadCheckPoint==1:
LatestFile = FindLatestModel(CheckPointPath)
else:
LatestFile = None
# Pretty print stats
PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile)
TrainOperation(TrainLabels, NumTrainSamples, ImageSize, ModelNumber,
NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
DivTrain, LatestFile, TrainSet, LogsPath, TestSet, TestLabels)
if __name__ == '__main__':
main()
|
udaysankar01/Image-Classification-using-ResNet-ResNeXt-and-DenseNet
|
Train.py
|
Train.py
|
py
| 11,349 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6182705632
|
import urllib.request
from bs4 import BeautifulSoup
f = open("Newyork_articles.txt", 'w')
html = 'http://www.nytimes.com/'
open_url = urllib.request.urlopen(html)
soup = BeautifulSoup(open_url, 'html.parser')
article_headings = soup.find_all(class_="indicate-hover")
head = "Articles for Today:\n"
i = 0
f.write(head)
for heading in article_headings:
i += 1
f.write("\n"+str(i)+"."+heading.string+"\n")
f.close()
|
Jsid2022/Python
|
decode_web_page.py
|
decode_web_page.py
|
py
| 437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18261510300
|
from rackmond import configure_rackmond
# flag
# ascii 0x80XX
# 2'comp 0x4NXX
# decimal 0x20XX
# bitmap 1,2 0x10XX
reglist = [
{"begin": 0x0, "length": 8, "flags": 0x8000}, # MFR_MODEL # ascii
{"begin": 0x10, "length": 8, "flags": 0x8000}, # MFR_DATE # ascii
{"begin": 0x20, "length": 8, "flags": 0x8000}, # FB Part # # ascii
{"begin": 0x30, "length": 4, "flags": 0x8000}, # HW Revision # ascii
{"begin": 0x38, "length": 4, "flags": 0x8000}, # FW Revision # ascii
{"begin": 0x40, "length": 16, "flags": 0x8000}, # MFR Serial # # ascii
{"begin": 0x60, "length": 4, "flags": 0x8000}, # Workorder # # ascii
{
"begin": 0x68, # PSU Status
"length": 1,
"keep": 10, # 10-sample ring buffer
"flags": 0x1000 | 1,
},
{
"begin": 0x69, # Battery Status
"length": 1,
"keep": 10, # 10-sample ring buffer
"flags": 0x1000 | 1,
},
{
"begin": 0x6B, # BBU Battery Mode
"length": 1,
"keep": 10, # 10-sample ring buffer
"flags": 1,
},
{
"begin": 0x6C, # BBU Battery Status
"length": 1,
"keep": 10, # 10-sample ring buffer
"flags": 1,
},
{"begin": 0x6D, "length": 1, "keep": 10}, # BBU Cell Voltage 1
{"begin": 0x6E, "length": 1, "keep": 10}, # BBU Cell Voltage 2
{"begin": 0x6F, "length": 1, "keep": 10}, # BBU Cell Voltage 3
{"begin": 0x70, "length": 1, "keep": 10}, # BBU Cell Voltage 4
{"begin": 0x71, "length": 1, "keep": 10}, # BBU Cell Voltage 5
{"begin": 0x72, "length": 1, "keep": 10}, # BBU Cell Voltage 6
{"begin": 0x73, "length": 1, "keep": 10}, # BBU Cell Voltage 7
{"begin": 0x74, "length": 1, "keep": 10}, # BBU Cell Voltage 8
{"begin": 0x75, "length": 1, "keep": 10}, # BBU Cell Voltage 9
{"begin": 0x76, "length": 1, "keep": 10}, # BBU Cell Voltage 10
{"begin": 0x77, "length": 1, "keep": 10}, # BBU Cell Voltage 11
{"begin": 0x78, "length": 1, "keep": 10}, # BBU Cell Voltage 12
{"begin": 0x79, "length": 1, "keep": 10}, # BBU Cell Voltage 13
{"begin": 0x7A, "length": 1, "keep": 10}, # BBU Temp 1
{"begin": 0x7B, "length": 1, "keep": 10}, # BBU Temp 2
{"begin": 0x7C, "length": 1, "keep": 10}, # BBU Temp 3
{"begin": 0x7D, "length": 1, "keep": 10}, # BBU Temp 4
{"begin": 0x7E, "length": 1, "keep": 10}, # BBU Relative State of Charge
{"begin": 0x7F, "length": 1, "keep": 10}, # BBU Absolute State of Charge
{
"begin": 0x80, # Input VAC
"length": 1,
"keep": 10,
"flags": 0x4600, # 2'comp N=6
},
{"begin": 0x81, "length": 1, "keep": 10}, # BBU Battery Voltage
{
"begin": 0x82, # Input Current AC
"length": 1,
"keep": 10,
"flags": 0x4A00, # 2'comp N=10
},
{"begin": 0x83, "length": 1, "keep": 10}, # BBU Battery Current
{
"begin": 0x84, # Battery Voltage
"length": 1,
"keep": 10,
"flags": 0x4900, # 2'comp N=9
},
{"begin": 0x85, "length": 1, "keep": 10}, # BBU Average Current
{
"begin": 0x86, # Battery Current Output
"length": 1,
"flags": 0x4800, # 2'comp N=8
},
{"begin": 0x87, "length": 1, "keep": 10}, # BBU Remaining Capacity
{
"begin": 0x88, # Battery Current Input
"length": 1,
"flags": 0x4C00, # 2'comp N=12
},
{"begin": 0x89, "length": 1, "keep": 10}, # BBU Full Charge Capacity
{
"begin": 0x8A, # Output Voltage (main converter)
"length": 1,
"keep": 10,
"flags": 0x4B00, # 2'comp N=11
},
{"begin": 0x8B, "length": 1, "keep": 10}, # BBU Run Time to Empty
{
"begin": 0x8C, # Output Current (main converter)
"length": 1,
"keep": 10,
"flags": 0x4600, # 2'comp N=6
},
{"begin": 0x8D, "length": 1, "keep": 10}, # BBU Average Time to Empty
{
"begin": 0x8E, # IT Load Voltage Output
"length": 1,
"flags": 0x4900, # 2'comp N=9
},
{"begin": 0x8F, "length": 1}, # BBU Charging Current
{
"begin": 0x90, # IT Load Current Output
"length": 1,
"flags": 0x4C00, # 2'comp N=12
},
{"begin": 0x91, "length": 1, "keep": 10}, # BBU Charging Voltage
{"begin": 0x92, "length": 1, "flags": 0x4600}, # Bulk Cap Voltage # 2'comp N=6
{"begin": 0x93, "length": 1, "keep": 10}, # BBU Cycle Count
{
"begin": 0x94, # Input Power
"length": 1,
"keep": 10,
"flags": 0x4300, # 2'comp N=3
},
{"begin": 0x95, "length": 1}, # BBU Design Capacity
{
"begin": 0x96, # Output Power
"length": 1,
"keep": 10,
"flags": 0x4300, # 2'comp N=3
},
{"begin": 0x97, "length": 1}, # BBU Design Voltage
{"begin": 0x98, "length": 1, "flags": 0x2000}, # RPM Fan 0 # Decimal
{"begin": 0x99, "length": 1}, # BBU At Rate
{"begin": 0x9A, "length": 1, "flags": 0x2000}, # RPM Fan 1 # Decimal
{"begin": 0x9B, "length": 1, "keep": 10}, # BBU At Rate Time to Full
{
"begin": 0x9C, # BBU At Rate Time to Empty
"length": 1,
"keep": 10,
"flags": 0x2000, # Decimal
},
{"begin": 0x9D, "length": 1, "keep": 10}, # BBU At Rate OK
{"begin": 0x9E, "length": 1, "flags": 0x4700}, # Temp 0 # 2'comp N=7
{"begin": 0x9F, "length": 1}, # BBU Temp
{"begin": 0xA0, "length": 1, "flags": 0x4700}, # Temp 1 # 2'comp N=7
{"begin": 0xA1, "length": 1}, # BBU Max Error
{
"begin": 0xD0, # General Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{
"begin": 0xD1, # PFC Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{
"begin": 0xD2, # LLC Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{
"begin": 0xD3, # Current Feed Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{
"begin": 0xD4, # Auxiliary Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{
"begin": 0xD5, # Battery Charger Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{
"begin": 0xD7, # Temperature Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{
"begin": 0xD8, # Fan Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{
"begin": 0xD9, # Communication Alarm Status Register
"flags": 0x1000, # Bitmap
"length": 1,
},
{"begin": 0x106, "length": 1}, # BBU Specification Info
{"begin": 0x107, "length": 1}, # BBU Manufacturer Date
{"begin": 0x108, "length": 1}, # BBU Serial Number
{"begin": 0x109, "length": 2}, # BBU Device Chemistry
{"begin": 0x10B, "length": 2}, # BBU Manufacturer Data
{"begin": 0x10D, "length": 8}, # BBU Manufacturer Name
{"begin": 0x115, "length": 8}, # BBU Device Name
{"begin": 0x11D, "length": 4}, # FB Battery Status
{"begin": 0x121, "length": 1}, # SoH results
]
def main():
configure_rackmond(reglist, verify_configure=True)
if __name__ == "__main__":
main()
|
WeilerWebServices/Facebook
|
openbmc/meta-facebook/meta-wedge400/recipes-wedge400/rackmon/rackmon/rackmon-config.py
|
rackmon-config.py
|
py
| 7,356 |
python
|
en
|
code
| 3 |
github-code
|
6
|
34429142094
|
import glfw
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy as np
gCamAng = 0.
def myLookAt(eye,at,up):
w = (eye-at)/np.sqrt(np.dot(eye-at,eye-at))
u = np.cross(up, w)/np.sqrt(np.dot(np.cross(up, w), np.cross(up,w)))
v = np.cross(w, u)
M= np.array([[u[0], u[1], u[2], -np.dot(u, eye)],
[v[0], v[1], v[2], np.dot(-v, eye)],
[w[0], w[1], w[2], np.dot(-w, eye)],
[0, 0, 0, 1]])
glMultMatrixf(M.T)
def myOrtho(l, r, b, t, n, f):
    # Build the orthographic projection matrix and right-multiply it onto the current matrix.
    Morth = np.array([[2/(r-l), 0, 0, -(r+l)/(r-l)],
                      [0, 2/(t-b), 0, -(t+b)/(t-b)],
                      [0, 0, -2/(f-n), -(f+n)/(f-n)],
                      [0, 0, 0, 1]])
    glMultMatrixf(Morth.T)
def render(camAng):
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
glEnable(GL_DEPTH_TEST)
# set the current matrix to the identity matrix
glLoadIdentity()
# use orthogonal projection (right-multiply the current matrix by "projection" matrix - we'll see details later)
    myOrtho(-1, 1, -1, 1, -10, 10)
#glOrtho(-1,1, -1,1, -10,10)
# rotate "camera" position (right-multiply the current matrix by viewing matrix)
# try to change parameters
eye = np.array([1*np.sin(camAng),.5,1*np.cos(camAng)])
up = np.array([0, 1, 0])
at = np.array([0, 0, 0])
myLookAt(eye, at, up)
# draw coordinates
glBegin(GL_LINES)
glColor3ub(255, 0, 0)
glVertex3fv(np.array([0.,0.,0.]))
glVertex3fv(np.array([1.,0.,0.]))
glColor3ub(0, 255, 0)
glVertex3fv(np.array([0.,0.,0.]))
glVertex3fv(np.array([0.,1.,0.]))
glColor3ub(0, 0, 255)
glVertex3fv(np.array([0.,0.,0]))
glVertex3fv(np.array([0.,0.,1.]))
glEnd()
def key_callback(window, key, scancode, action, mods):
global gCamAng
# rotate the camera when 1 or 3 key is pressed or repeated
if action==glfw.PRESS or action==glfw.REPEAT:
if key==glfw.KEY_1:
gCamAng += np.radians(-10)
elif key==glfw.KEY_3:
gCamAng += np.radians(10)
def main():
if not glfw.init():
return
window = glfw.create_window(640,640,'Lecture8', None,None)
if not window:
glfw.terminate()
return
glfw.make_context_current(window)
glfw.set_key_callback(window, key_callback)
while not glfw.window_should_close(window):
glfw.poll_events()
render(gCamAng)
glfw.swap_buffers(window)
glfw.terminate()
if __name__ == "__main__":
main()
|
vctr7/Computer_Graphics
|
hw8.py
|
hw8.py
|
py
| 2,584 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13442910609
|
def gridRound( pos, w, h, roundToTopLeft=True, trueRounding=False ):
"""gridRound( pos, w, h, roundToTopLeft=True )\n""" \
"""This function rounds a given pos variable to the nearest lower or upper multiples \n""" \
""" of w and h in their respective directions. roundToTopLeft=True means it rounds towards the topleft. \n""" \
""" trueRounding means round to the closest corner, not topleft or bottomright."""
xRemainder, yRemainder = pos[0]%w, pos[1]%h
newPosition = [ pos[0] - xRemainder, pos[1] - yRemainder ]
if trueRounding:
if float(xRemainder)/w > 0.5:
newPosition[0] += w
if float(yRemainder)/h > 0.5:
newPosition[1] += h
elif not roundToTopLeft:
newPosition[0] += w
newPosition[1] += h
return newPosition
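# Example usage (illustrative sketch, not part of the original module): snapping a
# point to a 32x32 tile grid with each rounding mode.
if __name__ == "__main__":
    print(gridRound((70, 45), 32, 32))                        # [64, 32], rounds toward the top-left
    print(gridRound((70, 45), 32, 32, roundToTopLeft=False))  # [96, 64], rounds toward the bottom-right
    print(gridRound((70, 45), 32, 32, trueRounding=True))     # [64, 32], rounds to the nearest corner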
|
Occuliner/ThisHackishMess
|
modules/stockfunctions/gridrounding.py
|
gridrounding.py
|
py
| 819 |
python
|
en
|
code
| 2 |
github-code
|
6
|
15412213880
|
from random import shuffle

with open("all_eqs.txt") as f:
    eq = f.readlines()
with open("all_text.txt") as f:
    txt = f.readlines()

# Shuffle the example indices, then split into 200 validation, 200 test and the rest train.
dat = list(range(len(txt)))
shuffle(dat)

with open("val.eq", 'w') as valeq, open("val.txt", 'w') as valtxt:
    for i in dat[:200]:
        valeq.write(eq[i])
        valtxt.write(txt[i])

with open("test.eq", 'w') as testeq, open("test.txt", 'w') as testtxt:
    for i in dat[200:400]:
        testeq.write(eq[i])
        testtxt.write(txt[i])

with open("train.eq", 'w') as traineq, open("train.txt", 'w') as traintxt:
    for i in dat[400:]:
        traineq.write(eq[i])
        traintxt.write(txt[i])
|
rikkarikka/nn_math_solver
|
splitter.py
|
splitter.py
|
py
| 560 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19109148247
|
import asyncio
import async_timeout
import aiohttp
class DiscordAttachmentHandler:
def __init__(self):
self.loop = asyncio.get_event_loop()
@staticmethod
async def fetch_json(session, url):
async with async_timeout.timeout(10):
async with session.get(url) as response:
return await response.json()
async def get_content(self, url):
async with aiohttp.ClientSession() as session:
return await self.fetch_json(session, url)
def get_ship_list(self, file_url, logger):
try:
return self.loop.run_until_complete(self.get_content(file_url))
except asyncio.TimeoutError:
logger.error("Could not download attachment. Asyncio timeout error.")
|
Mirdalan/discord_astro_bot
|
dastro_bot/attachments_downloader.py
|
attachments_downloader.py
|
py
| 762 |
python
|
en
|
code
| 2 |
github-code
|
6
|
4601203522
|
import subprocess
import py_compile
import os
from hashlib import md5
from datetime import datetime
from django.db import models
from django.conf import settings
from .constants import TYPES
def get_filename(instance, filename):
now = datetime.now()
base = now.strftime('utility_files/%Y/%m/%d')
hash = md5(str(now).encode('utf-8'))
return os.path.join(base, '{}_{}'.format(hash.hexdigest(), filename))
class Utility(models.Model):
class Meta:
verbose_name = 'verktyg'
verbose_name_plural = 'verktyg'
def __str__(self):
return self.name
name = models.CharField('namn', max_length=100)
description = models.CharField('beskrivning', blank=True, null=True, max_length=100)
type = models.CharField('typ', choices=TYPES, max_length=100)
class PythonUtility(Utility):
class Meta:
verbose_name = 'pythonverktyg'
verbose_name_plural = verbose_name
def execute(self):
filename = self.code.name
compiled_filename = filename + 'c'
path = os.path.join(settings.MEDIA_ROOT, filename)
compiled_path = os.path.join(settings.MEDIA_ROOT, compiled_filename)
utility_root = os.path.join(settings.MEDIA_ROOT, 'utility_files')
for root, dirs, files in os.walk(utility_root):
if not compiled_filename in files:
try:
# Append file at path to 'utility_files/base.py' and then compile the resulting file
py_compile.compile(path, cfile=compiled_path, doraise=True)
except py_compile.PyCompileError:
return None
return compiled_filename
code = models.FileField('pythonkod', upload_to=get_filename, blank=True, null=True, help_text='En .py-fil med interpreterbar Pythonkod.')
class CommandLineUtility(Utility):
class Meta:
verbose_name = 'kommandoradsverktyg'
verbose_name_plural = verbose_name
def execute(self):
cmd = self.command_line.split()
output = None
try:
output = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
except subprocess.CalledProcessError as e:
return {
'success': False,
'exit_status': e.returncode,
'output': e.output,
}
return {
'success': True,
'exit_status': 0,
'output': output,
}
command_line = models.CharField('kommandorad', max_length=100)
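# Example usage (illustrative sketch; assumes a configured Django project with this app installed,
# and the name, type value and command line below are made up for demonstration):
#
#   util = CommandLineUtility(name='disk usage', type='command', command_line='df -h')
#   result = util.execute()
#   # result is a dict with 'success', 'exit_status' and 'output' keys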
|
viljan/intraweb
|
viljan/utility/models.py
|
models.py
|
py
| 2,595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39215055594
|
import boto3
import random
import json
topic = 'arn:aws:sns:us-east-1:511086078676:temps'
client = boto3.client('sns')
def lambda_handler(event, context):
    # person's id (replace with the real id)
#id = '11223344'
    # simulates a temperature-measuring device
temp = random.uniform(36,40)
print(temp)
mensaje = {
#'id' : id,
'temp' : '%.2f' % round(temp, 2)
}
    # publish the message to the SNS topic
response = client.publish(
TopicArn = topic,
Message = json.dumps(mensaje)
)
print(response)
|
ansmartin/Proyecto-PYGITIC-2020
|
Pruebas/Ansel/temp_to_topic.py
|
temp_to_topic.py
|
py
| 598 |
python
|
es
|
code
| 1 |
github-code
|
6
|
42890816740
|
import time
from pathlib import Path
import shutil
import torch
import torch.nn
from torch.utils.tensorboard import SummaryWriter
import torch.backends.cudnn as cudnn
from . import utils
class Bone:
def __init__(self,
model,
datasets,
criterion,
optimizer,
scheduler=None,
scheduler_after_ep=True,
early_stop_epoch=None,
metric_fn=None,
metric_increase=False,
batch_size=8,
num_workers=4,
resume=False,
data_parallel=False,
seed=0,
weights_path='weights/best_model.pth',
log_dir='logs/experiment'):
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.scheduler = scheduler
self.scheduler_after_ep = scheduler_after_ep
self.early_stop_epoch = early_stop_epoch
self.metric_fn = criterion if metric_fn is None else metric_fn
self.metric_increase = metric_increase
self.batch_size = batch_size
self.num_workers = num_workers
self.resume = resume
self.data_parallel = data_parallel
self.seed = seed
self.weights_path = Path(weights_path)
self.log_dir = Path(log_dir)
self.epochs_count = 0
self.logger = utils.get_logger()
self.recreate_experiment_folders(from_scratch=False)
utils.set_seed(seed)
self.dataloaders = { # TODO: automatically handel all in loop
'train': torch.utils.data.DataLoader(datasets['train'],
batch_size=batch_size,
shuffle=True,
num_workers=num_workers),
'val': torch.utils.data.DataLoader(datasets['val'],
batch_size=batch_size,
shuffle=False,
num_workers=num_workers)
}
if self.resume:
if not self.weights_path.exists():
self.logger.error('Resume is not possible, no weights')
else:
self.logger.info(f'Resuming from {self.weights_path}')
checkpoint = torch.load(self.weights_path)
self.model.load_state_dict(checkpoint)
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.model = self.model.to(self.device)
        if self.device == 'cuda' and data_parallel:
            self.model = torch.nn.DataParallel(self.model)
def recreate_experiment_folders(self, from_scratch=False):
if from_scratch:
if self.weights_path.parent.exists():
self.weights_path.unlink()
if self.log_dir.exists():
shutil.rmtree(self.log_dir)
self.weights_path.parent.mkdir(exist_ok=True)
self.log_dir.mkdir(parents=True, exist_ok=True)
self.phase_writer = {
'train': SummaryWriter(self.log_dir / 'train'),
'val': SummaryWriter(self.log_dir / 'val')
}
def step(self, inputs, labels, phase):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
metric = self.metric_fn(outputs, labels)
if phase == 'train':
loss.backward()
self.optimizer.step()
return loss.cpu().data.numpy(), metric.cpu().data.numpy()
def epoch(self, epoch_num, phase):
running_loss = 0
running_metric = 0
pbar = utils.get_pbar(self.dataloaders[phase],
f'{phase} {epoch_num + 1}/{self.epochs_count}')
if phase == 'val' and self.scheduler and not self.scheduler_after_ep:
self.scheduler.step()
for i, (inputs, labels) in enumerate(self.dataloaders[phase]):
loss, metric = self.step(inputs, labels, phase)
running_loss += loss * inputs.size(0)
running_metric += metric * inputs.size(0)
utils.update_pbar(pbar, loss, metric)
step = epoch_num * len(self.dataloaders['train']) + i
self.phase_writer[phase].add_scalar('batch/loss', loss,
global_step=step)
self.phase_writer[phase].add_scalar('batch/metric', metric,
global_step=step)
running_loss /= len(self.dataloaders[phase].dataset)
running_metric /= len(self.dataloaders[phase].dataset)
utils.update_pbar(pbar, running_loss, running_metric)
pbar.close()
self.phase_writer[phase].add_scalar('epoch/loss', running_loss,
global_step=epoch_num)
self.phase_writer[phase].add_scalar('epoch/metric', running_metric,
global_step=epoch_num)
if phase == 'val':
if self.scheduler and self.scheduler_after_ep:
self.scheduler.step(running_metric)
lr = utils.get_lr(self.optimizer)
self.phase_writer[phase].add_scalar('epoch/lr', lr,
global_step=epoch_num)
return running_loss, running_metric
def fit(self, epochs_count, from_scratch=False):
if from_scratch:
self.recreate_experiment_folders(from_scratch)
start_time = time.time()
self.epochs_count = epochs_count
epoch_without_improvement = 0
best_metric = None
def is_better(new_m, old_m, eps=1e-5):
if old_m is None:
return True
return new_m > old_m + eps if self.metric_increase else \
new_m < old_m - eps
for epoch_num in range(epochs_count):
for phase in ['train', 'val']: # TODO: test phase
if phase == 'train':
self.model.train()
else:
self.model.eval()
loss, metric = self.epoch(epoch_num, phase)
if phase == 'val':
if is_better(metric, best_metric):
best_metric = metric
if self.data_parallel:
torch.save(self.model.module.state_dict(),
self.weights_path)
else:
torch.save(self.model.state_dict(),
self.weights_path)
epoch_without_improvement = 0
self.logger.debug('Val metric improved')
else:
epoch_without_improvement += 1
self.logger.debug(f'Val metric did not improve for '
f'{epoch_without_improvement} epochs')
if self.early_stop_epoch is not None and\
epoch_without_improvement == self.early_stop_epoch:
self.logger.info('Early stopping')
break
time_elapsed = time.time() - start_time
self.logger.info(f'Training complete in {time_elapsed/60:.0f}m'
f' {time_elapsed%60:.0f}s')
self.logger.info(f'Best val metric: {best_metric:.4f}')
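# Example usage (illustrative sketch with made-up data; the dataset sizes, model and
# hyperparameters are assumptions, not part of the original project):
#
#   import torch
#   from torch.utils.data import TensorDataset
#
#   datasets = {
#       'train': TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,))),
#       'val': TensorDataset(torch.randn(16, 10), torch.randint(0, 2, (16,))),
#   }
#   model = torch.nn.Linear(10, 2)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   bone = Bone(model, datasets, torch.nn.CrossEntropyLoss(), optimizer,
#               batch_size=8, num_workers=0)
#   bone.fit(epochs_count=2)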
|
EvgenyKashin/backbone
|
back/bone.py
|
bone.py
|
py
| 7,736 |
python
|
en
|
code
| 4 |
github-code
|
6
|
17273180201
|
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
from scipy.sparse.linalg import norm as sparsenorm
from scipy.linalg import qr
# from sklearn.metrics import f1_score
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
    return np.array(mask, dtype=bool)
#
# def calc_f1(y_true, y_pred):
# y_true = np.argmax(y_true, axis=1)
# y_pred = np.argmax(y_pred, axis=1)
# return f1_score(y_true, y_pred, average="micro"), f1_score(y_true, y_pred, average="macro")
#
#
# def load_data(dataset_str):
# """Load data."""
# names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
# objects = []
# for i in range(len(names)):
# with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
# if sys.version_info > (3, 0):
# objects.append(pkl.load(f, encoding='latin1'))
# else:
# objects.append(pkl.load(f))
#
# x, y, tx, ty, allx, ally, graph = tuple(objects)
# test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
# test_idx_range = np.sort(test_idx_reorder)
#
# if dataset_str == 'citeseer':
# # Fix citeseer dataset (there are some isolated nodes in the graph)
# # Find isolated nodes, add them as zero-vecs into the right position
# test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
# tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
# tx_extended[test_idx_range-min(test_idx_range), :] = tx
# tx = tx_extended
# ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
# ty_extended[test_idx_range-min(test_idx_range), :] = ty
# ty = ty_extended
#
# features = sp.vstack((allx, tx)).tolil()
# features[test_idx_reorder, :] = features[test_idx_range, :]
# adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
#
# labels = np.vstack((ally, ty))
# labels[test_idx_reorder, :] = labels[test_idx_range, :]
#
# idx_test = test_idx_range.tolist()
# idx_train = range(len(y))
# idx_val = range(len(y), len(y)+500)
#
# train_mask = sample_mask(idx_train, labels.shape[0])
# val_mask = sample_mask(idx_val, labels.shape[0])
# test_mask = sample_mask(idx_test, labels.shape[0])
#
# y_train = np.zeros(labels.shape)
# y_val = np.zeros(labels.shape)
# y_test = np.zeros(labels.shape)
# y_train[train_mask, :] = labels[train_mask, :]
# y_val[val_mask, :] = labels[val_mask, :]
# y_test[test_mask, :] = labels[test_mask, :]
#
# return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
#
def load_data(dataset_str):
"""Load data."""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(ally)-500)
idx_val = range(len(ally)-500, len(ally))
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def load_data_original(dataset_str):
"""Load data."""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def nontuple_preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return features
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def nontuple_preprocess_adj(adj):
adj_normalized = normalize_adj(sp.eye(adj.shape[0]) + adj)
# adj_normalized = sp.eye(adj.shape[0]) + normalize_adj(adj)
return adj_normalized.tocsr()
def column_prop(adj):
column_norm = sparsenorm(adj, axis=0)
# column_norm = pow(sparsenorm(adj, axis=0),2)
norm_sum = sum(column_norm)
return column_norm/norm_sum
def mix_prop(adj, features, sparseinputs=False):
adj_column_norm = sparsenorm(adj, axis=0)
if sparseinputs:
features_row_norm = sparsenorm(features, axis=1)
else:
features_row_norm = np.linalg.norm(features, axis=1)
mix_norm = adj_column_norm*features_row_norm
norm_sum = sum(mix_norm)
return mix_norm / norm_sum
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
# adj_appr = np.array(sp.csr_matrix.todense(adj))
# # adj_appr = dense_lanczos(adj_appr, 100)
# adj_appr = dense_RandomSVD(adj_appr, 100)
# if adj_appr.sum(1).min()<0:
# adj_appr = adj_appr- (adj_appr.sum(1).min()-0.5)*sp.eye(adj_appr.shape[0])
# else:
# adj_appr = adj_appr + sp.eye(adj_appr.shape[0])
# adj_normalized = normalize_adj(adj_appr)
# adj_normalized = normalize_adj(adj+sp.eye(adj.shape[0]))
# adj_appr = np.array(sp.coo_matrix.todense(adj_normalized))
# # adj_normalized = dense_RandomSVD(adj_appr,100)
# adj_normalized = dense_lanczos(adj_appr, 100)
adj_normalized = normalize_adj(sp.eye(adj.shape[0]) + adj)
# adj_normalized = sp.eye(adj.shape[0]) + normalize_adj(adj)
return sparse_to_tuple(adj_normalized)
from lanczos import lanczos
def dense_lanczos(A,K):
q = np.random.randn(A.shape[0], )
Q, sigma = lanczos(A, K, q)
A2 = np.dot(Q[:,:K], np.dot(sigma[:K,:K], Q[:,:K].T))
return sp.csr_matrix(A2)
def sparse_lanczos(A,k):
q = sp.random(A.shape[0],1)
n = A.shape[0]
Q = sp.lil_matrix(np.zeros((n,k+1)))
A = sp.lil_matrix(A)
Q[:,0] = q/sparsenorm(q)
alpha = 0
beta = 0
for i in range(k):
if i == 0:
q = A*Q[:,i]
else:
q = A*Q[:,i] - beta*Q[:,i-1]
alpha = q.T*Q[:,i]
q = q - Q[:,i]*alpha
q = q - Q[:,:i]*Q[:,:i].T*q # full reorthogonalization
beta = sparsenorm(q)
Q[:,i+1] = q/beta
print(i)
Q = Q[:,:k]
Sigma = Q.T*A*Q
A2 = Q[:,:k]*Sigma[:k,:k]*Q[:,:k].T
return A2
# return Q, Sigma
def dense_RandomSVD(A,K):
G = np.random.randn(A.shape[0],K)
B = np.dot(A,G)
Q,R =qr(B,mode='economic')
M = np.dot(Q, np.dot(Q.T, A))
return sp.csr_matrix(M)
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
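# Example usage (illustrative sketch, not part of the original file): normalize a tiny
# 3-node chain graph and a feature matrix the way the GCN preprocessing expects.
if __name__ == "__main__":
    A = sp.csr_matrix(np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]]))
    X = sp.lil_matrix(np.array([[1., 0.], [2., 2.], [0., 3.]]))
    A_norm = nontuple_preprocess_adj(A)       # D^-1/2 (A + I) D^-1/2 in CSR format
    X_norm = nontuple_preprocess_features(X)  # row-normalized features
    print(A_norm.toarray())
    print(X_norm.toarray())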
|
matenure/FastGCN
|
utils.py
|
utils.py
|
py
| 12,670 |
python
|
en
|
code
| 514 |
github-code
|
6
|
70276275707
|
"""
多头注意力机制:
每个头开始从词义层面分割输出的张量,也就是每个头都想获得一组Q,K,V进行注意力机制的计算,
但是句子中的每个词的表示只获得一部分,
也就是只分割了最后一维的词嵌入向量. 这就是所谓的多头.
将每个头的获得的输入送到注意力机制中, 就形成了多头注意力机制
多头注意力机制的作用:
这种结构设计能让每个注意力机制去优化每个词汇的不同特征部分,从而均衡同一种注意力机制可能产生的偏差,
让词义拥有来自更多元的表达,从而提升模型效果
"""
import copy
import torch
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class MultiHeadAttention(nn.Module):
"""在类的初始化时, 会传入三个参数,head代表头数,embedding_dim代表词嵌入的维度,
dropout代表进行dropout操作时置0比率,默认是0.1."""
def __init__(self, head, embedding_dim, dropout=0.1):
super(MultiHeadAttention, self).__init__()
        # First, an assert (a common sanity check) verifies that embedding_dim is divisible by head,
        # because each head must be given an equal share of the word features, i.e. embedding_dim/head of them.
        assert embedding_dim % head == 0
        # d_k is the per-head slice of the word-embedding dimension
        self.d_k= embedding_dim // head
        self.head=head
        # Create the linear layers via nn.Linear (each an embedding_dim x embedding_dim transform) and clone four of them:
        # one each for Q, K and V, plus one for the final concatenated output.
        self.linears=self.clones(nn.Linear(embedding_dim,embedding_dim),4)
self.attn=None
self.dropout=nn.Dropout(p=dropout)
    # A clone helper is needed because multi-head attention uses several linear layers with the same structure;
    # clones initialises them together in a ModuleList. Later parts of the model use this helper as well.
    """Generates clones of a network layer: module is the layer to clone, N is the number of copies"""
def clones(self,module, N):
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
"""生成向后遮掩的掩码张量, 参数size是掩码张量最后两个维度的大小, 它的最后两维形成一个方阵"""
def subsequentMask(self,size):
        # First define the shape of the mask tensor
        attnShape=(1,size,size)
        # Fill that shape with ones via np.ones and keep the upper triangle (k=1); cast to uint8 to save space
        subsequent_mask=np.triu(np.ones(attnShape),k=1).astype('uint8')
        # Convert the numpy array to a torch tensor and compute 1 - mask,
        # which flips the triangular matrix: every element is subtracted from 1,
        # so positions that were 0 become 1
        # and positions that were 1 become 0
return torch.from_numpy(1-subsequent_mask)
"""注意力计算规则"""
def attention(self,query,key,value,mask=None,dropout=None):
# 在函数中, 首先取query的最后一维的大小, 一般情况下就等同于我们的词嵌入维度, 命名为d_k
d_k = query.size(-1)
# 按照注意力公式, 将query与key的转置相乘, 这里面key是将最后两个维度进行转置, 再除以缩放系数根号下d_k, 这种计算方法也称为缩放点积注意力计算.
# 得到注意力得分张量scores
scores=torch.matmul(query,key.transpose(-2,-1)) / math.sqrt(d_k) # 最后两个维度做矩阵乘积
# print("scores before masked: ", scores,scores.shape)
# 接着判断是否使用掩码张量
if mask is not None:
# 使用tensor的masked_fill方法, 将掩码张量和scores张量每个位置一一比较, 如果掩码张量处为0
# 则对应的scores张量用-1e9这个值来替换
scores=scores.masked_fill(mask==0,-1e9)
# print("scores after masked: ",scores,scores.shape)
# 对scores的最后一维进行softmax操作, 使用F.softmax方法, 第一个参数是softmax对象, 第二个是目标维度.
# 这样获得最终的注意力张量
p_attn=F.softmax(scores,dim=-1)
# 判断是否使用dropout进行随机置0
if dropout is not None:
p_attn=dropout(p_attn)
# print("value : ",value,value.shape)
# 最后, 根据公式将p_attn与value张量相乘获得最终的query注意力表示, 同时返回注意力张量
return torch.matmul(p_attn,value),p_attn
"""前向逻辑函数, 它的输入参数有四个,前三个就是注意力机制需要的Q, K, V,
最后一个是注意力机制中可能需要的mask掩码张量,默认是None. """
def forward(self,query,key,value,mask=None):
if mask is not None:
mask=mask.unsqueeze(0)
# 接着,我们获得一个batch_size的变量,他是query尺寸的第1个数字,代表有多少条样本.
batch_size=query.size(0)
# 之后就进入多头处理环节
# 首先利用zip将输入QKV与三个线性层组到一起,然后使用for循环,将输入QKV分别传到线性层中,
# 做完线性变换后,开始为每个头分割输入,这里使用view方法对线性变换的结果进行维度重塑,多加了一个维度h,代表头数,
# 这样就意味着每个头可以获得一部分词特征组成的句子,其中的-1代表自适应维度,
# 计算机会根据这种变换自动计算这里的值.然后对第二维和第三维进行转置操作,
# 为了让代表句子长度维度和词向量维度能够相邻,这样注意力机制才能找到词义与句子位置的关系,
# 从attention函数中可以看到,利用的是原始输入的倒数第一和第二维.这样我们就得到了每个头的输入.
query,key,value=[model(x).view(batch_size,-1,self.head,self.d_k).transpose(1,2) for model,x in zip(self.linears,(query,key,value))]
# print("query.shape before attention",query.shape)
# 得到每个头的输入后,接下来就是将他们传入到attention中,
# 这里直接调用我们之前实现的attention函数.同时也将mask和dropout传入其中.
x,self.attn=self.attention(query,key,value,mask,dropout=self.dropout)
# 通过多头注意力计算后,我们就得到了每个头计算结果组成的4维张量,我们需要将其转换为输入的形状以方便后续的计算,
# 因此这里开始进行第一步处理环节的逆操作,先对第二和第三维进行转置,然后使用contiguous方法,
# 这个方法的作用就是能够让转置后的张量应用view方法,否则将无法直接使用,
# 所以,下一步就是使用view重塑形状,变成和输入形状相同.
# print("x shape",x.shape)
x=x.transpose(1,2).contiguous().view(batch_size,-1,self.head*self.d_k)
# print("x shape",x.shape) # 这一步,x维度回到最初的
# 最后使用线性层列表中的最后一个线性层对输入进行线性变换得到最终的多头注意力结构的输出.
return self.linears[-1](x)
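# Example usage (illustrative sketch, not part of the original module): a batch of 2
# sequences of length 4, with embedding_dim=512 split across 8 heads; the shapes are
# assumptions chosen only for demonstration.
if __name__ == "__main__":
    mha = MultiHeadAttention(head=8, embedding_dim=512)
    x = torch.randn(2, 4, 512)        # (batch, sequence length, embedding_dim)
    mask = mha.subsequentMask(4)      # (1, 4, 4) look-ahead mask
    out = mha(x, x, x, mask=mask)     # self-attention over the sequence
    print(out.shape)                  # torch.Size([2, 4, 512])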
|
Jacquelin803/Transformers
|
transformerArc/MultiHeadAttention.py
|
MultiHeadAttention.py
|
py
| 7,709 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
71951318589
|
from flask import Flask, render_template, request
import os
from Prediction import deep_ocr, easy_ocr
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import InputRequired, Email, Length
# webserver gateway interface
app = Flask(__name__)
Bootstrap(app)
BASIC_PATH = os.getcwd()
UPLOAD_PATH = os.path.join(BASIC_PATH, 'static/upload/')
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
# get the uploaded file
uploaded_file = request.files['image']
# get the name of the uploaded file
file_name = uploaded_file.filename
# create the path for saving the uploaded file
save_path = os.path.join(UPLOAD_PATH, file_name)
# saving the uploaded file
uploaded_file.save(save_path)
print(file_name, 'was uploaded successfully!')
#plate_number = deep_ocr(save_path, file_name)
plate_number = easy_ocr(save_path, file_name)
#print(plate_number)
return render_template('index.html', upload=True, uploaded_image=file_name, text=plate_number)
return render_template('index.html', upload=False)
if __name__ == "__main__":
app.run(debug=True)
|
babakmbm/Optical-Character-Recognition-OCR-SYSTEM
|
App.py
|
App.py
|
py
| 1,289 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35765418423
|
'''
Created on Apr 21, 2014
@author: Borja
'''
import os.path
import xlrd
import __data__
class XslReader(object):
def __init__(self):
if not os.path.exists(__data__.path()):
os.makedirs(__data__.path())
self._data_path = __data__.path();
def load_indicator_sheet(self, file_name):
inpath = os.path.join(self._data_path, os.path.basename(file_name))
workbook = xlrd.open_workbook(inpath)
worksheet = workbook.sheet_by_name("Indicators")
data_dictionary = {}
for curr_col in range (0, worksheet.ncols):
field_name = worksheet.cell_value(0, curr_col).decode("UTF-8")
data_dictionary[field_name] = worksheet.cell_value(1, curr_col);
return data_dictionary
def load_organization_sheet(self, file_name):
inpath = os.path.join(self._data_path, os.path.basename(file_name))
workbook = xlrd.open_workbook(inpath)
worksheet = workbook.sheet_by_name("Organization")
data_dictionary = {}
data_dictionary["Name"] = worksheet.cell_value(1, 1).decode("UTF-8")
data_dictionary["Description_EN"] = worksheet.cell_value(2, 1).decode("UTF-8")
data_dictionary["Description_ES"] = worksheet.cell_value(3, 1).decode("UTF-8")
data_dictionary["Description_FR"] = worksheet.cell_value(4, 1).decode("UTF-8")
data_dictionary["URL"] = worksheet.cell_value(5, 1).decode("UTF-8")
data_dictionary["Logo"] = worksheet.cell_value(6, 1).decode("UTF-8")
data_dictionary["License_Name"] = worksheet.cell_value(9, 1).decode("UTF-8")
data_dictionary["License_Description"] = worksheet.cell_value(10, 1).decode("UTF-8")
data_dictionary["License_Republish"] = worksheet.cell_value(11, 1)
data_dictionary["License_URL"] = worksheet.cell_value(12, 1).decode("UTF-8")
return data_dictionary
def load_xsl(self, file_name):
inpath = os.path.join(self._data_path, os.path.basename(file_name))
workbook = xlrd.open_workbook(inpath)
worksheet = workbook.sheet_by_name("Values")
data_matrix = [[0 for x in xrange(worksheet.ncols)] for x in xrange(worksheet.nrows)]
for curr_row in range (0, worksheet.nrows):
for curr_col in range (0, worksheet.ncols):
#print "%s,%s ---- %s" %(curr_row, curr_col, worksheet.cell_value(curr_row, curr_col))
if worksheet.cell_type(curr_row, curr_col) == 1: # text cell
data_matrix[curr_row][curr_col] = worksheet.cell_value(curr_row, curr_col).encode("UTF-8");
else:
data_matrix[curr_row][curr_col] = worksheet.cell_value(curr_row, curr_col);
return data_matrix
|
weso/landportal-importers
|
RAWImporter/es/weso/raw/ExcelManagement/excel_reader.py
|
excel_reader.py
|
py
| 2,930 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9773551663
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 19:36:10 2020
@author: lnajt
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 11:57:08 2018
@author: MGGG
"""
import networkx as nx
import random
import numpy as np
##############
'''Wilsons Algorithm'''
def random_spanning_tree_wilson(graph):
'''The David Wilson random spanning tree algorithm'''
tree_edges = []
root = random.choice(list(graph.nodes()))
hitting_set = set ( [ root])
allowable_set = set(graph.nodes()).difference(hitting_set)
len_graph = len(graph)
len_hitting_set = 1
while len_hitting_set < len_graph:
start_node = random.choice(list(allowable_set))
trip = random_walk_until_hit(graph, start_node, hitting_set)
new_branch, branch_length = loop_erasure(trip)
for i in range(branch_length - 1):
tree_edges.append( [ new_branch[i], new_branch[i + 1]])
for v in new_branch[:-1]:
hitting_set.add(v)
len_hitting_set += 1
allowable_set.remove(v)
tree = nx.DiGraph()
for node in graph.nodes:
node_attributes = list(graph.nodes[node].keys())
tree.add_node(node)
for attr in node_attributes:
tree.nodes[node][attr] = graph.nodes[node][attr]
tree.add_edges_from(tree_edges)
return tree
def simple_random_walk_variable_length(graph,node, walk_length):
'''does a random walk of length walk_length'''
wet = set([node])
trip = [node]
while len(wet) < walk_length:
next_step = random.choice(list(graph.neighbors(node)))
wet.add(next_step)
trip.append(next_step)
node = next_step
return trip, wet
def forward_tree_variable_length(graph,node, walk_length):
'''builds the forward tree in Broders algorithm, using a walk of length
walk_length'''
walk, wet = simple_random_walk_variable_length(graph, node, walk_length)
edges = []
for vertex in list(wet):
if (vertex != walk[0]):
first_occurance = walk.index(vertex)
edges.append( [walk[first_occurance], walk[first_occurance-1]])
return edges, wet
def random_tree_variable_length(graph, walk_length):
'''runs Broders algorithm to produce a tree of length walk_length'''
tree_edges, wet = forward_tree_variable_length(graph, random.choice(list(graph.nodes())), walk_length)
tree = nx.DiGraph()
for node in list(wet):
tree.add_node(node)
tree.add_edges_from(tree_edges)
return tree
def random_spanning_tree_wilson_with_starting(graph, starting_tree):
#The David Wilson random spanning tree algorithm
tree_edges = list(starting_tree.edges())
hitting_set = set(starting_tree.nodes())
allowable_set = set(graph.nodes()).difference(hitting_set)
len_graph = len(graph)
len_hitting_set = len(hitting_set)
while len_hitting_set < len_graph:
start_node = random.choice(list(allowable_set))
trip = random_walk_until_hit(graph, start_node, hitting_set)
new_branch, branch_length = loop_erasure(trip)
#print(branch_length)
for i in range(branch_length - 1):
tree_edges.append( [ new_branch[i], new_branch[i + 1]])
for v in new_branch[:-1]:
hitting_set.add(v)
len_hitting_set += 1
allowable_set.remove(v)
tree = nx.DiGraph()
tree.add_edges_from(tree_edges)
return tree
def random_walk_until_hit(graph, start_node, hitting_set):
'''Does a random walk from start_node until it hits the hitting_set
:graph: input graph
:start_node: the node taht the graph starts at
:hitting_set: the set to stop at, i.e. the tree we are building up
'''
current_node = start_node
trip = [current_node]
while current_node not in hitting_set:
current_node = random.choice(list(graph.neighbors(current_node)))
trip.append(current_node)
return trip
def loop_erasure(trip):
'''erases loops from a trip
:trip: input of node names...
'''
n = len(trip)
loop_erased_walk_indices = []
last_occurance = n - trip[::-1].index(trip[0]) - 1
loop_erased_walk_indices.append(last_occurance)
branch_length = 0
while trip[loop_erased_walk_indices[-1]] != trip[-1]:
last_occurance = n - trip[::-1].index(trip[loop_erased_walk_indices[-1]]) -1
loop_erased_walk_indices.append(last_occurance + 1)
branch_length += 1
loop_erased_trip = [trip[i] for i in loop_erased_walk_indices]
return (loop_erased_trip, branch_length + 1)
def statistics():
samples = 100
graph = nx.grid_graph([20,20])
W_trees = []
for i in range(samples):
W_trees.append( nx.to_undirected(random_spanning_tree_wilson(graph)))
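# Example usage (illustrative sketch, not part of the original file): sample one spanning
# tree of a small grid graph with Wilson's algorithm and check that it spans every node.
if __name__ == "__main__":
    g = nx.grid_graph([5, 5])
    tree = random_spanning_tree_wilson(g)
    print(len(tree.nodes()) == len(g.nodes()))      # True: every node is reached
    print(len(tree.edges()) == len(g.nodes()) - 1)  # True: a tree has n - 1 edges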
|
ElleNajt/TinyProjects
|
boundaryofUST.py
|
boundaryofUST.py
|
py
| 4,806 |
python
|
en
|
code
| 4 |
github-code
|
6
|
74056208827
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class BiDirectionalTreeGRU(nn.Module):
def __init__(self, n_hidden=None, n_iters=1):
super().__init__()
self.n_hidden = n_hidden
self.n_iters = n_iters
self.down_root = nn.Linear(n_hidden, n_hidden)
self.down_gru = AnyBatchGRUCell(n_hidden, n_hidden)
self.up_leaf = nn.Linear(n_hidden, n_hidden)
self.up_gru = AnyBatchGRUCell(n_hidden, n_hidden)
def forward(self, up_embeddings, down_embeddings, levels, children, n_inners):
for _ in range(self.n_iters):
            self.down_the_tree(up_embeddings, down_embeddings, levels, children, n_inners)
            self.up_the_tree(up_embeddings, down_embeddings, levels, children, n_inners)
def up_the_tree(self, up_embeddings, down_embeddings, levels, children, n_inners):
zero = torch.zeros(1).long(); one = torch.ones(1).long()
if torch.cuda.is_available(): zero = zero.cuda(); one = one.cuda()
for i, nodes in enumerate(levels[::-1]):
j = n_levels - 1 - i
try:
inner = nodes[:n_inners[j]]
except ValueError:
inner = []
try:
outer = nodes[n_inners[j]:]
except ValueError:
outer = []
if len(inner) > 0:
try:
u_k_inners = u_k[:n_inners[j]]
except ValueError:
u_k_inners = []
try:
u_k_leaves = u_k[n_inners[j]:]
except ValueError:
u_k_leaves = []
h_L = embeddings[j+1][children[inner, zero]]
h_R = embeddings[j+1][children[inner, one]]
hhu = torch.cat((h_L, h_R, u_k_inners), 1)
r = self.fc_r(hhu)
if self.bn: r = self.bn_r(r)
r = F.sigmoid(r)
h_H = self.fc_h(r * hhu)
if self.bn: h_H = self.bn_h(h_H)
h_H = self.activation(h_H)
z = self.fc_z(torch.cat((h_H, hhu), -1))
if self.bn: z = self.bn_z(z)
z_H = z[:, :n_hidden] # new activation
z_L = z[:, n_hidden:2*n_hidden] # left activation
z_R = z[:, 2*n_hidden:3*n_hidden] # right activation
z_N = z[:, 3*n_hidden:] # local state
z = torch.stack([z_H,z_L,z_R,z_N], 2)
z = F.softmax(z)
h = ((z[:, :, 0] * h_H) +
(z[:, :, 1] * h_L) +
(z[:, :, 2] * h_R) +
(z[:, :, 3] * u_k_inners))
try:
embeddings.append(torch.cat((h, u_k_leaves), 0))
except AttributeError:
embeddings.append(h)
else:
embeddings.append(u_k)
def down_the_tree(self, up_embeddings, down_embeddings, levels, children, n_inners):
down_embeddings[0] = F.tanh(self.down_root(up_embeddings[0])) # root nodes
zero = torch.zeros(1).long(); one = torch.ones(1).long()
if torch.cuda.is_available(): zero = zero.cuda(); one = one.cuda()
for j, nodes in enumerate(levels[:-1]):
down_parent = down_embeddings[j]
up_L = up_embeddings[j+1][children[nodes, zero]]
up_R = up_embeddings[j+1][children[nodes, one]]
down_L = self.down_gru(up_L, down_parent)
down_R = self.down_gru(up_R, down_parent)
h = Variable(torch.zeros(down_L.size()[0] * 2, down_L.size()[1]))
h[children[nodes, zero]] = down_L
h[children[nodes, one]] = down_R
down_embeddings[j] = h
|
isaachenrion/jets
|
src/architectures/utils/bidirectional_tree_gru.py
|
bidirectional_tree_gru.py
|
py
| 3,809 |
python
|
en
|
code
| 9 |
github-code
|
6
|
19793355076
|
# coding:utf-8
from bs4 import BeautifulSoup
import re
from urllib.parse import urljoin
from urllib.parse import quote
from ipdb import set_trace
class HtmlParser(object):
def replaceillgalchar(self, link_text):
# set_trace()
while link_text.find(':')>=0:
#
link_text = link_text.replace(':', '-')
while link_text.find('?')>=0:
link_text = link_text.replace('?', '-')
while link_text.find('<')>=0:
link_text = link_text.replace('<', '-')
while link_text.find('>')>=0:
link_text = link_text.replace('>', '-')
while link_text.find('|')>=0:
link_text = link_text.replace('|', '-')
while link_text.find(r'\\')>=0:
link_text = link_text.replace(r'\\', '-')
while link_text.find('\/')>=0:
# set_trace()
link_text = link_text.replace(r'\/', '-')
return link_text
def _baidu_get_new_urls(self, page_url, soup):
new_urls = []
new_titles = []
keyword_times = []
# /view/123.htm
# links = soup.find_all('a', href=re.compile(r'/view/[\u4e00-\u9fa5]+'))
links = soup.find_all('a',href=re.compile(r'/item/*'))
old_link = set()
for link in links:
if link.text not in old_link:
new_url = link['href']
if new_url != '/item/史记·2016?fr=navbar':
new_full_url = urljoin(page_url, new_url)
# print(new_full_url)
new_urls.append(new_full_url)
if link.get('title') != None:
if (link['title'] != ''):
textnow = self.replaceillgalchar(link['title'])
else:
textnow = self.replaceillgalchar(link.text)
else:
textnow = self.replaceillgalchar(link.text)
new_titles.append(textnow)
old_link.add(link.text)
temp = link.text
if (temp.find('(') != -1):
temp = temp.replace('(','\(')
if (temp.find(')') != -1):
temp = temp.replace(')','\)')
if (temp.find('+') != -1):
temp = temp.replace('+','\+')
if (temp.find('*') != -1):
temp = temp.replace('*','\*')
if (temp.find(' ') != -1):
temp = temp.replace(' ','\s')
if (temp.find('?') != -1):
temp = temp.replace('?','\?')
if (temp.find('[') != -1):
temp = temp.replace('[','\[')
if (temp.find(']') != -1):
temp = temp.replace(']','\]')
if (temp.find('{') != -1):
temp = temp.replace('{','\{')
if (temp.find('}') != -1):
temp = temp.replace('}','\}')
keyword_time = soup.find_all(string=re.compile(temp))
keywordtime = 0
for keyword in keyword_time:
keywordtime = keywordtime + keyword.count(link.text)
keyword_times.append(keywordtime)
#print(new_urls)
# set_trace()
return new_titles, keyword_times, new_urls
def _wiki_get_new_urls(self, page_url, soup):
# def has_class_and_text(tag):
# return tag['class'] == 'references'
# def has_class_but_no_id(tag):
# return tag.has_attr('class')
new_urls = []
new_titles = []
keyword_times = []
# /view/123.htm
# links = soup.find_all('a', href=re.compile(r'/view/[\u4e00-\u9fa5]+'))
# set_trace()
links_orginal = soup.find_all('a', href=re.compile(r'/wiki/'))
links = []
illegalchar = ('*', '+', '-', '?', '.', ',')
deleurl1 = re.compile(r'/wiki/Special:')
deleurl2 = re.compile(r'/wiki/Category:')
deleurl3 = re.compile(r'/wiki/%E7%BB%B4%E5%9F%BA%E8%AF%AD%E5%BD%95')
deleurl4 = re.compile(r'/wiki/Wikipedia:')
deleurl5 = re.compile(r'/wiki/File:')
deleurl6 = re.compile(r'/wiki/Template:')
deleurl7 = re.compile(r'/wiki/Portal:')
deleurl8 = re.compile(r'/wiki/Privacy_policy')
deleurl9 = re.compile(r'/wiki/%E7%BB%B4%E5%9F%BA%E5%85%B1%E4%BA%AB%E8%B5%84%E6%BA%90')
deleurl10 = re.compile(r'/wiki/Help:')
deleurl11 = re.compile(r'wiktionary\.org')
deleurl12 = re.compile(r'wikimediafoundation\.org')
deleurl13 = re.compile(r'wikivoyage\.org')
deleurl14 = re.compile(r'wikimedia\.org')
deleurl17 = re.compile(r'wikisource\.org')
deleurl18 = re.compile(r'wikidata\.org')
deleurl19 = re.compile(r'wikibooks\.org')
deleurl15 = re.compile(r'/wiki/Main_Page')
deleurl16 = re.compile(r'/wiki/Talk:')
convert = re.compile(r'zh.m.wikipedia\.org/wiki/')
for link in links_orginal:
#if re.search('\.(jpg|JPG|svg|SVG)$',link['href']):
# links.remove(link)
if not ((re.search(deleurl1, link['href'])) or (re.search(deleurl2, link['href'])) or (
re.search(deleurl3, link['href'])) or (re.search(deleurl4, link['href'])) or (
re.search(deleurl6, link['href'])) or (re.search(deleurl5, link['href'])) or (
re.search(deleurl7, link['href'])) or (re.search(deleurl8, link['href'])) or (
re.search(deleurl9, link['href'])) or (re.search(deleurl10, link['href'])) or (
re.search(deleurl11, link['href'])) or (re.search(deleurl12, link['href'])) or (
re.search(deleurl13, link['href'])) or (re.search(deleurl14, link['href'])) or (
re.search(deleurl15, link['href'])) or (re.search(deleurl16, link['href'])) or (
re.search(deleurl17, link['href'])) or (re.search(deleurl18, link['href'])) or (re.search(deleurl19, link['href']))) :
links.append(link)
old_link = set()
# set_trace()
for link in links:
if link.text == '':
continue
# print(link) # for test
# if link['href'] == '/wiki/%E6%97%A5%E8%AA%9E%E6%9B%B8%E5%AF%AB%E7%B3%BB%E7%B5%B1':
# set_trace()
if link['href'].find('#') != -1:
t = link['href'].find('#')
link['href'] = link['href'][0:t]
# link.text = Converter('zh-hans').convert(link.text ) # transform the text
if link.text not in old_link:
new_url = link['href']
new_full_url = urljoin(page_url, new_url)
if re.search(convert, new_full_url):
new_full_url = new_full_url.replace('/wiki/', '/zh-cn/')
# set_trace()
new_urls.append(new_full_url)
# if 'title' in link):
if link.get('title') != None:
if (link['title'] != ''):
textnow = self.replaceillgalchar(link['title'])
else:
textnow = self.replaceillgalchar(link.text)
else:
textnow = self.replaceillgalchar(link.text)
new_titles.append(textnow)
old_link.add(link.text)
temp = link.text
if (temp.find('(') != -1):
temp = temp.replace('(', '\(')
if (temp.find(')') != -1):
temp = temp.replace(')', '\)')
if (temp.find('+') != -1):
temp = temp.replace('+', '\+')
if (temp.find('*') != -1):
temp = temp.replace('*', '\*')
if (temp.find(' ') != -1):
temp = temp.replace(' ', '\s')
if (temp.find('?') != -1):
temp = temp.replace('?', '\?')
keyword_time = soup.find_all(string=re.compile(temp))
keywordtime = 0
for keyword in keyword_time:
keywordtime = keywordtime + keyword.count(link.text)
keyword_times.append(keywordtime)
# set_trace()
return new_titles, keyword_times, new_urls
def _baidu_get_new_data(self, page_url, soup):
res_data = {}
# url
# res_data['url'] = page_url
title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
parent_title = title_node.get_text()
res_data['title'] = parent_title
summary_node = soup.find('div', class_='lemma-summary')
if (summary_node != None):
if (summary_node.get_text() != None):
res_data['summary'] = summary_node.get_text()
else:
res_data['summary'] = '!!!No summary!!!'
else:
res_data['summary'] = '!!!No summary!!!'
# res_data['parent'] = parent
# print(res_data)
return res_data
def _wiki_get_new_data(self, page_url, soup):
res_data = {}
# url
# res_data['url'] = page_url
title_node = soup.find('div', class_='mw-body').find('h1')
parent_title = title_node.get_text()
res_data['title'] = parent_title
summary_node = soup.find(name='p')
if (summary_node != None):
if (summary_node.get_text() != None):
res_data['summary'] = summary_node.get_text()
else:
res_data['summary'] = '!!!No summary!!!'
else:
res_data['summary'] = '!!!No summary!!!'
# res_data['parent'] = parent_all
# print(res_data)
return res_data
def parse(self, baidu_url, wiki_url, baidu_soup, wiki_soup):
#if page_url is None or html_cont is None:
# return
# soup = BeautifulSoup(html_cont, 'html.parser')
# print(soup.prettify())
baidu_new_titles, baidu_keyword_times, baidu_urls = self._baidu_get_new_urls(baidu_url, baidu_soup)
wiki_new_titles, wiki_keyword_times, wiki_urls = self._wiki_get_new_urls(wiki_url, wiki_soup)
baidu_new_data = self._baidu_get_new_data(baidu_url, baidu_soup)
wiki_new_data = self._wiki_get_new_data(wiki_url, wiki_soup)
# print('mark')
return baidu_new_titles, baidu_keyword_times, baidu_new_data, baidu_urls, wiki_new_titles, wiki_keyword_times, wiki_new_data, wiki_urls
|
BaikeSpider/Randomly-Selection
|
html_parser.py
|
html_parser.py
|
py
| 10,632 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36691623039
|
import streamlit as st
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
from hugchat import hugchat
from document_processing import index, chain
st.set_page_config(page_title="HugChat - An LLM-powered Streamlit app")
st.title('🎈 PF1-Chatbot')
st.write('Hello student!')
with st.sidebar:
st.title('🤗💬 HugChat App')
st.markdown('''
''')
add_vertical_space(5)
st.write('Made by Matt')
if 'generated' not in st.session_state:
st.session_state['generated'] = ["I'm HugChat, How may I help you?"]
if 'past' not in st.session_state:
st.session_state['past'] = ['Hi!']
input_container = st.container()
colored_header(label='', description='', color_name='blue-30')
response_container = st.container()
# User input
## Function for taking user provided prompt as input
def get_text():
input_text = st.text_input("You: ", "", key="input")
return input_text
## Applying the user input box
with input_container:
user_input = get_text()
# Response output
## Function for taking user prompt as input followed by producing AI generated responses
def generate_response(prompt):
chatbot = hugchat.ChatBot()
response = chatbot.chat(prompt)
return response
def get_similar_docs(query, k=2, score=False):
if score:
similar_docs = get_similar_docs_with_score(query, k=k)
else:
similar_docs = get_similar_docs_without_score(query, k=k)
return similar_docs
def get_similar_docs_with_score(query, k=3):
similar_docs = index.similarity_search_with_score(query, k=k)
return similar_docs
def get_similar_docs_without_score(query, k=3):
similar_docs = index.similarity_search(query, k=k)
return similar_docs
def get_answer(query):
similar_docs = get_similar_docs(query)
answer = chain.run(input_documents=similar_docs, question=query)
return answer
if user_input:
answer = get_answer(user_input)
with response_container:
st.write(answer) # Use st.write() to display the message
else:
st.write("Please enter a prompt.") # Use st.write() for the prompt message
|
Ubond-edu/PF1-Chatbot
|
streamlit_app.py
|
streamlit_app.py
|
py
| 2,211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8261658572
|
import functions
import utils
from functions import greet_user
# import ecommerce.shipping
# from ecommerce.shipping import calc_shipping
from ecommerce import shipping
shipping.calc_shipping()
import math
# Google search python 3 math module
price = 10
# variable must be lower case - booleans must be capitalized
is_published = False
# birth_year = input('Birth year: ')
# age = 2019 - birth_year
# # print(age)
# print(type(birth_year))
# weight = input('What is your weight: ')
# converted = weight * .45
# print(converted)
# message = '''
# This is a mutli line string
# From John
# '''
course = 'Python for Beginners'
# This returns 0,1 and 2 index
# print(course[0:3])
# print(course[1:-1])
# string formatting
first = 'John'
last = 'Smith'
message = first + last + ' is a coder'
message = first + ' [' + last + '] is a coder'
# msg = f'{first} [{last}] is a coder'
# print(message)
course = "This is the Pourse"
# print(len(course))
# print(course.upper())
# print(course.find('P'))
# print(course.replace('P', 'C'))
# print(course.replace('P', 'Best C'))
print('Pourse' in course)
# Returns division without decimal
print(10 // 3)
# modulas operator returns remainder
print(10 % 3)
# 10 to the power of 3
print(10 ** 3)
# Rounds Up
x = 2.9
print(round(x))
numbers5 = [7, 16, 3, 1, 11]
numbers5.append(20)
numbers5.insert(1,33)
print("index", numbers5.index(3))
# numbersNew = numbers5.copy()
# print(numbersNew)
print(numbers5)
greet_user("John", 18)
functions.greet_user("Sally", 20)
max = utils.find_max([7,10,11,2,9,6])
print(max)
|
Rosenmatt1/Python-101
|
App.py
|
App.py
|
py
| 1,575 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3438496641
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if root == None:
return []
res = []
s = [root]
while len(s) != 0:
node = s.pop()
res.append(node.val)
if node.right != None:
s.append(node.right)
if node.left != None:
s.append(node.left)
return res
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
res = []
def helper(node):
if node is None:
return
res.append(node.val)
helper(node.left)
helper(node.right)
helper(root)
return res
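# Example usage (illustrative sketch, not part of the original solutions): build the tree
# [1, null, 2, 3] from the problem statement and run preorderTraversal; the name Solution
# refers to the second (recursive) class after both definitions are executed.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(Solution().preorderTraversal(root))  # [1, 2, 3]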
|
cuiy0006/Algorithms
|
leetcode/144. Binary Tree Preorder Traversal.py
|
144. Binary Tree Preorder Traversal.py
|
py
| 1,190 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39259069256
|
import logging
from django.db import transaction
from rest_framework import serializers
from rest_framework import status
from rest_framework.decorators import detail_route
from rest_framework.permissions import DjangoModelPermissions
from rest_framework.response import Response
from rest_framework.routers import DefaultRouter
from rest_framework.viewsets import ModelViewSet
from eums.api.standard_pagination import StandardResultsSetPagination
from eums.models import DistributionPlanNode as DeliveryNode, UserProfile
from eums.permissions.distribution_plan_node_permissions import DistributionPlanNodePermissions
logger = logging.getLogger(__name__)
class DistributionPlanNodeSerialiser(serializers.ModelSerializer):
quantity = serializers.IntegerField(write_only=True, required=False)
parents = serializers.ListField(required=False)
balance = serializers.IntegerField(read_only=True, required=False)
consignee_name = serializers.CharField(read_only=True, source='consignee.name')
item_description = serializers.CharField(read_only=True, source='item.item.description')
order_type = serializers.CharField(read_only=True, source='type')
class Meta:
model = DeliveryNode
fields = ('id', 'distribution_plan', 'location', 'consignee', 'tree_position', 'parents', 'quantity_in',
'contact_person_id', 'item', 'delivery_date', 'remark', 'track', 'quantity', 'quantity_out',
'balance', 'has_children', 'consignee_name', 'item_description', 'order_number', 'order_type',
'time_limitation_on_distribution', 'additional_remarks', 'is_assigned_to_self')
class DistributionPlanNodeViewSet(ModelViewSet):
permission_classes = (DjangoModelPermissions, DistributionPlanNodePermissions)
queryset = DeliveryNode.objects.all()
serializer_class = DistributionPlanNodeSerialiser
pagination_class = StandardResultsSetPagination
search_fields = ('location', 'consignee__name', 'delivery_date')
filter_fields = ('consignee', 'item', 'distribution_plan', 'contact_person_id', 'item__item')
def get_queryset(self):
user_profile = UserProfile.objects.filter(user_id=self.request.user.id).first()
logger.info('user profile = %s' % user_profile)
logger.info('user id = %s' % self.request.user.id)
if user_profile and user_profile.consignee:
logger.info('user consignee = %s' % user_profile.consignee)
return self._get_consignee_queryset(user_profile)
is_root = self.request.GET.get('is_root')
if is_root:
logger.info('root nodes = %s(%s)' % (DeliveryNode.objects.root_nodes(), DeliveryNode.objects.root_nodes()))
return DeliveryNode.objects.root_nodes()
logger.info('queryset clone node = %s(%s)' % (self.queryset._clone(), len(self.queryset._clone())))
return self.queryset._clone()
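    # Added summary of the branches above: consignee-bound users only ever see their
    # own nodes, ?is_root=true narrows the result to root nodes, and everything else
    # falls back to a clone of the full queryset.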
def _get_consignee_queryset(self, user_profile):
item_id = self.request.GET.get('consignee_deliveries_for_item')
if item_id:
return DeliveryNode.objects.delivered_by_consignee(user_profile.consignee, item_id).order_by('-id')
parent_id = self.request.GET.get('parent')
if parent_id:
logger.info('parent_id = %s' % parent_id)
parent = DeliveryNode.objects.get(pk=parent_id)
return parent.children()
return self._consignee_nodes(user_profile)
def _consignee_nodes(self, user_profile):
queryset = DeliveryNode.objects.filter(ip=user_profile.consignee)
logger.info('is_distributable = %s' % self.request.GET.get('is_distributable'))
        if self.request.GET.get('is_distributable'):
            queryset = queryset.filter(balance__gt=0, distribution_plan__confirmed=True,
                                       tree_position=DeliveryNode.IMPLEMENTING_PARTNER)
            logger.info('user consignee nodes after query = %s(%s)' % (queryset, len(queryset)))
        return queryset
def list(self, request, *args, **kwargs):
paginate = request.GET.get('paginate', None)
if paginate != 'true':
self.paginator.page_size = 0
return super(DistributionPlanNodeViewSet, self).list(request, *args, **kwargs)
@transaction.atomic
def perform_create(self, serializer):
serializer.save()
@detail_route()
def lineage(self, request, pk=None):
node = self.get_object()
lineage = node.lineage()
return Response(self.get_serializer(lineage, many=True).data)
@detail_route(methods=['patch'])
def report_loss(self, request, pk=None):
quantity_lost = request.data['quantity']
justification = request.data['justification']
node = self.get_object()
node.losses.create(quantity=quantity_lost, remark=justification)
node.save() # for updating the balance on the node - DO NOT REMOVE
return Response(status=status.HTTP_204_NO_CONTENT)
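    # Hypothetical usage sketch (added, not from the original project): with the router
    # registered below, reporting a loss would look roughly like
    #     PATCH .../distribution-plan-node/<pk>/report_loss/
    #     {"quantity": 5, "justification": "damaged in transit"}
    # and a successful call returns HTTP 204 with no body.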
distributionPlanNodeRouter = DefaultRouter()
distributionPlanNodeRouter.register(r'distribution-plan-node', DistributionPlanNodeViewSet)
|
unicefuganda/eums
|
eums/api/distribution_plan_node/distribution_plan_node_endpoint.py
|
distribution_plan_node_endpoint.py
|
py
| 5,146 |
python
|
en
|
code
| 9 |
github-code
|
6
|
43085026671
|
sqmesh_min_coord = [359919.189 - 360600.0, 3972158.559 - 3973000.0]
sqmesh_step = 2.0
import h5py
import math
tmesh_data = h5py.File("visdump_surface_mesh_jaramillo_384.h5",'r')
tmesh_key = '6234'
ME_len = len(tmesh_data[tmesh_key]['Mesh']['MixedElements'])
ntris = ME_len // 4
tricells_inodes = [[0 for x in range(3)] for x in range(ntris)]
for i in range(ME_len):
if i % 4 == 0:
if tmesh_data[tmesh_key]['Mesh']['MixedElements'][i] != 4:
raise RuntimeError("Mesh should only contain triangular cells!")
else:
tricells_inodes[i // 4][i % 4 - 1] = tmesh_data[tmesh_key]['Mesh']['MixedElements'][i]
tnodes = tmesh_data[tmesh_key]['Mesh']['Nodes']
def cohensutherland(left, top, right, bottom, x1, y1, x2, y2):
"""Clips a line to a rectangular area.
This implements the Cohen-Sutherland line clipping algorithm. left,
top, right and bottom denote the clipping area, into which the line
defined by x1, y1 (start point) and x2, y2 (end point) will be
clipped.
If the line does not intersect with the rectangular clipping area,
four None values will be returned as tuple. Otherwise a tuple of the
clipped line points will be returned in the form (cx1, cy1, cx2, cy2).
"""
LEFT_, RIGHT_, BOTTOM_, TOP_ = 1, 2, 4, 8
k1 = k2 = 0
def _getclip(xa, ya):
p = 0
if xa < left:
p |= LEFT_
elif xa > right:
p |= RIGHT_
if ya < bottom:
p |= BOTTOM_
elif ya > top:
p |= TOP_
return p
k1 = _getclip(x1, y1)
k2 = _getclip(x2, y2)
while (k1 | k2) != 0:
if (k1 & k2) != 0:
return None, None, None, None
opt = k1
if k1 == 0:
opt = k2
if opt & TOP_:
x = x1 + (x2 - x1) * (1.0*(top - y1)) / (y2 - y1)
y = top
elif opt & BOTTOM_:
x = x1 + (x2 - x1) * (1.0*(bottom - y1)) / (y2 - y1)
y = bottom
elif opt & RIGHT_:
y = y1 + (y2 - y1) * (1.0*(right - x1)) / (x2 - x1)
x = right
elif opt & LEFT_:
y = y1 + (y2 - y1) * (1.0*(left - x1)) / (x2 - x1)
x = left
if opt == k1:
x1 = x
y1 = y
k1 = _getclip(x1, y1)
else:
x2 = x
y2 = y
k2 = _getclip(x2, y2)
return x1, y1, x2, y2
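# Added illustration of the contract above (values not from the original script):
# clipping the segment (-1, 1) -> (3, 1) against left=0, top=2, right=2, bottom=0
# yields (0, 1, 2, 1), while a segment lying entirely outside the box returns
# (None, None, None, None).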
def get_intersect_poly(sq_i, sq_j, itri):
left = sqmesh_min_coord[0] + sq_j*sqmesh_step
right = sqmesh_min_coord[0] + (sq_j + 1)*sqmesh_step
bottom = sqmesh_min_coord[1] + sq_i*sqmesh_step
top = sqmesh_min_coord[1] + (sq_i + 1)*sqmesh_step
    # Order the triangle's nodes clockwise: the first node is the one with the minimum x coordinate
ifir = 0
for inode in range(1, 3):
if tnodes[int(tricells_inodes[itri][inode])][0] < tnodes[int(tricells_inodes[itri][ifir])][0]:
ifir = inode
isec = -1
clkw_ang = -math.pi
for inode in range(3):
if (inode != ifir):
cur_ang = math.atan2(tnodes[int(tricells_inodes[itri][inode])][1] - tnodes[int(tricells_inodes[itri][ifir])][1], tnodes[int(tricells_inodes[itri][inode])][0] - tnodes[int(tricells_inodes[itri][ifir])][0])
if cur_ang > clkw_ang:
clkw_ang = cur_ang
isec = inode
inodes_clkw = [ifir, isec, 0]
for inode in range(3):
if (inode != ifir) and (inode != isec):
inodes_clkw[2] = inode
break
for inode in range(3):
inodes_clkw[inode] = int(tricells_inodes[itri][inodes_clkw[inode]])
nclipped = 0
seg_pts = [[[0.0 for x in range(2)] for x in range(2)] for x in range(3)]
for iseg in range(3):
inode1 = inodes_clkw[iseg]
inode2 = inodes_clkw[(iseg + 1)%3]
x1, y1, x2, y2 = cohensutherland(left, top, right, bottom, tnodes[inode1][0], tnodes[inode1][1], tnodes[inode2][0], tnodes[inode2][1])
        if x1 is not None:
seg_pts[nclipped][0][0] = x1
seg_pts[nclipped][0][1] = y1
seg_pts[nclipped][1][0] = x2
seg_pts[nclipped][1][1] = y2
nclipped += 1
if nclipped == 0:
return [[]]
poly_nodes = [[0.0 for x in range(2)] for x in range(7)]
poly_nodes[0] = seg_pts[0][0]
inext_seg = 0
    npolynodes = 1
sides_cmp = [left, top, right, bottom]
sq_nodes = [[left, top], [right, top], [right, bottom], [left, bottom]]
if seg_pts[0][0] == seg_pts[nclipped - 1][1]:
isq_side_start = -1
isq_side_stop = -1
else:
node_cmp = [poly_nodes[0][0], poly_nodes[0][1], poly_nodes[0][0], poly_nodes[0][1]]
for iside in range(4):
if node_cmp[iside] == sides_cmp[iside]:
isq_side_start = iside
break
for iside in range(4):
if node_cmp[iside] == sides_cmp[iside]:
isq_side_stop = iside
    while True:
if (inext_seg != nclipped) and (poly_nodes[npolynodes - 1] == seg_pts[inext_seg][0]):
if (isq_side_stop == -1) and (inext_seg == nclipped - 1):
break
else:
poly_nodes[npolynodes] = seg_pts[inext_seg][1]
npolynodes += 1
inext_seg += 1
continue
node_cmp = [poly_nodes[npolynodes - 1][0], poly_nodes[npolynodes - 1][1], poly_nodes[npolynodes - 1][0], poly_nodes[npolynodes - 1][1]]
icurside = -1
if isq_side_start != -1:
for i in range(4):
iside = (isq_side_start + i)%4
if node_cmp[iside] == sides_cmp[iside]:
icurside = iside
else:
for iside in range(4):
if node_cmp[iside] == sides_cmp[iside]:
icurside = iside
isq_side_start = icurside
if icurside == isq_side_stop:
if inext_seg < nclipped:
raise RuntimeError("Completed the intersection polygon before tracing all the clipped segments!")
break
if inext_seg < nclipped:
next_node_cmp = [seg_pts[inext_seg][0][0], seg_pts[inext_seg][0][1], seg_pts[inext_seg][0][0], seg_pts[inext_seg][0][1]]
if next_node_cmp[icurside] == sides_cmp[icurside]:
poly_nodes[npolynodes] = seg_pts[inext_seg][0]
npolynodes += 1
continue
poly_nodes[npolynodes] = sq_nodes[icurside]
npolynodes += 1
poly_nodes = poly_nodes[0:npolynodes]
return poly_nodes
def get_poly_area(polynodes):
nnodes = len(polynodes)
poly_area = 0.0
for itri in range(nnodes - 2):
inodes = [0, itri + 1, itri + 2]
tri_nodes = [[0.0 for x in range(2)] for x in range(3)]
for i in range(3):
tri_nodes[i] = polynodes[inodes[i]]
tri_area = 0.5*abs((tri_nodes[0][0] - tri_nodes[2][0])*(tri_nodes[1][1] - tri_nodes[0][1]) - (tri_nodes[0][0] - tri_nodes[1][0])*(tri_nodes[2][1] - tri_nodes[0][1]))
poly_area += tri_area
return poly_area
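# Added check of the fan triangulation above: the unit square
# [[0, 0], [1, 0], [1, 1], [0, 1]] splits into two triangles of area 0.5 each,
# so get_poly_area returns 1.0.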
def is_inside_tri(itri, sq_i, sq_j):
tri_nodes = [[0.0 for x in range(2)] for x in range(3)]
for inode in range(3):
tri_nodes[inode] = tnodes[int(tricells_inodes[itri][inode])]
sq_nodes = [[0.0 for x in range(2)] for x in range(4)]
for i in range(2):
for j in range(2):
sq_nodes[2*i + j] = [sqmesh_min_coord[0] + (sq_j + j)*sqmesh_step, sqmesh_min_coord[1] + (sq_i + i)*sqmesh_step]
is_inside = True
for inode in range(4):
det = (tri_nodes[1][1] - tri_nodes[2][1])*(tri_nodes[0][0] - tri_nodes[2][0]) + (tri_nodes[2][0] - tri_nodes[1][0])*(tri_nodes[0][1] - tri_nodes[2][1])
s = ((tri_nodes[1][1] - tri_nodes[2][1])*(sq_nodes[inode][0] - tri_nodes[2][0]) + (tri_nodes[2][0] - tri_nodes[1][0])*(sq_nodes[inode][1] - tri_nodes[2][1])) / det
t = ((tri_nodes[2][1] - tri_nodes[0][1])*(sq_nodes[inode][0] - tri_nodes[2][0]) + (tri_nodes[0][0] - tri_nodes[2][0])*(sq_nodes[inode][1] - tri_nodes[2][1])) / det
if (s < 0) or (t < 0) or (s + t > 1):
is_inside = False
break
return is_inside
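# Added note: is_inside_tri converts each square corner to barycentric coordinates
# (s, t) of the triangle; the square lies inside the triangle only if every corner
# satisfies s >= 0, t >= 0 and s + t <= 1, which is exactly what the loop checks.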
fid = open('area_weights.dat', 'w')
fid.write(repr(ntris) + '\n')
fid.write(repr(sqmesh_min_coord[0]) + ' ' + repr(sqmesh_min_coord[1]) + ' ' + repr(sqmesh_step) + '\n')
for itri in range(ntris):
    print('Processing triangle ' + repr(itri + 1) + '/' + repr(ntris), end='\r')
xmin = tnodes[int(tricells_inodes[itri][0])][0]
xmax = tnodes[int(tricells_inodes[itri][0])][0]
ymin = tnodes[int(tricells_inodes[itri][0])][1]
ymax = tnodes[int(tricells_inodes[itri][0])][1]
for inode in range(1, 3):
if tnodes[int(tricells_inodes[itri][inode])][0] < xmin:
xmin = tnodes[int(tricells_inodes[itri][inode])][0]
elif tnodes[int(tricells_inodes[itri][inode])][0] > xmax:
xmax = tnodes[int(tricells_inodes[itri][inode])][0]
if tnodes[int(tricells_inodes[itri][inode])][1] < ymin:
ymin = tnodes[int(tricells_inodes[itri][inode])][1]
elif tnodes[int(tricells_inodes[itri][inode])][1] > ymax:
ymax = tnodes[int(tricells_inodes[itri][inode])][1]
imin = int(math.floor((ymin - sqmesh_min_coord[1]) / sqmesh_step))
imax = int(math.floor((ymax - sqmesh_min_coord[1]) / sqmesh_step))
jmin = int(math.floor((xmin - sqmesh_min_coord[0]) / sqmesh_step))
jmax = int(math.floor((xmax - sqmesh_min_coord[0]) / sqmesh_step))
fid.write(repr(itri) + '\t')
for i in range(imin, imax + 1):
for j in range(jmin, jmax + 1):
area_weight = 0.0
if is_inside_tri(itri, i, j):
area_weight = 1.0
else:
polygon = get_intersect_poly(i, j, itri)
if polygon != [[]]:
cur_poly_area = get_poly_area(polygon)
area_weight = cur_poly_area / pow(sqmesh_step, 2)
if area_weight > 0.0:
fid.write(repr(i) + '\t' + repr(j) + '\t')
fid.write(repr(area_weight) + '\t')
fid.write('\n')
fid.close()
|
amanzi/ats
|
tools/square_to_tri_mesh_data_parser/tri_square_overlap_weights.py
|
tri_square_overlap_weights.py
|
py
| 9,284 |
python
|
en
|
code
| 35 |
github-code
|
6
|