| seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
10140997594 |
# -*- coding: utf-8 -*-
# @Time : 19-1-24 9:35 PM
# @Author : ccs
import json
from django.http import HttpResponse
def calc(request):
    # Query-string values arrive as strings; cast to int so "+" adds
    # numbers instead of concatenating text.
    a = int(request.GET['a'])
    b = int(request.GET['b'])
    c = int(request.GET['c'])
    m = a + b + c
    n = b + a
    rets = {"m": m, "n": n}
    return HttpResponse(json.dumps(rets))
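# A minimal sketch of the expected round-trip (hypothetical URL wiring;
# assumes the view is routed at /calc):
#   GET /calc?a=1&b=2&c=3  ->  {"m": 6, "n": 3}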
| ccs258/python_code | learn_api.py | learn_api.py | py | 348 | python | en | code | 0 | github-code | 6 |
34702779543 |
from firebase_admin import firestore
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def get_user_skills(userid):
item = firestore.client().collection('user').document(userid).get().to_dict()['skills']
user_skill_string = ' '.join(str(e) for e in item)
return user_skill_string
def get_result(field, user_skill_string, keyword_string):
content = (user_skill_string, keyword_string)
cv = CountVectorizer()
matrix = cv.fit_transform(content)
    similarity_matrix = cosine_similarity(matrix)
    result = similarity_matrix[1][0] * 100
result = str(result)
result = result.split('.', 1)[0]
# return 'You scored {}% in {} jobs.'.format(result, field)
return result
def check_result(field, result):
return 'You scored {}% in {} jobs.'.format(result, field)
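# A self-contained sketch of the scoring idea above (toy strings, no Firestore):
# vectorize two skill strings and read the off-diagonal cosine similarity.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
_cv = CountVectorizer()
_m = _cv.fit_transform(("python sql pandas", "python sql excel"))
print(int(cosine_similarity(_m)[1][0] * 100))  # prints 66 (cosine 2/3) for this pair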
| prajwol-manandhar/resume-analysis-website | analysis.py | analysis.py | py | 869 | python | en | code | 1 | github-code | 6 |
1427353112 |
dic={
'nile':'egypt',
'yellow river':'shandong',
'yangtze river':'shanghai',
}
for river,province in dic.items():
print(f"The {river} runs through {province}")
for river in sorted(dic.keys()):
print(f"{river}")
for province in sorted(dic.values()):
print(f"{province}")
| HAL200000/Python | pcc6-5.py | pcc6-5.py | py | 309 | python | en | code | 0 | github-code | 6 |
5291091637 |
from typing import Iterator
def countdown(i: int) -> Iterator[int]:
    while i >= 0:
        yield i
        i -= 1
def countup(i: int) -> Iterator[int]:
    f = 0
    while f < i:
        yield f
        f += 1
def range_countup(start: int, end: int) -> Iterator[int]:
    while start < end:
        yield start
        start += 1
def trim(data: list) -> list:
data_length = len(data)
i = 0
    not_wanted_chars = ['\t', ' ', '\r']  # '\n' and U+000B were tried as well
while i < data_length:
length = len(data[i])
if length == 0:
i += 1
continue
if data[i][0] in not_wanted_chars:
not_wanted_char_count = 1
x = 1
while x < length:
if data[i][x] in not_wanted_chars:
not_wanted_char_count += 1
else:
break
x += 1
if not_wanted_char_count == length:
data.pop(i)
data_length -= 1
continue
if data[i][-1] == '\r':
data[i] = data[i][:-1]
i += 1
return data
def remove_spaces(a_string: str) -> str:
length = len(a_string)
while length > 1 and a_string[0] == ' ':
a_string = a_string[1:]
length -= 1
while length > 1 and a_string[-1] == ' ':
a_string = a_string[:-1]
length -= 1
return a_string
def folder_name_char_check(a_string: str) -> bool:
non_valid_chars = '\\/?%*:|"<>.'
for i in a_string:
if i in non_valid_chars:
print("{} Contains non valid character(s).\nFile and Folder names must not contain any of these characters {}.".format(a_string, non_valid_chars))
return False
return True
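# A quick usage sketch for the helpers above (assumed behavior, toy values):
# list(countdown(3))        -> [3, 2, 1, 0]
# list(countup(3))          -> [0, 1, 2]
# list(range_countup(2, 5)) -> [2, 3, 4]
# trim(['\t ', 'keep\r'])   -> ['keep']
# remove_spaces('  x  ')    -> 'x'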
| MichaelFulcher148/Batch-Folder-Create | code_tools.py | code_tools.py | py | 1,757 | python | en | code | 1 | github-code | 6 |
21546810060 |
### Import the modules required to run Biogeme
from biogeme import *
from headers import *
from loglikelihood import *
from statistics import *
### Variables
# Piecewise linear definition of income
ScaledIncome = DefineVariable('ScaledIncome',\
CalculatedIncome / 1000)
ContIncome_0_4000 = DefineVariable('ContIncome_0_4000',\
min(ScaledIncome,4))
ContIncome_4000_6000 = DefineVariable('ContIncome_4000_6000',\
max(0,min(ScaledIncome-4,2)))
ContIncome_6000_8000 = DefineVariable('ContIncome_6000_8000',\
max(0,min(ScaledIncome-6,2)))
ContIncome_8000_10000 = DefineVariable('ContIncome_8000_10000',\
max(0,min(ScaledIncome-8,2)))
ContIncome_10000_more = DefineVariable('ContIncome_10000_more',\
max(0,ScaledIncome-10))
age_65_more = DefineVariable('age_65_more',age >= 65)
moreThanOneCar = DefineVariable('moreThanOneCar',NbCar > 1)
moreThanOneBike = DefineVariable('moreThanOneBike',NbBicy > 1)
individualHouse = DefineVariable('individualHouse',\
HouseType == 1)
male = DefineVariable('male',Gender == 1)
haveChildren = DefineVariable('haveChildren',\
((FamilSitu == 3)+(FamilSitu == 4)) > 0)
haveGA = DefineVariable('haveGA',GenAbST == 1)
highEducation = DefineVariable('highEducation', Education >= 6)
### Coefficients
coef_intercept = Beta('coef_intercept',0.398165,-1000,1000,0 )
coef_age_65_more = Beta('coef_age_65_more',0.0716533,-1000,1000,0 )
coef_haveGA = Beta('coef_haveGA',-0.578005,-1000,1000,0 )
coef_ContIncome_0_4000 = \
Beta('coef_ContIncome_0_4000',0.0902761,-1000,1000,0 )
coef_ContIncome_4000_6000 = \
Beta('coef_ContIncome_4000_6000',-0.221283,-1000,1000,0 )
coef_ContIncome_6000_8000 = \
Beta('coef_ContIncome_6000_8000',0.259466,-1000,1000,0 )
coef_ContIncome_8000_10000 = \
Beta('coef_ContIncome_8000_10000',-0.523049,-1000,1000,0 )
coef_ContIncome_10000_more = \
Beta('coef_ContIncome_10000_more',0.084351,-1000,1000,0 )
coef_moreThanOneCar = \
Beta('coef_moreThanOneCar',0.53301,-1000,1000,0 )
coef_moreThanOneBike = \
Beta('coef_moreThanOneBike',-0.277122,-1000,1000,0 )
coef_individualHouse = \
Beta('coef_individualHouse',-0.0885649,-1000,1000,0 )
coef_male = Beta('coef_male',0.0663476,-1000,1000,0 )
coef_haveChildren = Beta('coef_haveChildren',-0.0376042,-1000,1000,0 )
coef_highEducation = Beta('coef_highEducation',-0.246687,-1000,1000,0 )
### Latent variable: structural equation
# Note that the expression must be on a single line. In order to
# write it across several lines, each line must terminate with
# the \ symbol
CARLOVERS = \
coef_intercept +\
coef_age_65_more * age_65_more +\
coef_ContIncome_0_4000 * ContIncome_0_4000 +\
coef_ContIncome_4000_6000 * ContIncome_4000_6000 +\
coef_ContIncome_6000_8000 * ContIncome_6000_8000 +\
coef_ContIncome_8000_10000 * ContIncome_8000_10000 +\
coef_ContIncome_10000_more * ContIncome_10000_more +\
coef_moreThanOneCar * moreThanOneCar +\
coef_moreThanOneBike * moreThanOneBike +\
coef_individualHouse * individualHouse +\
coef_male * male +\
coef_haveChildren * haveChildren +\
coef_haveGA * haveGA +\
coef_highEducation * highEducation
### Measurement equations
INTER_Envir01 = Beta('INTER_Envir01',0,-10000,10000,1)
INTER_Envir02 = Beta('INTER_Envir02',0.348654,-10000,10000,0 )
INTER_Envir03 = Beta('INTER_Envir03',-0.309023,-10000,10000,0 )
INTER_Mobil11 = Beta('INTER_Mobil11',0.337726,-10000,10000,0 )
INTER_Mobil14 = Beta('INTER_Mobil14',-0.130563,-10000,10000,0 )
INTER_Mobil16 = Beta('INTER_Mobil16',0.128293,-10000,10000,0 )
INTER_Mobil17 = Beta('INTER_Mobil17',0.145876,-10000,10000,0 )
B_Envir01_F1 = Beta('B_Envir01_F1',-1,-10000,10000,1)
B_Envir02_F1 = Beta('B_Envir02_F1',-0.431461,-10000,10000,0 )
B_Envir03_F1 = Beta('B_Envir03_F1',0.565903,-10000,10000,0 )
B_Mobil11_F1 = Beta('B_Mobil11_F1',0.483958,-10000,10000,0 )
B_Mobil14_F1 = Beta('B_Mobil14_F1',0.58221,-10000,10000,0 )
B_Mobil16_F1 = Beta('B_Mobil16_F1',0.463139,-10000,10000,0 )
B_Mobil17_F1 = Beta('B_Mobil17_F1',0.368257,-10000,10000,0 )
MODEL_Envir01 = INTER_Envir01 + B_Envir01_F1 * CARLOVERS
MODEL_Envir02 = INTER_Envir02 + B_Envir02_F1 * CARLOVERS
MODEL_Envir03 = INTER_Envir03 + B_Envir03_F1 * CARLOVERS
MODEL_Mobil11 = INTER_Mobil11 + B_Mobil11_F1 * CARLOVERS
MODEL_Mobil14 = INTER_Mobil14 + B_Mobil14_F1 * CARLOVERS
MODEL_Mobil16 = INTER_Mobil16 + B_Mobil16_F1 * CARLOVERS
MODEL_Mobil17 = INTER_Mobil17 + B_Mobil17_F1 * CARLOVERS
SIGMA_STAR_Envir01 = Beta('SIGMA_STAR_Envir01',1,-10000,10000,1)
SIGMA_STAR_Envir02 = Beta('SIGMA_STAR_Envir02',0.767063,-10000,10000,0 )
SIGMA_STAR_Envir03 = Beta('SIGMA_STAR_Envir03',0.717835,-10000,10000,0 )
SIGMA_STAR_Mobil11 = Beta('SIGMA_STAR_Mobil11',0.783358,-10000,10000,0 )
SIGMA_STAR_Mobil14 = Beta('SIGMA_STAR_Mobil14',0.688264,-10000,10000,0 )
SIGMA_STAR_Mobil16 = Beta('SIGMA_STAR_Mobil16',0.754419,-10000,10000,0 )
SIGMA_STAR_Mobil17 = Beta('SIGMA_STAR_Mobil17',0.760104,-10000,10000,0 )
delta_1 = Beta('delta_1',0.251983,0,10,0 )
delta_2 = Beta('delta_2',0.759208,0,10,0 )
tau_1 = -delta_1 - delta_2
tau_2 = -delta_1
tau_3 = delta_1
tau_4 = delta_1 + delta_2
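# Symmetric ordered-probit thresholds: the four cut points are mirrored around
# zero, and each indicator probability below is
#   Phi((tau_k - MODEL) / SIGMA_STAR) - Phi((tau_{k-1} - MODEL) / SIGMA_STAR)
# with the extreme categories taking the open tails.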
Envir01_tau_1 = (tau_1-MODEL_Envir01) / SIGMA_STAR_Envir01
Envir01_tau_2 = (tau_2-MODEL_Envir01) / SIGMA_STAR_Envir01
Envir01_tau_3 = (tau_3-MODEL_Envir01) / SIGMA_STAR_Envir01
Envir01_tau_4 = (tau_4-MODEL_Envir01) / SIGMA_STAR_Envir01
IndEnvir01 = {
1: bioNormalCdf(Envir01_tau_1),
2: bioNormalCdf(Envir01_tau_2)-bioNormalCdf(Envir01_tau_1),
3: bioNormalCdf(Envir01_tau_3)-bioNormalCdf(Envir01_tau_2),
4: bioNormalCdf(Envir01_tau_4)-bioNormalCdf(Envir01_tau_3),
5: 1-bioNormalCdf(Envir01_tau_4),
6: 1.0,
-1: 1.0,
-2: 1.0
}
P_Envir01 = Elem(IndEnvir01, Envir01)
Envir02_tau_1 = (tau_1-MODEL_Envir02) / SIGMA_STAR_Envir02
Envir02_tau_2 = (tau_2-MODEL_Envir02) / SIGMA_STAR_Envir02
Envir02_tau_3 = (tau_3-MODEL_Envir02) / SIGMA_STAR_Envir02
Envir02_tau_4 = (tau_4-MODEL_Envir02) / SIGMA_STAR_Envir02
IndEnvir02 = {
1: bioNormalCdf(Envir02_tau_1),
2: bioNormalCdf(Envir02_tau_2)-bioNormalCdf(Envir02_tau_1),
3: bioNormalCdf(Envir02_tau_3)-bioNormalCdf(Envir02_tau_2),
4: bioNormalCdf(Envir02_tau_4)-bioNormalCdf(Envir02_tau_3),
5: 1-bioNormalCdf(Envir02_tau_4),
6: 1.0,
-1: 1.0,
-2: 1.0
}
P_Envir02 = Elem(IndEnvir02, Envir02)
Envir03_tau_1 = (tau_1-MODEL_Envir03) / SIGMA_STAR_Envir03
Envir03_tau_2 = (tau_2-MODEL_Envir03) / SIGMA_STAR_Envir03
Envir03_tau_3 = (tau_3-MODEL_Envir03) / SIGMA_STAR_Envir03
Envir03_tau_4 = (tau_4-MODEL_Envir03) / SIGMA_STAR_Envir03
IndEnvir03 = {
1: bioNormalCdf(Envir03_tau_1),
2: bioNormalCdf(Envir03_tau_2)-bioNormalCdf(Envir03_tau_1),
3: bioNormalCdf(Envir03_tau_3)-bioNormalCdf(Envir03_tau_2),
4: bioNormalCdf(Envir03_tau_4)-bioNormalCdf(Envir03_tau_3),
5: 1-bioNormalCdf(Envir03_tau_4),
6: 1.0,
-1: 1.0,
-2: 1.0
}
P_Envir03 = Elem(IndEnvir03, Envir03)
Mobil11_tau_1 = (tau_1-MODEL_Mobil11) / SIGMA_STAR_Mobil11
Mobil11_tau_2 = (tau_2-MODEL_Mobil11) / SIGMA_STAR_Mobil11
Mobil11_tau_3 = (tau_3-MODEL_Mobil11) / SIGMA_STAR_Mobil11
Mobil11_tau_4 = (tau_4-MODEL_Mobil11) / SIGMA_STAR_Mobil11
IndMobil11 = {
1: bioNormalCdf(Mobil11_tau_1),
2: bioNormalCdf(Mobil11_tau_2)-bioNormalCdf(Mobil11_tau_1),
3: bioNormalCdf(Mobil11_tau_3)-bioNormalCdf(Mobil11_tau_2),
4: bioNormalCdf(Mobil11_tau_4)-bioNormalCdf(Mobil11_tau_3),
5: 1-bioNormalCdf(Mobil11_tau_4),
6: 1.0,
-1: 1.0,
-2: 1.0
}
P_Mobil11 = Elem(IndMobil11, Mobil11)
Mobil14_tau_1 = (tau_1-MODEL_Mobil14) / SIGMA_STAR_Mobil14
Mobil14_tau_2 = (tau_2-MODEL_Mobil14) / SIGMA_STAR_Mobil14
Mobil14_tau_3 = (tau_3-MODEL_Mobil14) / SIGMA_STAR_Mobil14
Mobil14_tau_4 = (tau_4-MODEL_Mobil14) / SIGMA_STAR_Mobil14
IndMobil14 = {
1: bioNormalCdf(Mobil14_tau_1),
2: bioNormalCdf(Mobil14_tau_2)-bioNormalCdf(Mobil14_tau_1),
3: bioNormalCdf(Mobil14_tau_3)-bioNormalCdf(Mobil14_tau_2),
4: bioNormalCdf(Mobil14_tau_4)-bioNormalCdf(Mobil14_tau_3),
5: 1-bioNormalCdf(Mobil14_tau_4),
6: 1.0,
-1: 1.0,
-2: 1.0
}
P_Mobil14 = Elem(IndMobil14, Mobil14)
Mobil16_tau_1 = (tau_1-MODEL_Mobil16) / SIGMA_STAR_Mobil16
Mobil16_tau_2 = (tau_2-MODEL_Mobil16) / SIGMA_STAR_Mobil16
Mobil16_tau_3 = (tau_3-MODEL_Mobil16) / SIGMA_STAR_Mobil16
Mobil16_tau_4 = (tau_4-MODEL_Mobil16) / SIGMA_STAR_Mobil16
IndMobil16 = {
1: bioNormalCdf(Mobil16_tau_1),
2: bioNormalCdf(Mobil16_tau_2)-bioNormalCdf(Mobil16_tau_1),
3: bioNormalCdf(Mobil16_tau_3)-bioNormalCdf(Mobil16_tau_2),
4: bioNormalCdf(Mobil16_tau_4)-bioNormalCdf(Mobil16_tau_3),
5: 1-bioNormalCdf(Mobil16_tau_4),
6: 1.0,
-1: 1.0,
-2: 1.0
}
P_Mobil16 = Elem(IndMobil16, Mobil16)
Mobil17_tau_1 = (tau_1-MODEL_Mobil17) / SIGMA_STAR_Mobil17
Mobil17_tau_2 = (tau_2-MODEL_Mobil17) / SIGMA_STAR_Mobil17
Mobil17_tau_3 = (tau_3-MODEL_Mobil17) / SIGMA_STAR_Mobil17
Mobil17_tau_4 = (tau_4-MODEL_Mobil17) / SIGMA_STAR_Mobil17
IndMobil17 = {
1: bioNormalCdf(Mobil17_tau_1),
2: bioNormalCdf(Mobil17_tau_2)-bioNormalCdf(Mobil17_tau_1),
3: bioNormalCdf(Mobil17_tau_3)-bioNormalCdf(Mobil17_tau_2),
4: bioNormalCdf(Mobil17_tau_4)-bioNormalCdf(Mobil17_tau_3),
5: 1-bioNormalCdf(Mobil17_tau_4),
6: 1.0,
-1: 1.0,
-2: 1.0
}
P_Mobil17 = Elem(IndMobil17, Mobil17)
loglike = log(P_Envir01) + \
log(P_Envir02) + \
log(P_Envir03) + \
log(P_Mobil11) + \
log(P_Mobil14) + \
log(P_Mobil16) + \
log(P_Mobil17)
BIOGEME_OBJECT.EXCLUDE = (Choice == -1 )
# Defines an iterator on the data
rowIterator('obsIter')
BIOGEME_OBJECT.ESTIMATE = Sum(loglike,'obsIter')
| LiTrans/ICLV-RBM | biogeme/02oneLatentOrdered.py | 02oneLatentOrdered.py | py | 9,683 | python | en | code | 0 | github-code | 6 |
9721588822 |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
ICONSIZE = Gtk.IconSize.SMALL_TOOLBAR
class controlBar(Gtk.HeaderBar):
def __init__(self):
Gtk.HeaderBar.__init__(self)
self.set_show_close_button(True)
self.props.title = "PyFlowChart"
self.info_box = Gtk.Box(spacing=10,orientation=Gtk.Orientation.HORIZONTAL)
self.file_button = Gtk.Button.new_from_icon_name("open-menu-symbolic", ICONSIZE)
self.settings_button = Gtk.Button.new_from_icon_name("preferences-system", ICONSIZE)
self.help_button = Gtk.Button.new_from_icon_name("help-about", ICONSIZE)
self.main_menu = self.init_main_menu()
self.settings_menu = self.init_settings_menu()
self.help_menu = self.init_help_menu()
#self.init_edit_menu()
self.file_button.connect('clicked', self.file_clicked)
self.settings_button.connect('clicked', self.settings_clicked)
self.help_button.connect('clicked', self.help_clicked)
self.info_box.add(self.settings_button)
self.info_box.add(self.help_button)
self.pack_start(self.file_button)
self.pack_end(self.info_box)
self.buttons = []
# self.populate_buttons()
self.show_all()
def init_main_menu(self):
main_menu = Gtk.Menu()
self.new_button = Gtk.MenuItem.new_with_label('New')
self.open_button = Gtk.MenuItem.new_with_label('Open')
self.open_stock_button = Gtk.MenuItem.new_with_label('Import Stock')
view_button = Gtk.MenuItem.new_with_label('View')
self.view_menu = Gtk.Menu()
self.viewer_button = Gtk.MenuItem.new_with_label('Viewer')
self.builder_button = Gtk.MenuItem.new_with_label('Builder')
self.view_menu.append(self.viewer_button)
self.view_menu.append(self.builder_button)
self.view_menu.show_all()
view_button.set_submenu(self.view_menu)
self.save_button = Gtk.MenuItem.new_with_label('Save')
self.save_as_button = Gtk.MenuItem.new_with_label('Save As...')
self.quit_button = Gtk.MenuItem.new_with_label('Quit')
main_menu.append(self.new_button)
main_menu.append(self.open_button)
main_menu.append(self.open_stock_button)
main_menu.append(Gtk.SeparatorMenuItem())
main_menu.append(view_button)
main_menu.append(Gtk.SeparatorMenuItem())
main_menu.append(self.save_button)
main_menu.append(self.save_as_button)
main_menu.append(Gtk.SeparatorMenuItem())
main_menu.append(self.quit_button)
main_menu.show_all()
return main_menu
def init_settings_menu(self):
settings_menu = Gtk.Menu()
self.preferences_button = Gtk.MenuItem.new_with_label('Preferences')
settings_menu.append(self.preferences_button)
settings_menu.show_all()
return settings_menu
def init_help_menu(self):
help_menu = Gtk.Menu()
self.app_help_button = Gtk.MenuItem.new_with_label('Help')
self.about_button = Gtk.MenuItem.new_with_label('About')
help_menu.append(self.about_button)
help_menu.append(self.app_help_button)
help_menu.show_all()
return help_menu
def file_clicked(self, widget):
self.main_menu.popup( None, None, None, None, 0, Gtk.get_current_event_time())
def settings_clicked(self, widget):
self.settings_menu.popup( None, None, None, None, 0, Gtk.get_current_event_time())
def help_clicked(self, widget):
self.help_menu.popup( None, None, None, None, 0, Gtk.get_current_event_time())
| steelcowboy/PyFlowChart | pyflowchart/interface/control_bar.py | control_bar.py | py | 3,719 | python | en | code | 5 | github-code | 6 |
1128800769 |
def findMaxAverage(nums: list[int], k: int) -> float:
max_sum = 0
for i in range(k):
max_sum += nums[i]
curr_sum = max_sum
for i in range(1,len(nums)-k+1):
curr_sum = curr_sum - nums[i-1] + nums[i+k-1]
if curr_sum > max_sum:
max_sum = curr_sum
return max_sum / k
print(findMaxAverage([5], 1))
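# Sanity checks (LeetCode 643 examples):
# findMaxAverage([1, 12, -5, -6, 50, 3], 4) -> 12.75
# findMaxAverage([5], 1) -> 5.0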
| SleepingRedPanda/leetcode | 643.py | 643.py | py | 350 | python | en | code | 0 | github-code | 6 |
7807070088 |
import logging
from copy import deepcopy
from itertools import permutations
import numpy as np
from scipy.special import softmax
from scipy.stats import entropy
def true_entropy(team_generator, batch_predict, num_items: int, num_selections: int):
P_A = np.zeros((num_selections, num_items)) # basically P(A^i_j)
past_probs = []
for i in range(num_selections):
# separately calculate P(A^i_j)
        # all ordered selections (permutations) of team size i + 1
sets = list(permutations(range(num_items), i + 1))
teams = [team_generator() for x in range(len(sets))]
for j, s in enumerate(sets):
for item in s:
teams[j].mark(item)
# put them together for a batch update
vals = batch_predict(teams)
        # reshape so rows group teams sharing a prefix (p(last_element) then sums to 1)
struct_vals = softmax(vals.reshape(-1, num_items - i), axis=1)
vals = struct_vals.reshape(-1)
        # fold in earlier probabilities, since P(A^i_j) conditions on the preceding picks
P = np.zeros((num_items,) * (i + 1))
for j, team in enumerate(teams):
prefix_p = 1
for k in range(len(team)):
pp = past_probs[k - 1][tuple(team[z] for z in range(k))] if k > 0 else 1 # to help find the prefix
prefix_p *= pp
P[tuple(team[z] for z in range(len(team)))] += vals[j]
P_A[i, team[-1]] += prefix_p * vals[j]
# print(team.pkms, P_A[i, team[-1]], prefix_p, vals[j])
        past_probs.append(P)  # keep P indexed by team tuple so the next iteration can look up prefixes
# print(P_A, np.sum(P_A, axis=1))
# print((np.sum(P_A, axis=0)))
# P_A = np.sum(P_A, axis = 0)
"""
P_X = np.zeros((num_items))
for i in range(num_selections):
accumulated_P = np.ones((num_items))
for j in range(num_selections):
if i != j:
accumulated_P *= (np.ones((num_items)) - P_A[j])
P_X += P_A[i] * accumulated_P
"""
P_X = np.sum(P_A, axis=0) / num_selections
entropy_loss = -entropy(P_X)
logging.info("P_A=%s\tEntropy=%s\t", str(list(P_X)), str(entropy_loss))
return entropy_loss
def sample_based_entropy(team_generator, batch_predict, num_items: int, num_selections: int, num_samples: int):
counts = np.zeros(num_items)
for i in range(num_samples):
team = team_generator()
for j in range(num_selections):
tmp_teams = [deepcopy(team) for z in range(num_items)]
items = [z for z in range(num_items)]
for k, item in enumerate(items):
tmp_teams[k].mark(item)
vals = (batch_predict(tmp_teams))
for k in range(len(team) - 1):
vals[team[k]] = float("-inf")
p = softmax(vals)
selection = np.random.choice(range(num_items), p=p)
team.mark(selection)
counts[selection] += 1
P_A = counts / sum(counts)
entropy_loss = -entropy(P_A)
logging.info("P_A=%s\tEntropy=%s\t", str(list(P_A)), str(entropy_loss))
return entropy_loss
def lower_bound_entropy(team_generator, batch_predict, num_items: int, num_selections: int):
all_teams = [team_generator() for x in range(num_items)]
for i in range(num_items):
all_teams[i].mark(i) # just mark one element
P_A = softmax(batch_predict(all_teams))
entropy_loss = -entropy(P_A)
logging.info("P_A=%s\tEntropy=%s\t", str(list(P_A)), str(entropy_loss))
return entropy_loss
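# A minimal smoke test with toy stand-ins (hypothetical; real team objects and
# predictors come from the surrounding codebase). With identical scores the
# softmax is uniform, so lower_bound_entropy should return -log(num_items).
class _ToyTeam(list):
    def mark(self, item):
        self.append(item)
def _toy_predict(teams):
    return np.zeros(len(teams))  # identical scores -> uniform softmax
# lower_bound_entropy(_ToyTeam, _toy_predict, num_items=4, num_selections=1)
# -> approximately -1.386 (= -ln 4)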
| nianticlabs/metagame-balance | src/metagame_balance/entropy_fns.py | entropy_fns.py | py | 3,539 | python | en | code | 3 | github-code | 6 |
16106061785 |
def _extend_pre_ranges(df, upstream:int=0, downstream:int=0, start:str="Start", end:str="End", strand:str="Strand"):
strand_rm = False
if strand not in df.columns:
strand_rm = True
df[strand] = "+"
df.loc[df[strand] == "+", start] -= upstream
df.loc[df[strand] == "-", start] -= downstream
df.loc[df[strand] == "+", end] += downstream
df.loc[df[strand] == "-", end] += upstream
if strand_rm:
del df[strand]
return df
def _extend_ranges(gf,
min_upstream:int=1000,
max_upstream:int=100000,
min_downstream:int=1000,
max_downstream:int=100000,
gene_upstream:int=5000,
gene_downstream:int=0,
gene_scale_factor:float=5.):
import numpy as np
gf["gene_length"] = gf["End"] - gf["Start"]
### In case of duplicated indices, sum lengths across indices
gb = gf.groupby(level=0).agg(gene_length=("gene_length", sum))
gf["gene_length"] = gb.loc[gf.index.values, "gene_length"].values
### scale by inverse gene length
gs = 1/gf["gene_length"].values
    ### min-max scale to [0, 1]; the epsilon keeps the smallest value positive
    gs = (gs - np.min(gs)) / (np.max(gs) - np.min(gs)) + 1e-300
    ### map onto the [1, gene_scale_factor] range on the log scale:
    ### log_gene_scale = log1p((gene_scale_factor - 1) * gs) = log(1 + (gsf - 1) * gs)
    gf["log_gene_scale"] = np.log1p((gene_scale_factor - 1) * gs)
#gf.index = gf["gene_id"].values
gf = _extend_pre_ranges(gf, upstream=gene_upstream, downstream=gene_downstream)
gf["MinStart"] = gf["Start"].values
gf["MinEnd"] = gf["End"].values
gf = _extend_pre_ranges(gf, upstream=min_upstream, downstream=min_downstream, start="MinStart", end="MinEnd")
gf["interval"] = ["%s:%d-%d" % (chrom, start, end) for chrom, start, end in zip(gf["Chromosome"], gf["Start"], gf["End"])]
gf["min_interval"] = ["%s:%d-%d" % (chrom, start, end) for chrom, start, end in zip(gf["Chromosome"], gf["MinStart"], gf["MinEnd"])]
return gf
def estimate_features_archr(adata, feature_df,
min_upstream:int=1000,
max_upstream:int=100000,
min_downstream:int=1000,
max_downstream:int=100000,
gene_upstream:int=5000,
gene_downstream:int=0,
target_sum:int=None,
gene_scale_factor:float=5.,
peak_column:str=None, ## If not provided, will use peak index
feature_column:str=None,
                            var_column_tolerance:float=0.999, ### tolerance for keeping a .var column when duplicated feature indices disagree
distal:bool=True, ### Use nearest gene to a peak if unassigned
log1p:bool=False,
save_raw:bool=False,
layer:str=None):
import numpy as np
import pandas as pd
import scipy.sparse
import pyranges
import anndata
import scanpy as sc
from .timer import template
sw = template()
if not isinstance(feature_df, pd.DataFrame):
raise ValueError("Feature_df is not a dataframe")
if not np.all(np.isin(["Chromosome", "Start", "End"], feature_df.columns)):
raise ValueError("Feature_df does not have ranges")
if feature_column is not None:
feature_df.index = feature_df[feature_column].values
with sw("Extending ranges"):
gf = _extend_ranges(feature_df, min_upstream=min_upstream, min_downstream=min_downstream,
max_upstream=max_upstream, max_downstream=max_downstream,
gene_upstream=gene_upstream, gene_downstream=gene_downstream,
gene_scale_factor=gene_scale_factor)
gr = pyranges.from_dict({"Chromosome": gf["Chromosome"],
"Start": gf["Start"],
"End": gf["End"],
"feature_index": gf.index.values,
"feature_interval": gf["interval"].values})
mingr = pyranges.from_dict({"Chromosome": gf["Chromosome"],
"Start": gf["MinStart"],
"End": gf["MinEnd"],
"feature_index": gf.index.values,
"feature_interval": gf["interval"].values})
##
## Now, get peak ranges (pr) from peak frame (pf)
##
with sw("Extracting peak ranges"):
if peak_column is None:
pstr = adata.var_names.values
else:
pstr = adata.var[peak_column].values
pf = pd.DataFrame([x.replace(":", "-", 1).split("-") for x in pstr], columns=["Chromosome", "Start", "End"], index=adata.var_names)
pr = pyranges.from_dict({"Chromosome": pf["Chromosome"], "Start": pf["Start"], "End": pf["End"], "peak_name": pf.index.values})
with sw("Calculating overlaps"):
iif = gf.drop_duplicates("interval")
iif.index = iif["interval"]
## Once peak ranges are gathered, find intersecting gene bodies:
inter_df = pr.join(gr).df.loc[:, ["peak_name", "feature_index", "feature_interval"]]
inter_df["Distance"] = 0
## Then, find genes with minimum upstream/downstream distance away
min_df = pr.join(mingr).df.loc[:, ["peak_name", "feature_index", "feature_interval"]]
## diff. is accurate, unless overlapping intervals. Do not need to worry, as duplicates from inter_df will take care
diff = pf.loc[min_df["peak_name"].values, ["Start", "Start", "End", "End"]].values.astype(int) - iif.loc[min_df["feature_interval"].values, ["Start", "End", "Start", "End"]].values.astype(int)
min_df["Distance"] = np.abs(diff).min(1) + 1
## Finally, find distal. Only need nearest gene
if distal:
distance_df = pr.nearest(gr).df.loc[:, ["peak_name", "feature_index", "feature_interval", "Distance"]]
## Concat such that 1) prioritized intersections, then 2) minimum distance away, then 3) distal
df = pd.concat([inter_df, min_df, distance_df]).drop_duplicates(["peak_name", "feature_index"])
else:
df = pd.concat([inter_df, min_df]).drop_duplicates(["peak_name", "feature_index"])
df["weight"] = np.exp(-1 - np.abs(df["Distance"]) / 5000. + iif.loc[df["feature_interval"].values, "log_gene_scale"].values)
with sw("Calculating accessibility"):
if gf.index.duplicated().sum() > 0:
### Get columns that are the same across repeated indices
nf = gf.groupby(level=0).nunique()
gf = gf.loc[~gf.index.duplicated(keep="first"), nf.columns[(nf==1).mean() >= var_column_tolerance]]
S = scipy.sparse.csr_matrix((df["weight"].values,
(pf.index.get_indexer(df["peak_name"].values),
gf.index.get_indexer(df["feature_index"].values))),
shape=(pf.shape[0], gf.shape[0]))
if layer is not None and layer in adata.layers:
X = adata.layers[layer]
else:
X = adata.X
gdata = anndata.AnnData(X.dot(S), obs=adata.obs, var=gf, dtype=np.float32, obsm=adata.obsm, obsp=adata.obsp,
uns={k: v for k, v in adata.uns.items() if k in ["neighbors", "files", "lsi", "pca", "umap", "leiden"]})
if save_raw:
gdata.layers["raw"] = gdata.X.copy()
if target_sum is not None and target_sum > 0:
sc.pp.normalize_total(gdata, target_sum=target_sum)
else:
print("Using median normalization")
sc.pp.normalize_total(gdata)
if log1p:
sc.pp.log1p(gdata)
return gdata
def estimate_genes_archr(adata, gtf:str,
min_upstream:int=1000,
max_upstream:int=100000,
min_downstream:int=1000,
max_downstream:int=100000,
gene_upstream:int=5000,
gene_downstream:int=0,
target_sum:int=None,
gene_scale_factor:float=5.,
peak_column:str=None, ## If not provided, will use peak index
feature_column:str="gene_id", ### If not provided, will use feature index
log1p:bool=True,
distal:bool=True,
save_raw:bool=False,
layer:str=None):
import numpy as np
import pandas as pd
import scipy.sparse
import pyranges
import anndata
import scanpy as sc
from .timer import template
sw = template()
with sw("Reading GTF"):
gf = pyranges.read_gtf(gtf).df
gf = gf.loc[gf["Feature"] == "gene", :]
if feature_column in gf.columns:
gf.index = gf[feature_column].values.astype(str)
gdata = estimate_features_archr(adata, feature_df=gf,
min_upstream=min_upstream, min_downstream=min_downstream,
max_upstream=max_upstream, max_downstream=max_downstream,
gene_upstream=gene_upstream, gene_downstream=gene_downstream,
target_sum=target_sum, gene_scale_factor=gene_scale_factor,
peak_column=peak_column,
feature_column=feature_column,
log1p=log1p, layer=layer, save_raw=save_raw)
gdata.var = gdata.var.loc[:, ["gene_id", "gene_name"]]
gdata.var_names = gdata.var["gene_name"].values
gdata.var_names_make_unique()
del gdata.var["gene_name"]
gdata.var.columns = ["gene_ids"]
add_gene_length(gdata.var, gtf)
return gdata
def get_tss(tss:str):
import pandas as pd
tss = pd.read_csv(tss, sep="\t", header=None)
tss.columns = ["Chromosome", "Start", "End", "gene_id", "score", "strand"]
df = tss.groupby(["Chromosome", "gene_id", "strand"]).agg(left=("Start", "min"),
right=("End", "max")).reset_index()
df["interval"] = df["Chromosome"] + ":" + df["left"].astype(str) + "-" + df["right"].astype(str)
df.index = df["gene_id"].values
return df
def add_interval(var, tss:str, inplace=True):
tf = get_tss(tss)
interval = [tf["interval"].get(g, "NA") for g in var["gene_ids"]]
if inplace:
var["interval"] = interval
else:
import pandas as pd
return pd.Series(interval, index=var.index, name="interval")
def add_gene_length(var, gtf:str=None, inplace=True):
import pandas as pd
import pyranges
from .timer import template
sw = template()
with sw("Reading GTF"):
gf = pyranges.read_gtf(gtf).df
gf = gf.loc[gf["Feature"] == "gene",:]
gf["gene_length"] = gf["End"] - gf["Start"]
gf.index = gf["gene_id"].values
gl = [gf["gene_length"].get(g, -1) for g in var["gene_ids"]]
gs = [gf["Strand"].get(g, "*") for g in var["gene_ids"]]
if not inplace:
var = var.copy()
var["gene_length"] = gl
var["strand"] = gs
if not inplace:
return var.loc[:, ["gene_length", "strand"]]
def add_gene_info(var, gene_info:str=None, inplace=True):
"""Add a STAR geneInfo.tab file to .var"""
import pandas as pd
df = pd.read_csv(gene_info, sep="\t", skiprows=[0], header=None)
df.index = df[0].values
gi = [df[2].get(g, "NA") for g in var["gene_ids"]]
if inplace:
var["gene_type"] = gi
else:
return pd.Series(gi, index=var.index, name="gene_type")
| KellisLab/benj | benj/gene_estimation.py | gene_estimation.py | py | 11,891 | python | en | code | 2 | github-code | 6 |
19766046195 |
from util import get_options, check_input
import random
def game_round(film_data, target_type, question_type, question):
target = random.choice(film_data)
options = get_options(film_data, target, target_type)
print(f"{question}\n>>> {target[question_type]}")
print(f"1: {options[0]}\n"
f"2: {options[1]}\n"
f"3: {options[2]}")
attempt = check_input()
if options[attempt-1] == target[target_type]:
print("Correct! +1\n")
return True
else:
print(f"Incorrect. Correct answer: {target[target_type]}.\n")
return False
def interpret_round(outcome, points):
    # award one point for a correct answer
    return points + 1 if outcome else points
| praerie/cinequiz | quiz.py | quiz.py | py | 728 | python | en | code | 1 | github-code | 6 |
70267283389 |
import epyk as pk
from epyk.mocks import urls as data_urls
# Create a basic report object
page = pk.Page()
page.headers.dev()
# retrieve some sample CSV data from a mock URL
data_rest = page.py.requests.csv(data_urls.BLOG_OBJECT)
# create a json viewer object
j = page.ui.json(data_rest, height=(100, 'px'))
# change the default options
j.options.open = False
j.options.hoverPreviewEnabled = True
# add a button for interactivity
page.ui.button("Click").click([
j.js.openAtDepth(0),
j.build({"test": 'ok'})
])
| epykure/epyk-templates | locals/data/json_viewer.py | json_viewer.py | py | 508 | python | en | code | 17 | github-code | 6 |
9766823930 |
from collections import *
import numpy as np
from common.session import AdventSession
session = AdventSession(day=20, year=2017)
data = session.data.strip()
data = data.split('\n')
p1, p2 = 0, 0
class Particle:
def __init__(self, p, v, a, _id):
self.p = np.array(p)
self.v = np.array(v)
self.a = np.array(a)
self.p_sum = np.abs(p).sum()
self.v_sum = np.abs(v).sum()
self.a_sum = np.abs(a).sum()
self.id = _id
def update(self):
self.v += self.a
self.p += self.v
particles = []
for i, line in enumerate(data):
line = line.replace('<', '(').replace('>', ')')
particles.append(eval(f'Particle({line}, _id={i})'))
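# Part 1: over infinite time the particle with the smallest |acceleration|
# stays closest to the origin; ties break on |velocity|, then |position|.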
sorted_particles = sorted(particles, key=lambda p: (
p.a_sum, p.v_sum, p.p_sum))
p1 = sorted_particles[0].id
for _ in range(1000):
positions = defaultdict(list)
for p in particles:
positions[tuple(p.p)].append(p)
for parts in positions.values():
if len(parts) > 1:
for part in parts:
particles.remove(part)
for p in particles:
p.update()
p2 = len(particles)
session.submit(p1, part=1)
session.submit(p2, part=2)
| smartspot2/advent-of-code | 2017/day20.py | day20.py | py | 1,196 | python | en | code | 0 | github-code | 6 |
36659620035 |
# finding the right modules/packages to use is not easy
import os
import pyodbc
from numpy import genfromtxt
import pandas as pd
import sqlalchemy as sa # use sqlalchemy for truncating etc
from sqlalchemy import Column, Integer, Float, Date, String, BigInteger
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects import mssql
import urllib
import csv
import fnmatch2
from time import time
## DATABASE STUFF
# server + instance = DESKTOP-A6J6D7Q\LEROYB_INSTANCE
# database = Tracks
server_name='DESKTOP-A6J6D7Q\LEROYB_INSTANCE'
db_name='Tracks'
path = 'F:\export_6642035' # location of all the files related to STRAVA export
pathArchive = 'F:\export_6642035_archive' # this is the archive folder that will contain all the processed files
searchstring = '*' # we could limit the files to a particular string pattern, however in this case we want to go through all the files
# https://docs.sqlalchemy.org/en/13/dialects/mssql.html#connecting-to-pyodbc
# https://docs.sqlalchemy.org/en/13/core/connections.html
# https://www.pythonsheets.com/notes/python-sqlalchemy.html
# https://auth0.com/blog/sqlalchemy-orm-tutorial-for-python-developers/
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.truncate.html
params = urllib.parse.quote_plus('Driver={SQL Server};'
'Server='+server_name +';'
'Database='+db_name+';'
'Trusted_Connection=yes;')
engine = sa.create_engine("mssql+pyodbc:///?odbc_connect=%s" % params,fast_executemany=True, echo=True)
# https://chrisalbon.com/python/data_wrangling/pandas_dataframe_importing_csv/
def Load_Data(file_name):
#data = genfromtxt(file_name, delimiter=',', skip_header=1, converters={0: lambda s: str(s)})
data = pd.read_csv(file_name, skiprows=0)
return data.values.tolist()
Base = declarative_base()
BaseGPX = declarative_base()
class Activities(Base):
#Tell SQLAlchemy what the table name is and if there's any table-specific arguments it should know about
__tablename__ = 'Activities'
__table_args__ = {'schema':'STG'}
#tell SQLAlchemy the name of column and its attributes:
Activity_ID = Column(BigInteger, primary_key=True, nullable=False) #
Activity_Date = Column(Date)
Activity_Name = Column(String)
Activity_Type = Column(String)
Activity_Description = Column(String)
Elapsed_Time = Column(Integer)
Distance = Column(Float)
Commute = Column(String)
Activity_Gear = Column(String)
Filename = Column(String)
class GPXFiles(BaseGPX):
#Tell SQLAlchemy what the table name is and if there's any table-specific arguments it should know about
__tablename__ = 'GPXFiles'
__table_args__ = {'schema':'STG'}
#tell SQLAlchemy the name of column and its attributes:
GPXFile_ID = Column(BigInteger, primary_key=True, autoincrement=True, nullable=False) #
GPXFile_Name = Column(String, nullable=True)
GPXFile_Contents = Column(String, nullable=True)
#GPXFile_XMLContents = Column(String, nullable=True)
#GPXFile_Route_ID = Column(BigInteger, nullable=True)
# loop through files
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for file in f:
if fnmatch2.fnmatch2(file, searchstring):
if file.endswith('.gpx'):
print('gpxfile')
# go straight into the STG.GPXFiles table
#Create the session
#GPXFiles.__table__.drop(bind=engine)
                GPXFiles.__table__.create(bind=engine, checkfirst=True)  # create the table only if it does not already exist
session = sessionmaker(bind=engine)
#print(session)
session.configure(bind=engine)
s = session()
try:
data = Load_Data(os.path.join(r, file))
#print(data)
#for i in data:
#print(str(i[0])+'-'+str(i[1]))
record = GPXFiles(**{
#'GPXFile_ID' : i[0],
'GPXFile_Name' : file, #str(i[1]),
'GPXFile_Contents' : data
})
s.add(record) #Add all the records
s.commit() #Attempt to commit all the records
except Exception as e:
print(e)
s.rollback() #Rollback the changes on error
s.close() #Close the connection
continue
elif file.endswith('.gpx.gz'):
# print('a GZ gpxfile')
# unzip file first, then get the file into the STG.GPXFiles table
continue
elif file.endswith('.csv'):
# the only file we really need is the activities.csv file
# the columns are
if file == 'activities.csv':
# import it
### using pure panda
# insert CSV into the table
# https://stackoverflow.com/questions/31394998/using-sqlalchemy-to-load-csv-file-into-a-database
#Create the session
#Activities.__table__.drop(bind=engine)
                    Activities.__table__.create(bind=engine, checkfirst=True)  # create the table only if it does not already exist
session = sessionmaker(bind=engine)
print(session)
session.configure(bind=engine)
s = session()
try:
data = Load_Data(os.path.join(r, file))
#print(data)
for i in data:
#print(str(i[0])+'-'+str(i[1]))
record = Activities(**{
'Activity_ID' : i[0],
'Activity_Date' : i[1].replace(',', ''),
'Activity_Name' : str(i[2]),
'Activity_Type' : str(i[3]),
'Activity_Description' : str(i[4]),
'Elapsed_Time' : int(i[5]),
'Distance' : float(i[6].replace(',', '')),
'Commute' : str(i[7]),
'Activity_Gear' : str(i[8]),
'Filename' : str(i[9])
})
s.add(record) #Add all the records
s.commit() #Attempt to commit all the records
except Exception as e:
print(e)
s.rollback() #Rollback the changes on error
s.close() #Close the connection
else:
continue
# move file to archive folder at same level as source folder
# first check folder exist, if not, then create it
continue
#conn.close
# now that this is done, we need to run the stored procedures to get the data from the staging tables into the production conformed tables
# first bring the data from staging into the conformed area
# then run a bunch of procs to update some of the missing attributes
# don't forget to clean up in some way
| BertrandLeroy/GPXReader | ProcessStravaGPX.py | ProcessStravaGPX.py | py | 7,471 | python | en | code | 0 | github-code | 6 |
16149992177 |
# -*- coding: utf-8 -*-
__author__ = 'xl'
"""
__date__ = TIME: 2018/10/02 9:24 AM
describe: a set implemented with a binary search tree
"""
from bst import BinarySearchTree
import re
class MySet:
"""
利用二分搜索树实现集合
"""
def __init__(self):
self.bst = BinarySearchTree()
def add(self,e):
self.bst.add(e)
def remove(self,e):
self.bst.remove(e)
def contains(self, e):
return self.bst.contains(e)
def getsize(self):
return self.bst.size
    def isEmpty(self):
        return self.bst.size == 0
if __name__ == "__main__":
myset = MySet()
with open("Les Miserables.txt",'r') as f:
lines = f.readlines()
for line in lines:
line = line.decode('EUC-JP')
#print(line.strip())
line = line.strip().replace(',',' ').replace(';',' ').replace('"',' ').replace('.',' ').replace('?',' ')
line = re.sub("['!-]","",line)
words = line.strip().split(' ')
for word in words:
print(str(word).lower())
myset.add(str(word).lower())
print(myset.getsize())
#print(text)
# for line in txt:
# print(line.strip())
#print("type",type)
# for line in txt:
# print(line)
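# A quick usage sketch (assumes the underlying bst.add() ignores duplicates):
# s = MySet(); s.add("word"); s.add("word")
# s.getsize()  -> 1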
| xStone9527/algorithm | set_.py | set_.py | py | 1,354 | python | en | code | 0 | github-code | 6 |
36697678270 |
#Coded by: QyFashae
import os
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import HashingVectorizer, TfidfTransformer
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn import tree
# Paths to the directories containing the emails
s_ep = os.path.join("training_data_for_ml_emails", "training_data_ML00")
v_ep = os.path.join("training_data_for_ml_emails", "training_data_ML01")
# List of file directories and corresponding labels
l_fd = [(s_ep, 0), (v_ep, 1)]
e_cs = []
l_bs = []
# Function to read email content and labels
def cve_sh_trfm(s_ep, v_ep, l_fd, e_cs, l_bs):
for cfs, lbs in l_fd:
files = os.listdir(cfs)
for file in files:
file_path = os.path.join(cfs, file)
try:
with open(file_path, "r") as current_file:
eml_ctt = current_file.read().replace("\n", "")
eml_ctt = str(eml_ctt)
e_cs.append(eml_ctt)
l_bs.append(lbs)
            except Exception:
                pass  # skip files that cannot be read or decoded
# Splitting the dataset and training the model
def vary_trmd(cve_sh_trfm, s_ep, v_ep, l_fd, e_cs, l_bs):
x_train, x_test, y_train, y_test = train_test_split(
e_cs, l_bs, test_size=0.4, random_state=17
)
    nlp_followed_by_dt = Pipeline(
        [
            # input="content" means raw strings are passed to the vectorizer
            # directly (the original input="eml_ctt" is not a valid mode)
            ("vect", HashingVectorizer(input="content", ngram_range=(1, 4))),
            ("tfidf", TfidfTransformer(use_idf=True)),
            ("dt", tree.DecisionTreeClassifier(class_weight="balanced")),
        ]
    )
nlp_followed_by_dt.fit(x_train, y_train)
# Predicting and evaluating the model
y_test_predict = nlp_followed_by_dt.predict(x_test)
accuracy = accuracy_score(y_test, y_test_predict)
confusion = confusion_matrix(y_test, y_test_predict)
# Writing accuracy and confusion matrix to files
with open("accuracy_score.txt", "w") as acc_file:
acc_file.write("Accuracy: " + str(accuracy))
with open("confusion_matrix.txt", "w") as conf_file:
conf_file.write("Confusion Matrix:\n" + str(confusion))
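# Example wiring (hypothetical; the directories above must exist and contain
# plain-text emails labeled 0 = training_data_ML00, 1 = training_data_ML01):
# cve_sh_trfm(s_ep, v_ep, l_fd, e_cs, l_bs)
# vary_trmd(cve_sh_trfm, s_ep, v_ep, l_fd, e_cs, l_bs)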
| Qyfashae/ML_IDS_EmailSec_Spam | smtp_assasin.py | smtp_assasin.py | py | 2,150 | python | en | code | 1 | github-code | 6 |
27065743114 |
from abc import ABC
from car import Car
class Tires(Car, ABC):
    def __init__(self, last_service_date, left_front_tire_pressure, right_front_tire_pressure, left_rear_tire_pressure, right_rear_tire_pressure):
        super().__init__(last_service_date)
        self.left_front_tire = left_front_tire_pressure
        self.right_front_tire = right_front_tire_pressure
        self.left_rear_tire = left_rear_tire_pressure
        self.right_rear_tire = right_rear_tire_pressure
    # returns True when any tire pressure has dropped below 32
    def tire_should_be_serviced(self):
        return any(pressure < 32 for pressure in (self.left_front_tire,
                                                  self.right_front_tire,
                                                  self.left_rear_tire,
                                                  self.right_rear_tire))
| ak55m/Lyft-Forage | tires/all_tires.py | all_tires.py | py | 888 | python | en | code | 0 | github-code | 6 |
27259428600 |
"""We are the captains of our ships, an we stay 'till the end. We see our stories through.
"""
"""947. Most Stones Removed with Same Row or Column
"""
class Solution:
def removeStones(self, stones):
visited = {tuple(stone): False for stone in stones}
def dfs(s1, visited):
visited[s1] = True
for s2 in stones:
s2 = tuple(s2)
if not visited[s2]:
if (s2[0] == s1[0] or s2[1] == s1[1]):
visited[s2] = True
dfs((s2[0], s2[1]), visited)
connected_components = 0
for stone in stones:
s = tuple(stone)
if not visited[s]:
dfs(s, visited)
connected_components += 1
return len(stones) - connected_components
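# Sanity check (LeetCode 947, example 1):
# Solution().removeStones([[0, 0], [0, 1], [1, 0], [1, 2], [2, 1], [2, 2]]) -> 5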
| asperaa/back_to_grind | Graphs/947. Most Stones Removed with Same Row or Column.py | 947. Most Stones Removed with Same Row or Column.py | py | 851 | python | en | code | 1 | github-code | 6 |
3954866418 |
# -*- coding: utf-8 -*-
"""view module prilog application
* view function, and run Flask
"""
from glob import glob
from flask import Flask, render_template, request, session, redirect, jsonify
import os
import re
import json
import urllib.parse
import subprocess
import time as tm
import analyze as al
import common as cm
import state_list as state
import configparser
config = configparser.ConfigParser()
config.read("config.ini")
SERVER_ERROR_STATE = config.get("general", "error_state")
SERVER_TOKEN_AUTH = config.get("general", "token_auth")
MULTI_SERVER = config.get("rest", "multi_server")
DL_INTERVAL = int(config.get("rest", "interval"))
# movie download directory
stream_dir = "tmp/"
if not os.path.exists(stream_dir):
os.mkdir(stream_dir)
# analyze result save as cache directory
cache_dir = "cache/"
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
# save analyzing id as file directory
download_dir = "download/"
if not os.path.exists(download_dir):
os.mkdir(download_dir)
# waiting analyze id as file directory
dl_queue_dir = "download/queue/"
if not os.path.exists(dl_queue_dir):
os.mkdir(dl_queue_dir)
# save analyzing id as file directory
dl_ongoing_dir = "download/ongoing/"
if not os.path.exists(dl_ongoing_dir):
os.mkdir(dl_ongoing_dir)
# pending download id as file directory
dl_pending_dir = "download/pending/"
if not os.path.exists(dl_pending_dir):
os.mkdir(dl_pending_dir)
# save analyzing id as file directory
dl_server_dir = "download/server/"
if not os.path.exists(dl_server_dir):
os.mkdir(dl_server_dir)
# waiting analyze id as file directory
queue_dir = "queue/"
if not os.path.exists(queue_dir):
os.mkdir(queue_dir)
# pending analyze id as file directory
pending_dir = "pending/"
if not os.path.exists(pending_dir):
os.mkdir(pending_dir)
# api token as file directory
token_dir = "token/"
if not os.path.exists(token_dir):
os.mkdir(token_dir)
def get_web_txt(youtube_id, title, time_line, debuff_value, total_damage):
debuff_dict = None
if debuff_value:
debuff_dict = ({key: val for key, val in zip(time_line, debuff_value)})
data_url = "https://prilog.jp/?v=" + youtube_id
data_txt = "@PriLog_Rより%0a"
data_txt += title + "%0a"
if total_damage:
total_damage = "総ダメージ " + "".join(total_damage)
data_txt += total_damage + "%0a"
return debuff_dict, data_txt, data_url, total_damage
def get_rest_result(title, time_line, time_line_enemy, time_data, total_damage, debuff_value):
rest_result = {"title": title, "timeline": time_line, "timeline_enemy": time_line_enemy, "process_time": time_data,
"total_damage": total_damage, "debuff_value": debuff_value}
if time_line:
rest_result["timeline_txt"] = "\r\n".join(time_line)
if time_line_enemy:
rest_result["timeline_txt_enemy"] = "\r\n".join(time_line_enemy)
else:
rest_result["timeline_txt_enemy"] = False
if debuff_value:
rest_result["timeline_txt_debuff"] = "\r\n".join(list(
map(lambda x: "↓{} {}".format(str(debuff_value[x[0]][0:]).rjust(3, " "), x[1]),
enumerate(time_line))))
else:
rest_result["timeline_txt_debuff"] = False
else:
rest_result["timeline_txt"] = False
rest_result["timeline_txt_enemy"] = False
rest_result["timeline_txt_debuff"] = False
return rest_result
app = Flask(__name__)
app.config.from_object(__name__)
app.config["SECRET_KEY"] = "zJe09C5c3tMf5FnNL09C5e6SAzZuY"
app.config["JSON_AS_ASCII"] = False
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
url = (request.form["Url"])
        # extract the video ID from the URL
youtube_id = al.get_youtube_id(url)
if youtube_id is False:
error = state.get_error_message(state.ERR_BAD_URL)
return render_template("index.html", error=error)
cache = cm.cache_check(youtube_id)
if cache:
title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = cache
if past_status % 100 // 10 == 0:
debuff_dict, data_txt, data_url, total_damage = get_web_txt(youtube_id, title,
time_line, debuff_value, total_damage)
return render_template("result.html", title=title, timeLine=time_line, timeLineEnemy=time_line_enemy,
timeData=time_data, totalDamage=total_damage, debuffDict=debuff_dict,
data_txt=data_txt, data_url=data_url)
else:
error = state.get_error_message(past_status)
return render_template("index.html", error=error)
if SERVER_ERROR_STATE:
error = state.get_error_message(state.ERR_SERVICE_UNAVAILABLE)
return render_template("index.html", error=error)
# start download
dl_queue_path = dl_queue_dir + str(youtube_id)
dl_ongoing_path = dl_ongoing_dir + str(youtube_id)
        # check whether this video is already registered in the queue
        queued = os.path.exists(dl_queue_path)
        if not queued:  # not yet waiting to download, so join the download queue
            cm.queue_append(dl_queue_path)
            # wait for our turn in the queue, then run the download
while True:
if not cm.is_path_exists(dl_ongoing_path) and cm.is_path_due(dl_queue_path):
if cm.is_pending_download(DL_INTERVAL): # check pending download
break
                timeout = cm.watchdog_download(youtube_id, 300)  # 5-minute timeout watchdog
if timeout:
cm.clear_path(dl_queue_path)
error = "動画の解析待ちでタイムアウトが発生しました。再実行をお願いします。"
return render_template("index.html", error=error)
tm.sleep(1)
        else:  # the same video is already downloading, so show an error
cm.clear_path(dl_queue_path)
error = "同一の動画が解析中です。時間を置いて再実行をお願いします。"
return render_template("index.html", error=error)
path, title, length, thumbnail, url_result = al.search(youtube_id)
cm.clear_path(dl_queue_path)
if url_result % 100 // 10 == 2:
error = state.get_error_message(url_result)
cm.save_cache(youtube_id, title, False, False, False, False, False, url_result)
return render_template("index.html", error=error)
session["path"] = path
session["title"] = title
session["youtube_id"] = youtube_id
length = int(int(length) / 8) + 3
return render_template("analyze.html", title=title, length=length, thumbnail=thumbnail)
elif request.method == "GET":
if "v" in request.args: # ?v=YoutubeID 形式のGETであればリザルト返却
youtube_id = request.args.get("v")
if re.fullmatch(r"^([a-zA-Z0-9_-]{11})$", youtube_id):
cache = cm.cache_check(youtube_id)
if cache:
title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = cache
if past_status % 100 // 10 == 0:
debuff_dict, data_txt, data_url, total_damage = get_web_txt(youtube_id, title,
time_line, debuff_value,
total_damage)
return render_template("result.html", title=title, timeLine=time_line,
timeLineEnemy=time_line_enemy,
timeData=time_data, totalDamage=total_damage, debuffDict=debuff_dict,
data_txt=data_txt, data_url=data_url)
else:
error = state.get_error_message(past_status)
return render_template("index.html", error=error)
                else:  # no cache yet, so analyze the video
if SERVER_ERROR_STATE:
error = state.get_error_message(state.ERR_SERVICE_UNAVAILABLE)
return render_template("index.html", error=error)
# start download
dl_queue_path = dl_queue_dir + str(youtube_id)
dl_ongoing_path = dl_ongoing_dir + str(youtube_id)
                    # check whether this video is already registered in the queue
                    queued = os.path.exists(dl_queue_path)
                    if not queued:  # not yet waiting to download, so join the download queue
                        cm.queue_append(dl_queue_path)
                        # wait for our turn in the queue, then run the download
while True:
if not cm.is_path_exists(dl_ongoing_path) and cm.is_path_due(dl_queue_path):
if cm.is_pending_download(DL_INTERVAL): # check pending download
break
                            timeout = cm.watchdog_download(youtube_id, 300)  # 5-minute timeout watchdog
if timeout:
cm.clear_path(dl_queue_path)
error = "動画の解析待ちでタイムアウトが発生しました。再実行をお願いします。"
return render_template("index.html", error=error)
tm.sleep(1)
                    else:  # the same video is already downloading, so show an error
cm.clear_path(dl_queue_path)
error = "同一の動画が解析中です。時間を置いて再実行をお願いします。"
return render_template("index.html", error=error)
path, title, length, thumbnail, url_result = al.search(youtube_id)
cm.clear_path(dl_queue_path)
if url_result % 100 // 10 == 2:
error = state.get_error_message(url_result)
cm.save_cache(youtube_id, title, False, False, False, False, False, url_result)
return render_template("index.html", error=error)
session["path"] = path
session["title"] = title
session["youtube_id"] = youtube_id
length = int(int(length) / 8) + 3
return render_template("analyze.html", title=title, length=length, thumbnail=thumbnail)
            else:  # the request does not match the prilog.jp/(YoutubeID) form
error = "不正なリクエストです"
return render_template("index.html", error=error)
else:
path = session.get("path")
session.pop("path", None)
session.pop("title", None)
session.pop("youtube_id", None)
error = None
if str(path).isdecimal():
error = state.get_error_message(path)
elif path is not None:
cm.clear_path(path)
return render_template("index.html", error=error)
@app.route("/analyze", methods=["GET", "POST"])
def analyze():
path = session.get("path")
title = session.get("title")
youtube_id = session.get("youtube_id")
session.pop("path", None)
if request.method == "GET" and path is not None:
        # run the timeline (TL) analysis
time_line, time_line_enemy, time_data, total_damage, debuff_value, status = al.analyze_movie(path)
        # save the result to the cache
status = cm.save_cache(youtube_id, title, time_line, time_line_enemy, False, total_damage, debuff_value, status)
if status % 100 // 10 == 0:
            # analysis succeeded, so store the results in the session
session["time_line"] = time_line
session["time_line_enemy"] = time_line_enemy
session["time_data"] = time_data
session["total_damage"] = total_damage
session["debuff_value"] = debuff_value
return render_template("analyze.html")
else:
session["path"] = status
return render_template("analyze.html")
else:
return redirect("/")
@app.route("/result", methods=["GET", "POST"])
def result():
title = session.get("title")
time_line = session.get("time_line")
time_line_enemy = session.get("time_line_enemy")
time_data = session.get("time_data")
total_damage = session.get("total_damage")
debuff_value = session.get("debuff_value")
youtube_id = session.get("youtube_id")
session.pop("title", None)
session.pop("time_line", None)
session.pop("time_line_enemy", None)
session.pop("time_data", None)
session.pop("total_damage", None)
session.pop("debuff_value", None)
session.pop("youtube_id", None)
if request.method == "GET" and time_line is not None:
debuff_dict, data_txt, data_url, total_damage = get_web_txt(youtube_id, title,
time_line, debuff_value, total_damage)
return render_template("result.html", title=title, timeLine=time_line, timeLineEnemy=time_line_enemy,
timeData=time_data, totalDamage=total_damage, debuffDict=debuff_dict,
data_txt=data_txt, data_url=data_url)
else:
return redirect("/")
@app.route("/download", methods=["GET", "POST"])
def download():
if request.method == "GET":
return render_template("download.html")
else:
return redirect("/")
@app.route("/rest", methods=["GET", "POST"])
def rest():
if request.method == "GET":
return render_template("rest.html")
else:
return redirect("/")
@app.route("/standalone/version", methods=["GET"])
def standalone_version():
ret = {"version": "", "update": False}
if request.method == "GET":
path = "./static/release"
fl = glob(path + "/*")
if not fl:
return jsonify(ret)
# sort time stamp and find latest version
fl.sort(key=lambda x: os.path.getctime(x), reverse=True)
version = os.path.basename(fl[0])
ret["version"] = version
if "Version" in request.args:
if request.args.get("Version") < version:
ret["update"] = True
return jsonify(ret)
else:
return jsonify(ret)
@app.route("/rest/analyze", methods=["POST", "GET"])
def rest_analyze():
status = state.ERR_REQ_UNEXPECTED
is_parent = False
rest_result = {}
ret = {}
url = ""
raw_url = ""
token = ""
# clear old movie if passed 2 hours
cm.tmp_movie_clear()
if request.method == "POST":
if "Url" not in request.form:
status = state.ERR_BAD_REQ
ret["result"] = rest_result
ret["msg"] = state.get_error_message(status)
ret["status"] = status
return jsonify(ret)
else:
raw_url = request.form["Url"]
if SERVER_TOKEN_AUTH and "Token" not in request.form:
status = state.ERR_BAD_REQ
ret["result"] = rest_result
ret["msg"] = state.get_error_message(status)
ret["status"] = status
return jsonify(ret)
else:
token = request.form["Token"]
elif request.method == "GET":
if "Url" not in request.args:
status = state.ERR_BAD_REQ
ret["result"] = rest_result
ret["msg"] = state.get_error_message(status)
ret["status"] = status
return jsonify(ret)
else:
raw_url = request.args.get("Url")
if SERVER_TOKEN_AUTH and "Token" not in request.args:
status = state.ERR_BAD_REQ
ret["result"] = rest_result
ret["msg"] = state.get_error_message(status)
ret["status"] = status
return jsonify(ret)
else:
token = request.args.get("Token")
try:
        # verify and load the token
if SERVER_TOKEN_AUTH:
json.load(open(token_dir + urllib.parse.quote(token) + ".json"))
except FileNotFoundError:
status = state.ERR_BAD_TOKEN
ret["result"] = rest_result
ret["msg"] = state.get_error_message(status)
ret["status"] = status
return jsonify(ret)
    # extract the URL
tmp_group = re.search('(?:https?://)?(?P<host>.*?)(?:[:#?/@]|$)', raw_url)
if tmp_group:
host = tmp_group.group('host')
if host == "www.youtube.com" or host == "youtu.be":
url = raw_url
    # check the cache
youtube_id = al.get_youtube_id(url)
queue_path = queue_dir + str(youtube_id)
pending_path = pending_dir + str(youtube_id)
dl_queue_path = dl_queue_dir + str(youtube_id)
if youtube_id is False:
        # invalid URL
status = state.ERR_BAD_URL
else:
# 正常なurlの場合
cache = cm.cache_check(youtube_id)
if cache:
# キャッシュ有りの場合
# キャッシュを返信
title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = cache
if past_status % 100 // 10 == 0:
rest_result = get_rest_result(title, time_line, time_line_enemy, time_data, total_damage, debuff_value)
ret["result"] = rest_result
ret["msg"] = state.get_error_message(past_status)
ret["status"] = past_status
return jsonify(ret)
else:
ret["result"] = rest_result
ret["msg"] = state.get_error_message(past_status)
ret["status"] = past_status
return jsonify(ret)
if SERVER_ERROR_STATE:
ret["result"] = rest_result
ret["msg"] = state.get_error_message(state.ERR_SERVICE_UNAVAILABLE)
ret["status"] = state.ERR_SERVICE_UNAVAILABLE
return jsonify(ret)
# start analyze
        # check whether this video is already registered in the analysis queue
        queued = os.path.exists(queue_path)
        if not queued:  # not being analyzed yet, so join the analysis queue
            cm.queue_append(queue_path)
            # wait for our turn in the queue, then start the analysis
while True:
cm.watchdog(youtube_id, is_parent, 1800, state.TMP_QUEUE_TIMEOUT)
rest_pending = cm.is_path_exists(pending_path)
rest_queue = cm.is_path_due(queue_path)
web_download = cm.is_path_exists(dl_queue_path)
if not rest_pending and rest_queue and not web_download:
if cm.is_pending_download(DL_INTERVAL): # check pending download
if not MULTI_SERVER:
analyzer_path = f'python exec_analyze.py {url}'
cm.pending_append(pending_path)
subprocess.Popen(analyzer_path.split())
is_parent = True
else:
analyzer_path = f'python multi_exec_analyze.py {url}'
cm.pending_append(pending_path)
subprocess.Popen(analyzer_path.split())
is_parent = True
break
tm.sleep(1)
        while True:  # watch until the queue entry disappears
queued = os.path.exists(queue_path)
if queued:
if is_parent:
                    # parent process: watch the pending marker
cm.watchdog(youtube_id, is_parent, 300, state.TMP_ANALYZE_TIMEOUT)
else:
                    # child process: watch the queue marker
cm.watchdog(youtube_id, is_parent, 2160, state.TMP_QUEUE_TIMEOUT)
tm.sleep(1)
continue
                else:  # analysis finished: return its cached JSON
cache = cm.queue_cache_check(youtube_id)
if cache:
title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = cache
rest_result = get_rest_result(title, time_line, time_line_enemy, time_data, total_damage,
debuff_value)
status = past_status
break
                    else:  # the cache was never generated
                        # the cache is written before the queue entry is removed,
                        # so this error should never happen
status = state.TMP_UNEXPECTED
break
ret["result"] = rest_result
ret["msg"] = state.get_error_message(status)
ret["status"] = status
return jsonify(ret)
if __name__ == "__main__":
app.run()
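# Example request shape for the handler above (illustrative only; the route
# path is outside this fragment, and host/port are hypothetical):
#   GET http://localhost:5000/<route>?Url=https://www.youtube.com/watch?v=XXXX&Token=...
# "Url" and "Token" are the form/query fields the handler reads.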
|
Neilsaw/PriLog_web
|
app.py
|
app.py
|
py
| 21,596 |
python
|
en
|
code
| 30 |
github-code
|
6
|
40941005277
|
# print GitHub search results to the console
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver = webdriver.Chrome()
url = "https://github.com"
driver.get(url)
searchInput = driver.find_element_by_name("q")
time.sleep(1)
print("\n" + driver.title + "\n")
searchInput.send_keys("python")
time.sleep(1)
searchInput.send_keys(Keys.ENTER)
time.sleep(2)
result = driver.find_elements_by_css_selector(".repo-list-item div div a")
for element in result:
print(element.text)
time.sleep(2)
driver.close()
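# Note: Selenium 4 drops the find_element_by_* helpers used above; the
# equivalent calls would be (sketch):
#   from selenium.webdriver.common.by import By
#   searchInput = driver.find_element(By.NAME, "q")
#   result = driver.find_elements(By.CSS_SELECTOR, ".repo-list-item div div a")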
|
furkan-A/Python-WS
|
navigate.py
|
navigate.py
|
py
| 588 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34197446896
|
#!/bin/python
import sys
import os
import time
import datetime
import hashlib
from os import walk
import mysql.connector
from sys import argv
import json
import boto3
from botocore.exceptions import ClientError
import requests
from requests.exceptions import HTTPError
from typing import Tuple
game_client = argv[1]
target_dir = argv[2]
backoffice_url = argv[3]
enable_forcing = argv[4]
version = argv[5].split("/")[1]
source_dir = argv[5].split("/")[0]
environment = argv[6]
build_numer = argv[7]
performer = argv[8]
bucket_name = "cdn.project.com"
database_conf = "/var/lib/jenkins/mysql_engine.cnf"
def get_db_data() -> Tuple[str, str, int]:  # (client_s3_name, short_code, game_id)
global client_s3_name
global short_code
global game_id
try:
cnx = mysql.connector.connect(option_files=database_conf,
option_groups="client")
cursor = cnx.cursor()
print("*** Collecting information about Game")
query = ("select short_code, game_id from core_game where game_name='{}'".format(game_client))
cursor.execute(query)
results = cursor.fetchall()
for code in results:
short_code = code[0].replace("_", "")
game_id = code[1]
client_s3_name = short_code.replace("social", "")
print("*** Data was successfully collected")
return (client_s3_name, short_code, game_id)
except mysql.connector.Error as e:
print("*** ERROR: {}".format(e.msg))
exit()
finally:
if (cnx.is_connected()):
cnx.close()
cursor.close()
print("*** MySQL connection is closed")
def ensure_dir(dir_name: str):
try:
if not os.path.exists(dir_name):
os.makedirs(dir_name)
except OSError as e:
print("*** ERROR: {}".format(sys.exc_info()[1]))
exit()
def cleanup(item: str):
try:
os.system("rm -rf {}".format(item))
print("*** {} was successfully removed from workspace".format(item))
except OSError as e:
print("*** Error occurs: {}".format(sys.exc_info()[1]))
exit()
def download_from_s3():
ensure_dir(short_code)
try:
os.system("aws s3 cp s3://cdn.project.com/ags/{0}/{1}/{2}/ ./{3} --recursive".format(source_dir, client_s3_name, version, short_code))
except OSError as e:
print("*** Error during downloading from s3: {}".format(sys.exc_info()[1]))
cleanup(short_code)
exit()
def get_sha1sum(sha1sum_target: str) -> str:
try:
sha1hash = hashlib.sha1(open("{0}/{1}".format(client_s3_name, sha1sum_target),"rb").read()).hexdigest()
return sha1hash
except OSError as e:
print("*** ERROR: {}".format(sys.exc_info()[1]))
exit()
def update_devops_data(client_artifact: str):
try:
cnx = mysql.connector.connect(option_files=database_conf, option_groups="devops")
cursor = cnx.cursor()
print("*** Working with devops database")
artifact_data = datetime.datetime.now()
sha1sum_data = get_sha1sum(client_artifact)
update_sql = ("INSERT INTO deployments (Product, Date, Environment, Version, BuildNumber, Artifact, MD5sum, Performer) \
VALUES ('{0} client', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}' \
);".format(game_client, artifact_data, environment, version, build_numer, client_artifact, sha1sum_data, performer))
cursor.execute(update_sql)
cnx.commit()
print("*** Updating devops database with {} artifact".format(client_artifact))
print("*** record(s) affected: ", cursor.rowcount)
except mysql.connector.Error as e:
print("*** ERROR: {}".format(e.msg))
exit()
finally:
if (cnx.is_connected()):
cnx.close()
cursor.close()
print("*** MySQL connection is closed")
def modify_json():
with open("{}/game-config.json".format(short_code), "r") as json_file:
data = json.load(json_file)
data["enableForcing"] = bool(enable_forcing)
with open("{}/game-config.json".format(short_code), "w") as json_file:
json.dump(data, json_file, sort_keys=True, indent=2)
def upload_to_s3() -> bool:
print("*** Uploading {0} version:{1} to S3".format(game_client, version))
s3 = boto3.resource('s3')
try:
engine_files = []
total_file_count = 0
total_file_size = 0
for path, dirs, files in os.walk(short_code):
for file in files:
file_name = (os.path.join(path, file)).replace("{}/".format(short_code), "")
size_file = os.path.getsize("{0}/{1}".format(short_code, file_name))
engine_files.append(file_name)
total_file_size += size_file
total_file_count += 1
print(" START TIME: {}".format(time.asctime()))
print(" - Files to upload: {}".format(str(total_file_count)))
print(" - Total size to upload: {}MB".format(int(total_file_size/1024/1024)))
for f in engine_files:
if f == "index.html":
s3.meta.client.upload_file(
Filename="{0}/{1}".format(short_code, f),
Bucket=bucket_name,
Key="ags/{0}/{1}/{2}/{3}".format(target_dir, short_code, version, f),
ExtraArgs={"ContentType": "text/html"}
)
else:
s3.meta.client.upload_file(
Filename="{0}/{1}".format(short_code, f),
Bucket=bucket_name,
Key="ags/{0}/{1}/{2}/{3}".format(target_dir, short_code, version, f)
)
print(" FINISH TIME: {}".format(time.asctime()))
return True
except ClientError as err:
print("*** Error during uploading to s3: {}".format(err))
return False
def invalidate_s3() -> bool:
client = boto3.client('cloudfront')
try:
response = client.create_invalidation(
DistributionId="E30T6SVV8C",
InvalidationBatch={
"Paths": {
"Quantity": 1,
"Items": [
"/ags/{0}/{1}/{2}/*".format(target_dir, short_code, version),
]
},
"CallerReference": str(time.asctime())
}
)
return True
except ClientError as err:
print("*** Error during invalidation: {}".format(err))
return False
finally:
print("*** Data {0}/{1}/{2}/* was invalidated on s3.".format(target_dir, short_code, version))
def get_url(action: str) -> str:
if action == "clearCache":
url = "https://{0}/backoffice/{1}".format(backoffice_url, action)
else:
url = "https://{0}/backoffice/games/{1}/".format(backoffice_url, game_id)
return url
def request_data():
headers={"Authorization": "Basic 123asdluczo", # jenkins user pass from BO
"Content-type": "application/json"
}
launch_address = "https://cdn.project.com/ags/{0}/{1}/{2}/index.html".format(target_dir, short_code, version)
try:
response_get = requests.get(get_url(game_id), headers=headers, verify=False) # verify=False, issue with ssl on NJ
game_json = response_get.json()
print("*** Changing Launch Adresses")
game_json["desktopLaunchAddress"] = unicode(launch_address)
game_json["mobileLaunchAddress"] = unicode(launch_address)
print(" - DesktopLaunchAddress: {}".format(game_json["desktopLaunchAddress"]))
print(" - MobileLaunchAddress: {}".format(game_json["mobileLaunchAddress"]))
response_put = requests.put(get_url(game_id), headers=headers, verify=False, data=json.dumps(game_json)) # verify=False, issue with ssl on NJ
response_post = requests.post(get_url("clearCache"), headers=headers, verify=False) # verify=False, issue with ssl on NJ
print("*** Clean Cache: status {}".format(response_post.status_code))
except HTTPError as http_err:
print("*** HTTP error occurred: {}".format(http_err))
except Exception as err:
print("*** Other error occurred: {}".format(err))
def main():
get_db_data()
download_from_s3()
update_devops_data("app-{}.js".format(version))
update_devops_data("index.html")
modify_json()
upload_to_s3()
request_data()
invalidate_s3()
cleanup(short_code)
if __name__ == '__main__':
main()
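# Example invocation (all values hypothetical; the positional-argument order is
# defined by the argv parsing at the top of this script):
#   python deploy_game_client.py "Some Game" prod backoffice.example.com true \
#       release/1.2.3 staging 42 jenkins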
|
vlad-solomai/viam_automation
|
automation_gambling/deploy_game_client/deploy_game_client.py
|
deploy_game_client.py
|
py
| 8,482 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36035072095
|
import tweepy
import pandas as pd
import re
import time
from textblob import TextBlob
from sqlalchemy import create_engine
import yaml
import json
TWITTER_CONFIG_FILE = '../auth.yaml'
with open(TWITTER_CONFIG_FILE, 'r') as config_file:
    config = yaml.safe_load(config_file)  # yaml.load without an explicit Loader is unsafe and now errors
consumer_key = config['twitter']['consumer_key']
consumer_secret = config['twitter']['consumer_secret']
access_token = config['twitter']['access_token']
access_token_secret = config['twitter']['access_token_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# get geo for top 50 US cities by population
json_data=open('cities_trunc.json').read()
data = json.loads(json_data)
# define twitter API calls by geo coordinates to a 10mi radius
def get_tweet_payload(d):
return {d['city']: '%s,%s,%s' % (d['latitude'], d['longitude'],'10mi')}
geo = {}
i = 0
while i < len(data):
geo.update(get_tweet_payload(data[i]))
i += 1
# manually setting queries against set of popular competitive games - leverage twitch_top.py to keep this dynamic
api = tweepy.API(auth, wait_on_rate_limit=True)
query = ['fortnite', 'overwatch', 'starcraft', 'dota', 'league of legends', 'CSGO', 'hearthstone' ,'pubg', 'tekken', 'ssbm']
d = []
# Since we are using Standard APIs we are limited in data volume, adding additional geo regions and/or queries can result in long run-time
for game in query:
for city,coords in geo.items():
public_tweets = [status for status in tweepy.Cursor(api.search,q=game, geocode=coords, count=100).items(1000)]
for tweet in public_tweets:
analysis = TextBlob(tweet.text)
TweetText = re.sub('[^A-Za-z0-9]+', ' ', tweet.text)
polarity = analysis.sentiment.polarity
subjectivity = analysis.sentiment.subjectivity
d.append((TweetText,
polarity,
subjectivity,
game,
city))
# use Pandas to format analyzed tweets into CSV file for appending to a database
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = timestr + "_tweets.csv"
cols=['Tweet','polarity','subjectivity','game','city']
df = pd.DataFrame(d, columns=cols)
df = df.drop_duplicates(['Tweet'], keep='last')  # drop_duplicates returns a new frame
df.to_csv(filename, encoding='utf-8-sig')
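# create_engine is imported above but never used; a minimal sketch of the
# database append the comment mentions (connection string and table name are
# hypothetical):
#   engine = create_engine("postgresql://user:pass@localhost:5432/tweets")
#   df.to_sql("tweet_sentiment", engine, if_exists="append", index=False)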
|
dgletts/project-spirit-bomb
|
tweets.py
|
tweets.py
|
py
| 2,259 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11464232803
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 4 18:23:32 2022
@author: sampasmann
"""
import numpy as np
from src.functions.material import Material
from src.functions.mesh import Mesh
class URRb_H2Oa5_2_0_SL_init:
def __init__(self, N=2**10, Nx=100, generator="halton"):
self.N = N
self.Nx = Nx
self.generator = generator
self.totalDim = 2
self.RB = 7.822954
self.LB = -7.822954
self.G = 2
self.right = False
self.left = False
self.material_code = "URRb_H2Oa5_2_0_SL_data"
self.geometry = "slab"
self.avg_scalar_flux = True
self.edge_scalar_flux = False
self.avg_angular_flux = False
self.avg_current = False
self.edge_current = False
self.shannon_entropy = False
self.save_data = True
self.moment_match = False
self.true_flux = np.array((False))
self.mesh = Mesh(self.LB, self.RB, self.Nx)
self.material = Material(self.material_code, self.geometry, self.mesh)
self.source = np.ones((self.Nx,self.G))
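# Minimal usage sketch (hypothetical arguments; assumes the benchmark data for
# material_code is available to src.functions.material):
#   problem = URRb_H2Oa5_2_0_SL_init(N=2**12, Nx=200, generator="sobol")
#   print(problem.mesh, problem.material)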
|
spasmann/iQMC
|
src/input_files/URRb_H2Oa5_2_0_SL_init.py
|
URRb_H2Oa5_2_0_SL_init.py
|
py
| 1,118 |
python
|
en
|
code
| 2 |
github-code
|
6
|
8927043584
|
from collections import OrderedDict
from concurrent import futures
import six
from nose import tools
from tornado import gen
from tornado import testing as tt
import tornado.concurrent
from flowz.artifacts import (ExtantArtifact, DerivedArtifact, ThreadedDerivedArtifact,
WrappedArtifact, TransformedArtifact, KeyedArtifact,
maybe_artifact)
from ..channels.util import raises_channel_done
class ArtifactsTest(tt.AsyncTestCase):
NAME = "Fooble"
NUM_ARR = [1, 2, 3, 4, 5]
NUM_DICT = {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}
@classmethod
def setUpClass(cls):
cls.NUM_ORDERED_DICT = OrderedDict([(i, cls.NUM_DICT[i]) for i in cls.NUM_ARR])
cls.NUM_REVERSED_DICT = OrderedDict([(i, cls.NUM_DICT[i]) for i in reversed(cls.NUM_ARR)])
# Possible getter/deriver/transform functions
@staticmethod
@gen.coroutine
def get_ordered_dict():
raise gen.Return(ArtifactsTest.NUM_ORDERED_DICT)
@staticmethod
def derive_ordered_dict(num_arr, num_dict):
return OrderedDict([(i, num_dict[i]) for i in num_arr])
@staticmethod
def transform_reversed_dict(orig_dict):
return OrderedDict([(i, orig_dict[i]) for i in reversed(orig_dict.keys())])
@staticmethod
def derive_value(key, dict_):
return dict_[key]
@staticmethod
def derive_key(dict_, value):
for (k, v) in six.iteritems(dict_):
if v == value:
return k
return None
@staticmethod
@gen.coroutine
def battery(artifact_maker, exp_value, exists_pre_get):
"""
        A battery of tests to run against a particular artifact type
@param artifact_maker: a callable to build the artifact
@param exp_value: the expected value of getting the artifact
        @param exists_pre_get: the expected value of calling exists() before calling get()
"""
artifact = artifact_maker()
tools.assert_true(ArtifactsTest.NAME in str(artifact))
tools.assert_equal(artifact.exists(), exists_pre_get)
tools.assert_true(artifact.ensure())
value = yield artifact.get()
tools.assert_equal(value, exp_value)
tools.assert_true(artifact.exists())
tools.assert_true(artifact.ensure())
@gen.coroutine
def check_channel(channel, exp_value):
"""
Validate a channel with one artifact in it
@param channel: the channel
@param exp_value: the expected value of the entry in the channel
"""
result = yield channel.start()
tools.assert_true(result)
obj = yield channel.next()
# the object might be an artifact or a direct value
val = yield maybe_artifact(obj)
tools.assert_equal(val, exp_value)
yield raises_channel_done(channel)
raise gen.Return(True)
yield check_channel(artifact_maker().as_channel(), exp_value)
yield check_channel(artifact_maker().value_channel(), exp_value)
yield check_channel(artifact_maker().ensure_channel(), True)
raise gen.Return(True)
@tt.gen_test
def test_extant_artifact(self):
maker = lambda: ExtantArtifact(self.get_ordered_dict, name=self.NAME)
yield self.battery(maker, self.NUM_ORDERED_DICT, True)
@tt.gen_test
def test_derived_artifact(self):
maker = lambda: DerivedArtifact(self.derive_ordered_dict, self.NUM_ARR,
self.NUM_DICT, name=self.NAME)
yield self.battery(maker, self.NUM_ORDERED_DICT, False)
@tt.gen_test
def test_threaded_derived_artifact(self):
executor = futures.ThreadPoolExecutor(1)
maker = lambda: ThreadedDerivedArtifact(executor, self.derive_ordered_dict,
self.NUM_ARR, self.NUM_DICT, name=self.NAME)
result = yield self.battery(maker, self.NUM_ORDERED_DICT, False)
@tt.gen_test
def test_wrapped_artifact(self):
maker = lambda: WrappedArtifact(DerivedArtifact(self.derive_ordered_dict,
self.NUM_ARR, self.NUM_DICT),
name=self.NAME)
yield self.battery(maker, self.NUM_ORDERED_DICT, False)
@tt.gen_test
def test_wrapped_artifact_getattr(self):
artifact = WrappedArtifact(DerivedArtifact(self.derive_ordered_dict,
self.NUM_ARR, self.NUM_DICT),
name=self.NAME)
# in a normal situation, getting attributes should work fine, passing the call
# onto the underlying value...
tools.assert_equal(self.derive_ordered_dict, getattr(artifact, 'deriver'))
# ...and throwing AttributeError if it didn't have the attribute
tools.assert_raises(AttributeError, getattr, artifact, 'phamble')
# If you had not yet set a value attribute on the artifact, though...
delattr(artifact, 'value')
# ...this used to infinitely recurse until Python complained.
# But now it should return a proper AttributeError
tools.assert_raises(AttributeError, getattr, artifact, 'deriver')
@tt.gen_test
def test_transformed_artifact(self):
# Try with an ExtantArtifact
maker = lambda: TransformedArtifact(ExtantArtifact(self.get_ordered_dict),
self.transform_reversed_dict, name=self.NAME)
yield self.battery(maker, self.NUM_REVERSED_DICT, True)
# Try with a DerivedArtifact
maker = lambda: TransformedArtifact(DerivedArtifact(self.derive_ordered_dict,
self.NUM_ARR, self.NUM_DICT),
self.transform_reversed_dict, name=self.NAME)
yield self.battery(maker, self.NUM_REVERSED_DICT, False)
@tt.gen_test
def test_keyed_artifact(self):
key = 1
maker = lambda: KeyedArtifact(key,
DerivedArtifact(self.derive_value, key, self.NUM_DICT),
name=self.NAME)
yield self.battery(maker, 'one', False)
artifact = maker()
tools.assert_equal(artifact[0], key)
tools.assert_equal(artifact[1], artifact)
tools.assert_equal(artifact['key'], key)
tools.assert_raises(KeyError, artifact.__getitem__, 'spaz')
for (a,b) in zip((key, artifact), iter(artifact)):
tools.assert_equal(a, b)
@tt.gen_test
def test_keyed_artifact_transform(self):
key = 1
artifact = KeyedArtifact(key, DerivedArtifact(self.derive_value, key, self.NUM_DICT))
artifact2 = artifact.transform(self.derive_key, self.NUM_DICT)
key2 = yield artifact2.get()
tools.assert_equal(key, key2)
tools.assert_is_instance(artifact2, KeyedArtifact)
@tt.gen_test
def test_keyed_artifact_threaded_transform(self):
executor = futures.ThreadPoolExecutor(1)
key = 1
artifact = KeyedArtifact(key, DerivedArtifact(self.derive_value, key, self.NUM_DICT))
artifact2 = artifact.threaded_transform(executor, self.derive_key, self.NUM_DICT)
key2 = yield artifact2.get()
tools.assert_equal(key, key2)
tools.assert_is_instance(artifact2, KeyedArtifact)
@tt.gen_test
def test_maybe_artifact(self):
# prove that both artifacts and non-artifacts result in futures
key = 1
artifact = DerivedArtifact(self.derive_value, key, self.NUM_DICT)
future1 = maybe_artifact(artifact)
tools.assert_is_instance(future1, tornado.concurrent.Future)
future2 = maybe_artifact('one')
tools.assert_is_instance(future2, tornado.concurrent.Future)
val1 = yield future1
val2 = yield future2
tools.assert_equal(val1, val2)
# Make sure that just having a "get" function isn't enough to be an artifact!
dict_ = {1: 'one'}
tools.assert_true(hasattr(dict_, 'get'))
future3 = maybe_artifact(dict_)
val3 = yield future3
tools.assert_equal(val3, dict_)
|
ethanrowe/flowz
|
flowz/test/artifacts/artifacts_test.py
|
artifacts_test.py
|
py
| 8,314 |
python
|
en
|
code
| 2 |
github-code
|
6
|
11275131068
|
from django.contrib import admin
from django.urls import path, include
from basesite import views
urlpatterns = [
path('', views.index, name='index'),
path('academics', views.academics, name='academics'),
path('labs', views.labs, name='labs'),
path('committee', views.committee, name='committee'),
path('gallery', views.gallery, name='gallery'),
path('hostel', views.hostel, name='hostel'),
path('placements', views.placements, name='placements'),
path('alumni', views.alumni, name='alumni'),
path('library', views.library, name='library'),
path('about', views.about, name='about'),
path('contact', views.contact, name='contact'),
path('coursempe', views.coursempe, name='coursempe'),
path('coursemae', views.coursemae, name='coursemae'),
path('coursecse', views.coursecse, name='coursecse'),
path('insert', views.insert, name="insert")
]
|
Mr-vabs/GPA
|
basesite/urls.py
|
urls.py
|
py
| 923 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10420450933
|
from __future__ import annotations
from typing import TYPE_CHECKING
from randovania.games.prime1.layout.hint_configuration import PhazonSuitHintMode
from randovania.games.prime1.layout.prime_configuration import (
LayoutCutsceneMode,
PrimeConfiguration,
RoomRandoMode,
)
from randovania.layout.preset_describer import (
GamePresetDescriber,
fill_template_strings_from_tree,
message_for_required_mains,
)
if TYPE_CHECKING:
from randovania.layout.base.base_configuration import BaseConfiguration
_PRIME1_CUTSCENE_MODE_DESCRIPTION = {
LayoutCutsceneMode.MAJOR: "Major cutscene removal",
LayoutCutsceneMode.MINOR: "Minor cutscene removal",
LayoutCutsceneMode.COMPETITIVE: "Competitive cutscene removal",
LayoutCutsceneMode.SKIPPABLE: None,
LayoutCutsceneMode.SKIPPABLE_COMPETITIVE: "Competitive cutscenes",
LayoutCutsceneMode.ORIGINAL: "Original cutscenes",
}
_PRIME1_PHAZON_SUIT_HINT = {
PhazonSuitHintMode.DISABLED: None,
PhazonSuitHintMode.HIDE_AREA: "Area only",
PhazonSuitHintMode.PRECISE: "Area and room",
}
_PRIME1_ROOM_RANDO_MODE_DESCRIPTION = {
RoomRandoMode.NONE: None,
RoomRandoMode.ONE_WAY: "One-way Room Rando",
RoomRandoMode.TWO_WAY: "Two-way Room Rando",
}
class PrimePresetDescriber(GamePresetDescriber):
def format_params(self, configuration: BaseConfiguration) -> dict[str, list[str]]:
assert isinstance(configuration, PrimeConfiguration)
template_strings = super().format_params(configuration)
cutscene_removal = _PRIME1_CUTSCENE_MODE_DESCRIPTION[configuration.qol_cutscenes]
ingame_difficulty = configuration.ingame_difficulty.description
phazon_hint = _PRIME1_PHAZON_SUIT_HINT[configuration.hints.phazon_suit]
room_rando = _PRIME1_ROOM_RANDO_MODE_DESCRIPTION[configuration.room_rando]
def describe_probability(probability, attribute):
if probability == 0:
return None
return f"{probability / 10:.1f}% chance of {attribute}"
superheated_probability = describe_probability(configuration.superheated_probability, "superheated")
submerged_probability = describe_probability(configuration.submerged_probability, "submerged")
def attribute_in_range(rand_range, attribute):
if rand_range[0] == 1.0 and rand_range[1] == 1.0:
return None
elif rand_range[0] > rand_range[1]:
rand_range = (rand_range[1], rand_range[0])
return f"Random {attribute} within range {rand_range[0]} - {rand_range[1]}"
def different_xyz_randomization(diff_xyz):
if enemy_rando_range_scale is None:
return None
elif diff_xyz:
return "Enemies will be stretched randomly"
if configuration.enemy_attributes is not None:
enemy_rando_range_scale = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_scale_low,
configuration.enemy_attributes.enemy_rando_range_scale_high,
],
"Size",
)
enemy_rando_range_health = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_health_low,
configuration.enemy_attributes.enemy_rando_range_health_high,
],
"Health",
)
enemy_rando_range_speed = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_speed_low,
configuration.enemy_attributes.enemy_rando_range_speed_high,
],
"Speed",
)
enemy_rando_range_damage = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_damage_low,
configuration.enemy_attributes.enemy_rando_range_damage_high,
],
"Damage",
)
enemy_rando_range_knockback = attribute_in_range(
[
configuration.enemy_attributes.enemy_rando_range_knockback_low,
configuration.enemy_attributes.enemy_rando_range_knockback_high,
],
"Knockback",
)
enemy_rando_diff_xyz = different_xyz_randomization(configuration.enemy_attributes.enemy_rando_diff_xyz)
else:
enemy_rando_range_scale = None
enemy_rando_range_health = None
enemy_rando_range_speed = None
enemy_rando_range_damage = None
enemy_rando_range_knockback = None
enemy_rando_diff_xyz = None
extra_message_tree = {
"Difficulty": [
{f"Heat Damage: {configuration.heat_damage:.2f} dmg/s": configuration.heat_damage != 10.0},
{f"{configuration.energy_per_tank} energy per Energy Tank": configuration.energy_per_tank != 100},
],
"Gameplay": [
{
f"Elevators: {configuration.teleporters.description('elevators')}": (
not configuration.teleporters.is_vanilla
)
},
{
"Dangerous Gravity Suit Logic": configuration.allow_underwater_movement_without_gravity,
},
],
"Quality of Life": [{f"Phazon suit hint: {phazon_hint}": phazon_hint is not None}],
"Game Changes": [
message_for_required_mains(
configuration.ammo_pickup_configuration,
{
"Missiles needs Launcher": "Missile Expansion",
"Power Bomb needs Main": "Power Bomb Expansion",
},
),
{
"Progressive suit damage reduction": configuration.progressive_damage_reduction,
},
{
"Warp to start": configuration.warp_to_start,
"Final bosses removed": configuration.teleporters.skip_final_bosses,
"Unlocked Vault door": configuration.main_plaza_door,
"Unlocked Save Station doors": configuration.blue_save_doors,
"Phazon Elite without Dynamo": configuration.phazon_elite_without_dynamo,
},
{
"Small Samus": configuration.small_samus,
"Large Samus": configuration.large_samus,
},
{
"Shuffle Item Position": configuration.shuffle_item_pos,
"Items Every Room": configuration.items_every_room,
},
{
"Random Boss Sizes": configuration.random_boss_sizes,
"No Doors": configuration.no_doors,
},
{
room_rando: room_rando is not None,
},
{
superheated_probability: superheated_probability is not None,
submerged_probability: submerged_probability is not None,
},
{
"Spring Ball": configuration.spring_ball,
},
{
cutscene_removal: cutscene_removal is not None,
},
{
ingame_difficulty: ingame_difficulty is not None,
},
{
enemy_rando_range_scale: enemy_rando_range_scale is not None,
enemy_rando_range_health: enemy_rando_range_health is not None,
enemy_rando_range_speed: enemy_rando_range_speed is not None,
enemy_rando_range_damage: enemy_rando_range_damage is not None,
enemy_rando_range_knockback: enemy_rando_range_knockback is not None,
enemy_rando_diff_xyz: enemy_rando_diff_xyz is not None,
},
],
}
if enemy_rando_range_scale is not None:
for listing in extra_message_tree["Game Changes"]:
if "Random Boss Sizes" in listing.keys():
listing["Random Boss Sizes"] = False
fill_template_strings_from_tree(template_strings, extra_message_tree)
backwards = [
message
for flag, message in [
(configuration.backwards_frigate, "Frigate"),
(configuration.backwards_labs, "Labs"),
(configuration.backwards_upper_mines, "Upper Mines"),
(configuration.backwards_lower_mines, "Lower Mines"),
]
if flag
]
if backwards:
template_strings["Game Changes"].append("Allowed backwards: {}".format(", ".join(backwards)))
if configuration.legacy_mode:
template_strings["Game Changes"].append("Legacy Mode")
# Artifacts
template_strings["Item Pool"].append(
f"{configuration.artifact_target.num_artifacts} Artifacts, "
f"{configuration.artifact_minimum_progression} min actions"
)
return template_strings
|
randovania/randovania
|
randovania/games/prime1/layout/preset_describer.py
|
preset_describer.py
|
py
| 9,313 |
python
|
en
|
code
| 165 |
github-code
|
6
|
1868384059
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
# Create your models here.
class Country(models.Model):
name = models.CharField(_("Name"), db_column='name', max_length = 150, null=True, blank=True)
code2 = models.CharField(_("Code2"), db_column='code2', max_length = 2, unique = True)
code3 = models.CharField(_("Code3"), db_column='code3', max_length = 3, unique = True)
number = models.CharField(_("Number"), db_column='number', max_length = 3, unique = True)
class Document(models.Model):
image = models.ImageField(_("Document image"), upload_to=settings.UPLOAD_TO, null=True, blank=True)
|
amlluch/vectorai
|
vectorai/restapi/models.py
|
models.py
|
py
| 696 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11169933239
|
# from django.shortcuts import render
from rest_framework import generics
from review_app.models import FarmersMarket, Vendor
from fm_api.serializers import FarmersMarketSerializer, VendorSerializer
# Create your views here.
class FarmersMarketListAPIView(generics.ListAPIView):
queryset = FarmersMarket.objects.all()
serializer_class = FarmersMarketSerializer
class FarmersMarketRetrieveAPIView(generics.RetrieveAPIView):
queryset = FarmersMarket.objects.all()
serializer_class = FarmersMarketSerializer
class VendorListAPIView(generics.ListAPIView):
queryset = Vendor.objects.all()
serializer_class = VendorSerializer
class VendorRetrieveAPIView(generics.RetrieveAPIView):
queryset = Vendor.objects.all()
serializer_class = VendorSerializer
|
dhcrain/FatHen
|
fm_api/views.py
|
views.py
|
py
| 783 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12932593468
|
from django.db import models
class UserMailmapManager(models.Manager):
"""A queryset manager which defers all :class:`models.DateTimeField` fields, to avoid
resetting them to an old value involuntarily."""
@classmethod
def deferred_fields(cls):
try:
return cls._deferred_fields
except AttributeError:
cls._deferred_fields = [
field.name
for field in UserMailmap._meta.get_fields()
if isinstance(field, models.DateTimeField) and not field.auto_now
]
return cls._deferred_fields
def get_queryset(self):
return super().get_queryset().defer(*self.deferred_fields())
class UserMailmap(models.Model):
"""
Model storing mailmap settings submitted by users.
"""
user_id = models.CharField(max_length=50, null=True)
"""Optional user id from Keycloak"""
from_email = models.TextField(unique=True, null=False)
"""Email address to find author in the archive"""
from_email_verified = models.BooleanField(default=False)
"""Indicates if the from email has been verified"""
from_email_verification_request_date = models.DateTimeField(null=True)
"""Last from email verification request date"""
display_name = models.TextField(null=False)
"""Display name to use for the author instead of the archived one"""
display_name_activated = models.BooleanField(default=False)
"""Indicates if the new display name should be used"""
to_email = models.TextField(null=True)
"""Optional new email to use in the display name instead of the archived one"""
to_email_verified = models.BooleanField(default=False)
"""Indicates if the to email has been verified"""
to_email_verification_request_date = models.DateTimeField(null=True)
"""Last to email verification request date"""
mailmap_last_processing_date = models.DateTimeField(null=True)
"""Last mailmap synchronisation date with swh-storage"""
last_update_date = models.DateTimeField(auto_now=True)
"""Last date that mailmap model was updated"""
class Meta:
app_label = "swh_web_mailmap"
db_table = "user_mailmap"
# Defer _date fields by default to avoid updating them by mistake
objects = UserMailmapManager()
@property
def full_display_name(self) -> str:
if self.to_email is not None and self.to_email_verified:
return "%s <%s>" % (self.display_name, self.to_email)
else:
return self.display_name
class UserMailmapEvent(models.Model):
"""
Represents an update to a mailmap object
"""
timestamp = models.DateTimeField(auto_now=True, null=False)
"""Timestamp of the moment the event was submitted"""
user_id = models.CharField(max_length=50, null=False)
"""User id from Keycloak of the user who changed the mailmap.
(Not necessarily the one who the mail belongs to.)"""
request_type = models.CharField(max_length=50, null=False)
"""Either ``add`` or ``update``."""
request = models.TextField(null=False)
"""JSON dump of the request received."""
successful = models.BooleanField(default=False, null=False)
"""If False, then the request failed or crashed before completing,
and may or may not have altered the database's state."""
class Meta:
indexes = [
models.Index(fields=["timestamp"]),
]
app_label = "swh_web_mailmap"
db_table = "user_mailmap_event"
|
SoftwareHeritage/swh-web
|
swh/web/mailmap/models.py
|
models.py
|
py
| 3,525 |
python
|
en
|
code
| 11 |
github-code
|
6
|
70728232508
|
import frida
import sys
package_name = "com.jni.anto.kalip"
def get_messages_from_js(message, data):
print(message)
    print(message['payload'])
def instrument_debugger_checks():
hook_code = """
setTimeout(function(){
Dalvik.perform(function () {
var TM = Dalvik.use("android.os.Debug");
TM.isDebuggerConnected.implementation = function () {
send("Called - isDebuggerConnected()");
return false;
};
});
},0);
"""
return hook_code
process = frida.get_device_manager().enumerate_devices()[-1].attach(package_name)
script = process.create_script(instrument_debugger_checks())
script.on('message',get_messages_from_js)
script.load()
sys.stdin.read()
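# Note: modern Frida exposes the JavaScript bridge as Java.* rather than
# Dalvik.*, so a current script would use Java.perform / Java.use instead
# (the hook body itself is unchanged).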
|
antojoseph/frida-android-hooks
|
debugger.py
|
debugger.py
|
py
| 800 |
python
|
en
|
code
| 371 |
github-code
|
6
|
8179016390
|
import json
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def hello(event, context):
logger.info(f"AWS Lambda processing message from GitHub: {event}.")
body = {
"message": "Your function executed successfully!",
"input": event
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
|
Qif-Equinor/serverless-edc2021
|
aws-demo/handler.py
|
handler.py
|
py
| 396 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39304842298
|
import os
from flask import Flask, jsonify, Blueprint
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bcrypt import Bcrypt
import flask_restplus
from werkzeug.contrib import fixers
# instantiate the extensions
db = SQLAlchemy()
migrate = Migrate()
bcrypt = Bcrypt()
def register_api(_app):
from users.api.users import ns as users_ns
from users.api.auth import ns as auth_ns
blueprint = Blueprint('api', __name__)
api = flask_restplus.Api(
app=blueprint,
doc=_app.config['SWAGGER_PATH'],
version=_app.config['API_VERSION'],
title='Shows On Demand - Users Service REST API',
description='Shows on deman Users service API for users access.',
validate=_app.config['RESTPLUS_VALIDATE']
)
api.add_namespace(auth_ns, path='/{}/auth'.format(_app.config['API_VERSION']))
api.add_namespace(users_ns, path='/{}/users'.format(_app.config['API_VERSION']))
_app.register_blueprint(blueprint)
def create_app():
# instantiate the app
app = Flask(__name__)
# enable CORS
CORS(app)
    # set config
    app_settings = os.getenv('APP_SETTINGS')
    app.config.from_object(app_settings)
# set up extensions
db.init_app(app)
bcrypt.init_app(app)
migrate.init_app(app, db)
app.wsgi_app = fixers.ProxyFix(app.wsgi_app)
# register blueprints
register_api(app)
return app
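# Note: werkzeug.contrib was removed in Werkzeug 1.0; on newer versions the
# ProxyFix import above would instead be
#   from werkzeug.middleware.proxy_fix import ProxyFix
#   app.wsgi_app = ProxyFix(app.wsgi_app)
# (left as-is above, assuming the project pins an older Werkzeug).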
|
guidocecilio/shows-on-demand-users
|
src/users/__init__.py
|
__init__.py
|
py
| 1,480 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17016755905
|
import urllib.request as request
import json
src="https://padax.github.io/taipei-day-trip-resources/taipei-attractions-assignment.json"
with request.urlopen(src) as response:
data=json.load(response)
spot_data=data["result"]["results"]
with open("data.csv","w",encoding="UTF-8-sig") as file:
for spot_item in spot_data:
spot_item['address']=spot_item['address'][5:8]
img_list=spot_item['file']
img=img_list.split('https://')
file.write(spot_item['stitle']+","+spot_item['address']+","+spot_item['longitude']+","+spot_item['latitude']+","+"https://"+img[1]+"\n")
|
ba40431/wehelp-assignments
|
week_3/week_3.py
|
week_3.py
|
py
| 629 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43022293294
|
"""Escribe una función qué reciba varios números y devuelva el mayor de ellos."""
def numero_mayor(lista_numeros):
#encuentra el número mayor de una lista
mayor = 0
mayor =lista_numeros[0]
for item in range(0,len(lista_numeros)):
if lista_numeros[item] > mayor :
mayor = lista_numeros[item]
return mayor
numeros =[]
sigue ="S"
num_mayor=0
while sigue in ["S","s"]:
dato="D"
while dato.isdigit() ==False:
dato=input('Dame un número: ')
numeros.append(int(dato))
sigue =input('Deseas continuar (S/N) : ')
num_mayor =numero_mayor(numeros)
print('\n\t\t El número mayor de tu lista es: ',num_mayor,"\n\n")
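# Note: the standard library already provides this; max(numeros) is equivalent.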
|
lupitallamas/tareasjulio2
|
nummayor.py
|
nummayor.py
|
py
| 694 |
python
|
es
|
code
| 0 |
github-code
|
6
|
73931874429
|
#!python
"""
The 5-digit number, 16807 = 7**5, is also a fifth power. Similarly, the 9-digit number, 134217728 = 8**9, is a ninth power.
How many n-digit positive integers exist which are also an nth power?
"""
def num_digits(x):
return len(str(x))
if __name__=="__main__":
m = 100
s = 0
for i in range(1,m+1):
j=1
while num_digits(j**i) <= i:
if num_digits(j**i) == i:
s += 1
j+=1
print([i, s])
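    # Note on the bounds: j**i has i digits iff 10**(i-1) <= j**i < 10**i, so
    # j <= 9, and no solutions remain once 9**i < 10**(i-1) (i.e. i > 21); the
    # loop above discovers this by walking j upward until the digit count
    # overshoots.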
|
DanMayhem/project_euler
|
063.py
|
063.py
|
py
| 412 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24685520932
|
from . import views
from django.urls import path
app_name = 'bankapp' #namespace
urlpatterns = [
path('',views.home,name='home'),
path('login/',views.login,name='login'),
path('register/',views.register,name='register'),
path('logout/',views.logout,name='logout'),
path('user/',views.user,name='user'),
path('form/',views.form,name="form"),
]
|
simisaby/bank
|
Bank/bankapp/urls.py
|
urls.py
|
py
| 371 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20468728047
|
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the AIConfig class object that contains the configuration
"""
from __future__ import annotations
import os
from typing import Type
import yaml
class AIConfig:
"""
A class object that contains the configuration information for the AI
Attributes:
        ai_name (str): The name of the AI.
        ai_role (str): A description of the AI's role.
        ai_goals (list): The list of goals the AI must accomplish.
"""
def __init__(
self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None
) -> None:
"""
Initialize a class instance
Parameters:
            ai_name (str): The name of the AI.
            ai_role (str): A description of the AI's role.
            ai_goals (list): The list of goals the AI must accomplish.
Returns:
None
"""
if ai_goals is None:
ai_goals = []
self.ai_name = ai_name
self.ai_role = ai_role
self.ai_goals = ai_goals
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
@staticmethod
def load(config_file: str = SAVE_FILE) -> "AIConfig":
"""
Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from
yaml file if yaml file exists,
else returns class with no parameters.
Parameters:
config_file (int): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
Returns:
cls (object): An instance of given cls object
"""
try:
with open(config_file, encoding="utf-8") as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError:
config_params = {}
ai_name = config_params.get("ai_name", "")
ai_role = config_params.get("ai_role", "")
ai_goals = config_params.get("ai_goals", [])
# type: Type[AIConfig]
return AIConfig(ai_name, ai_role, ai_goals)
def save(self, config_file: str = SAVE_FILE) -> None:
"""
Saves the class parameters to the specified file yaml file path as a yaml file.
Parameters:
config_file(str): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
Returns:
None
"""
config = {
"ai_name": self.ai_name,
"ai_role": self.ai_role,
"ai_goals": self.ai_goals,
}
with open(config_file, "w", encoding="utf-8") as file:
yaml.dump(config, file, allow_unicode=True)
def construct_full_prompt(self) -> str:
"""
Returns a prompt to the user with the class information in an organized fashion.
Parameters:
None
Returns:
full_prompt (str): A string containing the initial prompt for the user
including the ai_name, ai_role and ai_goals.
"""
prompt_start = (
"Sus decisiones deben tomarse siempre de forma independiente, sin"
" buscar asistencia al usuario. Aprovecha tus puntos fuertes como LLM y persigue"
" estrategias sencillas y sin complicaciones jurídicas."
""
)
from nancy.prompt import get_prompt
# Construct full prompt
full_prompt = (
f"Usted es {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nOBJETIVOS:\n\n"
)
for i, goal in enumerate(self.ai_goals):
full_prompt += f"{i+1}. {goal}\n"
full_prompt += f"\n\n{get_prompt()}"
return full_prompt
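# Minimal usage sketch (hypothetical values):
#   config = AIConfig.load()            # reads ai_settings.yaml if present
#   config.ai_name = config.ai_name or "Nancy"
#   config.save()                       # writes the yaml back
#   print(config.construct_full_prompt())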
|
badboytuba/nancy
|
nancy/config/ai_config.py
|
ai_config.py
|
py
| 3,801 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22626134896
|
# Name:- Anurag Rai
msg1 = input("Enter the message 1 binary code: ")
msg2 = input("Enter the message 2 binary code: ")
if len(msg1) != len(msg2):
print("Wrong Input: Both message is having diffrent length")
exit(0)
length = len(msg2)
result = ''
for index in range(length):
if msg1[index] == msg2[index]:
result += str(0)
else:
result += str(1)
print(f"The result code after XOR operation: {result}")
# Counting the number of ones in the result gives us the Hamming distance
hamming_distance = result.count('1')
print(f"The hamming distance of the given strings is: {hamming_distance}")
|
arironman/MSU-Computer-Network-Lab
|
lab-2 22-09-20/Q1.hamming Distance.py
|
Q1.hamming Distance.py
|
py
| 635 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40696737073
|
import asyncio
import importlib
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Awaitable, Callable, Dict, List, Union
ParamValueT = Union[str, int, float, bool, List[Union[str, int, float, bool]]]
ExecutorFuncT = Callable[[Dict[str, ParamValueT]], Awaitable[Dict[str, Any]]]
class CommandExecutionException(Exception):
pass
class CommandExecutor(ABC):
"""
Abstract class for command executors
"""
def __init__(
self,
config: Dict[str, Any],
loop: asyncio.AbstractEventLoop,
) -> None:
self._config = config
self._loop = loop
async def execute_command(
self,
command: str,
params: Dict[str, ParamValueT],
) -> Dict[str, Any]:
"""
Run the command from the dispatch table with params
"""
cmd = self.get_command_dispatch().get(command)
if not cmd:
raise CommandExecutionException(f"no config for {command}")
allow_params = isinstance(cmd, partial) and cmd.args[-1]
if allow_params and list(params.keys()) != ["shell_params"]:
raise CommandExecutionException("the parameters must be JSON with one key, 'shell_params'")
result = await cmd(params)
return result
@abstractmethod
def get_command_dispatch(self) -> Dict[str, ExecutorFuncT]:
"""
Returns the command dispatch table for this command executor
"""
pass
def get_command_executor_impl(service):
"""
Gets the command executor impl from the service config
"""
config = service.config.get('generic_command_config', None)
assert config is not None, 'generic_command_config not found'
module = config.get('module', None)
impl_class = config.get('class', None)
assert module is not None, 'generic command module not found'
assert impl_class is not None, 'generic command class not found'
command_executor_class = getattr(
importlib.import_module(module),
impl_class,
)
command_executor = command_executor_class(service.config, service.loop)
assert isinstance(command_executor, CommandExecutor), \
'command_executor is not an instance of CommandExecutor'
return command_executor
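# Illustrative only: the shape of a minimal CommandExecutor subclass (not part
# of magma; the 'echo' command and its handler are hypothetical):
#
#   class EchoCommandExecutor(CommandExecutor):
#       async def _echo(self, params: Dict[str, ParamValueT]) -> Dict[str, Any]:
#           return {'echoed': params}
#
#       def get_command_dispatch(self) -> Dict[str, ExecutorFuncT]:
#           return {'echo': self._echo}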
|
magma/magma
|
orc8r/gateway/python/magma/magmad/generic_command/command_executor.py
|
command_executor.py
|
py
| 2,311 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
5759883314
|
# -*- coding: utf-8 -*-
"""
Spyder editor
This is a temporary file
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#%%
np.random.seed(5)
X = np.r_[np.random.randn(20,2)-[2,2],np.random.randn(20,2)+[2,2]]
Y = [0]*20+[1]*20
plt.scatter(X[:,0],X[:,1],c=Y)
plt.show()
#%% Classification model.
modelo = svm.SVC(kernel= 'linear')
#modelo = svm.SVC(kernel= 'poly', degree=2)
#modelo = svm.SVC(kernel= 'rbf')
modelo.fit(X,Y)
Yhat = modelo.predict(X)
#%% Plot the support vectors (linear kernel only; with polynomial or Gaussian kernels the boundary cannot be drawn this way)
W = modelo.coef_[0]
m = -W[0]/W[1]
xx = np.linspace(-4,4)
yy = m*xx-(modelo.intercept_[0]/W[1])
VS = modelo.support_vectors_
plt.plot(xx,yy, 'k--')
plt.scatter(X[:,0],X[:,1],c=Y)
plt.scatter(VS[:,0],VS[:,1],s=80,facecolors='k')
plt.show()
|
OscarFlores-IFi/CDINP19
|
code/p18.py
|
p18.py
|
py
| 902 |
python
|
es
|
code
| 0 |
github-code
|
6
|
37588584638
|
from sqlalchemy import TypeDecorator
from sqlalchemy.types import VARCHAR
from sqlalchemy import dialects
from sqlalchemy.dialects import postgresql, mysql
import json
from typing import Union, Optional
DialectType = Union[postgresql.UUID, VARCHAR]
ValueType = Optional[Union[dict, str]]
class JSON(TypeDecorator):
impl = VARCHAR
_MAX_VARCHAR_LIMIT = 100000
def load_dialect_impl(self, dialect: dialects) -> DialectType:
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgresql.JSON())
elif dialect.name == 'mysql':
if 'JSON' in dialect.ischema_names:
return dialect.type_descriptor(mysql.JSON())
else:
return dialect.type_descriptor(
VARCHAR(self._MAX_VARCHAR_LIMIT)
)
else:
return dialect.type_descriptor(VARCHAR(self._MAX_VARCHAR_LIMIT))
def process_bind_param(self, value: ValueType, dialect: dialects) -> Optional[str]:
if value is None:
return value
else:
return json.dumps(value)
def process_result_value(self, value: Optional[str], dialect: dialects) -> Optional[dict]:
if value is None:
return value
else:
return json.loads(value)
def copy(self, *args, **kwargs) -> 'JSON':
return JSON(*args, **kwargs)
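# Minimal usage sketch (the model and column names are hypothetical):
#
#   from sqlalchemy import Column, Integer
#   from sqlalchemy.ext.declarative import declarative_base
#
#   Base = declarative_base()
#
#   class Event(Base):
#       __tablename__ = 'events'
#       id = Column(Integer, primary_key=True)
#       payload = Column(JSON())  # native JSON on postgres/mysql, VARCHAR elsewhere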
|
infrascloudy/gandalf
|
gandalf/database/json_type.py
|
json_type.py
|
py
| 1,390 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36474819729
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class SideMenuItem(QWidget):
open_signal = pyqtSignal(int)
    def __init__(self, name, height, items=None, alignment="right", style=None):
super(SideMenuItem, self).__init__()
self.alignment = alignment
self.menu_status = False
self.menu_frame = None
self.animation = None
self.main_btn = None
self.height = height
self.items = items
self.name = name
self.default_style = {
"main_btn": {
"color": "white",
"icon": "assets/images/icons/icon_settings.png",
"arrow_down": "assets/images/icons/cil-arrow-bottom.png",
"arrow_up": "assets/images/icons/cil-arrow-top.png"
},
"item_btn": {
"color": "white"
},
}
self.style = style if style is not None else self.default_style
self.frame_height = len(self.items) * (self.height - 5)
self.setMaximumHeight(self.height + self.frame_height)
self.setMinimumHeight(self.height)
self.setObjectName("container")
self.setStyleSheet(f'''
#container QPushButton{{
background-repeat: no-repeat;
border: none;
background-color: transparent;
}}
#container #main_btn {{
background-image:url({self.style["main_btn"]["arrow_down"]});
background-position: {self.revers_alignment(self.alignment)} center;
padding-{self.alignment}: 22px;
border-{self.revers_alignment(self.alignment)}: 22px solid transparent;
color: {self.style["main_btn"]["color"]};
text-align: {self.alignment};
}}
#container #main_btn:hover {{
background-color: #bd93f9;
}}
#container #main_btn:pressed {{
background-color: #ff79c6;
color: rgb(255, 255, 255);
}}
#sideMenuItemMainMenuFrame QPushButton {{
background-position: {self.alignment} center;
text-align: {self.alignment};
color: {self.style["item_btn"]["color"]};
border-{self.alignment}: 22px solid transparent;
padding-{self.revers_alignment(self.alignment)}: 48px;
}}
#sideMenuItemMainMenuFrame QPushButton:hover {{
background-color: #bd93f9;
}}
#sideMenuItemMainMenuFrame QPushButton:pressed {{
background-color: #ff79c6;
color: rgb(255, 255, 255);
}}
''')
self.ui()
if self.alignment.lower() == "right":
self.setLayoutDirection(Qt.RightToLeft)
else:
self.setLayoutDirection(Qt.LeftToRight)
def ui(self):
self.menu_frame_func()
self.top_btn()
bg_layout = QVBoxLayout(self)
self.setLayout(bg_layout)
bg_layout.setSpacing(0)
bg_layout.setContentsMargins(0, 0, 0, 0)
bg_layout.addWidget(self.main_btn)
bg_layout.addWidget(self.menu_frame)
def top_btn(self):
self.main_btn = QPushButton(str(self.name))
self.main_btn.setObjectName("main_btn")
self.main_btn.setMinimumHeight(self.height)
self.main_btn.setMaximumHeight(self.height)
self.main_btn.clicked.connect(self.expand_menu)
if self.alignment.lower() == "right":
self.main_btn.setLayoutDirection(Qt.LeftToRight)
else:
self.main_btn.setLayoutDirection(Qt.RightToLeft)
def menu_frame_func(self):
self.menu_frame = QFrame()
self.menu_frame.setObjectName("sideMenuItemMainMenuFrame")
layout = QVBoxLayout(self.menu_frame)
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
self.menu_frame.setMaximumHeight(0)
self.menu_frame.setLayout(layout)
layout.setAlignment(Qt.AlignTop)
for item in self.items:
btn = QPushButton(str(item["name"]))
btn.setStyleSheet(f'''
background-image:url({item["icon"]});
''')
btn.setMinimumHeight(self.height - 5)
btn.setMaximumHeight(self.height - 5)
btn.clicked.connect(item['callback'])
btn.setLayoutDirection(Qt.RightToLeft)
layout.addWidget(btn)
@staticmethod
def revers_alignment(alignment):
if alignment == "right":
return "left"
else:
return "right"
def animate(self, start, end, widget):
self.animation = QPropertyAnimation(widget, b"maximumHeight")
self.animation.stop()
self.animation.setDuration(300)
self.animation.setStartValue(start)
self.animation.setEndValue(end)
self.animation.setEasingCurve(QEasingCurve.Linear)
self.animation.start()
def expand_menu(self):
if self.menu_status:
self.animate(self.frame_height, 0, self.menu_frame)
self.main_btn.setStyleSheet(f'''
background-image:url({self.style["main_btn"]["arrow_down"]});
''')
self.menu_status = False
self.open_signal.emit(- self.frame_height)
else:
self.animate(0, self.frame_height, self.menu_frame)
self.main_btn.setStyleSheet(f'''
background-image:url({self.style["main_btn"]["arrow_up"]});
''')
self.menu_status = True
self.open_signal.emit(self.frame_height)
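# Minimal usage sketch (hypothetical icon path and callback):
#   item = SideMenuItem(
#       "Settings", 40,
#       items=[{"name": "Profile", "icon": "assets/images/icons/user.png",
#               "callback": lambda: print("profile clicked")}],
#   )
#   item.open_signal.connect(lambda delta: print("menu height changed by", delta))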
|
Mahmoud1478/icestore
|
globals/widgets/sidemenu_item/sidemenuitem.py
|
sidemenuitem.py
|
py
| 5,687 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41749587001
|
import numpy as np
import pandas as pd
from _datetime import timezone, datetime, timedelta
#data config (all methods)
DATA_PATH = '../../dressipy/store.h5'
DATA_PATH_PROCESSED = '../data/dressipi/preparedDS/'
#DATA_FILE = 'yoochoose-clicks-10M'
DATA_FILE = 'views'
DATA_FILE_BUYS = 'transactions'
SESSION_LENGTH = 30 * 60 #30 minutes
#filtering config (all methods)
MIN_SESSION_LENGTH = 2
MIN_ITEM_SUPPORT = 5
#min date config
MIN_DATE = '2014-04-01'
#days test default config
DAYS_TEST = 4
#slicing default config
NUM_SLICES = 10
DAYS_OFFSET = 0
DAYS_SHIFT = 5
DAYS_TRAIN = 9
#DAYS_TEST = 1
#preprocessing from original gru4rec
def preprocess_org( path=DATA_PATH, file=DATA_FILE, buys_file=DATA_FILE_BUYS, path_proc=DATA_PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH ):
data, buys = load_data( path, file, buys_file )
data = filter_data( data, min_item_support, min_session_length )
split_data_org( data, path_proc+file )
buys.to_csv( path_proc + buys_file + '.txt', sep='\t', index=False)
#preprocessing from original gru4rec but from a certain point in time
def preprocess_org_min_date( path=DATA_PATH, file=DATA_FILE, buys_file=DATA_FILE_BUYS, path_proc=DATA_PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH, min_date=MIN_DATE ):
data, buys = load_data( path, file, buys_file )
data = filter_data( data, min_item_support, min_session_length )
data = filter_min_date( data, min_date )
split_data_org( data, path_proc+file )
buys.to_csv( path_proc + buys_file + '.txt', sep='\t', index=False)
#preprocessing adapted from original gru4rec
def preprocess_days_test( path=DATA_PATH, file=DATA_FILE, buys_file=DATA_FILE_BUYS, path_proc=DATA_PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH, days_test=DAYS_TEST ):
data, buys = load_data( path, file, buys_file )
data = filter_data( data, min_item_support, min_session_length )
split_data( data, path_proc+file, days_test )
print(buys)
buys.to_csv( path_proc + buys_file + '.txt', sep='\t', index=False)
#preprocessing to create data slices with a window
def preprocess_slices( path=DATA_PATH, file=DATA_FILE, buys_file=DATA_FILE_BUYS, path_proc=DATA_PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH,
num_slices = NUM_SLICES, days_offset = DAYS_OFFSET, days_shift = DAYS_SHIFT, days_train = DAYS_TRAIN, days_test=DAYS_TEST ):
data, buys = load_data( path, file, buys_file )
data = filter_data( data, min_item_support, min_session_length )
slice_data( data, path_proc+file, num_slices, days_offset, days_shift, days_train, days_test )
buys.to_csv( path_proc + buys_file + '.txt', sep='\t', index=False)
#just load and show info
def preprocess_info( path=DATA_PATH, file=DATA_FILE, buys_file=DATA_FILE_BUYS, path_proc=DATA_PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH ):
data, buys = load_data( path, file, buys_file )
data = filter_data( data, min_item_support, min_session_length )
def load_data( store, name, name_buys ) :
store = pd.HDFStore( store )
data = store[name]
buys = store[name_buys]
del( data['Time'] )
data['SessionId'] = data['SessionDay']
data['ItemId'] = data['Item']
data['Time'] = data['TimeObject'].apply( lambda t: t.timestamp() )
data['UserId'] = data['User']
data['TimeO'] = data['TimeObject']
del( data['Session'] )
del( data['SessionDay'] )
del( data['Item'] )
del( data['User'] )
del( data['TimeObject'] )
data.sort_values( ['SessionId','Time'], inplace=True )
sessionid_map = {} # day => user => id
sessiontime_map = {} # session => time
for row in data.itertuples(index=False):
user, session, time = row[3], row[0], row[4]
key = time.date()
if not key in sessionid_map:
sessionid_map[key] = {}
sessiontime_map[key] = {}
if not user in sessionid_map[key]:
sessionid_map[key][user] = {}
sessiontime_map[key][user] = {}
sessionid_map[key][user] = session
sessiontime_map[session] = time.timestamp()
del( data['TimeO'] )
buys['SessionId'] = buys.apply( lambda row: sessionid_map[row['Day'].date()][row['User']] if row['Day'].date() in sessionid_map and row['User'] in sessionid_map[row['Day'].date()] else -1, axis=1 )
buys['ItemId'] = buys['Item']
buys['TimeO'] = buys['Day']
del(buys['Time'])
buys = buys[ buys['SessionId'] > 0 ]
buys['Time'] = buys.apply( lambda row: sessiontime_map[row['SessionId']] + 1, axis=1 )
buys['UserId'] = buys['User']
del(buys['User'])
del(buys['Item'])
del(buys['Day'])
del(buys['TimeO'])
    return data, buys
def filter_data( data, min_item_support, min_session_length ) :
    # drop single-event sessions before computing item support
    session_lengths = data.groupby('SessionId').size()
    data = data[np.in1d(data.SessionId, session_lengths[ session_lengths>1 ].index)]
#filter item support
item_supports = data.groupby('ItemId').size()
data = data[np.in1d(data.ItemId, item_supports[ item_supports>= min_item_support ].index)]
#filter session length
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[ session_lengths>= min_session_length ].index)]
#output
data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc )
data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )
print('Filtered data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format( len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )
    return data
def filter_min_date( data, min_date='2014-04-01' ) :
min_datetime = datetime.strptime(min_date + ' 00:00:00', '%Y-%m-%d %H:%M:%S')
#filter
session_max_times = data.groupby('SessionId').Time.max()
session_keep = session_max_times[ session_max_times > min_datetime.timestamp() ].index
data = data[ np.in1d(data.SessionId, session_keep) ]
#output
data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc )
data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )
print('Filtered data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format( len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )
    return data
def split_data_org( data, output_file ) :
tmax = data.Time.max()
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_test = session_max_times[session_max_times >= tmax-86400].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(output_file + '_train_full.txt', sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(output_file + '_test.txt', sep='\t', index=False)
tmax = train.Time.max()
session_max_times = train.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_valid = session_max_times[session_max_times >= tmax-86400].index
train_tr = train[np.in1d(train.SessionId, session_train)]
valid = train[np.in1d(train.SessionId, session_valid)]
valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
tslength = valid.groupby('SessionId').size()
valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]
print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
train_tr.to_csv( output_file + '_train_tr.txt', sep='\t', index=False)
print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
valid.to_csv( output_file + '_train_valid.txt', sep='\t', index=False)
def split_data( data, output_file, days_test ) :
data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )
test_from = data_end - timedelta( days=days_test )
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[ session_max_times < test_from.timestamp() ].index
session_test = session_max_times[ session_max_times >= test_from.timestamp() ].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(output_file + '_train_full.txt', sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(output_file + '_test.txt', sep='\t', index=False)
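# Example call (hypothetical parameters): hold out the final 7 days of events
# as the test set.
#   split_data(events, 'dressipi', days_test=7)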
def slice_data( data, output_file, num_slices, days_offset, days_shift, days_train, days_test ):
for slice_id in range( 0, num_slices ) :
split_data_slice( data, output_file, slice_id, days_offset+(slice_id*days_shift), days_train, days_test )
def split_data_slice( data, output_file, slice_id, days_offset, days_train, days_test ) :
data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc )
data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc )
print('Full data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.
format( slice_id, len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.isoformat(), data_end.isoformat() ) )
start = datetime.fromtimestamp( data.Time.min(), timezone.utc ) + timedelta( days_offset )
middle = start + timedelta( days_train )
end = middle + timedelta( days_test )
#prefilter the timespan
session_max_times = data.groupby('SessionId').Time.max()
greater_start = session_max_times[session_max_times >= start.timestamp()].index
lower_end = session_max_times[session_max_times <= end.timestamp()].index
data_filtered = data[np.in1d(data.SessionId, greater_start.intersection( lower_end ))]
print('Slice data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} / {}'.
format( slice_id, len(data_filtered), data_filtered.SessionId.nunique(), data_filtered.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat(), end.date().isoformat() ) )
#split to train and test
session_max_times = data_filtered.groupby('SessionId').Time.max()
sessions_train = session_max_times[session_max_times < middle.timestamp()].index
sessions_test = session_max_times[session_max_times >= middle.timestamp()].index
train = data[np.in1d(data.SessionId, sessions_train)]
print('Train set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.
format( slice_id, len(train), train.SessionId.nunique(), train.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat() ) )
train.to_csv(output_file + '_train_full.'+str(slice_id)+'.txt', sep='\t', index=False)
test = data[np.in1d(data.SessionId, sessions_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Test set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} \n\n'.
format( slice_id, len(test), test.SessionId.nunique(), test.ItemId.nunique(), middle.date().isoformat(), end.date().isoformat() ) )
test.to_csv(output_file + '_test.'+str(slice_id)+'.txt', sep='\t', index=False)
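# Example call (hypothetical parameters): five 90-day training windows, each
# shifted by 30 days and followed by a 7-day test window.
#   slice_data(events, 'dressipi', num_slices=5, days_offset=0, days_shift=30,
#              days_train=90, days_test=7)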
# -------------------------------------
# MAIN TEST
# --------------------------------------
if __name__ == '__main__':
    preprocess_info()
|
rn5l/session-rec
|
preprocessing/session_based/preprocess_dressipi.py
|
preprocess_dressipi.py
|
py
| 12,917 |
python
|
en
|
code
| 362 |
github-code
|
6
|
20600597111
|
import functools
import os
import google.protobuf.json_format
from synthtool.protos.preconfig_pb2 import Preconfig
PRECONFIG_ENVIRONMENT_VARIABLE = "SYNTHTOOL_PRECONFIG_FILE"
PRECONFIG_HELP = """
A json file containing a description of prefetch sources that this synth.py may
us. See preconfig.proto for detail about the format.
"""
@functools.lru_cache(maxsize=None)
def load():
"""Loads the preconfig file specified in an environment variable.
Returns:
An instance of Preconfig
"""
preconfig_file_path = os.environ.get(PRECONFIG_ENVIRONMENT_VARIABLE)
if not preconfig_file_path:
return Preconfig()
with open(preconfig_file_path, "rt") as json_file:
return google.protobuf.json_format.Parse(json_file.read(), Preconfig())
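# Minimal usage sketch (hypothetical path): point the environment variable at a
# preconfig JSON before the first call; lru_cache memoizes the parsed result.
#   os.environ[PRECONFIG_ENVIRONMENT_VARIABLE] = "/tmp/preconfig.json"
#   preconfig = load()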
|
googleapis/google-cloud-java
|
owl-bot-postprocessor/synthtool/preconfig.py
|
preconfig.py
|
py
| 777 |
python
|
en
|
code
| 1,781 |
github-code
|
6
|
4369034891
|
# coding: utf-8
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import numpy as np
train_df = pd.read_csv('../data/train.csv')
test_df = pd.read_csv('../data/test.csv')
# Fill missing values: use the median for numeric columns and the mode (most
# frequent value) for string columns
from sklearn.base import TransformerMixin
class DataFrameImputer(TransformerMixin):
def fit(self, X, y=None):
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].median() for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
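# Example: DataFrameImputer().fit_transform(df) fills NaNs column by column,
# using the median for numeric columns and the most frequent value for
# object (string) columns.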
train_df['Family'] = train_df['Parch'] + train_df['SibSp']
test_df['Family'] = test_df['Parch'] + test_df['SibSp']
# print(train_df.loc[:,['Family','Parch','SibSp']])
feature_columns_to_use = ['Pclass', 'Age', 'Sex', 'Fare', 'Family', 'Embarked']
nonnumeric_columns = ['Sex', 'Embarked']
big_X = pd.concat([train_df[feature_columns_to_use], test_df[feature_columns_to_use]])  # DataFrame.append was removed in pandas 2.0
big_X_Imputed = DataFrameImputer().fit_transform(big_X)
le = LabelEncoder()
for feature in nonnumeric_columns:
big_X_Imputed[feature] = le.fit_transform(big_X_Imputed[feature])
X_train = big_X_Imputed[0:train_df.shape[0]].to_numpy()  # as_matrix() was removed in pandas 1.0
Y_train = train_df['Survived']
X_test = big_X_Imputed[train_df.shape[0]:].to_numpy()
gbm = xgb.XGBClassifier(max_depth=3, n_estimators=300, learning_rate=0.05)
gbm.fit(X_train, Y_train)
Y_pred = gbm.predict(X_test)
print(gbm.score(X_train, Y_train))
submission = pd.DataFrame({
'PassengerId': test_df['PassengerId'],
"Survived": Y_pred
})
# print(submission.head())
submission.to_csv('../submission/submission_7.csv', index=False)
|
Gczaizi/kaggle
|
Titanic/XGBoost/XGBoost.py
|
XGBoost.py
|
py
| 1,786 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37585700958
|
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter.messagebox import showinfo
import tkinter.font as tkFont
import sqlite3, time, datetime, random
name_of_db = 'inventory_master.db'
my_conn = sqlite3.connect(name_of_db)
cdb = my_conn.cursor()
def create_table():
cdb.execute(
'CREATE TABLE IF NOT EXISTS customer_master('
'idno INTEGER PRIMARY KEY,'
'datestamp TEXT, '
'customer_name TEXT, '
'address TEXT, '
'town TEXT, '
'post_code TEXT, '
'contact TEXT)')
def show_ID():
frmList = tk.Tk()
frmList.title("List of customer")
width = 665
height = 500
screenwidth = frmList.winfo_screenwidth()
screenheight = frmList.winfo_screenheight()
alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
frmList.geometry(alignstr)
frmList.resizable(width=False, height=False)
customerID = txtID.get()
data_set = my_conn.execute("SELECT * FROM customer_master WHERE idno=?", (customerID,))
# btnFullName.grid(columnspan=2, padx=15, pady=15)
output_data(data_set, frmList)
clear_form()
def show_Name():
frmList = tk.Tk()
frmList.title("List of customer")
width = 665
height = 500
screenwidth = frmList.winfo_screenwidth()
screenheight = frmList.winfo_screenheight()
alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
frmList.geometry(alignstr)
frmList.resizable(width=False, height=False)
customerName = txtName.get()
data_set = my_conn.execute("SELECT * FROM customer_master WHERE customer_name like?", (customerName,))
# btnFullName.grid(columnspan=2, padx=15, pady=15)
output_data(data_set, frmList)
clear_form()
def show_Contact():
frmList = tk.Tk()
frmList.title("List of customer")
width = 665
height = 500
screenwidth = frmList.winfo_screenwidth()
screenheight = frmList.winfo_screenheight()
alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
frmList.geometry(alignstr)
frmList.resizable(width=False, height=False)
contact = txtContact.get()
data_set = my_conn.execute("SELECT * FROM customer_master WHERE contact like?", (contact,))
# btnFullName.grid(columnspan=2, padx=15, pady=15);2
output_data(data_set, frmList)
clear_form()
def update_record():
with my_conn:
customer_id = txtID.get()
customer_name = txtName.get()
address = txtAddress.get()
town = txtTown.get()
post_code = txtPostCode.get()
contact = txtContact.get()
cdb.execute("UPDATE customer_master SET customer_name=?, address=?, town=?, post_code=?, contact=? WHERE idno=?",
(customer_name, address, town, post_code, contact, customer_id))
my_conn.commit()
msg = f'Record Successfully Saved!'
showinfo(title='Information', message=msg)
clear_form()
def delete_record():
with my_conn:
customer_id = txtID.get()
cdb.execute("DELETE FROM customer_master WHERE idno=?", (customer_id,))
my_conn.commit()
clear_form()
def output_data(data_set, frmList):
i = 0 # row value inside the loop
for person in data_set:
for j in range(len(person)):
e = Entry(frmList, width=15, fg='black')
e.grid(row=i, column=j)
e.insert(END, person[j])
i = i + 1
return frmList
def clear_form():
txtID.delete(0, END)
txtName.delete(0, END)
txtAddress.delete(0, END)
txtTown.delete(0, END)
txtContact.delete(0, END)
txtPostCode.delete(0, END)
def btnClose_Command():
clear_form()
exit()
create_table()
frmCustomerUpdate = tk.Tk()
frmCustomerUpdate.title("Customer Update")
width = 513
height = 364
screenwidth = frmCustomerUpdate.winfo_screenwidth()
screenheight = frmCustomerUpdate.winfo_screenheight()
alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
frmCustomerUpdate.geometry(alignstr)
frmCustomerUpdate.resizable(width=False, height=False)
txtID = tk.Entry(frmCustomerUpdate)
txtID["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtID["font"] = ft
txtID["fg"] = "#333333"
txtID["justify"] = "center"
txtID["text"] = "Customer ID"
txtID.place(x=100, y=60, width=251, height=30)
txtName = tk.Entry(frmCustomerUpdate)
txtName["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtName["font"] = ft
txtName["fg"] = "#333333"
txtName["justify"] = "left"
txtName["text"] = "Customer Name"
txtName.place(x=100, y=110, width=251, height=30)
txtAddress = tk.Entry(frmCustomerUpdate)
txtAddress["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtAddress["font"] = ft
txtAddress["fg"] = "#333333"
txtAddress["justify"] = "left"
txtAddress["text"] = "Address"
txtAddress.place(x=100, y=160, width=250, height=30)
txtTown = tk.Entry(frmCustomerUpdate)
txtTown["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtTown["font"] = ft
txtTown["fg"] = "#333333"
txtTown["justify"] = "left"
txtTown["text"] = "Town"
txtTown.place(x=100, y=210, width=248, height=30)
txtPostCode = tk.Entry(frmCustomerUpdate)
txtPostCode["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtPostCode["font"] = ft
txtPostCode["fg"] = "#333333"
txtPostCode["justify"] = "left"
txtPostCode["text"] = "Post Code"
txtPostCode.place(x=100, y=260, width=248, height=30)
txtContact = tk.Entry(frmCustomerUpdate)
txtContact["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtContact["font"] = ft
txtContact["fg"] = "#333333"
txtContact["justify"] = "left"
txtContact["text"] = "Contact"
txtContact.place(x=100, y=310, width=247, height=30)
lblID = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblID["font"] = ft
lblID["fg"] = "#333333"
lblID["justify"] = "left"
lblID["text"] = "Customer ID"
lblID.place(x=10, y=60, width=89, height=30)
lblName = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblName["font"] = ft
lblName["fg"] = "#333333"
lblName["justify"] = "left"
lblName["text"] = "Customer Name"
lblName.place(x=10, y=110, width=91, height=30)
lblAddress = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblAddress["font"] = ft
lblAddress["fg"] = "#333333"
lblAddress["justify"] = "left"
lblAddress["text"] = "Address"
lblAddress.place(x=10, y=160, width=91, height=30)
lblTown = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblTown["font"] = ft
lblTown["fg"] = "#333333"
lblTown["justify"] = "left"
lblTown["text"] = "Town"
lblTown.place(x=10, y=210, width=92, height=30)
lblPostCode = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblPostCode["font"] = ft
lblPostCode["fg"] = "#333333"
lblPostCode["justify"] = "left"
lblPostCode["text"] = "Post Code"
lblPostCode.place(x=10, y=260, width=91, height=30)
lblContact = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblContact["font"] = ft
lblContact["fg"] = "#333333"
lblContact["justify"] = "left"
lblContact["text"] = "Mobile No."
lblContact.place(x=10, y=310, width=91, height=30)
lblTitle = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=22)
lblTitle["font"] = ft
lblTitle["fg"] = "#333333"
lblTitle["justify"] = "center"
lblTitle["text"] = "CUSTOMER UPDATE"
lblTitle.place(x=10, y=10, width=488, height=37)
btncustomerID = tk.Button(frmCustomerUpdate)
btncustomerID["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btncustomerID["font"] = ft
btncustomerID["fg"] = "#000000"
btncustomerID["justify"] = "center"
btncustomerID["text"] = "Search Customer ID"
btncustomerID.place(x=370, y=60, width=130, height=30)
btncustomerID["command"] = show_ID
btncustomerName = tk.Button(frmCustomerUpdate)
btncustomerName["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btncustomerName["font"] = ft
btncustomerName["fg"] = "#000000"
btncustomerName["justify"] = "center"
btncustomerName["text"] = "Search Customer Name"
btncustomerName.place(x=370, y=110, width=130, height=30)
btncustomerName["command"] = show_Name
btnMobile = tk.Button(frmCustomerUpdate)
btnMobile["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btnMobile["font"] = ft
btnMobile["fg"] = "#000000"
btnMobile["justify"] = "center"
btnMobile["text"] = "Search Mobile No."
btnMobile.place(x=370, y=160, width=129, height=30)
btnMobile["command"] = show_Contact
btnUpdate = tk.Button(frmCustomerUpdate)
btnUpdate["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btnUpdate["font"] = ft
btnUpdate["fg"] = "#000000"
btnUpdate["justify"] = "center"
btnUpdate["text"] = "Update"
btnUpdate.place(x=370, y=210, width=128, height=30)
btnUpdate["command"] = update_record
btnDelete = tk.Button(frmCustomerUpdate)
btnDelete["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btnDelete["font"] = ft
btnDelete["fg"] = "#000000"
btnDelete["justify"] = "center"
btnDelete["text"] = "Delete"
btnDelete.place(x=370, y=260, width=126, height=30)
btnDelete["command"] = delete_record
btnClose = tk.Button(frmCustomerUpdate)
btnClose["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btnClose["font"] = ft
btnClose["fg"] = "#000000"
btnClose["justify"] = "center"
btnClose["text"] = "Close"
btnClose.place(x=370, y=310, width=126, height=30)
btnClose["command"] = btnClose_Command
frmCustomerUpdate.mainloop() # run form by default
|
InfoSoftBD/Python
|
CustomerUpdate.py
|
CustomerUpdate.py
|
py
| 9,946 |
python
|
en
|
code
| 2 |
github-code
|
6
|
36830731053
|
import json
import time
from threading import Thread
import pika
from pika.exceptions import ConnectionClosed
from utils import Logging
class RabbitMQClient(Logging):
_channel_impl = None
def __init__(self, address, credentials, exchange, exchange_type='topic'):
super(RabbitMQClient, self).__init__()
self._address = address
self._exchange = exchange
self._credentials = credentials
self._exchange_type = exchange_type
self._reset_consumer_thread(start=False)
self._declare_exchange()
def send(self, topic, message):
self._channel.basic_publish(exchange=self._exchange,
routing_key=topic,
body=message)
def subscribe(self, topic, handler):
queue_name = self._channel.queue_declare(exclusive=True).method.queue
self._channel.queue_bind(exchange=self._exchange,
queue=queue_name,
routing_key=topic)
self._channel.basic_consume(handler, queue=queue_name)
if not self._consumer_thread.is_alive():
self._reset_consumer_thread(start=True)
def consume(self, inactivity_timeout, handler, timeout_handler):
queue_name = self._channel.queue_declare(exclusive=True).method.queue
self._channel.queue_bind(exchange=self._exchange,
queue=queue_name)
for message in self._channel.consume(queue=queue_name,
inactivity_timeout=inactivity_timeout):
if message is not None:
handler(self._channel, message)
else:
timeout_handler()
def _declare_exchange(self):
self._channel.exchange_declare(exchange=self._exchange,
exchange_type=self._exchange_type)
def _reset_consumer_thread(self, start):
self._consumer_thread = Thread(target=self._channel.start_consuming)
self._consumer_thread.daemon = True
if start:
assert not self._consumer_thread.is_alive()
self._consumer_thread.start()
@property
def _channel(self):
if not self._channel_impl:
connection = self._establish_connection_to_mq(self._address, self._credentials)
self._channel_impl = connection.channel()
return self._channel_impl
@staticmethod
def _establish_connection_to_mq(address, credentials):
while True:
try:
return pika.BlockingConnection(
pika.ConnectionParameters(host=address[0], port=address[1],
credentials=pika.PlainCredentials(credentials[0], credentials[1])))
except ConnectionClosed:
time.sleep(1)
class RabbitMQJsonSender(Logging):
def __init__(self, rabbit_mq_client, topic):
super(RabbitMQJsonSender, self).__init__()
self._rabbit_mq_client = rabbit_mq_client
self._topic = topic
def send(self, message):
try:
json_message = json.dumps(message)
except Exception as e:
self.logger.debug('JSON serialization failed: {}. Message: {}'.format(e, message))
return
self._rabbit_mq_client.send(topic=self._topic,
message=json_message)
class RabbitMQJsonReceiver(Logging):
def __init__(self, rabbit_mq_client):
super(RabbitMQJsonReceiver, self).__init__()
self._rabbit_mq_client = rabbit_mq_client
def subscribe(self, topic, handler):
self._rabbit_mq_client.subscribe(topic, self._wrapped_handler(handler))
self.logger.debug('Subscribed to topic {}'.format(topic))
@staticmethod
def _wrapped_handler(actual_handler):
# noinspection PyUnusedLocal
def handle(ch, method, properties, body):
message = json.loads(body)
return actual_handler(message)
return handle
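# Minimal usage sketch (hypothetical host, credentials, and topics):
#   client = RabbitMQClient(("localhost", 5672), ("guest", "guest"), exchange="events")
#   RabbitMQJsonReceiver(client).subscribe("jobs.*", lambda msg: print(msg))
#   RabbitMQJsonSender(client, "jobs.created").send({"id": 1})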
|
deepsense-ai/seahorse
|
remote_notebook/code/rabbit_mq_client.py
|
rabbit_mq_client.py
|
py
| 4,047 |
python
|
en
|
code
| 104 |
github-code
|
6
|
12998412388
|
import uuid
from django.db import models
from django.conf import settings
User = settings.AUTH_USER_MODEL
# Create your models here.
class PlanCharge(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
tier = models.IntegerField()
charge_id = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, related_name='plans', on_delete=models.CASCADE)
    def __str__(self):  # Django on Python 3 uses __str__ rather than __unicode__
        return str(self.charge_id)
|
kapphire/99typos-server
|
plan/models.py
|
models.py
|
py
| 585 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11579230306
|
import sklearn
import cv2
import pandas as pd
import numpy as np
import math
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from collections import Counter
from scipy.spatial import distance_matrix
from scipy.sparse.csgraph import shortest_path
class ImageClassifier:
def __init__(self, n_clusters, target):
self._n_clusters = n_clusters
self._colorspaces = {
cv2.COLOR_BGR2HSV: cv2.COLOR_HSV2BGR,
cv2.COLOR_BGR2LAB: cv2.COLOR_LAB2BGR,
cv2.COLOR_BGR2HLS: cv2.COLOR_HLS2BGR,
}
self._img = cv2.imread(target)
self._rows,self._cols,_ = self._img.shape
def run(self, dst):
df = self.get_dataframe(colorspace=cv2.COLOR_BGR2HSV)
cluster_map = self.run_kmeans(df, [0])
clusters = self.get_clusters(cluster_map)
cmp = lambda pixel: int(pixel[0])
clusters = self.sort_clusters(clusters, cmp, color_sort=cv2.COLOR_BGR2LAB)
res = self.merge_clusters(clusters, lambda cluster: sum(cluster[0][0]))
cv2.imwrite(dst, res)
def get_dataframe(self, colorspace=None):
"""
Function to get a dataframe from an image's data.
Return value (pandas.DataFrame):
dataframe with every pixel's information (3 channels).
pixels are extracted left to right, top to bottom.
Parameters:
img_mat (cv2.Mat): image to extract data from (must be in BGR colorspace)
colorspace (cv2.COLOR_BGR2*): used if you want to form dataframe from other colorspace
"""
data = {'val1':[], 'val2':[], 'val3':[]}
img = self._img.copy()
# Convert image to desired colorspace
if colorspace is not None:
img = cv2.cvtColor(img, colorspace).astype(np.uint8)
for i in range(self._rows):
for j in range(self._cols):
data['val1'].append(img[i][j][0])
data['val2'].append(img[i][j][1])
data['val3'].append(img[i][j][2])
df = pd.DataFrame(data=data)
return df
def get_optimal_n_clusters(self, dataframe, keys):
max_n = 0
max_score = 0
x = dataframe.iloc[:, keys].values
print("Finding optimal cluster count...")
for n_clusters in range(2, 11):
            kmeans = KMeans(n_clusters=n_clusters, n_init=10, max_iter=300)  # n_jobs was removed from KMeans in scikit-learn 1.0
preds = kmeans.fit_predict(x)
print("start silhouette")
score = silhouette_score(x, preds)
print("end silhouette")
if (score > max_score):
max_n = n_clusters
max_score = score
print("For n_clusters = {}, silhouette score is {})".format(n_clusters, score))
print("Optimal cluster count is {}".format(max_n))
return max_n
def run_kmeans(self, dataframe, keys):
"""
Run kmeans from dataframe and returns clustering information.
Return value (list):
cluster id for each entry in the dataframe
Parameters:
dataframe (pandas.DataFrame): dataframe to run kmeans on
keys (list): indexes of the dataframe's columns used to run kmeans
"""
if self._n_clusters == -1:
self._n_clusters = self.get_optimal_n_clusters(dataframe, keys)
        kmeans = KMeans(n_clusters=self._n_clusters, n_init=10, max_iter=300)
x = dataframe.iloc[:, keys].values
y = kmeans.fit_predict(x)
return y
def get_clusters(self, cluster_map):
"""
Extract clusters from image
Return value (list):
List containing each cluster as a list of pixels.
Parameters:
n_clusters (int): Number of clusters to use
img_mat (cv2.Mat): img to extract pixels from
cluster_map (list): list containing cluster id for each pixel of img_mat (left to right, top to bottom)
"""
groups = [[] for i in range(self._n_clusters)]
for i in range(self._rows):
for j in range(self._cols):
group_id = cluster_map[i * self._cols + j]
groups[group_id].append(self._img[i][j])
return groups
def sort_clusters(self, clusters, comparator, color_sort=None):
"""
Sorts each cluster with a custom comparator
Return value (list):
list of sorted np.arrays
Parameters:
clusters (list): list of clusters to sort
comparator (lambda x): comparator function to use to sort clusters
colorspace: in which colorspace to be to sort the clusters
"""
avg = [np.zeros((3), dtype=np.uint64) for i in range (self._n_clusters)]
for i in range(len(clusters)):
cluster = clusters[i]
cluster = np.reshape(cluster, (1, len(cluster), 3)) # Reshape cluster so it fits cv2.Mat format, allowing to change its colorspace
if color_sort is not None: # Convert cluster to desired colorspace
cluster = cv2.cvtColor(cluster, color_sort).astype(np.uint8)
cluster[0] = np.array(sorted(cluster[0], key=comparator)).astype(np.uint8) # Sort cluster with specified comparator
if color_sort is not None: # Convert cluster back to BGR
cluster = cv2.cvtColor(cluster, self._colorspaces[color_sort]).astype(np.uint8)
clusters[i] = cluster
return clusters
def merge_clusters(self, clusters, comparator):
"""
Merges all clusters into one image. Clusters are places from left to right, top to bottom.
Return value (cv2.Mat):
cv2 image with merged clusters
Parameters:
clusters (list): list of clusters (np.arrays) (shape: (1, x, 3))
shape (2 value tuple): desired image shape (rows, cols)
"""
res = np.zeros((self._rows * self._cols, 3), dtype=np.uint8)
merge_index = 0
clusters = sorted(clusters, key=comparator)
for cluster in clusters:
res[merge_index:merge_index+len(cluster[0])] = cluster[0]
merge_index = merge_index + len(cluster[0])
res = np.reshape(res, (self._rows, self._cols, 3))
return res
|
elraffray/pyImage
|
imageclassifier.py
|
imageclassifier.py
|
py
| 6,442 |
python
|
en
|
code
| 0 |
github-code
|
6
|
49613121
|
from collections import deque, defaultdict
import random
class RandomizedSet:
def __init__(self):
self.vec = deque()
self.hash_map = defaultdict(int)
def insert(self, val: int) -> bool:
if val in self.hash_map:
return False
self.vec.append(val)
self.hash_map[val] = len(self.vec) - 1
return True
def remove(self, val: int) -> bool:
if val not in self.hash_map:
return False
idx = self.hash_map[val]
last_val = self.vec[-1]
self.vec[idx] = last_val
self.vec.pop()
        # NOTE: update the moved element's index before del -- if val equals
        # last_val, the del must win, otherwise val would be re-added
        self.hash_map[last_val] = idx
del self.hash_map[val]
return True
def getRandom(self) -> int:
return self.vec[random.randint(0, len(self.vec) - 1)]
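# The deque/dict pair gives O(1) insert, remove, and getRandom: remove() swaps
# the target with the last element so only the tail is popped, patching the
# index map for the moved element before dropping the target's entry.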
if __name__ == "__main__":
obj = RandomizedSet()
assert obj.insert(1)
assert not obj.remove(2)
assert obj.insert(2)
print(obj.getRandom())
assert obj.remove(1)
assert not obj.insert(2)
assert obj.getRandom() == 2
|
code-cp/leetcode
|
solutions/380/main.py
|
main.py
|
py
| 1,102 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75093396986
|
from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
from pprint import pprint
from dotenv import load_dotenv
import os
EVENT_UPLOADED_MESSAGE = "message_uploaded"
load_dotenv()
UUID = os.getenv("uuid")
print("UUID desde dotenv es->")
print(UUID)
pnconfig = PNConfiguration()
pnconfig.subscribe_key = "sub-c-5640dcb4-620c-11ea-9a99-f2f107c29c38"
pnconfig.publish_key = "pub-c-3c259a14-9e90-49f0-bf85-03615209e485"
pnconfig.uuid = UUID
class PubNubClient:
display_controller = None
# PubNub configurations
class NewMessageSubscribeCallback(SubscribeCallback):
def __init__(self, firebase_client, drecorder, display_controller):
self.firebase_client = firebase_client
# self._drecorder = drecorder
self.display_controller = display_controller
def status(self, pubnub, status):
pass
def presence(self, pubnub, presence):
pprint(presence.__dict__)
def message(self, pubnub, message):
print('\n')
print('message from pubnub received')
print('\n')
if message.__dict__["message"]["content"] == "message_uploaded":
# self.display_controller.stop_loading()
num_messages = self.firebase_client.num_relevant_recordings()
self.display_controller.display_message_counter(num_messages)
# if message.__dict__["message"]["sender"] == pnconfig.uuid:
# pass
# self._firebase_client.fetch_relevant_recordings()
def __init__(self, firebase_client, drecorder, display_controller):
self.pubnub = PubNub(pnconfig)
self.pubnub.add_listener(
self.NewMessageSubscribeCallback(firebase_client, drecorder, display_controller))
self.pubnub.subscribe()\
.channels("pubnub_onboarding_channel")\
.with_presence()\
.execute()
# self.firebase_client = firebase_client
self.drecorder = drecorder
self.display_controller = display_controller
def publish_callback(self, envelope, status):
# print('full circle')
print('\n')
print('pubnub message published')
print('\n')
# print(envelope, status)
def broadcastUploadedMessage(self):
self.pubnub.publish()\
.channel("pubnub_onboarding_channel")\
.message({"sender": pnconfig.uuid, "content": EVENT_UPLOADED_MESSAGE, "url": self.drecorder.firebase_filename})\
.pn_async(self.publish_callback)
|
deibid/radio-azar
|
my_modules/PubNubClient.py
|
PubNubClient.py
|
py
| 2,682 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15430745288
|
class Solution:
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
output = ''
for i in digits:
output += str(i)
output_ = int(output)
output_ += 1
return [ int(i) for i in str(output_)]
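# Note: this goes through a list -> string -> int round trip; an in-place
# right-to-left carry propagation would avoid the conversions entirely.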
|
dipalira/LeetCode
|
Arrays/66.py
|
66.py
|
py
| 318 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29778129362
|
from flask import Flask, render_template, request, url_for
import y_u_so_stupid as fle
import json
app = Flask(__name__)
correctAnswer = ''
score = 0
highscore = 0
@app.route('/')
def play():
global correctAnswer
q = json.loads(fle.getRandomQuestion())
question = q['question']
choices = q['choices']
correctAnswer = q['answer']
return render_template('index.html',
question = question,
choices = choices,
score = score)
@app.route('/', methods=['POST'])
def game():
global score
global highscore
answer = request.form['answer']
if answer == correctAnswer:
score += 10
return play()
else:
if score > highscore:
highscore = score
return fail()
# fail() is called directly from game(), so it does not need its own route
def fail():
global score
currScore = score
score = 0
return render_template('fail.html',
currScore = currScore,
highscore = highscore,
correctAnswer = correctAnswer)
if __name__ == '__main__':
app.run()
|
asav13/PRLA-Verk5
|
part2/y_u_so_stupid_SERVER.py
|
y_u_so_stupid_SERVER.py
|
py
| 1,208 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13564278099
|
class RockPaperScissors:
def __init__(self, A, B) -> None:
self.your_move = A
self.my_move = B
def __del__(self):
pass
def __str__(self):
your_move_translated = {
'A' : 'rock',
'B' : 'paper',
'C' : 'scissors'
} .get(self.your_move)
my_move_translated = {
'X' : 'rock',
'Y' : 'paper',
'Z' : 'scissors'
} .get(self.my_move)
return f"{your_move_translated}, {my_move_translated}"
    def get_value(self):
        # assumed shape values (standard exercise scoring): rock=1, paper=2, scissors=3
        return {'X': 1, 'Y': 2, 'Z': 3}.get(self.my_move)
    def calculate_score(self):
        # assumed outcome scores: loss=0, draw=3, win=6, added to the shape value
        outcomes = {('A', 'X'): 3, ('A', 'Y'): 6, ('A', 'Z'): 0,
                    ('B', 'X'): 0, ('B', 'Y'): 3, ('B', 'Z'): 6,
                    ('C', 'X'): 6, ('C', 'Y'): 0, ('C', 'Z'): 3}
        return outcomes[(self.your_move, self.my_move)] + self.get_value()
# A Y
# B X
# C Z
first_turn = RockPaperScissors('A', 'Y')
print(first_turn)
score = RockPaperScissors.calculate_score(first_turn)
|
nicholaschungQR/Project2
|
Problem 2/problem2.py
|
problem2.py
|
py
| 852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42124061830
|
import requests
import os
from django.http import HttpResponse
from django.conf import settings
class ProductClient:
    def __init__(self):
        print("came inside product constructor")
        # os.getenv returns None when the variable is unset, so test truthiness
        # rather than != "" (the original check let host become None)
        if os.getenv("PRODUCT_HOST"):
            self.host = os.getenv("PRODUCT_HOST")
        elif settings.PRODUCT_HOST == "":
            self.host = "http://google.com"
        else:
            self.host = settings.PRODUCT_HOST
    def getAllProducts(self):
        print("Call all products api")
        fullUrl = self.host + "/productmanagement/v1/products/all"
        print("url is:" + fullUrl)
        response = requests.get(fullUrl)
        print(response.content)
        return response
|
Robinrrr10/storeorderui
|
client/productClient.py
|
productClient.py
|
py
| 738 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7903436646
|
# Baekjoon Online Judge 7662 - dual priority queue
import sys
import heapq
input = sys.stdin.readline
test = int(input())
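# Strategy: mirror every entry in a min-heap and a max-heap, tagged with its
# insertion index, and use visit[] for lazy deletion -- popping from one heap
# only marks the entry, and stale tops are skipped in the other heap later.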
for _ in range(test):
max_heap, min_heap = [], []
visit = [0] * 1_000_001
n = int(input())
for i in range(n):
cmd = input().split()
if cmd[0] == 'I':
heapq.heappush(min_heap, (int(cmd[1]), i))
heapq.heappush(max_heap, (int(cmd[1]) * -1, i))
            visit[i] = 1  # 1 means this entry has not been deleted from either heap yet
elif cmd[0] == 'D':
            if cmd[1] == '-1':  # on delete, first check by key whether the top was already removed via the other heap
                # entries already deleted through the other heap are discarded until an
                # undeleted entry surfaces; that one is then actually removed
                while min_heap and not visit[min_heap[0][1]]:  # visit == 0 -> entry already deleted
                    heapq.heappop(min_heap)  # discard (already removed via the other heap)
                if min_heap:
                    visit[min_heap[0][1]] = 0  # mark deleted so the other heap skips it later
                    heapq.heappop(min_heap)
            elif cmd[1] == '1':
                while max_heap and not visit[max_heap[0][1]]:  # discard entries already deleted via the other heap
                    heapq.heappop(max_heap)
if max_heap:
visit[max_heap[0][1]] = 0
heapq.heappop(max_heap)
while min_heap and not visit[min_heap[0][1]]:
heapq.heappop(min_heap)
while max_heap and not visit[max_heap[0][1]]:
heapq.heappop(max_heap)
if min_heap and max_heap:
print(-max_heap[0][0], min_heap[0][0])
else:
print('EMPTY')
|
wnstjd9701/CodingTest
|
백준/category/Class3/이중우선순위큐.py
|
이중우선순위큐.py
|
py
| 1,920 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
5769042811
|
import hashlib
import json
import os
import pathlib
import pprint  # used by _compare_eq_dict below
import shutil
import subprocess
from typing import Mapping, Any, List
class RunException(Exception):
pass
class ExecuteException(Exception):
pass
class style:
reset = 0
bold = 1
dim = 2
italic = 3
underline = 4
blink = 5
rblink = 6
reversed = 7
conceal = 8
crossed = 9
class fg:
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
gray = 37
reset = 39
def color(value):
return "\033[" + str(int(value)) + "m";
def print_check():
print("%s✓ %s" % (color(fg.green)+color(style.bold),
color(fg.reset)+color(style.reset)))
def bname(base, cmd, filename):
hstring = cmd
if filename:
hstring += filename
h = hashlib.sha224(hstring.encode()).hexdigest()[:7]
if filename:
bname = os.path.basename(filename)
bname, _ = os.path.splitext(bname)
return "%s-%s-%s" % (base, bname, h)
else:
return "%s-%s" % (base, h)
def _compare_eq_dict(
left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0
) -> List[str]:
explanation = [] # type: List[str]
set_left = set(left)
set_right = set(right)
common = set_left.intersection(set_right)
same = {k: left[k] for k in common if left[k] == right[k]}
if same and verbose < 2:
explanation += ["Omitting %s identical items" % len(same)]
elif same:
explanation += ["Common items:"]
explanation += pprint.pformat(same).splitlines()
diff = {k for k in common if left[k] != right[k]}
if diff:
explanation += ["Differing items:"]
for k in diff:
explanation += [repr({k: left[k]}) + " != " + repr({k: right[k]})]
extra_left = set_left - set_right
len_extra_left = len(extra_left)
if len_extra_left:
explanation.append(
"Left contains %d more item%s:"
% (len_extra_left, "" if len_extra_left == 1 else "s")
)
explanation.extend(
pprint.pformat({k: left[k] for k in extra_left}).splitlines()
)
extra_right = set_right - set_left
len_extra_right = len(extra_right)
if len_extra_right:
explanation.append(
"Right contains %d more item%s:"
% (len_extra_right, "" if len_extra_right == 1 else "s")
)
explanation.extend(
pprint.pformat({k: right[k] for k in extra_right}).splitlines()
)
return explanation
def fixdir(s):
local_dir = os.getcwd()
return s.replace(local_dir.encode(), "$DIR".encode())
def run(basename, cmd, out_dir, infile=None, extra_args=None):
"""
Runs the `cmd` and collects stdout, stderr, exit code.
The stdout, stderr and outfile are saved in the `out_dir` directory and
all metadata is saved in a json file, whose path is returned from the
function.
The idea is to use this function to test the compiler by running it with
an option to save the AST, ASR or LLVM IR or binary, and then ensure that
the output does not change.
Arguments:
basename ... name of the run
cmd ........ command to run, can use {infile} and {outfile}
out_dir .... output directory to store output
infile ..... optional input file. If present, it will check that it exists
and hash it.
extra_args . extra arguments, not part of the hash
Examples:
>>> run("cat2", "cat tests/cat.txt > {outfile}", "output", "tests/cat.txt")
>>> run("ls4", "ls --wrong-option", "output")
"""
assert basename is not None and basename != ""
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
if infile and not os.path.exists(infile):
raise RunException("The input file does not exist")
outfile = os.path.join(out_dir, basename + "." + "out")
cmd2 = cmd.format(infile=infile, outfile=outfile)
if extra_args:
cmd2 += " " + extra_args
r = subprocess.run(cmd2, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if not os.path.exists(outfile):
outfile = None
if len(r.stdout):
stdout_file = os.path.join(out_dir, basename + "." + "stdout")
open(stdout_file, "wb").write(fixdir(r.stdout))
else:
stdout_file = None
if len(r.stderr):
stderr_file = os.path.join(out_dir, basename + "." + "stderr")
open(stderr_file, "wb").write(fixdir(r.stderr))
else:
stderr_file = None
if infile:
infile_hash = hashlib.sha224(open(infile, "rb").read()).hexdigest()
else:
infile_hash = None
if outfile:
outfile_hash = hashlib.sha224(open(outfile, "rb").read()).hexdigest()
outfile = os.path.basename(outfile)
else:
outfile_hash = None
if stdout_file:
stdout_hash = hashlib.sha224(open(stdout_file, "rb").read()).hexdigest()
stdout_file = os.path.basename(stdout_file)
else:
stdout_hash = None
if stderr_file:
stderr_hash = hashlib.sha224(open(stderr_file, "rb").read()).hexdigest()
stderr_file = os.path.basename(stderr_file)
else:
stderr_hash = None
data = {
"basename": basename,
"cmd": cmd,
"infile": infile,
"infile_hash": infile_hash,
"outfile": outfile,
"outfile_hash": outfile_hash,
"stdout": stdout_file,
"stdout_hash": stdout_hash,
"stderr": stderr_file,
"stderr_hash": stderr_hash,
"returncode": r.returncode,
}
json_file = os.path.join(out_dir, basename + "." + "json")
json.dump(data, open(json_file, "w"), indent=4)
return json_file
def run_test(basename, cmd, infile=None, update_reference=False,
extra_args=None):
"""
Runs the test `cmd` and compare against reference results.
The `cmd` is executed via `run` (passing in `basename` and `infile`) and
the output is saved in the `output` directory. The generated json file is
then compared against reference results and if it differs, the
RunException is thrown.
Arguments:
basename ........... name of the run
cmd ................ command to run, can use {infile} and {outfile}
infile ............. optional input file. If present, it will check that
it exists and hash it.
update_reference ... if True, it will copy the output into the reference
directory as reference results, overwriting old ones
extra_args ......... Extra arguments to append to the command that are not
part of the hash
Examples:
>>> run_test("cat12", "cat {infile} > {outfile}", "cat.txt",
... update_reference=True)
>>> run_test("cat12", "cat {infile} > {outfile}", "cat.txt")
"""
s = " * %-6s " % basename
print(s, end="")
basename = bname(basename, cmd, infile)
if infile:
infile = os.path.join("tests", infile)
jo = run(basename, cmd, os.path.join("tests", "output"), infile=infile,
extra_args=extra_args)
jr = os.path.join("tests", "reference", os.path.basename(jo))
do = json.load(open(jo))
if update_reference:
shutil.copyfile(jo, jr)
for f in ["outfile", "stdout", "stderr"]:
if do[f]:
f_o = os.path.join(os.path.dirname(jo), do[f])
f_r = os.path.join(os.path.dirname(jr), do[f])
shutil.copyfile(f_o, f_r)
return
if not os.path.exists(jr):
raise RunException("The reference json file '%s' does not exist" % jr)
dr = json.load(open(jr))
if do != dr:
e = _compare_eq_dict(do, dr)
print("The JSON metadata differs against reference results")
print("Reference JSON:", jr)
print("Output JSON: ", jo)
print("\n".join(e))
if do["outfile_hash"] != dr["outfile_hash"]:
if do["outfile_hash"] is not None and dr["outfile_hash"] is not None:
fo = os.path.join("tests", "output", do["outfile"])
fr = os.path.join("tests", "reference", dr["outfile"])
if os.path.exists(fr):
print("Diff against: %s" % fr)
os.system("diff %s %s" % (fr, fo))
else:
print("Reference file '%s' does not exist" % fr)
if do["stdout_hash"] != dr["stdout_hash"]:
if do["stdout_hash"] is not None and dr["stdout_hash"] is not None:
fo = os.path.join("tests", "output", do["stdout"])
fr = os.path.join("tests", "reference", dr["stdout"])
if os.path.exists(fr):
print("Diff against: %s" % fr)
os.system("diff %s %s" % (fr, fo))
else:
print("Reference file '%s' does not exist" % fr)
if do["stderr_hash"] != dr["stderr_hash"]:
if do["stderr_hash"] is not None and dr["stderr_hash"] is not None:
fo = os.path.join("tests", "output", do["stderr"])
fr = os.path.join("tests", "reference", dr["stderr"])
if os.path.exists(fr):
print("Diff against: %s" % fr)
os.system("diff %s %s" % (fr, fo))
else:
print("Reference file '%s' does not exist" % fr)
elif do["stderr_hash"] is not None and dr["stderr_hash"] is None:
fo = os.path.join("tests", "output", do["stderr"])
print("No reference stderr output exists. Stderr:")
os.system("cat %s" % fo)
raise RunException("The reference result differs")
print_check()
|
Abdullahjavednesar/lpython
|
compiler_tester/tester.py
|
tester.py
|
py
| 9,744 |
python
|
en
|
code
| null |
github-code
|
6
|
32150278957
|
def solution(s):
answer = [0,0]
while s != '1':
cnt = s.count('0')
tranS = s.replace('0','')
s = bin(len(tranS))
s = s[2:]
answer[0] += 1
answer[1] += cnt
return answer
s = "1111111"
print(solution(s))
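# Worked trace for "1111111": "1111111" -> "111" -> "11" -> "10" -> "1",
# i.e. 4 binary conversions with 1 zero removed overall, so [4, 1] is printed.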
|
HS980924/Algorithm
|
src/etc/src/04_19(화)/이진변환.py
|
이진변환.py
|
py
| 292 |
python
|
en
|
code
| 2 |
github-code
|
6
|
764504946
|
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster import DBSCAN, KMeans, SpectralClustering
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelBinarizer
class ClusterTransformer(TransformerMixin, BaseEstimator):
"""Turns sklearn clustering algorithm into a transformer component that
you can use in a pipeline.
If the clustering method cannot be used for prediction
(aka. it does not have a predict method), then a nearest neighbour
vot will be used to infer cluster labels for unseen samples.
Parameters
----------
model: ClusterMixin
Sklearn clustering model.
n_neighbors: int
Number of neighbours to use for inference.
metric: str
Metric to use for determining nearest neighbours.
Attributes
----------
labeler: LabelBinarizer
Component that turns cluster labels into one-hot embeddings.
neighbors: KNeighborsClassifier
Classifier to use for out of sample prediction.
"""
def __init__(
self, model: ClusterMixin, n_neighbors: int = 5, metric: str = "cosine"
):
self.model = model
self.labeler = LabelBinarizer()
self.neighbors = KNeighborsClassifier(
n_neighbors=n_neighbors, metric=metric
)
def fit(self, X, y=None):
"""Fits the clustering algorithm and label binarizer.
Parameters
----------
X: ndarray of shape (n_observations, n_features)
Observations to cluster.
y: None
Ignored, exists for compatiblity.
Returns
-------
self
"""
labels = self.model.fit_predict(X)
if not hasattr(self.model, "predict"):
self.neighbors.fit(X, labels)
self.labeler.fit(labels)
return self
def transform(self, X):
"""Infers cluster labels for given data points.
Parameters
----------
X: ndarray of shape (n_observations, n_features)
Observations to cluster.
Returns
-------
ndarray of shape (n_observations, n_clusters)
One-hot encoding of cluster labels.
"""
if hasattr(self.model, "predict"):
labels = self.model.predict(X)
else:
labels = self.neighbors.predict(X)
return self.labeler.transform(labels)
def get_feature_names_out(self):
"""Returns the cluster classes for each dimension.
Returns
-------
ndarray of shape (n_clusters)
Cluster names.
"""
return self.labeler.classes_
def DBSCANTransformer(
eps: float = 0.5, min_samples: int = 5, metric: str = "cosine"
) -> ClusterTransformer:
"""Convenience function for creating a DBSCAN transformer.
Parameters
----------
eps : float, default 0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function.
min_samples : int, default 5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : str, default 'cosine'
The metric to use when calculating distance between instances in a
feature array.
Returns
-------
ClusterTransformer
Sklearn transformer component that wraps DBSCAN.
"""
model = DBSCAN(eps=eps, min_samples=min_samples, metric=metric)
return ClusterTransformer(model, metric="cosine")
def KMeansTransformer(n_clusters: int) -> ClusterTransformer:
"""Convenience function for creating a KMeans transformer.
Parameters
----------
n_clusters: int
Number of clusters.
Returns
-------
ClusterTransformer
Sklearn transformer component that wraps KMeans.
"""
model = KMeans(n_clusters=n_clusters)
return ClusterTransformer(model, metric="cosine")
def SpectralClusteringTransformer(n_clusters: int) -> ClusterTransformer:
"""Convenience function for creating a Spectral Clustering transformer.
Parameters
----------
n_clusters: int
Number of clusters.
Returns
-------
ClusterTransformer
Sklearn transformer component that wraps SpectralClustering.
"""
model = SpectralClustering(n_clusters=n_clusters)
return ClusterTransformer(model, metric="cosine")
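# Minimal usage sketch (hypothetical data):
#   import numpy as np
#   X = np.random.rand(100, 8)
#   onehot = KMeansTransformer(n_clusters=4).fit(X).transform(X)  # shape (100, 4)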
|
x-tabdeveloping/blackbert
|
blackbert/cluster.py
|
cluster.py
|
py
| 4,673 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42572073330
|
import abc
import collections
from typing import List, Callable, Optional, OrderedDict, Tuple
import pandas as pd
class PreProcessingBase:
def __init__(self,
df: pd.DataFrame,
actions: Optional[OrderedDict[Callable, Tuple]] = None):
self._df = df
self._actions = actions
if self._actions is None:
self._actions = collections.OrderedDict()
@abc.abstractmethod
def _get_actions(self) -> OrderedDict[Callable, Tuple]:
raise NotImplementedError
def setup(self):
self._actions = self._get_actions()
return self
def run(self) -> pd.DataFrame:
for action, args in self._actions.items():
self._df = self._df.apply(action, args=args)
return self._df
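# Subclasses implement _get_actions() to return an OrderedDict mapping each
# callable to its argument tuple; run() then applies them to the frame in order.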
|
gilcaspi/COVID-19-Vaccinations
|
data_processing/preprocessing/pre_processing_base.py
|
pre_processing_base.py
|
py
| 791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33425710181
|
import time
import math
n=int(input())
start = time.time()
def prime_testimony(n):
if n == 1:
return False
elif n==2 or n==3 :
return True
elif n%6 == 1 or n%6 == 5:
for i in range(3, math.floor(math.sqrt(n)) + 1, 2):
if n%i == 0:
return False
return True
else:
return False
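# Why n % 6 matters: every prime greater than 3 has the form 6k +/- 1, because
# 6k, 6k+2 and 6k+4 are even and 6k+3 is divisible by 3; trial division by odd
# numbers up to sqrt(n) settles the remaining candidates.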
print(prime_testimony(n))
print(time.time() - start)
|
harasees-singh/Ray_Traced_code
|
prime_testimony.py
|
prime_testimony.py
|
py
| 435 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27094908089
|
import pandas as pd
import random
from tqdm.auto import tqdm
tqdm.pandas()
import re
from tqdm import tqdm
import numpy as np
import cv2
from albumentations import (
Compose, OneOf, Normalize, Resize, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, CenterCrop
)
from albumentations.pytorch import ToTensorV2
from InChI_extra_image_gen import add_noise
def split_form(text):
PATTERN = re.compile('\d+|[A-Z][a-z]?|[^A-Za-z\d\/]|\/[a-z]')
return ' '.join(re.findall(PATTERN, text))
def get_atom_counts(df):
TARGETS = [
'B', 'Br', 'C', 'Cl',
'F', 'H', 'I', 'N',
'O', 'P', 'S', 'Si']
formula_regex = re.compile(r'[A-Z][a-z]?[0-9]*')
element_regex = re.compile(r'[A-Z][a-z]?')
number_regex = re.compile(r'[0-9]*')
atom_dict_list = []
for i in tqdm(df['Formula'].values):
atom_dict = dict()
for j in formula_regex.findall(i):
atom = number_regex.sub("", j)
dgts = element_regex.sub("", j)
atom_cnt = int(dgts) if len(dgts) > 0 else 1
atom_dict[atom] = atom_cnt
atom_dict_list.append(atom_dict)
atom_df = pd.DataFrame(atom_dict_list).fillna(0).astype(int)
atom_df = atom_df.sort_index(axis = 1)
for atom in TARGETS:
df[atom] = atom_df[atom]
return df
def train_file_path(image_id):
#pay attention to the directory before /train, need to change accordingly.
return "./bms-molecular-translation/train/{}/{}/{}/{}.png".format(
image_id[0], image_id[1], image_id[2], image_id
)
#Two ways to treat the input images. 1.crop and pad to fit the images' size to be constant. 2.resize images to certain w and h. Here is the crop function.
def crop_image(img,
contour_min_pixel = 2,
small_stuff_size = 2,
small_stuff_dist = 5,
pad_pixels = 5):
# idea: pad with contour_min_pixels just in case we cut off
# a small part of the structure that is separated by a missing pixel
#findContours only find white obj in black background color.
img = 255 - img
    # Threshold with BINARY + OTSU so every non-background (non-black) pixel
    # becomes white; OTSU picks the threshold automatically, catching objects
    # a fixed BINARY cutoff could miss.
_, thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#RETR_LIST lists all the contours without hierarchy of nested contours. CHAIN_APPROX_SIMPLE returns only the key pixels that form the contour, e.g., 4 points for a rectangle contour.
contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
#Store the small contours.
small_stuff = []
x_min0, y_min0, x_max0, y_max0 = np.inf, np.inf, 0, 0
for i in contours:
        if len(i) < contour_min_pixel:  # skip contours with fewer than contour_min_pixel points
continue
#x,y are the top-left coordinate of the rectangle and w, h are contour's width and heigh
x, y, w, h = cv2.boundingRect(i)
if w <= small_stuff_size and h <= small_stuff_size: # collect position of contours which are smaller than small_stuff_size.
small_stuff.append([x, y, x+w, y+h])
continue
#find the largest bounding rectangle.
x_min0 = min(x_min0, x)
y_min0 = min(y_min0, y)
x_max0 = max(x_max0, x + w)
y_max0 = max(y_max0, y + h)
x_min, y_min, x_max, y_max = x_min0, y_min0, x_max0, y_max0
# enlarge the found crop box if it cuts out small stuff that is very close by
for i in range(len(small_stuff)):
#if the small stuff overlap with the big obj, count the small stuff into the obj, update the xmin max ymin max with the small stuff's.
if small_stuff[i][0] < x_min0 and small_stuff[i][0] + small_stuff_dist >= x_min0:
x_min = small_stuff[i][0]
if small_stuff[i][1] < y_min0 and small_stuff[i][1] + small_stuff_dist >= y_min0:
y_min = small_stuff[i][1]
if small_stuff[i][2] > x_max0 and small_stuff[i][2] - small_stuff_dist <= x_max0:
x_max = small_stuff[i][2]
if small_stuff[i][3] > y_max0 and small_stuff[i][3] - small_stuff_dist <= y_max0:
y_max = small_stuff[i][3]
if pad_pixels > 0: # make sure we get the crop within a valid range, pad_pixels is the range to ensure the crop is larger than the obj but not exceeding the canvas.
y_min = max(0, y_min-pad_pixels)
y_max = min(img.shape[0], y_max+pad_pixels)
x_min = max(0, x_min-pad_pixels)
x_max = min(img.shape[1], x_max+pad_pixels)
img_cropped = img[y_min:y_max, x_min:x_max]
#flip the black/white colors.
# img_cropped = 255 - img_cropped
return img_cropped
def pad_image(image, desired_size):
h, w = image.shape[0], image.shape[1]
delta_h = desired_size - h
delta_w = desired_size - w
top, bottom = delta_h//2, delta_h - (delta_h//2)
left,right = delta_w//2, delta_w - (delta_w//2)
img_padded = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT,
value = [255, 255, 255])
return img_padded
def preprocess_train_images(train_df, transform, CFG):
#Goal of this func is to make all images the same size to fit the transformer model (crop and pad),
#create a new column 'image' to record the original image data and the transformed image data if the trans flag is 'rotate90 or verticalflip'.
#Here only one transformation is prepared because of the preliminary feeling that the scale of dataset is enough.
assert set(['InChI_text', 'file_path', 'text_length']).issubset(train_df.columns), 'make sure the df has been preprocessed and certain columns are created.'
trans_img = []
ori_img = []
transform_type = ['rotate90', 'verticalflip']
df = train_df.copy()
resize = Compose([Resize(CFG.image_size, CFG.image_size)])
for i in tqdm(range(len(train_df))):
img_path = train_df.loc[i, 'file_path']
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
if CFG.crop == True:
image = crop_image(image,
contour_min_pixel = 2,
small_stuff_size = 2,
small_stuff_dist = 5,
pad_pixels = 10)
image = resize(image = image)['image']
image = add_noise(image)
#np.expand_dims is used here because the input images needs to have 3 dimensions with the last one as 1.
#But imread(cv2.IMREAD_GRAYSCALE) can only give a 2D image.
image = np.expand_dims(image, axis = -1)
ori_img.append(image)
if CFG.trans_type == 'rotate90 or verticalflip':
trans_image = transform(transform_type[random.randint(0, 1)])(image = image)['image']
trans_img.append(trans_image)
df.insert(3, 'image', ori_img)
if CFG.trans_type == 'rotate90 or verticalflip':
train_df['image'] = trans_img
temp = pd.concat([df, train_df]).sample(frac = 1).reset_index(drop = True)
return temp
else:
return df
def get_transform(trans_type):
#transform images, need to annotate trans flag.
if trans_type == 'rotate90':
return Compose([
OneOf([
Rotate([90, 90], p = 0.5),
Rotate([-90, -90], p = 0.5),
], p = 1.0),
])
elif trans_type == 'verticalflip':
return Compose([
OneOf([
VerticalFlip()
], p = 1.0),
])
def get_aug(CFG):
#the goal is to normalize the image data and convert np array to torch tensor before sending to the model
return Compose([Normalize(mean = CFG.pixels_mean, std = CFG.pixels_std), ToTensorV2()])
|
phelchegs/bms-molecular-translation
|
InChI/InChI_preprocessing.py
|
InChI_preprocessing.py
|
py
| 7,948 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36040675316
|
import typing
from datetime import datetime, timedelta
import arrow
from ParadoxTrading.Utils.DataStruct import DataStruct
DATETIME_TYPE = typing.Union[str, datetime]
class SplitAbstract:
def __init__(self):
self.cur_bar: DataStruct = None
self.cur_bar_begin_time: DATETIME_TYPE = None
self.cur_bar_end_time: DATETIME_TYPE = None
self.bar_list: typing.List[DataStruct] = []
self.bar_begin_time_list: typing.List[DATETIME_TYPE] = []
self.bar_end_time_list: typing.List[DATETIME_TYPE] = []
    def __len__(self) -> int:
        return len(self.getBarList())
def getLastData(self) -> DataStruct:
"""
        get the most recent data point of the current bar
:return:
"""
return self.cur_bar.iloc[-1]
def getCurBar(self) -> DataStruct:
return self.cur_bar
def getCurBarBeginTime(self) -> DATETIME_TYPE:
return self.cur_bar_begin_time
def getCurBarEndTime(self) -> DATETIME_TYPE:
return self.cur_bar_end_time
def getBarList(self) -> typing.List[DataStruct]:
return self.bar_list
def getBarBeginTimeList(self) -> typing.List[DATETIME_TYPE]:
return self.bar_begin_time_list
def getBarEndTimeList(self) -> typing.List[DATETIME_TYPE]:
return self.bar_end_time_list
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
raise NotImplementedError('You need to implement _get_begin_end_time!')
def _create_new_bar(self, _data: DataStruct, _cur_time: DATETIME_TYPE):
self.cur_bar = _data.clone()
self.cur_bar_begin_time, self.cur_bar_end_time = \
self._get_begin_end_time(_cur_time)
self.bar_list.append(self.cur_bar)
self.bar_begin_time_list.append(self.cur_bar_begin_time)
self.bar_end_time_list.append(self.cur_bar_end_time)
def addOne(self, _data: DataStruct) -> bool:
"""
add one tick data into spliter
Args:
_data (DataStruct): one tick
Returns:
bool : whether created a new bar
"""
assert len(_data) == 1
cur_time = _data.index()[0]
if self.cur_bar is None:
self._create_new_bar(_data, cur_time)
return True
else:
if cur_time < self.cur_bar_end_time:
self.cur_bar.addDict(_data.toDict())
return False
else:
self._create_new_bar(_data, cur_time)
return True
def addMany(self, _data: DataStruct):
"""
add continue data into spliter
Args:
_data (DataStruct): continute data
"""
for d in _data:
self.addOne(d)
return self
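# Typical usage sketch (assuming `ticks` is a DataStruct of tick data):
#   bars = SplitIntoMinute(5).addMany(ticks).getBarList()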
class SplitIntoSecond(SplitAbstract):
def __init__(self, _second: int = 1):
super().__init__()
self.skip_s = _second
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
base_s = _cur_time.second // self.skip_s * self.skip_s
begin_datetime = _cur_time.replace(second=base_s, microsecond=0)
end_datetime = begin_datetime + timedelta(seconds=self.skip_s)
return begin_datetime, end_datetime
class SplitIntoMinute(SplitAbstract):
def __init__(self, _minute: int = 1):
super().__init__()
self.skip_m = _minute
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
base_m = _cur_time.minute // self.skip_m * self.skip_m
begin_datetime = _cur_time.replace(
minute=base_m, second=0, microsecond=0)
end_datetime = begin_datetime + timedelta(minutes=self.skip_m)
return begin_datetime, end_datetime
class SplitIntoHour(SplitAbstract):
def __init__(self, _hour: int = 1):
super().__init__()
self.skip_h = _hour
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
base_h = _cur_time.hour // self.skip_h * self.skip_h
begin_datetime = _cur_time.replace(
hour=base_h, minute=0, second=0, microsecond=0)
end_datetime = begin_datetime + timedelta(hours=self.skip_h)
return begin_datetime, end_datetime
class SplitIntoWeek(SplitAbstract):
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
cur_date = datetime.strptime(_cur_time, '%Y%m%d')
weekday = cur_date.weekday()
begin_datetime: datetime = cur_date - timedelta(days=weekday)
end_datetime: datetime = begin_datetime + timedelta(weeks=1)
return (
begin_datetime.strftime('%Y%m%d'),
end_datetime.strftime('%Y%m%d')
)
class SplitIntoMonth(SplitAbstract):
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
cur_date = arrow.get(_cur_time, 'YYYYMMDD')
begin_datetime = cur_date.replace(day=1)
end_datetime = begin_datetime.shift(months=1)
return (
begin_datetime.format('YYYYMMDD'),
end_datetime.format('YYYYMMDD')
)
class SplitIntoYear(SplitAbstract):
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
cur_date = arrow.get(_cur_time, 'YYYYMMDD')
        begin_datetime = cur_date.replace(month=1, day=1)
end_datetime = begin_datetime.shift(years=1)
return (
begin_datetime.format('YYYYMMDD'),
end_datetime.format('YYYYMMDD')
)
class SplitVolumeBars(SplitAbstract):
def __init__(
self, _use_key='volume', _volume_size: int = 1,
):
"""
:param _use_key: use which index to split volume
:param _volume_size: split ticks
"""
super().__init__()
self.use_key = _use_key
self.volume_size = _volume_size
self.total_volume = 0
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
return _cur_time, _cur_time
def addOne(self, _data: DataStruct):
assert len(_data) == 1
cur_time = _data.index()[0]
cur_volume = _data[self.use_key][0]
if self.cur_bar is None: # the first tick
self._create_new_bar(_data, cur_time)
self.total_volume = cur_volume
return True
if self.total_volume > self.volume_size:
self._create_new_bar(_data, cur_time)
self.total_volume = cur_volume
return True
self.cur_bar.addDict(_data.toDict())
self.cur_bar_end_time = cur_time # override end time
self.bar_end_time_list[-1] = cur_time
self.total_volume += cur_volume
return False
class SplitTickImbalance(SplitAbstract):
def __init__(
self, _use_key='lastprice',
_period=7, _init_T=1000
):
"""
<Advances in Financial Machine Learning> - 2.3.2.1
_use_key: use which index to calc bt
_init_T: the length of first bar
_period: period of EMA
"""
super().__init__()
self.use_key = _use_key
self.last_value = None
self.last_b = 1
self.sum_b = 0 # sum of b
self.num_b = 0 # total number of b
self.T = _init_T # len of Bar
self.P = None # probability of b == 1
self.period = _period
self.threshold = None
def _get_begin_end_time(
self, _cur_time: DATETIME_TYPE
) -> (DATETIME_TYPE, DATETIME_TYPE):
return _cur_time, _cur_time
def _update_b(self, _value):
# update value, b and total_b
if _value > self.last_value:
self.last_b = 1
elif _value < self.last_value:
self.last_b = -1
else:
pass
self.last_value = _value
self.sum_b += self.last_b
self.num_b += 1
def _reset_b(self):
self.sum_b = 0
self.num_b = 0
def _update_threshold(self):
new_T = self.num_b
new_P = (self.sum_b + self.num_b) / 2. / self.num_b
self.T += (new_T - self.T) / self.period
if self.P is None: # init p
self.P = new_P
else:
self.P += (new_P - self.P) / self.period
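        # expected-imbalance threshold: E[T] * |2 * P[b=1] - 1| (AFML section 2.3.2.1)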
self.threshold = self.T * abs(2 * self.P - 1)
def addOne(self, _data: DataStruct) -> bool:
# check data
assert len(_data) == 1
value = _data[self.use_key][0]
cur_time = _data.index()[0]
if self.cur_bar is None: # init the first bar
self.last_value = value
self._create_new_bar(_data, cur_time)
return True
self._update_b(value)
flag = False
if self.P is None: # current is the first bar
if self.num_b >= self.T: # finish the first bar
flag = True
elif abs(self.sum_b) >= self.threshold: # create new bar
flag = True
if flag:
self._update_threshold()
self._reset_b()
self._create_new_bar(_data, cur_time)
return True
else:
self.cur_bar.addDict(_data.toDict())
self.cur_bar_end_time = cur_time # override end time
self.bar_end_time_list[-1] = cur_time
return False
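
# --- Usage sketch (illustrative only; the DataStruct constructor and addDict
# --- payload below are assumed APIs, not verified against ParadoxTrading) ---
# from datetime import datetime
# from ParadoxTrading.Utils.DataStruct import DataStruct
#
# ticks = DataStruct(['time', 'lastprice'], 'time')  # hypothetical signature
# ticks.addDict({'time': datetime(2017, 1, 3, 9, 30, 1), 'lastprice': 100.0})
# ticks.addDict({'time': datetime(2017, 1, 3, 9, 31, 1), 'lastprice': 100.5})
# spliter = SplitIntoMinute(1).addMany(ticks)
# print(len(spliter), spliter.getBarBeginTimeList())  # 2 one-minute bars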
|
ppaanngggg/ParadoxTrading
|
ParadoxTrading/Utils/Split.py
|
Split.py
|
py
| 9,614 |
python
|
en
|
code
| 51 |
github-code
|
6
|
23857423265
|
"""Testing an common Anode RGB LED"""
from time import sleep
from picozero import RGBLED
def main():
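    # A common-anode LED sinks current through the GPIO pins, so the logic is
    # inverted; active_high=False tells picozero to handle that inversion.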
led = RGBLED(red=2, blue=3, green=4, active_high=False)
try:
while True:
led.color = 255, 0, 0
sleep(1)
led.color = 0, 0, 255
sleep(1)
finally:
led.off()
if __name__ == "__main__":
main()
|
ValdezFOmar/micropython-projects
|
tests/misc/anode_rgb_led.py
|
anode_rgb_led.py
|
py
| 375 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42363921059
|
#part-1
# file = open("day2Input.txt", "r")
# my_dict = {'X':1, 'Y':2, 'Z':3}
# score = 0
# for line in file:
# opponent = line[0]
# you = line[2]
# score += my_dict[you]
# if (opponent == 'A' and you == 'X') or (opponent == 'B' and you == 'Y') or (opponent == 'C' and you == 'Z'):
# score+=3
# elif (opponent == 'A' and you == 'Y') or(opponent=='B' and you=='Z')or (opponent=='C'and you=='X'):
# score += 6
# else:
# score += 0
# print(score)
#part-2
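# Scoring: shape you play (Rock/X=1, Paper/Y=2, Scissors/Z=3) + outcome (loss=0, draw=3, win=6).
# In part 2 the second column is instead the required outcome: X = lose, Y = draw, Z = win.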
file = open("day2Input.txt", "r")
dicto = {'X': 1, 'Y': 2, 'Z': 3}  # shape scores: X/Rock=1, Y/Paper=2, Z/Scissors=3
score = 0
for line in file:
opponent = line[0]
you = line[2]
    # the shape score is added only after 'you' is remapped to the shape actually played
#X means you need to lose
if you == 'X':
if opponent == 'A':
you = 'Z'
elif opponent == 'B':
you = 'X'
elif opponent == 'C':
you = 'Y'
#Y means you need to end the round in a draw
elif you == 'Y':
if opponent == 'A':
you = 'X'
elif opponent == 'B':
you = 'Y'
elif opponent == 'C':
you = 'Z'
#Z means you need to win
else:
if opponent == 'A':
you='Y'
elif opponent == 'B':
you = 'Z'
elif opponent == 'C':
            you = 'X'
    score += dicto[you]  # shape score of the shape actually played
if (opponent == 'A' and you == 'X') or (opponent == 'B' and you == 'Y') or (opponent == 'C' and you == 'Z'):
score += 3
elif (opponent == 'A' and you == 'Y') or(opponent=='B' and you=='Z')or (opponent=='C'and you=='X'):
score += 6
else:
score += 0
print(score)
|
JayAtSeneca/Advent-of-code-2022
|
day 2/day2.py
|
day2.py
|
py
| 1,605 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5500500071
|
import time
import base64
from google.cloud import pubsub_v1
from google.oauth2 import service_account
project_id = "<gcp_project_id>"
topic_name = "<topic_name>"
credentials = service_account.Credentials.from_service_account_file("<gcp_Service_account_file_path>")
print(credentials)
publisher = pubsub_v1.PublisherClient(credentials = credentials)
topic_path = publisher.topic_path(project_id, topic_name)
def callback(message_future):
# When timeout is unspecified, the exception method waits indefinitely.
print("1")
if message_future.exception(timeout=30):
print('Publishing message on {} threw an Exception {}.'.format(
topic_name, message_future.exception()))
else:
print(message_future.result())
with open("15.jpg", "rb") as imageFile:
str = base64.b64encode(imageFile.read())
#print(str)
data = "sample data"
# Data must be a bytestring
data = data.encode('utf-8')
# When you publish a message, the client returns a Future.
message_future = publisher.publish(topic_path, data=encoded_image)  # publishes the image; the encoded 'sample data' above is unused
message_future.add_done_callback(callback)
print(data)
print('Published message IDs:')
##############################################################################################
subscriber = pubsub_v1.SubscriberClient(credentials = credentials)
subscription_path = subscriber.subscription_path(
project_id, "subscribe")
def callback1(message):
print('Received message: {}'.format(message))
message.ack()
subscriber.subscribe(subscription_path, callback=callback1)
# The subscriber is non-blocking. We must keep the main thread from
# exiting to allow it to process messages asynchronously in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
time.sleep(60)
|
natsu1628/hackathons
|
ML/GCP-python-ML2/to_pubsub.py
|
to_pubsub.py
|
py
| 1,772 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31036024397
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
# function to convert linked list to list for addition
def toList(self, linked):
convt = []
while linked is not None:
convt.append(linked.val)
linked = linked.next
return convt
# function to add list of integers
def addLists(self, num1, num2):
sol = []
for i in range(len(num1)):
sol.append(num1[i]+num2[i])
for i in range(len(sol)-1):
if sol[i] >= 10:
sol[i] = sol[i] - 10
sol[i+1] = sol[i+1] + 1
if sol[(len(sol)-1)] >= 10:
sol[(len(sol)-1)] = sol[(len(sol)-1)] - 10
sol = sol+[1]
return sol
# function to change list of integers back to linked list
def cvtBack(self, lst):
cur = dummy = ListNode(0)
for e in lst:
cur.next = ListNode(e)
cur = cur.next
return dummy.next
def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
first = self.toList(l1)
second = self.toList(l2)
# matching length
diff = len(first) - len(second)
if diff < 0:
diff = -1 * diff
first = first + [0]*diff
else:
second = second + [0]*diff
result = self.addLists(first, second)
answer = self.cvtBack(result)
return (answer)
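
# --- Local test sketch (not part of the LeetCode submission) ---
# LeetCode injects ListNode and Optional; to run this file standalone, uncomment
# the ListNode class above and add `from typing import Optional`, then:
#
# s = Solution()
# l1 = s.cvtBack([2, 4, 3])                 # digits of 342, least significant first
# l2 = s.cvtBack([5, 6, 4])                 # digits of 465
# print(s.toList(s.addTwoNumbers(l1, l2)))  # [7, 0, 8] -> 342 + 465 = 807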
|
notkshitijsingh/leetcode-solutions
|
2. Add Two Numbers.py
|
2. Add Two Numbers.py
|
py
| 1,560 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28398915179
|
import datetime
import termux
from sync.misc.Config import config
from sync.misc.Logger import logger
class Notification:
__instance__ = None
def __init__(self):
self.sync_all = {}
self.watchers = {}
self.global_status = "Active"
now_date = datetime.datetime.now()
self.last_start = f"Started: {now_date.strftime('%Y-%m-%d@%H:%M:%S')}"
self.last_stop = f"Stopped: -"
self.last_start_stop_time = now_date.strftime('%a. %H:%M:%S')
self.last_full_sync = f"Fully synchronized: -"
@staticmethod
def get() -> "Notification":
if Notification.__instance__ is None:
Notification.__instance__ = Notification()
return Notification.__instance__
def set_full_sync_status(self, sync_all):
self.sync_all = sync_all
self.update()
def set_watchers(self, watchers):
self.watchers = watchers
self.update()
def set_global_status(self, global_status):
self.global_status = global_status
def set_inactive(self):
self.set_global_status("Inactive")
now_date = datetime.datetime.now()
self.last_stop = f"Stopped: {now_date.strftime('%Y-%m-%d@%H:%M:%S')}"
self.last_start_stop_time = now_date.strftime('%a. %H:%M:%S')
self.update()
def set_active(self):
self.set_global_status("Active")
now_date = datetime.datetime.now()
self.last_start = f"Started: {now_date.strftime('%Y-%m-%d@%H:%M:%S')}"
self.last_start_stop_time = now_date.strftime('%a. %H:%M:%S')
self.update()
def full_sync_done(self):
self.last_full_sync = f"Fully synchronized: {datetime.datetime.now().strftime('%Y-%m-%d@%H:%M:%S')}"
self.update()
def exiting(self):
self.set_global_status("Exited")
now_date = datetime.datetime.now()
self.last_start_stop_time = now_date.strftime('%a. %H:%M:%S')
self.update()
def update(self):
notification_title = f"Termux-sync [{self.global_status}] [{self.last_start_stop_time}]"
notification_id = 999
notification_content = ""
if config.debug:
notification_content += self.last_stop + "\n"
notification_content += self.last_start + "\n"
notification_content += self.last_full_sync + "\n"
notification_content += "\n"
for sync_info in config.sync_info_list:
item_line = f"{sync_info.label} "
if sync_info.id in self.sync_all:
item_line += f"{self.sync_all[sync_info.id]} | "
else:
item_line += f"- | "
if sync_info.id in self.watchers:
watcher = self.watchers[sync_info.id]
item_line += watcher.files_info.get_status()
if watcher.last_sync_date is not None:
last_sync_date = watcher.last_sync_date.strftime('%H:%M:%S')
item_line += f" ({last_sync_date})"
else:
item_line += " [Not watching]"
notification_content += item_line + "\n"
action = f"termux-open --content-type yaml {logger.log_file}"
termux.Notification.notify(notification_title,
notification_content,
notification_id,
args=("alert-once", "ongoing"),
kwargs={"button1": "See logs", "button1-action": action})
|
dpjl/termux-sync
|
sync/misc/Notification.py
|
Notification.py
|
py
| 3,522 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41152326339
|
from tkinter import *
from datetime import datetime, timedelta
import tkinter as tk
from tkinter import Entry, Label, StringVar, ttk, Checkbutton, Button, messagebox
import numpy as np
import pandas as pd
def generarCodigo(texto):
sumar = 0
codigo = texto[:3]
if texto[len(texto) // 2] == " ":
sumar = 1
codigo += texto[len(texto) // 2 + sumar : len(texto) // 2 + 2 + sumar]
codigo += texto[len(texto) - 1]
codigo += str(len(texto))
return codigo
def moda(lista):
repetido = lista[0]
for i in lista:
if lista.count(i) > lista.count(repetido):
repetido = i
return repetido
def nombreIncorrecto(texto):
invalidos = '1234567890!#$%&/()=?¡¿´*{¨]}[-_.:;,<>|°'
for i in texto:
if i in invalidos:
return True
return False
class Tabla:
def __init__(self, root, dataFrame, anchos, fechas, bgColor, posX, posY):
self.anchos = anchos
self.fechas = fechas
self.nuevoDatos = []
self.componentes = []
cont = 0
self.df = dataFrame
self.frm = ttk.Frame(root)
for k in dataFrame:
tmp = Entry(
self.frm,
width=anchos[cont],
bg=bgColor,
fg="black",
font=("Arial", 12),
highlightthickness=1,
highlightbackground="#000000",
highlightcolor="#000000",
)
tmp.grid(row=0, column=cont)
tmp.insert(INSERT, k)
cont += 1
self.lista = list(dataFrame.to_records(index=False))
self.filas = len(self.lista)
self.columnas = cont
for i in range(self.filas):
row = []
for j in range(self.columnas):
aux = Entry(
self.frm,
width=anchos[j],
fg="black",
font=(
"Arial",
12,
),
highlightthickness=1,
highlightbackground="#000000",
highlightcolor="#000000",
)
aux.grid(row=i + 1, column=j)
if len(fechas) == 0:
aux.insert(INSERT, self.lista[i][j])
else:
if j in fechas:
aux.insert(
INSERT,
pd.to_datetime(self.lista[i][j])
.date()
.strftime("%d/%m/%y"),
)
else:
aux.insert(INSERT, self.lista[i][j])
aux.configure(state="readonly")
row.append(aux)
self.componentes.append(row)
self.frm.pack()
self.frm.place(x=posX, y=posY)
def ingresarDatos(self, datos):
self.lista.append(datos)
for i in range(self.columnas):
aux = Entry(
self.frm,
width=self.anchos[i],
fg="black",
font=(
"Arial",
12,
),
highlightthickness=1,
highlightbackground="#000000",
highlightcolor="#000000",
)
aux.grid(row=self.filas + 1, column=i)
aux.insert(INSERT, datos[i])
aux.configure(state="readonly")
self.df.loc[self.df.shape[0]] = datos
self.filas += 1
return
def borrarUltimaFila(self):
if self.filas < 1:
messagebox.showerror(
title="ERROR", message="No hay datos que puedan ser borrados"
)
return
cont = 0
for i in self.frm.winfo_children():
if cont >= self.columnas * self.filas:
i.destroy()
cont += 1
self.df = self.df[:-1]
self.lista.pop()
self.filas -= 1
class FrmIngresoDeLibros:
def __init__(self, master, regresar):
self.frm = ttk.Frame(master)
self.nombreLibro = StringVar()
self.cantidadLibro = StringVar()
self.tabla = None
self.agregarComponentes(master, regresar)
def agregarComponentes(self, master, regresar):
hoy = datetime.today().strftime("%d/%m/%y")
Label(
text="INGRESO DE LIBROS",
font=("Arial", 24, "bold"),
bg="#315E7A",
fg="white",
width="500",
height="2",
).pack()
Label(
text=hoy,
font=("Arial", 12),
bg="#00E0FF",
fg="white",
width="20",
height="1",
).pack()
Label(text="Libro", font=("Arial", 12, "bold")).place(x=150, y=150)
Entry(
textvariable=self.nombreLibro,
width="25",
font=("Arial", 12),
highlightthickness=2,
highlightbackground="#000000",
highlightcolor="#000000",
).place(x=250, y=150)
Label(text="Cant", font=("Arial", 12, "bold")).place(x=520, y=150)
Entry(
textvariable=self.cantidadLibro,
width="25",
font=("Arial", 12),
highlightthickness=2,
highlightbackground="#000000",
highlightcolor="#000000",
).place(x=590, y=150)
Button(
text="Borrar",
font=("Arial", 12),
width="20",
bg="#D0A9F5",
height="2",
command=self.borrar,
).place(x=150, y=250)
Button(
text="Ingresar",
font=("Arial", 12),
width="20",
bg="#D0A9F5",
height="2",
command=lambda: self.ingresar(master),
).place(x=400, y=250)
Button(
text="Regresar",
font=("Arial", 12),
width="20",
bg="#D0A9F5",
height="2",
command=regresar,
).place(x=650, y=250)
self.mostrarTabla(master)
def borrar(self):
self.tabla.borrarUltimaFila()
archivo = self.tabla.df
archivo2 = pd.read_excel("EstadoLibros.xlsx", sheet_name="Hoja1")
archivo2 = archivo2[:-1]
archivo.to_excel("Libros.xlsx", sheet_name="Hoja1", index=False)
archivo2.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
def mostrarTabla(self, master):
archivo = pd.read_excel("Libros.xlsx", sheet_name="Hoja1")
anchos = [5, 40, 20, 20, 5]
fechas = [2]
self.tabla = Tabla(master, archivo, anchos, fechas, "#154673", 100, 350)
def ingresar(self, master):
n = len(self.tabla.lista) + 1
nombre = self.nombreLibro.get()
if nombre == "":
messagebox.showerror(
title="ERROR", message="El nombre ingresado es incorrecto"
)
return
fecha = datetime.now().date().strftime("%d/%m/%y")
try:
stock = int(self.cantidadLibro.get())
except ValueError:
messagebox.showerror(
title="ERROR", message="La cantidad ingresada es incorrecta"
)
return
if stock <= 0:
messagebox.showerror(
title="ERROR", message="Debe ingresar una cantidad mayor a 0"
)
return
if len(self.tabla.df[self.tabla.df["Nombre del Libro"] == nombre]) > 0:
index = self.tabla.df.index[
self.tabla.df["Nombre del Libro"] == nombre
].tolist()[0]
valores = self.tabla.df[self.tabla.df["Nombre del Libro"] == nombre].values[
0
]
valores[3] += stock
self.tabla.df.loc[index] = valores
self.tabla.frm.destroy()
archivo = self.tabla.df
archivo.to_excel("Libros.xlsx", sheet_name="Hoja1", index=False)
archivo2 = pd.read_excel("EstadoLibros.xlsx", sheet_name="Hoja1")
valores2 = archivo2[archivo2["Nombre del Libro"] == nombre].values[0]
valores2[5] += stock
valores2[4] = valores2[5] - valores2[3]
archivo2.loc[index] = valores2
archivo2.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
self.mostrarTabla(master)
messagebox.showinfo(
message="El libro se ha actualizado correctamente",
title="LIBRO ACTUALIZADO",
)
self.nombreLibro.set("")
self.cantidadLibro.set("")
return
datos = (n, nombre, fecha, stock)
self.nombreLibro.set("")
self.cantidadLibro.set("")
self.tabla.ingresarDatos(datos)
archivo = self.tabla.df
archivo.to_excel("Libros.xlsx", sheet_name="Hoja1", index=False)
archivo2 = pd.read_excel("EstadoLibros.xlsx", sheet_name="Hoja1")
archivo2.loc[archivo2.shape[0]] = [n, nombre, "Disponible", 0, stock, stock]
archivo2.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
class FrmRegistroEstudiante:
def __init__(self, master, regresar):
self.frm = Frame(master)
self.nombre = StringVar()
self.apellido = StringVar()
self.lectorDelMes = StringVar()
self.libroMasSolicitado = StringVar()
self.cbxOperacion = None
self.cbxLibro = None
self.tabla = None
self.ultimaOperacion = ""
self.ultimoLibro = ""
self.hallarDatos()
self.agregarComponentes(master, regresar)
def hallarDatos(self):
excel = pd.read_excel("HistorialLibros.xlsx", sheet_name="Hoja1")
nombres = excel["Nombre"]
apellidos = excel["Apellido"]
nombreCompleto = []
for i in range(len(nombres)):
nombreCompleto.append(nombres[i] + " " + apellidos[i])
self.lectorDelMes.set(moda(nombreCompleto))
libros = excel["Nombre del Libro"]
self.libroMasSolicitado.set(moda(list(libros)))
def agregarComponentes(self, master, regresar):
hoy = datetime.today().strftime("%d/%m/%y")
Label(
text="REGISTRO DEL ESTUDIANTE",
font=("Arial", 24, "bold"),
bg="#DF7401",
fg="white",
width="500",
height="2",
).pack()
Label(
text=hoy,
font=("Arial", 12),
bg="#F5DA81",
fg="white",
width="25",
height="1",
).pack()
Label(text="Nombre", font=("Arial", 12, "bold")).place(x=150, y=150)
Entry(
textvariable=self.nombre,
width="20",
font=("Arial", 12),
highlightthickness=2,
highlightbackground="#000000",
highlightcolor="#000000",
).place(x=250, y=150)
Label(text="Apellido", font=("Arial", 12, "bold")).place(x=150, y=200)
Entry(
textvariable=self.apellido,
width="20",
font=("Arial", 12),
highlightthickness=2,
highlightbackground="#000000",
highlightcolor="#000000",
).place(x=250, y=200)
Label(text="Operacion", font=("Arial", 12, "bold")).place(x=520, y=150)
self.cbxOperacion = ttk.Combobox(
state="readonly",
values=["Retiro", "Devolucion"],
width=15,
font=("Arial", 12),
)
self.cbxOperacion.place(x=630, y=150)
Label(text="Libro", font=("Arial", 12, "bold")).place(x=520, y=200)
self.cbxLibro = ttk.Combobox(values=["a"], width=20, font=("Arial", 12))
self.cbxLibro.place(x=630, y=200)
Button(
text="Borrar",
font=("Arial", 12),
width="20",
bg="#F7BE81",
height="2",
command=lambda: self.borrar(master),
).place(x=150, y=260)
Button(
text="Aceptar",
font=("Arial", 12),
width="20",
bg="#F7BE81",
height="2",
command=lambda: self.aceptar(master),
).place(x=400, y=260)
Button(
text="Regresar",
font=("Arial", 12),
width="20",
bg="#F7BE81",
height="2",
command=regresar,
).place(x=650, y=260)
Label(text="Lector del mes", font=("Arial", 12, "bold")).place(x=50, y=350)
Entry(
textvariable=self.lectorDelMes,
width="25",
font=("Arial", 12),
highlightthickness=2,
highlightbackground="#000000",
highlightcolor="#000000",
state="readonly",
).place(x=180, y=350)
Label(text="Libro mas solicitado", font=("Arial", 12, "bold")).place(
x=450, y=350
)
Entry(
textvariable=self.libroMasSolicitado,
width="30",
font=("Arial", 12),
highlightthickness=2,
highlightbackground="#000000",
highlightcolor="#000000",
state="readonly",
).place(x=620, y=350)
self.mostrarTabla(master)
self.cbxOperacion.current(0)
self.cbxLibro.configure(values=list(self.tabla.df["Nombre del Libro"]))
self.cbxLibro.current(0)
def mostrarTabla(self, master):
archivo = pd.read_excel("EstadoLibros.xlsx", sheet_name="Hoja1")
anchos = [5, 40, 20, 10, 10, 10]
fechas = []
self.tabla = Tabla(master, archivo, anchos, fechas, "#F5DA81", 50, 400)
def borrar(self, master):
if len(self.ultimaOperacion) == 0:
messagebox.showerror(title='ERROR', message='No hay registros anteriores para borrar')
return
excel = self.tabla.df
index = self.tabla.df.index[self.tabla.df["Nombre del Libro"] == self.ultimoLibro].tolist()[0]
valores = self.tabla.df[self.tabla.df["Nombre del Libro"] == self.ultimoLibro].values[0]
if self.ultimaOperacion == "Retiro":
valores[4] += 1
valores[3] -= 1
if valores[4] > 0:
valores[2] = 'Disponible'
historial = pd.read_excel("HistorialLibros.xlsx", sheet_name="Hoja1")
historial = historial[:-1]
historial.to_excel("HistorialLibros.xlsx", sheet_name="Hoja1", index=False)
else:
valores[3] += 1
valores[4] -= 1
if valores[4] == 0:
valores[2] = 'No Disponible'
excel.loc[index] = valores
excel.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
self.tabla.frm.destroy()
self.mostrarTabla(master)
self.hallarDatos()
self.ultimaOperacion = ""
self.ultimoLibro = ""
def aceptar(self, master):
nombre = self.nombre.get()
apellido = self.apellido.get()
operacion = self.cbxOperacion.get()
libro = self.cbxLibro.get()
excel = self.tabla.df
mensaje = ""
if len(nombre) == 0:
mensaje += "Debe ingresar el nombre del alumno\n"
if len(apellido) == 0:
mensaje += "Debe ingresar el apelldio del alumno\n"
if len(mensaje) > 0:
messagebox.showerror(title="ERROR", message=mensaje)
return
mensaje = ""
if nombreIncorrecto(nombre) is True:
mensaje += 'El nombre del alumno es incorrecto\n'
if nombreIncorrecto(apellido) is True:
mensaje += 'El apellido del alumno es incorrecto\n'
if len(mensaje) > 0:
messagebox.showerror(title='ERROR', message=mensaje)
return
if len(self.tabla.df[self.tabla.df["Nombre del Libro"] == libro]) > 0:
index = self.tabla.df.index[
self.tabla.df["Nombre del Libro"] == libro
].tolist()[0]
valores = self.tabla.df[self.tabla.df["Nombre del Libro"] == libro].values[
0
]
if operacion == "Retiro":
if valores[4] > 0:
valores[3] += 1
valores[4] -= 1
if valores[4] == 0:
valores[2] = 'No Disponible'
historial = pd.read_excel(
"HistorialLibros.xlsx", sheet_name="Hoja1"
)
n = len(list(historial.to_records(index=False))) + 1
codigo = generarCodigo(libro)
hoy = datetime.today()
entrega = timedelta(7)
datos = [
n,
nombre,
apellido,
libro,
codigo,
hoy.strftime("%d/%m/%y"),
datetime.date(hoy + entrega).strftime("%d/%m/%y"),
]
historial.loc[historial.shape[0]] = datos
historial.to_excel(
"HistorialLibros.xlsx", sheet_name="Hoja1", index=False
)
self.nombre.set("")
self.apellido.set("")
messagebox.showinfo(
title="RETIRO EXITOSO",
message="El libro ha sido retirado satisfactoriamente",
)
else:
messagebox.showerror(
title="ERROR", message="No quedan mas libros disponibles"
)
else:
if valores[4] < valores[5]:
valores[4] += 1
valores[3] -= 1
if valores[4] > 0:
valores[2] = 'Disponible'
self.nombre.set("")
self.apellido.set("")
messagebox.showinfo(
title="DEVOLUCION EXITOSA",
message="El libro ha sido devuelto satisfactoriamente",
)
else:
messagebox.showerror(
title="ERROR", message="No existen devoluciones pendientes"
)
self.ultimaOperacion = operacion
self.ultimoLibro = libro
excel.loc[index] = valores
excel.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
self.tabla.frm.destroy()
self.mostrarTabla(master)
self.hallarDatos()
else:
messagebox.showerror(
title="ERROR", message="El libro que estas solicitando no existe"
)
class FrmRetirosDevoluciones:
def __init__(self, master, regresar):
self.cbxLibro = None
self.tabla = None
self.agregarComponentes(master, regresar)
def agregarComponentes(self, master, regresar):
Label(text="Libro", font=("Arial", 12, "bold")).place(x=50, y=40)
self.cbxLibro = ttk.Combobox(values=["a"], width=30, font=("Arial", 12))
self.cbxLibro.place(x=150, y=40)
Button(
text="Buscar",
font=("Arial", 12),
width="20",
bg="#6C3483",
height="2",
command=lambda: self.actualizarTabla(master),
).place(x=500, y=20)
Button(
text="Regresar",
font=("Arial", 12),
width="20",
bg="#6C3483",
height="2",
command=regresar,
).place(x=750, y=20)
excel = pd.read_excel("Libros.xlsx", sheet_name="Hoja1")
self.cbxLibro.configure(values=list(excel["Nombre del Libro"]))
def actualizarTabla(self, master):
        if self.tabla is not None:
self.tabla.frm.destroy()
libro = self.cbxLibro.get()
if len(libro) == 0:
messagebox.showerror(title='ERROR', message='Debe ingresar el nombre del libro que desea consultar')
return
excel = pd.read_excel("HistorialLibros.xlsx", sheet_name="Hoja1")
if len(excel[excel["Nombre del Libro"] == libro]) > 0:
filtrado = excel[excel["Nombre del Libro"] == libro]
anchos = [5, 15, 15, len(libro), 10, 13, 13]
fechas = []
self.tabla = Tabla(
master, filtrado, anchos, fechas, "#A569BD", 43 - len(libro), 100
)
else:
messagebox.showerror(title='ERROR', message='No existen registros del libro ingresado')
class BibliotecaEscolar:
def __init__(self):
self.root = tk.Tk()
self.root.title("Biblioteca Escolar")
screen_width = self.root.winfo_screenwidth()
screen_height = self.root.winfo_screenheight()
w = 1000
h = 600
x = (screen_width/2) - (500)
y = (screen_height/2) - (300)
self.root.geometry('%dx%d+%d+%d' % (w, h, x, y))
self.root.resizable(False, False)
self.agregarComponentes()
self.formulario = None
self.root.mainloop()
def regresar(self):
for widget in self.root.winfo_children():
widget.destroy()
self.agregarComponentes()
def limpiarFormulario(self, frm):
for widget in frm.winfo_children():
widget.destroy()
def agregarComponentes(self):
hoy = datetime.today().strftime("%d-%m-%y")
Label(
text="BIBLIOTECA ESCOLAR",
font=("Arial", 24, "bold"),
bg="#27AE60",
fg="white",
width="500",
height="2",
).pack()
Label(
text=hoy,
font=("Arial", 12),
bg="#82E0AA",
fg="black",
width="25",
height="1",
).pack()
Button(
text="Registrar Libro",
font=("Arial", 16),
width="20",
bg="#315E7A",
height="4",
fg="white",
command=self.abrirFrmRegistrar,
).place(x=150, y=230)
Button(
text="Solicitudes Libro",
font=("Arial", 16),
width="20",
bg="#DF7401",
height="4",
fg="white",
command=self.abrirFrmSolicitud,
).place(x=600, y=230)
Button(
text="Salir del programa",
font=("Arial", 16),
width="20",
bg="#A93226",
height="4",
fg="white",
command=self.cerrarPrograma,
).place(x=600, y=400)
Button(
text="Retiros y devoluciones",
font=("Arial", 16),
width="20",
bg="#5B2C6F",
height="4",
fg="white",
command=self.abrirFrmRetirosDevoluciones,
).place(x=150, y=400)
def abrirFrmRegistrar(self):
self.limpiarFormulario(self.root)
self.formulario = FrmIngresoDeLibros(self.root, self.regresar)
def abrirFrmSolicitud(self):
self.limpiarFormulario(self.root)
self.formulario = FrmRegistroEstudiante(self.root, self.regresar)
def abrirFrmRetirosDevoluciones(self):
self.limpiarFormulario(self.root)
self.formulario = FrmRetirosDevoluciones(self.root, self.regresar)
def cerrarPrograma(self):
self.root.destroy()
a = BibliotecaEscolar()
|
Moisesmp75/TkinterForms
|
Trabajo2/Biblioteca.py
|
Biblioteca.py
|
py
| 23,516 |
python
|
es
|
code
| 0 |
github-code
|
6
|
4785005840
|
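# Maximum sum of two numbers with the same parity: an even total requires either
# the two largest odd values or the two largest even values, so compare both.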
n = int(input())
l = list(map(int,input().split()))
l.sort(reverse=True)
oddl = []
evenl = []
for i in l:
if i%2 == 0:
evenl.append(i)
else:
oddl.append(i)
ans = -1
if 2 <= len(oddl):
ans = max(ans,oddl[0]+oddl[1])
if 2 <= len(evenl):
ans = max(ans,evenl[0]+evenl[1])
print(ans)
|
K5h1n0/compe_prog_new
|
VirtualContest/008/7.py
|
7.py
|
py
| 310 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22546128961
|
# import random
# import time
#* Generating the list
# TAILLE = int(1e5)
# BORNE_SUP = int(1e2)
# liste_a_trier = [random.randint(0,BORNE_SUP) for _ in range(TAILLE)]
# print(liste_a_trier)
def fusion(A,p,q,r):
"""
    Merges the sublists A[p:q] and A[q:r] of A in place
"""
    # store the two sublists, each padded with an infinity sentinel
liste1 = A[p:q] + [float("inf")]
liste2 = A[q:r] + [float("inf")]
# print(liste1)
# print(liste2)
i, j = 0, 0
for k in range(p,r):
if liste1[i] < liste2[j]:
A[k] = liste1[i]
i+=1
else:
A[k] = liste2[j]
j+=1
# print(A)
def tri_fusion(A, p, r):
"""
    A is the list
    p is the index of the first element
    r is the index one past the last element
"""
    if p < r-1: # if the slice contains more than one element
        q = (p+r)//2 # midpoint
        # print(f"{A[p:q]} + {A[q:r]} | p={p} | q={q} | r={r}")
        tri_fusion(A, p, q) # sort the first half
        tri_fusion(A, q, r) # sort the second half
        fusion(A,p,q,r) # merge the two halves
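        # e.g. tri_fusion([2, 5, 1, 3], 0, 4) ends with fusion merging [2, 5] and [1, 3] into [1, 2, 3, 5]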
if __name__ == '__main__':
# print(liste_a_trier)
# triée = sorted(liste_a_trier)
# print(f"True = {triée}")
# start = time.time()
# tri_fusion(liste_a_trier, 0, len(liste_a_trier))
# print(f"{time.time()-start}")
n = int(input())
numbers = list(map(int, input().split()))
tri_fusion(numbers, 0, n)
print(" ".join([str(x) for x in numbers]))
|
PsychoLeo/Club_Informatique
|
7-Sorting/mergeSort.py
|
mergeSort.py
|
py
| 1,532 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
24102936874
|
from helpers import ReadLines
from typing import Tuple, List
class DayFive(ReadLines):
def __init__(
self, file_path="/home/jonathan/projects/2020-advent-of-code/five/input.txt"
):
super().__init__(file_input=file_path)
self.seat_ids = sorted(
[DayFive.identify_seat(seat_code)[2] for seat_code in self.inputs]
)
@staticmethod
def _process_code(code: List[str], _range: Tuple[int, int]) -> int:
"""
I'm leaving this method in, because it's quite neat - but it has been rendered useless by the more practical _binary_count method below
"""
if len(code) == 1:
keys = {"L": 0, "F": 0, "R": 1, "B": 1}
return _range[keys[code[0]]]
else:
next_letter = code.pop(0)
mid_point = int((_range[1] + 1 - _range[0]) / 2)
if next_letter == "F" or next_letter == "L":
new_range = _range[0], _range[0] + mid_point - 1
elif next_letter == "B" or next_letter == "R":
new_range = _range[0] + mid_point, _range[1]
return DayFive._process_code(code, new_range)
@staticmethod
def _binary_count(seat_code: str):
letter_key = {"F": "0", "L": "0", "B": "1", "R": "1"}
binary_string_code = "".join([letter_key[letter] for letter in seat_code])
return int(binary_string_code, 2)
@staticmethod
def identify_seat(seat_reference: str) -> Tuple[int, int, int]:
row = DayFive._binary_count(seat_reference[:7])
column = DayFive._binary_count(seat_reference[-3:])
seat_id = row * 8 + column
return row, column, seat_id
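
    # e.g. identify_seat("FBFBBFFRLR") -> (44, 5, 357), matching the AoC 2020 day 5 example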
def highest_id(self):
return max(self.seat_ids)
def find_missing_id(self) -> int:
all_ids = set([i for i in range(min(self.seat_ids), max(self.seat_ids) + 1)])
seat_ids = set(self.seat_ids)
return all_ids.difference(seat_ids).pop()
|
jonodrew/2020-advent-of-code
|
five/five.py
|
five.py
|
py
| 1,952 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24940438785
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import hashlib
import sys
import struct
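# NOTE: this script targets Python 2 (xrange and integer division with /).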
def getPIN(username):
''' According to code from https://github.com/nowind/sx_pi
'''
current_time = time.time()
time_divided_by_five = int(current_time) // 5
time_char = [0] * 4
temp = [0] * 32
time_hash = [0] * 4
PIN27 = [0] * 6
PIN = ""
md5 = hashlib.md5()
for i in xrange(0, 4):
time_char[i] = time_divided_by_five >> (8 * (3 - i)) & 0xFF
before_md5 = struct.pack('>I', time_divided_by_five) + \
(username.split('@')[0] + "singlenet01").encode('ascii')
md5.update(before_md5)
after_md5 = md5.hexdigest()
for i in xrange(0, 32):
temp[i] = time_char[(31 - i) / 8] & 1
time_char[(31 - i) / 8] = time_char[(31 - i) / 8] >> 1
for i in xrange(0, 4):
time_hash[i] = temp[i] * 128 + temp[4 + i] * 64 + temp[8 + i] * \
32 + temp[12 + i] * 16 + temp[16 + i] * 8 + temp[20 + i] * \
4 + temp[24 + i] * 2 + temp[28 + i]
temp[1] = (time_hash[0] & 3) << 4
temp[0] = (time_hash[0] >> 2) & 0x3F
temp[2] = (time_hash[1] & 0xF) << 2
temp[1] = (time_hash[1] >> 4 & 0xF) + temp[1]
temp[3] = time_hash[2] & 0x3F
temp[2] = ((time_hash[2] >> 6) & 0x3) + temp[2]
temp[5] = (time_hash[3] & 3) << 4
temp[4] = (time_hash[3] >> 2) & 0x3F
for i in xrange(0, 6):
PIN27[i] = temp[i] + 0x020
if PIN27[i] >= 0x40:
PIN27[i] += 1
for i in xrange(0, 6):
PIN += chr(PIN27[i])
PIN = '\r\n' + PIN + after_md5[0] + after_md5[1] + username
return PIN
def write_conf():
pin = getPIN(sys.argv[1])
with open('provider_tpl', 'r') as fp:
template = fp.read()
conf = template + '\"' + pin + '\"'
with open('/etc/ppp/peers/dsl-provider', 'w') as fp:
fp.write(conf)
with open('pap_tpl', 'r') as fp:
template = fp.read()
conf = template + '\"' + getPIN(sys.argv[1]) + \
'\" * \"' + sys.argv[2] + '\"'
with open('/etc/ppp/pap-secrets', 'w') as fp:
fp.write(conf)
if __name__ == '__main__':
write_conf()
os.system('pon dsl-provider')
|
novakoki/shanxun-linux
|
shanxun.py
|
shanxun.py
|
py
| 2,198 |
python
|
en
|
code
| 5 |
github-code
|
6
|
73928014588
|
import random
from cardboard import events
__all__ = ["UnorderedZone", "OrderedZone", "zone"]
# TODO: Clarify / make zone operations atomic
ENTER, LEAVE = events.ENTERED_ZONE, events.LEFT_ZONE
def _zone(name):
"""
Create a zone classmethod from the zone name.
"""
@classmethod
def zone(cls, game, contents=(), owner=None):
return cls(game, name=name, contents=contents, owner=owner)
return zone
class ZoneMixin(object):
def __init__(self, game, name, contents=(), owner=None):
self.game = game
self.name = name
self.owner = owner
self._contents = set(contents)
def __contains__(self, e):
return e in self._contents
def __str__(self):
return self.name
def __repr__(self):
return "<Zone: {}>".format(self)
def update(self, i, silent=False):
"""
Add multiple elements at the same time.
Analogous to list.extend and set.update.
"""
for e in i:
self.add(e, silent=silent)
def move(self, e, silent=False):
"""
Remove a card from its current zone and place it in this zone.
Raises a ValueError for cards that are already present.
"""
if e in self:
raise ValueError("'{}' is already in the {} zone.".format(e, self))
e.zone.remove(e, silent=silent)
self.add(e, silent=silent)
class UnorderedZone(ZoneMixin):
battlefield = _zone(u"battlefield")
exile = _zone(u"exile")
hand = _zone(u"hand")
ordered = False
def __iter__(self):
return iter(self._contents)
def __len__(self):
return len(self._contents)
def add(self, e, silent=False):
if not silent and self.owner is not None and self.owner != e.owner:
# TODO: log things that misbehaved
return getattr(e.owner, self.name).add(e)
if e in self:
if self.owner is not None:
s = "in {}'s {}".format(self.owner, self.name)
else:
s = "on the {}".format(self.name)
raise ValueError("{} is already {}.".format(e, s))
self._contents.add(e)
if not silent:
self.game.events.trigger(event=ENTER, card=e, zone=self)
def pop(self, silent=False):
        # pop before triggering; the old try/finally referenced 'e' before
        # assignment whenever the zone was empty
        e = self._contents.pop()
        if not silent:
            self.game.events.trigger(event=LEAVE, card=e, zone=self)
        return e
def remove(self, e, silent=False):
try:
self._contents.remove(e)
except KeyError:
raise ValueError("'{}' is not in the {} zone.".format(e, self))
else:
if not silent:
self.game.events.trigger(event=LEAVE, card=e, zone=self)
class OrderedZone(ZoneMixin):
graveyard = _zone(u"graveyard")
library = _zone(u"library")
stack = _zone(u"stack")
ordered = True
def __init__(self, game, name, contents=(), owner=None):
self._order = list(contents)
super(OrderedZone, self).__init__(
game=game, name=name, contents=self._order, owner=owner
)
def __getitem__(self, i):
# TODO / Beware: Zone slicing
return self._order[i]
def __iter__(self):
return iter(self._order)
def __len__(self):
# don't plan on allowing duplicates, but just in case, use order
return len(self._order)
def __reversed__(self):
return reversed(self._order)
def add(self, e, silent=False):
# a safeguard against cards that are accidentally being moved to
# another zone other than their owners (TODO: log misbehavers)
if not silent and self.owner is not None and self.owner != e.owner:
return getattr(e.owner, self.name).add(e)
if e in self:
if self.owner is not None:
s = "in {}'s {}".format(self.owner, self.name)
else:
s = "on the {}".format(self.name)
raise ValueError("{} is already {}.".format(e, s))
self._contents.add(e)
self._order.append(e)
if not silent:
self.game.events.trigger(event=ENTER, card=e, zone=self)
def count(self, e):
return self._order.count(e)
def index(self, e):
return self._order.index(e)
def pop(self, i=None, silent=False):
if i is None:
e = self._order.pop()
else:
e = self._order.pop(i)
self._contents.remove(e)
if not silent:
self.game.events.trigger(event=LEAVE, card=e, zone=self)
return e
def remove(self, e, silent=False):
if e not in self:
raise ValueError("'{}' is not in the {} zone.".format(e, self))
self._contents.remove(e)
self._order.remove(e)
if not silent:
self.game.events.trigger(event=LEAVE, card=e, zone=self)
def reverse(self):
self._order.reverse()
def shuffle(self):
random.shuffle(self._order)
zone = {"battlefield" : UnorderedZone.battlefield,
"exile" : UnorderedZone.exile,
"graveyard" : OrderedZone.graveyard,
"hand" : UnorderedZone.hand,
"library" : OrderedZone.library,
"stack" : OrderedZone.stack}
|
Julian/cardboard
|
cardboard/zone.py
|
zone.py
|
py
| 5,366 |
python
|
en
|
code
| 7 |
github-code
|
6
|
36341309654
|
# Sejun built an expression from positive numbers, +, -, and parentheses, then erased all the parentheses.
# He now wants to re-insert parentheses so that the value of the expression becomes as small as possible.
# Write a program that places parentheses appropriately to minimize the expression's value.
import sys
formula = sys.stdin.readline().split('-')
result = []
for i in formula:
cnt = 0
s = i.split('+')
for j in s:
cnt += int(j)
result.append(cnt)
resultnum = int(result[0]) * 2
for i in result:
resultnum -= i
print(resultnum)
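# e.g. "55-50+40" -> 55 - (50 + 40) = -35: everything after the first '-' can be grouped and subtracted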
|
jujinyoung/CodingTest
|
bakjjun_codingTest/1541.py
|
1541.py
|
py
| 609 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
24829002801
|
import pygame
pygame.init()
pygame.display.set_caption("WannabePong")
size = 800, 600
screen = pygame.display.set_mode(size)
width, height = size
speed = [1, 1]
bgc = 255, 255, 255
fontControls = pygame.font.SysFont("monospace", 16)
font = pygame.font.SysFont("monospace", 26)
fontCount = pygame.font.SysFont("monospace", 42)
pelota = pygame.image.load("pelota.png")
pelotaRect = pelota.get_rect()
palaRoja = pygame.image.load("palaRoja.png")
palaRojaRect = palaRoja.get_rect()
palaAzul = pygame.image.load("palaAzul.png")
palaAzulRect = palaAzul.get_rect()
divisor = pygame.image.load("divisor.png")
divisorRect = divisor.get_rect()
strikesRojo = 0
strikesAzul = 0
countdown = 10
run = True
divisorRect.move_ip(400, 0)
palaRojaRect.move_ip(1, 300)
palaAzulRect.move_ip(773, 300)
while countdown > 0:
count = fontCount.render("{0}".format(countdown), 1, (0,0,0))
redControls = fontControls.render("Moves with W and S keys", 1, (0,0,0))
blueControls = fontControls.render("Moves with UP and DOWN arrows", 1, (0,0,0))
screen.fill(bgc)
screen.blit(redControls, (5, 50))
screen.blit(blueControls, (505, 50))
screen.blit(count, (388, 250))
pygame.display.flip()
pygame.time.wait(1000)
countdown -= 1
while run:
pygame.time.delay(2)
pelotaRect = pelotaRect.move(speed)
keys = pygame.key.get_pressed()
strikesRojoDisplay = font.render("Strikes: {0}".format(strikesRojo), 1, (0,0,0))
strikesAzulDisplay = font.render("Strikes: {0}".format(strikesAzul), 1, (0,0,0))
winnerRojo = font.render("RED WINS!", 1, (0,0,0))
winnerAzul = font.render("BLUE WINS!", 1, (0,0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if keys[pygame.K_w] and palaRojaRect.top <= 0:
palaRojaRect = palaRojaRect.move(0, 0)
elif keys[pygame.K_w]:
palaRojaRect = palaRojaRect.move(0, -1)
if keys[pygame.K_s] and palaRojaRect.bottom >= height:
palaRojaRect = palaRojaRect.move(0, 0)
elif keys[pygame.K_s]:
palaRojaRect = palaRojaRect.move(0, 1)
if keys[pygame.K_UP] and palaAzulRect.top <= 0:
palaAzulRect = palaAzulRect.move(0, 0)
elif keys[pygame.K_UP]:
palaAzulRect = palaAzulRect.move(0, -1)
if keys[pygame.K_DOWN] and palaAzulRect.bottom >= height:
palaAzulRect = palaAzulRect.move(0, 0)
elif keys[pygame.K_DOWN]:
palaAzulRect = palaAzulRect.move(0, 1)
if palaRojaRect.colliderect(pelotaRect):
speed[0] = -speed[0]
if palaAzulRect.colliderect(pelotaRect):
speed[0] = -speed[0]
if pelotaRect.left <= 0 or pelotaRect.right >= width:
speed[0] = -speed[0]
if pelotaRect.left <= 0:
strikesRojo += 1
elif pelotaRect.right >= width:
strikesAzul += 1
if pelotaRect.top <= 0 or pelotaRect.bottom >= height:
speed[1] = -speed[1]
if strikesRojo == 3 or strikesAzul == 3:
run = False
screen.fill(bgc)
screen.blit(divisor, divisorRect)
screen.blit(pelota, pelotaRect)
screen.blit(palaRoja, palaRojaRect)
screen.blit(palaAzul, palaAzulRect)
screen.blit(strikesRojoDisplay, (5, 10))
screen.blit(strikesAzulDisplay, (633, 10))
pygame.display.flip()
screen.fill(bgc)
if strikesRojo == 3:
screen.blit(winnerAzul, (333, 250))
pygame.display.flip()
elif strikesAzul == 3:
screen.blit(winnerRojo, (333, 250))
pygame.display.flip()
pygame.time.wait(5000)
pygame.quit()  # pygame.QUIT is the event constant; pygame.quit() shuts the library down
|
vsanjorge/localMultiplayerPong
|
main.py
|
main.py
|
py
| 3,327 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5188129194
|
def quick_sort(a_list):
if len(a_list) < 2:
return a_list
else:
        pivot = a_list[0]
        # partition the tail (a_list[1:]) so duplicate values are not dropped
        less = quick_sort([i for i in a_list[1:] if i <= pivot])
        greater = quick_sort([i for i in a_list[1:] if i > pivot])
        return less + [pivot] + greater
print(quick_sort([7,6,3,99]))
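print(quick_sort([5, 3, 5, 1, 3]))  # duplicates preserved: [1, 3, 3, 5, 5]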
|
IshGill/DSA-Guides
|
Sorting and searching/Quicksort.py
|
Quicksort.py
|
py
| 320 |
python
|
en
|
code
| 9 |
github-code
|
6
|
70945120828
|
# Morphological analysis
from konlpy.tag import Okt
okt = Okt()
#result = okt.pos('고추 등 매운음식을 오랫동안 너무 많이 먹었을 경우 인지능력과 기억력을 저하시킬 위험이 높다는 연구결과가 나왔다.')
#result = okt.morphs('고추 등 매운음식을 오랫동안 너무 많이 먹었을 경우 인지능력과 기억력을 저하시킬 위험이 높다는 연구결과가 나왔다.')
#result = okt.nouns('고추 등 매운음식을 오랫동안 너무 많이 먹었을 경우 인지능력과 기억력을 저하시킬 위험이 높다는 연구결과가 나왔다.')
#print(result)
import urllib
from bs4 import BeautifulSoup
from urllib import parse
para = parse.quote("이순신")
print(para)
url = "https://ko.wikipedia.org/wiki/" + para
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page.read(), 'lxml')
print(soup)
wordlist = []
for item in soup.select("#mw-content-text > div > p"):
if item.string != None:
#print(item.string)
ss = item.string
wordlist += okt.nouns(ss)
print('wordlist 출력')
print(wordlist)
print('단어 수 : ' + str(len(wordlist)))
word_dict = {}
for i in wordlist:
if i in word_dict:
word_dict[i] += 1
else:
word_dict[i] = 1
print('\n\n word_dict 출력')
print(word_dict)
print('중복 단어 제거')
setdata = set(wordlist)
print(setdata)
print('발견된 단어 수 (중복x) : ' + str(len(setdata)))
# save the word counts as a CSV file
import csv
import pandas as pd
try:
f = csv.writer(open('ws1.csv', 'w', encoding='utf-8'))
f.writerow(word_dict)
except Exception as e:
print('err : ', e)
# df1 = pd.read_csv('ws1.csv', encoding='utf-8')
# print(df1)
with open('ws1.csv', 'r', encoding='utf-8')as f:
print(f.read())
print()
from pandas import Series, DataFrame
li_data = Series(wordlist)
#print(li_data)
print(li_data.value_counts()[:5])
print()
li_data = Series(word_dict)
print(li_data.value_counts()[:5])
print('-----------------')
df = DataFrame(wordlist, columns = ['단어'])
print(df.head())
###############################################################
|
kangmihee/EX_python
|
py_morpheme/pack/morp1.py
|
morp1.py
|
py
| 2,358 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
71567915387
|
def get_next_pos(row, col, direction):
if direction == 'up':
return row - 1, col
if direction == 'down':
return row + 1, col
if direction == 'left':
return row, col - 1
if direction == 'right':
return row, col + 1
def is_inside(row, col, size):
return 0 <= row < size and 0 <= col < size
def get_around_kids(matrix, row, col):
result = []
    # parentheses matter here: the original `and ... or ...` let the 'V' test run
    # on out-of-range (or negatively wrapped) indices
    if is_inside(row, col - 1, len(matrix)) and matrix[row][col - 1] in ('X', 'V'):
        result.append([row, col - 1])
    if is_inside(row, col + 1, len(matrix)) and matrix[row][col + 1] in ('X', 'V'):
        result.append([row, col + 1])
    if is_inside(row - 1, col, len(matrix)) and matrix[row - 1][col] in ('X', 'V'):
        result.append([row - 1, col])
    if is_inside(row + 1, col, len(matrix)) and matrix[row + 1][col] in ('X', 'V'):
        result.append([row + 1, col])
return result
gifts = int(input())
size = int(input())
santa_row = 0
santa_col = 0
nice_kids = 0
matrix = []
for row in range(size):
row_elements = input().split()
for col in range(size):
if row_elements[col] == 'S':
santa_row = row
santa_col = col
elif row_elements[col] == 'V':
nice_kids += 1
matrix.append(row_elements)
nice_kids_gifted = 0
while gifts > 0:
line = input()
if line == 'Christmas morning':
break
matrix[santa_row][santa_col] = '-'
santa_row, santa_col = get_next_pos(santa_row, santa_col, line)
if matrix[santa_row][santa_col] == 'V':
gifts -= 1
nice_kids_gifted += 1
elif matrix[santa_row][santa_col] == 'C':
around_kids = get_around_kids(matrix, santa_row, santa_col)
for kid_row, kid_col in around_kids:
if matrix[kid_row][kid_col] == 'V':
nice_kids_gifted += 1
gifts -= 1
matrix[kid_row][kid_col] = '-'
if gifts == 0:
break
matrix[santa_row][santa_col] = 'S'
if nice_kids_gifted != nice_kids and gifts == 0:
print("Santa ran out of presents!")
for row in matrix:
print(*row, sep=' ')
if nice_kids_gifted == nice_kids:
print(f"Good job, Santa! {nice_kids} happy nice kid/s.")
else:
print(f"No presents for {nice_kids - nice_kids_gifted} nice kid/s.")
|
lorindi/SoftUni-Software-Engineering
|
Python-Advanced/4.Multidimensional Lists/07_present_delivery.py
|
07_present_delivery.py
|
py
| 2,416 |
python
|
en
|
code
| 3 |
github-code
|
6
|
39426129134
|
''' Strategy to be backtested. '''
import backtrader as bt
# Create a Stratey
class TestStrategy(bt.Strategy):
''' Base class to be subclassed for user defined strategies. '''
# Moving average parameters
params = (('pfast',2),('pslow',184),)
def __init__(self):
self.dataclose = self.datas[0].close
self.datahigh = self.datas[0].high
self.datalow = self.datas[0].low
# Order variable will contain ongoing order details/status
self.order = None
# Instantiate moving averages
self.slow_sma = bt.indicators.MovingAverageSimple(self.datas[0],
period=self.params.pslow)
self.fast_sma = bt.indicators.MovingAverageSimple(self.datas[0],
period=self.params.pfast)
self.bar_executed = 0
def log(self, txt, dt=None):
''' Logging function for this strategy. '''
dt = dt or self.datas[0].datetime.date(0)
print(f'{dt.isoformat()}, {txt}')
def next(self):
'''
This method will be called for all remaining data points when
        the minimum period for all datas/indicators has been met.
'''
# Check for open orders
if self.order:
return
# Check if we are in the market
if not self.position:
# We are not in the market, look for a signal to OPEN trades
if self.fast_sma[0] > self.slow_sma[0]:
                self.log(f'BUY CREATED: {self.dataclose[0]:.2f}')
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
elif self.fast_sma[0] < self.slow_sma[0]:
                self.log(f'SELL CREATED: {self.dataclose[0]:.2f}')
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
def notify_order(self, order):
''' Receives an order whenever there has been a change in one. '''
if order.status in [order.Submitted, order.Accepted]:
# An active Buy/Sell order has been submitted/accepted - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log(f'BUY EXECUTED: {order.executed.price}')
elif order.issell():
self.log(f'SELL EXECUTED: {order.executed.price}')
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
# Reset orders
self.order = None
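
# --- Usage sketch (illustrative, not part of this module) ---
# import backtrader as bt
# cerebro = bt.Cerebro()
# cerebro.addstrategy(TestStrategy)
# cerebro.adddata(bt.feeds.YahooFinanceCSVData(dataname='prices.csv'))  # hypothetical CSV path
# cerebro.run()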
|
Kyle-sn/PaperStreet
|
python/backtest/strategy.py
|
strategy.py
|
py
| 2,714 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70488593467
|
import csv
import functools
import json
import math
import random
def cycle_call_parametrized(string_q: int, left_b: int, right_b: int):
def cycle_call(func):
# print(f'LALA')
def wrapper_(*args, **kwargs):
# creating a csv-file:
generate_csv(string_q, left_b, right_b)
roots = dict()
with open('info.csv', 'r', encoding='utf-8') as f:
reader = csv.reader(f, dialect='excel')
for i, row in enumerate(reader):
if row:
a, b, c = row
a, b, c, = int(a), int(b), int(c)
roots[i // 2] = str(func(a, b, c))
return roots
return wrapper_
return cycle_call
def jsonize(func):
def wrapper_(*args, **kwargs):
# getting info:
        roots = func(*args, **kwargs)  # unpack, rather than passing the tuple/dict as two positional args
with open('info.json', 'w', encoding='utf-8') as f:
json.dump(roots, f, indent='\n')
return wrapper_
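
# Decorators apply bottom-up: cycle_call_parametrized wraps the solver first
# (building the {index: roots} dict), then jsonize dumps that dict to info.json.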
@jsonize
@cycle_call_parametrized(100, 100, 1000)
def solve_quadratic_equation(a: int, b: int, c: int):
"""solves a * x^2 + b * x + c = 0 equation..."""
sqrt_d = (b ** 2 - 4 * a * c) ** .5
x1, x2 = (-b + sqrt_d) / (2 * a), (-b - sqrt_d) / (2 * a)
    return (x1, x2) if x1 != x2 else x1  # parenthesized so the conditional selects tuple vs. single root
def generate_csv(string_q: int, left_b: int, right_b: int): # 100 -->> 1000 strings...
with open('info.csv', 'w', encoding='utf-8') as f:
writer = csv.writer(f, dialect='excel', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for ind in range(string_q + 1):
k = [random.randint(left_b, right_b + 1) for _ in [0, 1, 2]]
# print(f'k: {k}')
writer.writerow(k)
# generate_csv(100, 100, 1000)
solve_quadratic_equation()
solve_quadratic_equation()
|
LocusLontrime/Python
|
Dive_into_python/HomeWork9/Decorators.py
|
Decorators.py
|
py
| 1,807 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19241458582
|
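# BOJ 2004: count trailing zeros of C(N, M) = N! / (M! * (N-M)!).
# By Legendre's formula, the exponent of a prime p in N! is the sum over k of floor(N / p^k);
# the answer is min(exponent of 2, exponent of 5) in the binomial coefficient.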
def FiveCnt(K):
cnt = 0
while K:
cnt += K//5
K //= 5
return cnt
def TwoCnt(K):
cnt = 0
while K:
cnt += K//2
K //= 2
return cnt
N, M = map(int, input().split())
print(min(FiveCnt(N)-FiveCnt(N-M)-FiveCnt(M), TwoCnt(N)-TwoCnt(N-M)-TwoCnt(M)))
|
sdh98429/dj2_alg_study
|
백준/Silver/2004. 조합 0의 개수/조합 0의 개수.py
|
조합 0의 개수.py
|
py
| 286 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6363938887
|
def calcRedundantBits(m):
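    # find the smallest r such that 2**r >= m + r + 1 (the Hamming bound for m data bits)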
for i in range(m):
if 2**i >= m + i + 1:
return i
def posRedundantBits(data, r):
# Redundancy bits are placed at the positions
# which correspond to the power of 2.
j = 0
k = 1
m = len(data)
res = ""
# If position is power of 2 then insert '0'
# Else append the data
for i in range(1, m + r + 1):
if i == 2**j:
res = res + "0"
j += 1
else:
res = res + data[-1 * k]
k += 1
# The result is reversed since positions are
# counted backwards. (m + r+1 ... 1)
return res[::-1]
def calcParityBits(arr, r):
n = len(arr)
# For finding rth parity bit, iterate over
# 0 to r - 1
for i in range(r):
val = 0
for j in range(1, n + 1):
# If position has 1 in ith significant
# position then Bitwise OR the array value
# to find parity bit value.
if j & (2**i) == (2**i):
val = val ^ int(arr[-1 * j])
# -1 * j is given since array is reversed
# String Concatenation
# (0 to n - 2^r) + parity bit + (n - 2^r + 1 to n)
arr = arr[: n - (2**i)] + str(val) + arr[n - (2**i) + 1 :]
return arr
# Enter the data to be transmitted
print("Code by : Swarup Kharul (20BCT0073)")
data = input("Enter the data: ")
# data = '1011001'
# Calculate the no of Redundant Bits Required
m = len(data)
r = calcRedundantBits(m)
# Determine the positions of Redundant Bits
arr = posRedundantBits(data, r)
# Determine the parity bits
arr = calcParityBits(arr, r)
# Data to be transferred
print("Generated Code Word is " + arr)
|
SwarupKharul/NetCom
|
error-detection/hamming.py
|
hamming.py
|
py
| 1,704 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71634215547
|
INTRODUCTION = '''
\U0001F6E1 Health Insurance Risk Calculator\U0001F6E1
\U0001F534*************************************************************\U0001F534
Welcome to the Health Insurance Risk Calculator, where we'll
give you enough information to get an idea of how much you
owe us. We'll ask a series of basic question about your
personal information that will determine your risk level
and whether or not you are insurable.
\U0001F534*************************************************************\U0001F534
'''
print(INTRODUCTION + '\n\n' + 'Questions:')
def ValidNum(Response):
if Response.isnumeric():
return True
else:
print('This is not a number, try again!')
return False
def ValidString(Response):
if Response.isnumeric():
print('This is a number, try again!')
return False
else:
return True
TotPoints = 0
i = 1
while i < 2:
i+=1
UserR = input('\nWhat is your age?\n')
age = UserR
if ValidNum(UserR):
i+=300000
if int(UserR) < 30 and int(UserR) > 0:
TotPoints += 0
elif int(UserR) < 45:
TotPoints += 10
elif int(UserR) < 60:
TotPoints += 20
else:
TotPoints += 30
else:
i-=1
i=1
while i<2:
    i+=1
UserR = input('\nWhat is your height? Answer in typical 0\'0\" format.\n')
UserHeight = UserR
if UserR.find('\'') == 1 and UserR.endswith('\"'):
feet = UserR.split('\'')
ft = feet[0]
inches = feet[1].split('\"')
inch = inches[0]
if ValidNum(ft) and ValidNum(inch):
if int(ft) > 2 and int(ft) < 9 and int(inch) > -1 and int(inch) < 12:
height = (float(ft)*12) + float(inch)
i+=431745817387613
else:
print('This is not a valid input, try again!')
i-=1
else:
i-=1
else:
print('This is not a valid input, try again!')
i-=1
BMI = 0
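# Imperial BMI formula: BMI = 703 * weight(lb) / height(in)^2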
i=1
while i<2:
i+=1
weight = input('''\nWhat is your weight in lbs?
(Only write the number, no \"lbs\" following it.)\n''')
    UserWeight = weight
if ValidNum(weight):
if int(weight) > 5 and int(weight) < 750:
i+=9232798
BMI = (703.0*float(weight))/(float(height)*float(height))
else:
print('This is not a valid weight, try again!')
i-=1
else:
i-=1
if BMI >= 18.5 and BMI <= 24.9:
TotPoints +=0
elif BMI >= 25 and BMI <= 29.9:
TotPoints += 30
elif BMI >= 30 and BMI <= 34.9:
TotPoints += 75
BP = '''
***BLOOD PRESSURE CATEGORIES***
BP Category Systopic Diastolic
| (mm Hg Upper Number) | | (mm Hg Lower Number)
| | |
Normal | Less than 120 | and | Less than 80
| | |
Elevated | 120-129 | and | Less than 80
| | |
High BP | 130-139 | or | 80-89
(Hypertension Stage 1) | | |
| | |
 High BP                | 140 or higher        | or     | 90 or higher
(Hypertension Stage 2) | | |
| | |
 Hypertensive Crisis    | Higher than 180      | and/or | Higher than 120
'''
print(BP)
i=1
while i<2:
i+=1
UserR = input('''\nWhat is your BP, input your answer as normal, elevated,
stage 1, stage 2, or crisis? Select from the chart above.\n''')
UserBP = UserR
if ValidString(UserR):
if str(UserR).lower() == 'normal':
TotPoints += 0
elif str(UserR).lower() == 'elevated':
TotPoints += 15
elif str(UserR).lower() == 'stage 1':
TotPoints += 30
elif str(UserR).lower() == 'stage 2':
TotPoints += 75
elif str(UserR).lower() == 'crisis':
TotPoints += 100
else:
i-=1
print('This is not a valid response, try again!')
else:
i-=1
i=1
while i<2:
i+=1
UserR = input('\nDoes Diabetes run in your family? Answer with a simple yes or no.\n')
UserD = 'Does Diabetes run in the family - ' + UserR
if ValidString(UserR):
if UserR.lower() == 'yes':
i+=18971
TotPoints += 10
elif UserR.lower() == 'no':
i+=187237
else:
print('That\'s not a \"yes\" or \"no\" answer, try again!')
i-=1
else:
i-=1
i=1
while i<2:
i+=1
UserR = input('\nDoes Cancer run in your family? Answer with a simple yes or no.\n')
UserC = 'Does Cancer run in the family - ' + UserR
if ValidString(UserR):
if UserR.lower() == 'yes':
i+=18971
TotPoints += 10
elif UserR.lower() == 'no':
i+=187237
else:
print('That\'s not a \"yes\" or \"no\" answer, try again!')
i-=1
else:
i-=1
i=1
while i<2:
i+=1
UserR = input('\nDoes Alzheimer\'s run in your family? Answer with a simple yes or no.\n')
UserA = 'Does Alzhermer\'s run in the family - ' + UserR
if ValidString(UserR):
if UserR.lower() == 'yes':
i+=18971
TotPoints += 10
elif UserR.lower() == 'no':
i+=187237
else:
print('That\'s not a \"yes\" or \"no\" answer, try again!')
i-=1
else:
i-=1
print('''\n***For the following questions you can enter \"done\" to
immediately see your evaluation.***''')
isDone = False
UserR = input('\nIs there anything else you\'d like to tell us about your health?\n')
if UserR.lower() == 'done':
isDone = True
if not isDone:
UserR = input('\nHow\'s your day going?\n')
else:
pass
if UserR.lower() == 'done':
isDone = True
if not isDone:
UserR = input('\nDo you have any suggestion for how we can improve?\n')
else:
pass
print('\nAge: ' + age)
print('Height: ' + UserHeight)
print('Weight: ' + UserWeight)
print('BMI: ' + str(BMI))
print(UserD)
print(UserC)
print(UserA)
print('\nTotal Risk Score(Higher = Worse): ' + str(TotPoints))
RiskC = ''
if TotPoints <= 20:
RiskC = 'Low Risk'
elif TotPoints <= 50:
RiskC = 'Moderate Risk'
elif TotPoints <= 75:
RiskC = 'High Risk'
else:
    RiskC = 'Uninsurable'
print('Your Risk Category: ' + RiskC + '\n')
|
JubinJ0110/HealthInsuranceRiskCalculator
|
InsuranceCalculatorJJJ.py
|
InsuranceCalculatorJJJ.py
|
py
| 6,808 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27923745620
|
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
from scipy.io import loadmat
import numpy as np
def display(i):
img = X[i]
    plt.title(f'Example {i}  Label: {Y[i]}  Predicted: {ypred[i]}')
plt.imshow(img.reshape((28,28)),cmap=plt.cm.gray_r)
plt.show()
def plot_accuracy(history):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
def plot_loss(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
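# Load the MNIST .mat dump and transpose so rows are samples (flattened 28x28 images).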
mnist = loadmat('mnist-original')
X , Y = mnist['data'] , mnist['label']
X= X.T
Y = Y.T
X_train , X_test , Y_train , Y_test = train_test_split(X,Y,test_size=0.1,shuffle = True)
X_train , X_val , Y_train , Y_val = train_test_split(X_train,Y_train,test_size=0.2,shuffle = True)
X_train = X_train/255
X_test = X_test/255
X_val = X_val/255
Ytrain = to_categorical(Y_train)
Ytest = to_categorical(Y_test)
Yval = to_categorical(Y_val)
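# Single-hidden-layer MLP: 784 -> 784 (ReLU) -> 10 (softmax).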
model = Sequential()
model.add(Dense(784,input_shape=(784,),activation='relu',kernel_initializer='normal'))
model.add(Dense(10, activation = 'softmax',kernel_initializer='normal'))
model.compile(loss='categorical_crossentropy' , optimizer = 'adam' , metrics = ['accuracy'])
history = model.fit(X_train ,Ytrain,batch_size = 512 ,epochs=30,verbose=2, validation_data=(X_val,Yval))
test_accuracy = model.evaluate(x=X_test,y=Ytest,batch_size=200,verbose=2)
print("Test results : ", test_accuracy)
Ypred = model.predict(X)
ypred = []
for i in Ypred:
ypred.append(np.argmax(i))
plot_accuracy(history)
plot_loss(history)
|
ankitlohiya212/basic-ml-problems
|
Basic ML problems/Mnist.py
|
Mnist.py
|
py
| 2,107 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14839954104
|
from PyQt5 import QtWidgets, uic
import uuid
class EditCategories(object):
def __init__(self, course, reload_gradesheet):
col_headers = ['Category Name', 'Drop Count']
self.ECategories = QtWidgets.QDialog()
self.ui = uic.loadUi('../assets/ui/EditCategories.ui', self.ECategories)
self.ECategories.categoryTable.setHorizontalHeaderLabels(col_headers)
self.course = course
self.ECategories.show()
self.category_uuids = []
self.setup_display()
self.reload_gradesheet = reload_gradesheet
self.original_row_count = self.ECategories.categoryTable.rowCount()
self.ECategories.removeSelectedCategoryButton.clicked.connect(self.remove_category)
self.ECategories.addCategoryButton.clicked.connect(self.add_category)
self.ECategories.saveCategoriesButton.clicked.connect(self.save_table_data)
def setup_display(self):
for category in self.course.assignment_category_dict.assignment_categories.values():
row_insert = self.ECategories.categoryTable.rowCount()
self.add_category()
self.ECategories.categoryTable.setItem(row_insert, 0, QtWidgets.QTableWidgetItem(category.categoryName))
            self.ECategories.categoryTable.setItem(row_insert, 1, QtWidgets.QTableWidgetItem(str(category.drop_count)))  # QTableWidgetItem expects a string
self.category_uuids.append(category.category_uuid)
def add_category(self):
row_insert = self.ECategories.categoryTable.rowCount()
self.ECategories.categoryTable.insertRow(self.ECategories.categoryTable.rowCount())
self.ECategories.categoryTable.setItem(row_insert, 0, QtWidgets.QTableWidgetItem(""))
self.ECategories.categoryTable.setItem(row_insert, 1, QtWidgets.QTableWidgetItem(""))
def remove_category(self):
if self.ECategories.categoryTable.rowCount() <= 0:
return
row = self.ECategories.categoryTable.currentRow()
if row > self.original_row_count:
self.ECategories.categoryTable.removeRow(row)
else:
choice = QtWidgets.QMessageBox.question(self.ECategories, "Warning",
"You are about to delete one of your original categories. Continue?",
QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if choice == QtWidgets.QMessageBox.Yes:
cat_to_delete_uuid = self.category_uuids[row]
self.course.assignment_category_dict.delete_category(self.course, cat_to_delete_uuid)
self.original_row_count = self.original_row_count - 1
del self.category_uuids[row]
self.ECategories.categoryTable.removeRow(row)
self.reload_gradesheet()
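    # Read every row from the table, validate it, then persist the categories to the course.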
def save_table_data(self):
row_count = self.ECategories.categoryTable.rowCount()
output = []
for row in range(0, row_count):
cat_name = self.ECategories.categoryTable.item(row, 0).text()
cat_drop_count = self.ECategories.categoryTable.item(row, 1).text()
output.append([cat_name, cat_drop_count])
valid = self.error_checking(output)
if valid:
self.course.assignment_category_dict.reload_categories()
for i in range(len(output)):
if i < self.original_row_count:
self.course.assignment_category_dict.save_category_info(output[i][0], output[i][1], self.category_uuids[i])
# Add the database update function
else:
self.course.assignment_category_dict.add_category(str(uuid.uuid4()), output[i][0], output[i][1], self.course.student_list)
# Add the database create function
self.reload_gradesheet()
def error_checking(self, user_input):
category_names = [user_input[i][0] for i in range(len(user_input))]
category_drop_counts = [user_input[i][1] for i in range(len(user_input))]
for i in category_names:
if i == "":
self.bad_input('Error', 'Please enter a category name for all categories')
return False
        for i in category_drop_counts:
            if "." in i:
                self.bad_input('Error', 'Drop counts must be whole numbers. Please try again.')
                return False
            try:
                x = int(i.strip())
                if x < 0:
                    self.bad_input('Error', 'Drop counts must be nonnegative integers. Please try again.')
                    return False
            except ValueError:
                self.bad_input('Error', 'Drop counts must be nonnegative integers. Please try again.')
                return False
        return True
"""
Function for telling the user they entered bad input
Parameters:
window_text: (string) the name of the window
error_message: (string) the error message that is displayed to the user
"""
def bad_input(self, window_text, error_message):
choice = QtWidgets.QMessageBox.question(self.ECategories, window_text, error_message,
QtWidgets.QMessageBox.Cancel)
if choice:
pass
|
meeksjt/SuperTeacherGradebook499
|
src/EditCategories.py
|
EditCategories.py
|
py
| 5,236 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37612660256
|
from django.shortcuts import render
from .models import *
import cv2
import numpy as np
import pytesseract
pytesseract.pytesseract.tesseract_cmd = "C:/Program Files/Tesseract-OCR/tesseract.exe"
def main(request):
return render(request,'main.html')
def maintest(request):
return render(request,'maintest.html')
def kakaomap(request):
hospital = Hospital.objects.all()
return render(request,'kakaomap.html',{'hospital':hospital })
def camera(request):
return render(request,'camera.html')
def history(request):
img = image.objects.all()
return render(request,'history.html',{'img':img})
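# Save the uploaded prescription image, OCR the medicine-code region, and look the codes up in the Medicine table.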
def result(request):
prescription = image.objects.create(
sample=request.FILES.get('camera'),
)
pic = prescription.sample
pic = "./media/"+ str(pic)
    img = cv2.imread(pic)
    orig = img.copy()  # keep a copy of the original image
rect_img = img[355:660, 60:317]
#r = 800.0 / img.shape[0]
#dim = (int(img.shape[1] * r), 800)
#img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
#print("STEP 1: Edge Detection")
#cv2.namedWindow('img', cv2.WINDOW_NORMAL)
#cv2.namedWindow('edged', cv2.WINDOW_NORMAL)
#print(str(pytesseract.image_to_string(img)))
custom_config = 'outputbase nobatch digits'
number = pytesseract.image_to_string(rect_img,config=custom_config)
dist = ""
db = []
    for num in number:
        dist += num
        if(num == "\n"):
            try:
                db.append(Medicine.objects.get(m_Code=int(dist)))
            except:
                pass
            dist = ""  # reset the accumulated digits before the next code
count = len(db)
return render(request,'result.html',{'db':db, 'count':count})
|
YounngR/Graduation-work
|
DB/views.py
|
views.py
|
py
| 1,680 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23262217725
|
########################################################
### Programmers: Steffan Davies ###
### Contact: [email protected] ###
### Date: 27/12/2022 ###
########################################################
# This script will demonstrate dictionaries
favorite_numbers = {
"john": [3, 5, 4],
"sarah": [5],
"michael": [1, 6],
"doug": [13, 7],
"sam": [7],
}
for person, numbers in favorite_numbers.items():
if len(numbers) > 1:
print(f"{person.title()}'s favorite numbers are:")
elif len(numbers) == 1:
print(f"{person.title()}'s favorite number is:")
for number in numbers:
print(f"\t{number}")
|
SteffanDavies/python-crash-course-exercises
|
chapter-06/06-10/favorite_numbers_2.py
|
favorite_numbers_2.py
|
py
| 729 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15047866942
|
# from __future__ import absolute_import
import torch
import torch.nn as nn
import onnx
from .util import *
class onnxTorchModel(nn.Module):
def __init__(self,onnx_model: onnx.ModelProto,cfg:dict):
super(onnxTorchModel,self).__init__()
self.onnx_model=onnx_model
self.nodes=self.onnx_model.graph.node
self.pad_split=cfg["pad_split"]
self.weights_in_constant_flg=False
if len(onnx_model.graph.initializer)==0:
self.weights_in_constant_flg=True
self.op_type_list=[]
self.current_id=0
self.forwardExcList=[]
self.onnxBlobNameTable={}
self.generateOnnxBlobNameTable()
self.parseOnnx()
def getOnnxNameFromTable(self,name):
for n in self.onnxBlobNameTable.keys():
if self.onnxBlobNameTable[n]==name:
return n
def forward(self, input):
net_input=self.onnx_model.graph.input
net_output=self.onnx_model.graph.output
if len(net_input)==1:
exc_str="{node_input}=input".format(node_input=self.onnxBlobNameTable[net_input[0].name])
exec(exc_str)
for exc_info in self.forwardExcList:
if "exec_pad" in exc_info.keys():
exec(exc_info["exec_pad"])
exc_str=exc_info["exec"]
exec(exc_str)
if len(net_output)==1:
exc_str="self.net_output={node_output}".format(node_output=self.onnxBlobNameTable[net_output[0].name])
exec(exc_str)
return self.net_output
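    # Walk the ONNX graph once; each handler below builds a torch module and records an exec string for forward().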
def parseOnnx(self):
nodes = self.onnx_model.graph.node
for nid,node in enumerate(nodes):
self.current_id=nid
op_type=node.op_type
if op_type not in self.op_type_list:
self.op_type_list.append(op_type)
print("Parsing onnx:",op_type)
if op_type=="Conv":
self.parseConv(node)
elif op_type=="BatchNormalization":
self.parseBN(node)
elif op_type=="Flatten":
self.parseFlatten(node)
elif op_type=="Relu":
self.parseRelu(node)
elif op_type=="MaxPool":
self.parseMaxPool(node)
elif op_type=="Add":
self.parseAdd(node)
elif op_type=="GlobalAveragePool":
self.parseGlobalAveragePool(node)
elif op_type=="MatMul":
self.parseMatMul(node)
elif op_type=="Softmax":
self.parseSoftmax(node)
elif op_type=="Identity":
self.parseIdentity(node)
elif op_type=="Constant":
self.parseNonWeightsConstant(node)
# torch.nn.Conv2d(in_channels: int, out_channels: int,
# kernel_size: Union[T, Tuple[T, T]], stride: Union[T, Tuple[T, T]] = 1,
# padding: Union[T, Tuple[T, T]] = 0, dilation: Union[T, Tuple[T, T]] = 1,
# groups: int = 1, bias: bool = True, padding_mode: str = 'zeros')
def parseConv(self,node):
attr=attribute_to_dict(node.attribute)
if(self.weights_in_constant_flg):
wt,bt=get_conv_params_in_constant(node,self.onnx_model.graph.node)
has_bias=True
if len(node.input)==2:
has_bias=False
c,n,k_w,k_h=wt.shape
c=c*int(attr["group"])
n=n*int(attr["group"])
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
pad_t=attr["pads"][0]
pad_b=attr["pads"][2]
pad_l=attr["pads"][1]
pad_r=attr["pads"][3]
if(pad_t!=pad_b or pad_l!=pad_r or self.pad_split):
exc_str_pad="{var_name}_pad=nn.ConstantPad2d(padding={padding},value={value})".format(var_name=var_name,padding=(pad_l,pad_r,pad_t,pad_b),value=0)
exc_str_conv="{var_name}=nn.Conv2d(in_channels={in_channels},out_channels={out_channels},kernel_size={kernel_size},stride={stride},padding={padding},dilation={dilation},groups={groups},bias={bias})".format(var_name=var_name,\
in_channels=c,\
out_channels=n,\
kernel_size=tuple(attr["kernel_shape"]),\
stride=tuple(attr["strides"]),\
padding=(0,0),\
dilation=tuple(attr["dilations"]),\
groups=attr["group"],\
bias=True)
self.generateForwardExec(node,var_name,op_pad_split=True)
exec(exc_str_pad)
exec(exc_str_conv)
exc_init_weights_str="{var_name}.weight=torch.nn.Parameter(torch.Tensor(wt))".format(var_name=var_name)
exec(exc_init_weights_str)
else:
exc_str="{var_name}=nn.Conv2d(in_channels={in_channels},out_channels={out_channels},kernel_size={kernel_size},stride={stride},padding={padding},dilation={dilation},groups={groups},bias={bias})".format(var_name=var_name,\
in_channels=c,\
out_channels=n,\
kernel_size=tuple(attr["kernel_shape"]),\
stride=tuple(attr["strides"]),\
padding=tuple(attr["pads"][:2]),\
dilation=tuple(attr["dilations"]),\
groups=attr["group"],\
bias=True)
self.generateForwardExec(node,var_name)
exec(exc_str)
exc_init_weights_str="{var_name}.weight=torch.nn.Parameter(torch.Tensor(wt))".format(var_name=var_name)
exec(exc_init_weights_str)
if has_bias:
self.forwardExcList[len(self.forwardExcList)-1]["has_bias"]=True
exc_init_bias_str="{var_name}.bias=torch.nn.Parameter(torch.Tensor(bt))".format(var_name=var_name)
exec(exc_init_bias_str)
else:
self.forwardExcList[len(self.forwardExcList)-1]["has_bias"]=False
exc_init_bias_str="nn.init.constant_({var_name}.bias, 0)".format(var_name=var_name)
exec(exc_init_bias_str)
# torch.nn.BatchNorm2d(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
def parseBN(self,node):
attr=attribute_to_dict(node.attribute)
if(self.weights_in_constant_flg):
bn_scale,bn_B,bn_mean,bn_var=get_bn_params_in_constant(node,self.onnx_model.graph.node)
n=bn_scale.shape[0]
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
exc_str="{var_name}=nn.BatchNorm2d(num_features={num_features},eps={eps},momentum={momentum})".format(var_name=var_name,\
num_features=n,eps=attr["epsilon"],momentum=attr["momentum"])
exec(exc_str)
bn_scale,bn_B,bn_mean,bn_var=get_bn_params_in_constant(node, self.nodes)
exc_init_scale_str="{var_name}.weight=torch.nn.Parameter(torch.Tensor(bn_scale))".format(var_name=var_name)
exc_init_bias_str="{var_name}.bias=torch.nn.Parameter(torch.Tensor(bn_B))".format(var_name=var_name)
exc_init_mean_str="{var_name}.running_mean=torch.Tensor(bn_mean)".format(var_name=var_name)
exc_init_var_str="{var_name}.running_var=torch.Tensor(bn_var)".format(var_name=var_name)
exec(exc_init_scale_str)
exec(exc_init_bias_str)
exec(exc_init_mean_str)
exec(exc_init_var_str)
self.generateForwardExec(node,var_name)
def parseFlatten(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
exc_str="{var_name}=nn.Flatten(start_dim={start_dim})".format(var_name=var_name,start_dim=attr["axis"])
self.generateForwardExec(node,var_name)
exec(exc_str)
def parseRelu(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
exc_str="{var_name}=nn.ReLU()".format(var_name=var_name)
self.generateForwardExec(node,var_name)
exec(exc_str)
# torch.nn.MaxPool2d(kernel_size: Union[T, Tuple[T, ...]],
# stride: Optional[Union[T, Tuple[T, ...]]] = None,
# padding: Union[T, Tuple[T, ...]] = 0, dilation: Union[T, Tuple[T, ...]] = 1,
# return_indices: bool = False, ceil_mode: bool = False)
def parseMaxPool(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
pad_t=attr["pads"][0]
pad_b=attr["pads"][2]
pad_l=attr["pads"][1]
pad_r=attr["pads"][3]
if(pad_t!=pad_b or pad_l!=pad_r or pad_r!=pad_t or self.pad_split):
exc_str_pad="{var_name}_pad=nn.ConstantPad2d(padding={padding},value={value})".format(var_name=var_name,padding=(pad_l,pad_r,pad_t,pad_b),value=0)
exc_str="{var_name}=nn.MaxPool2d(kernel_size={kernel_shape},padding={pads},stride={strides})".format(var_name=var_name,\
kernel_shape=tuple(attr["kernel_shape"]),\
pads=0,\
strides=tuple(attr["strides"]))
exec(exc_str_pad)
exec(exc_str)
self.generateForwardExec(node,var_name,op_pad_split=True)
else:
exc_str="{var_name}=nn.MaxPool2d(kernel_size={kernel_shape},padding={pads},stride={strides})".format(var_name=var_name,\
kernel_shape=tuple(attr["kernel_shape"]),\
pads=attr["pads"][0],\
strides=tuple(attr["strides"]))
exec(exc_str)
self.generateForwardExec(node,var_name)
def parseAdd(self,node):
attr=attribute_to_dict(node.attribute)
var_name="torch.add"
self.generateForwardExecMultiInput(node,var_name,filter_const=False,is_instance=False)
def parseGlobalAveragePool(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
exc_str="{var_name}=nn.AdaptiveAvgPool2d((1, 1))".format(var_name=var_name)
self.generateForwardExec(node,var_name)
exec(exc_str)
def parseMatMul(self,node):
attr=attribute_to_dict(node.attribute)
var_name="torch.matmul"
self.generateForwardExecMultiInput(node,var_name,filter_const=False,is_instance=False)
def parseSoftmax(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
if attr["axis"]==-1:
exc_str="{var_name}=nn.Softmax(dim=1)".format(var_name=var_name)
exec(exc_str)
else:
exc_str="{var_name}=nn.Softmax(dim={dim})".format(var_name=var_name,dim= attr["axis"])
exec(exc_str)
self.generateForwardExec(node,var_name)
def parseIdentity(self,node):
inputs=node.input
outputs=node.output
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
input_blob=self.onnxBlobNameTable[inputs[0]]
output_blob=self.onnxBlobNameTable[outputs[0]]
forwardExcStr="{output_name}={input_name}".format(output_name=output_blob,input_name=input_blob)
nodeInfoDict={"exec":forwardExcStr,"var_name":var_name,"type":"Identity","input":[input_blob],"output":[output_blob],"is_instance":False,"id":self.current_id}
self.forwardExcList.append(nodeInfoDict)
def parseNonWeightsConstant(self,node):
output_name=node.output[0]
next_type=get_node_type_by_input(output_name,self.nodes)
weight_node_list=["Conv","BatchNormalization"]
if next_type not in weight_node_list:
            constant_tensor=get_tensor_in_constant(output_name,self.nodes)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
output_blob=self.onnxBlobNameTable[output_name]
exc_str="{var_name}=torch.nn.Parameter(torch.tensor(constant_tonser))".format(var_name=var_name)
exec(exc_str)
forwardExcStr="{output}={var_name}".format(output=output_blob,var_name=var_name)
nodeInfoDict={"exec":forwardExcStr,"var_name":var_name,"type":node.op_type,"input":[],"output":[output_blob],"is_instance":True}
self.forwardExcList.append(nodeInfoDict)
###################################### support func area
def generateForwardExec(self,node,var_name,filter_const=True,is_instance=True,op_pad_split=False):
inputs=node.input
outputs=node.output
# node_type=node.op_type
# next_type=
dynamic_input=[]
dynamic_output=[]
for inputname in inputs:
if filter_const and get_node_type_by_output(inputname,self.nodes)=="Constant":
continue
dynamic_input.append(self.onnxBlobNameTable[inputname])
for outputname in outputs:
dynamic_output.append(self.onnxBlobNameTable[outputname])
if len(dynamic_input)>1:
assert(0)
if len(dynamic_input)==0:
dynamic_input.append(self.onnxBlobNameTable[inputs[0]])
input_blob=dynamic_input[0]
output_blob=dynamic_output[0]
if op_pad_split:
forwardExcStrPad="{output_name}_pad={var_name}_pad({input_name})".format(output_name=input_blob,var_name=var_name,input_name=input_blob)
forwardExcStr="{output_name}={var_name}({input_name}_pad)".format(output_name=output_blob,var_name=var_name,input_name=input_blob)
nodeInfoDict={"exec":forwardExcStr,"exec_pad":forwardExcStrPad,"var_name":var_name,"type":node.op_type,"input":dynamic_input,"output":[output_blob],"is_instance":is_instance,"id":self.current_id}
else:
forwardExcStr="{output_name}={var_name}({input_name})".format(output_name=output_blob,var_name=var_name,input_name=input_blob)
nodeInfoDict={"exec":forwardExcStr,"var_name":var_name,"type":node.op_type,"input":dynamic_input,"output":[output_blob],"is_instance":is_instance,"id":self.current_id}
self.forwardExcList.append(nodeInfoDict)
for i in range(1,len(dynamic_output)):
forwardExcStr="{output_name}={input_name}".format(output_name=dynamic_output[i],input_name=dynamic_output[0])
nodeInfoDict={"exec":forwardExcStr,"var_name":"Copy","type":"Copy","input":[dynamic_output[0]],"output":[output_blob],"is_instance":False,"id":self.current_id}
self.forwardExcList.append(nodeInfoDict)
def generateForwardExecMultiInput(self,node,var_name,filter_const=True,is_instance=True):
inputs=node.input
outputs=node.output
dynamic_input=[]
dynamic_output=[]
for inputname in inputs:
if filter_const and get_node_type_by_output(inputname,self.nodes)=="Constant":
continue
dynamic_input.append(self.onnxBlobNameTable[inputname])
for outputname in outputs:
dynamic_output.append(self.onnxBlobNameTable[outputname])
input_blob=dynamic_input[0]
output_blob=dynamic_output[0]
input_blob_str=""
for input_blob in dynamic_input:
input_blob_str+=","+input_blob
input_blob_str=input_blob_str[1:]
forwardExcStr="{output_name}={var_name}({input_name})".format(output_name=output_blob,var_name=var_name,input_name=input_blob_str)
nodeInfoDict={"exec":forwardExcStr,"var_name":var_name,"type":node.op_type,"input":dynamic_input,"output":[output_blob],"is_instance":is_instance,"id":self.current_id}
self.forwardExcList.append(nodeInfoDict)
for i in range(1,len(dynamic_output)):
forwardExcStr="{output_name}={input_name}".format(output_name=dynamic_output[i],input_name=dynamic_output[0])
nodeInfoDict={"exec":forwardExcStr,"var_name":"Copy","type":"Copy","input":[dynamic_output[0]],"output":[output_blob],"is_instance":False,"id":self.current_id}
self.forwardExcList.append(nodeInfoDict)
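    # Give every ONNX blob name a python-safe alias (self.blob_N) for use inside the exec strings.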
def generateOnnxBlobNameTable(self):
nodes = self.onnx_model.graph.node
id_count=0
for nid,node in enumerate(nodes):
inputs=node.input
outputs=node.output
for name in inputs:
if name not in self.onnxBlobNameTable.keys():
self.onnxBlobNameTable[name]="self.blob_"+str(id_count)
id_count+=1
for name in outputs:
if name not in self.onnxBlobNameTable.keys():
self.onnxBlobNameTable[name]="self.blob_"+str(id_count)
id_count+=1
def getFeatureTensor(self,name):
exec("self.outTensor= {name}".format(name=name))
return self.outTensor
|
diamour/onnxQuanter
|
onnx_torch_engine/converter.py
|
converter.py
|
py
| 16,899 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33225197622
|
# -*- coding: utf-8 -*-
""" #+begin_org
* *[Summary]* :: A =CmndLib= for providing currents configuration to CS-s.
#+end_org """
####+BEGIN: b:py3:cs:file/dblockControls :classification "cs-u"
""" #+begin_org
* [[elisp:(org-cycle)][| /Control Parameters Of This File/ |]] :: dblk ctrls classifications=cs-u
#+BEGIN_SRC emacs-lisp
(setq-local b:dblockControls t) ; (setq-local b:dblockControls nil)
(put 'b:dblockControls 'py3:cs:Classification "cs-u") ; one of cs-mu, cs-u, cs-lib, bpf-lib, pyLibPure
#+END_SRC
#+RESULTS:
: cs-u
#+end_org """
####+END:
####+BEGIN: b:prog:file/proclamations :outLevel 1
""" #+begin_org
* *[[elisp:(org-cycle)][| Proclamations |]]* :: Libre-Halaal Software --- Part Of BISOS --- Poly-COMEEGA Format.
** This is Libre-Halaal Software. © Neda Communications, Inc. Subject to AGPL.
** It is part of BISOS (ByStar Internet Services OS)
** Best read and edited with Blee in Poly-COMEEGA (Polymode Colaborative Org-Mode Enhance Emacs Generalized Authorship)
#+end_org """
####+END:
####+BEGIN: b:prog:file/particulars :authors ("./inserts/authors-mb.org")
""" #+begin_org
* *[[elisp:(org-cycle)][| Particulars |]]* :: Authors, version
** This File: /bisos/git/auth/bxRepos/bisos-pip/currents/py3/bisos/currents/currentsConfig.py
** Authors: Mohsen BANAN, http://mohsen.banan.1.byname.net/contact
#+end_org """
####+END:
####+BEGIN: b:python:file/particulars-csInfo :status "inUse"
""" #+begin_org
* *[[elisp:(org-cycle)][| Particulars-csInfo |]]*
#+end_org """
import typing
csInfo: typing.Dict[str, typing.Any] = { 'moduleName': ['currentsConfig'], }
csInfo['version'] = '202209290819'
csInfo['status'] = 'inUse'
csInfo['panel'] = 'currentsConfig-Panel.org'
csInfo['groupingType'] = 'IcmGroupingType-pkged'
csInfo['cmndParts'] = 'IcmCmndParts[common] IcmCmndParts[param]'
####+END:
""" #+begin_org
* /[[elisp:(org-cycle)][| Description |]]/ :: [[file:/bisos/git/auth/bxRepos/blee-binders/bisos-core/COMEEGA/_nodeBase_/fullUsagePanel-en.org][BISOS COMEEGA Panel]]
Module description comes here.
** Relevant Panels:
** Status: In use with blee3
** /[[elisp:(org-cycle)][| Planned Improvements |]]/ :
*** TODO complete fileName in particulars.
#+end_org """
####+BEGIN: b:prog:file/orgTopControls :outLevel 1
""" #+begin_org
* [[elisp:(org-cycle)][| Controls |]] :: [[elisp:(delete-other-windows)][(1)]] | [[elisp:(show-all)][Show-All]] [[elisp:(org-shifttab)][Overview]] [[elisp:(progn (org-shifttab) (org-content))][Content]] | [[elisp:(blee:ppmm:org-mode-toggle)][Nat]] | [[elisp:(bx:org:run-me)][Run]] | [[elisp:(bx:org:run-me-eml)][RunEml]] | [[elisp:(progn (save-buffer) (kill-buffer))][S&Q]] [[elisp:(save-buffer)][Save]] [[elisp:(kill-buffer)][Quit]] [[elisp:(org-cycle)][| ]]
** /Version Control/ :: [[elisp:(call-interactively (quote cvs-update))][cvs-update]] [[elisp:(vc-update)][vc-update]] | [[elisp:(bx:org:agenda:this-file-otherWin)][Agenda-List]] [[elisp:(bx:org:todo:this-file-otherWin)][ToDo-List]]
#+end_org """
####+END:
####+BEGIN: b:python:file/workbench :outLevel 1
""" #+begin_org
* [[elisp:(org-cycle)][| Workbench |]] :: [[elisp:(python-check (format "/bisos/venv/py3/bisos3/bin/python -m pyclbr %s" (bx:buf-fname))))][pyclbr]] || [[elisp:(python-check (format "/bisos/venv/py3/bisos3/bin/python -m pydoc ./%s" (bx:buf-fname))))][pydoc]] || [[elisp:(python-check (format "/bisos/pipx/bin/pyflakes %s" (bx:buf-fname)))][pyflakes]] | [[elisp:(python-check (format "/bisos/pipx/bin/pychecker %s" (bx:buf-fname))))][pychecker (executes)]] | [[elisp:(python-check (format "/bisos/pipx/bin/pycodestyle %s" (bx:buf-fname))))][pycodestyle]] | [[elisp:(python-check (format "/bisos/pipx/bin/flake8 %s" (bx:buf-fname))))][flake8]] | [[elisp:(python-check (format "/bisos/pipx/bin/pylint %s" (bx:buf-fname))))][pylint]] [[elisp:(org-cycle)][| ]]
#+end_org """
####+END:
####+BEGIN: b:py3:cs:orgItem/basic :type "=PyImports= " :title "*Py Library IMPORTS*" :comment "-- with classification based framework/imports"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* =PyImports= [[elisp:(outline-show-subtree+toggle)][||]] *Py Library IMPORTS* -- with classification based framework/imports [[elisp:(org-cycle)][| ]]
#+end_org """
####+END:
####+BEGIN: b:py3:cs:framework/imports :basedOn "classification"
""" #+begin_org
** Imports Based On Classification=cs-u
#+end_org """
from bisos import b
from bisos.b import cs
from bisos.b import b_io
import collections
####+END:
import os
import collections
#import enum
import shutil
import sys
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "Obtain Package Bases" :extraInfo ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *Obtain Package Bases:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: b:py3:cs:func/typing :funcName "configBaseDir_obtain" :deco "track"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-T- [[elisp:(outline-show-subtree+toggle)][||]] /configBaseDir_obtain/ deco=track [[elisp:(org-cycle)][| ]]
#+end_org """
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def configBaseDir_obtain(
####+END:
) -> str:
""" #+begin_org
** [[elisp:(org-cycle)][| *DocStr | ]
#+end_org """
outcome = b.subProc.WOpW(invedBy=None, log=0).bash(
f"""usgBpos.sh -i usgBpos_usageEnvs_fullUse_bxoPath""")
if outcome.isProblematic():
b_io.eh.badOutcome(outcome)
return ""
retVal = outcome.stdout.rstrip('\n')
return retVal
####+BEGIN: bx:cs:python:func :funcName "configUsgCursBaseDir_obtain" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /configUsgCursBaseDir_obtain/ retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def configUsgCursBaseDir_obtain(
configBaseDir,
):
####+END:
if not configBaseDir:
configBaseDir = configBaseDir_obtain()
return os.path.abspath(os.path.join(configBaseDir, "control/currents"))
####+BEGIN: bx:cs:python:func :funcName "configUsgCursFpBaseDir_obtain" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /configUsgCursFpBaseDir_obtain/ retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def configUsgCursFpBaseDir_obtain(
configBaseDir,
):
####+END:
if not configBaseDir:
configBaseDir = configBaseDir_obtain()
return os.path.abspath(os.path.join(configBaseDir,"control/currents/fp"))
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "File Parameters Obtain"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *File Parameters Obtain:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: bx:cs:python:func :funcName "bxoId_fpObtain" :comment "Configuration Parameter" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /bxoId_fpObtain/ =Configuration Parameter= retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def bxoId_fpObtain(
configBaseDir,
):
####+END:
if not configBaseDir:
configBaseDir = configBaseDir_obtain()
return(
b.fp.FileParamValueReadFrom(
parRoot= os.path.abspath("{}/usgCurs/fp".format(configBaseDir)),
parName="bxoId")
)
####+BEGIN: bx:cs:python:func :funcName "sr_fpObtain" :comment "Configuration Parameter" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /sr_fpObtain/ =Configuration Parameter= retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def sr_fpObtain(
configBaseDir,
):
####+END:
if not configBaseDir:
configBaseDir = configBaseDir_obtain()
return(
b.fp.FileParamValueReadFrom(
parRoot= os.path.abspath("{}/usgCurs/fp".format(configBaseDir)),
parName="sr")
)
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "Common Command Parameter Specification"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *Common Command Parameter Specification:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: bx:cs:python:func :funcName "commonParamsSpecify" :funcType "void" :retType "bool" :deco "" :argsList "csParams"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-void [[elisp:(outline-show-subtree+toggle)][||]] /commonParamsSpecify/ retType=bool argsList=(csParams) [[elisp:(org-cycle)][| ]]
#+end_org """
def commonParamsSpecify(
csParams,
):
####+END:
csParams.parDictAdd(
parName='configBaseDir',
parDescription="Root Of usgCurs/fp from which file parameters will be read",
parDataType=None,
parDefault=None,
parChoices=["any"],
# parScope=cs.CmndParamScope.TargetParam,
argparseShortOpt=None,
argparseLongOpt='--configBaseDir',
)
csParams.parDictAdd(
parName='bxoId',
parDescription="BISOS Default UserName",
parDataType=None,
parDefault=None,
parChoices=["any"],
# parScope=cs.CmndParamScope.TargetParam,
argparseShortOpt=None,
argparseLongOpt='--bxoId',
)
csParams.parDictAdd(
parName='sr',
parDescription="BISOS Default GroupName",
parDataType=None,
parDefault=None,
parChoices=["any"],
# parScope=cs.CmndParamScope.TargetParam,
argparseShortOpt=None,
argparseLongOpt='--sr',
)
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "Common Command Examples Sections"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *Common Command Examples Sections:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: bx:cs:python:func :funcName "examples_usgCursParsFull" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /examples_usgCursParsFull/ retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def examples_usgCursParsFull(
configBaseDir,
):
####+END:
"""
** Auxiliary examples to be commonly used.
"""
def cpsInit(): return collections.OrderedDict()
def menuItem(verbosity): cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity=verbosity,
comment='none', icmWrapper=None, icmName=None) # verbosity: 'little' 'basic' 'none'
def execLineEx(cmndStr): cs.examples.execInsert(execLine=cmndStr)
cs.examples.menuChapter(' =FP Values= *usgCurs Clear InfoBase --- Deletes All FPs*')
cmndName = "usgCursParsDelete" ; cmndArgs = "" ;
cps = collections.OrderedDict() ; cps['configBaseDir'] = configBaseDir
cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')
cmndName = "usgCursParsDelete" ; cmndArgs = "" ; cps=cpsInit(); menuItem(verbosity='none')
cmndName = "usgCursParsDelete" ; cmndArgs = "anyName" ;
cps = collections.OrderedDict() ;
cs.examples.cmndInsert(cmndName, cps, cmndArgs, icmWrapper="echo", verbosity='little')
cs.examples.menuChapter(' =FP Values= *usgCurs Get Parameters*')
cmndName = "usgCursParsGet" ; cmndArgs = "" ;
cps = collections.OrderedDict() ; cps['configBaseDir'] = configBaseDir
cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')
cmndName = "usgCursParsGet" ; cmndArgs = "" ; cps=cpsInit(); menuItem(verbosity='none')
cs.examples.menuChapter(' =FP Values= *UsgCurs Defaults ParsSet --*')
cmndName = "usgCursParsDefaultsSet" ; cmndArgs = "bxoPolicy /" ;
cpsInit(); menuItem('none')
cmndName = "usgCursParsDefaultsSet" ; cmndArgs = "bxoPolicy /tmp" ;
cpsInit(); menuItem('none')
cs.examples.menuChapter(' =FP Values= *UsgCurs ParsSet -- Set Parameters Explicitly*')
cmndName = "usgCursParsSet" ; cmndArgs = "" ;
cps = collections.OrderedDict() ; cps['bxoId'] = "mcm"
cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')
cmndName = "usgCursParsSet" ; cmndArgs = "" ;
cps = collections.OrderedDict() ; cps['bxoId'] = "ea-59043"
cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')
cmndName = "usgCursParsSet" ; cmndArgs = "" ;
cps = collections.OrderedDict() ; cps['sr'] = "marme/dsnProc"
cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')
cmndName = "usgCursParsSet" ; cmndArgs = "" ;
cps = collections.OrderedDict() ; cps['sr'] = "apache2/plone3"
cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')
# cmndName = "usgCursParsSet" ; cmndArgs = "" ;
# cps = collections.OrderedDict() ; cps['configBaseDir'] = configBaseDir ; cps['platformControlBaseDir'] = "${HOME}/bisosControl"
# cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')
cmndName = "usgCursParsSet" ; cmndArgs = "anyName=anyValue" ;
cps = collections.OrderedDict() ;
cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')
cmndName = "usgCursParsSet" ; cmndArgs = "anyName=anyValue" ;
cps = collections.OrderedDict() ;
cs.examples.cmndInsert(cmndName, cps, cmndArgs, icmWrapper="echo", verbosity='little')
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "File Parameters Get/Set -- Commands"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *File Parameters Get/Set -- Commands:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: bx:cs:python:func :funcName "FP_readTreeAtBaseDir_CmndOutput" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "interactive fpBaseDir cmndOutcome"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /FP_readTreeAtBaseDir_CmndOutput/ retType=bool argsList=(interactive fpBaseDir cmndOutcome) [[elisp:(org-cycle)][| ]]
#+end_org """
def FP_readTreeAtBaseDir_CmndOutput(
interactive,
fpBaseDir,
cmndOutcome,
):
####+END:
"""Invokes FP_readTreeAtBaseDir.cmnd as interactive-output only."""
#
    # Interactive-Output + Chained-Outcome Command Invocation
#
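    # NOTE: 'icm' is not imported in this module; this legacy helper is only reached from commented-out or unreachable code.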
FP_readTreeAtBaseDir = icm.FP_readTreeAtBaseDir()
FP_readTreeAtBaseDir.cmndLineInputOverRide = True
FP_readTreeAtBaseDir.cmndOutcome = cmndOutcome
return FP_readTreeAtBaseDir.cmnd(
interactive=interactive,
FPsDir=fpBaseDir,
)
####+BEGIN: b:py3:cs:cmnd/classHead :cmndName "usgCursParsDelete" :comment "" :parsMand "" :parsOpt "configBaseDir" :argsMin 0 :argsMax 9999 :pyInv ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* CmndSvc- [[elisp:(outline-show-subtree+toggle)][||]] <<usgCursParsDelete>> =verify= parsOpt=configBaseDir argsMax=9999 ro=cli [[elisp:(org-cycle)][| ]]
#+end_org """
class usgCursParsDelete(cs.Cmnd):
cmndParamsMandatory = [ ]
cmndParamsOptional = [ 'configBaseDir', ]
cmndArgsLen = {'Min': 0, 'Max': 9999,}
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def cmnd(self,
rtInv: cs.RtInvoker,
cmndOutcome: b.op.Outcome,
configBaseDir: typing.Optional[str]=None, # Cs Optional Param
argsList: typing.Optional[list[str]]=None, # CsArgs
) -> b.op.Outcome:
callParamsDict = {'configBaseDir': configBaseDir, }
if self.invocationValidate(rtInv, cmndOutcome, callParamsDict, argsList).isProblematic():
return b_io.eh.badOutcome(cmndOutcome)
cmndArgsSpecDict = self.cmndArgsSpec()
####+END:
self.cmndDocStr(f""" #+begin_org
** [[elisp:(org-cycle)][| *CmndDesc:* | ]] Remove The entire infoBaseDir
#+end_org """)
if not configBaseDir:
configBaseDir = configUsgCursFpBaseDir_obtain(None)
cmndArgs = self.cmndArgsGet("0&-1", cmndArgsSpecDict, argsList)
if len(cmndArgs) == 0:
try:
shutil.rmtree(configBaseDir)
except OSError as e:
print(f"Error: {configBaseDir} : {e.strerror}")
b.dir.createIfNotThere(configBaseDir)
else:
for each in cmndArgs:
parNameFullPath = os.path.join(
configBaseDir,
each
)
try:
shutil.rmtree(parNameFullPath)
except OSError as e:
print(f"Error: {parNameFullPath} : {e.strerror}")
return cmndOutcome
####+BEGIN: b:py3:cs:method/args :methodName "cmndArgsSpec" :methodType "anyOrNone" :retType "bool" :deco "default" :argsList "self"
""" #+begin_org
** _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* Mtd-T-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /cmndArgsSpec/ deco=default deco=default [[elisp:(org-cycle)][| ]]
#+end_org """
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def cmndArgsSpec(self, ):
####+END:
"""
***** Cmnd Args Specification
"""
cmndArgsSpecDict = cs.CmndArgsSpecDict()
cmndArgsSpecDict.argsDictAdd(
argPosition="0&-1",
argName="cmndArgs",
argDefault=None,
argChoices='any',
argDescription="A sequence of parNames"
)
return cmndArgsSpecDict
####+BEGIN: b:py3:cs:func/typing :funcName "curParsGetAsDictValue_wOp" :funcType "WOp" :retType "extTyped" :deco "" :argsList ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-T-WOp [[elisp:(outline-show-subtree+toggle)][||]] /curParsGetAsDictValue_wOp/ [[elisp:(org-cycle)][| ]]
#+end_org """
def curParsGetAsDictValue_wOp(
####+END:
parNamesList: list,
outcome: b.op.Outcome = None,
) -> b.op.Outcome:
""" #+begin_org
** [[elisp:(org-cycle)][| *DocStr | ] A Wrapped Operation with results being a dictionary of values.
if not ~parNamesList~, get all the values.
*** TODO --- NOTYET This needs to be moved to
#+end_org """
configBaseDir = configUsgCursFpBaseDir_obtain(None)
return (
FP_parsGetAsDictValue_wOp(parNamesList, configBaseDir, outcome)
)
####+BEGIN: b:py3:cs:func/typing :funcName "FP_parsGetAsDictValue_wOp" :funcType "wOp" :retType "OpOutcome" :deco "" :argsList ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-T-wOp [[elisp:(outline-show-subtree+toggle)][||]] /FP_parsGetAsDictValue_wOp/ [[elisp:(org-cycle)][| ]]
#+end_org """
def FP_parsGetAsDictValue_wOp(
####+END:
parNamesList: list,
configBaseDir,
outcome: b.op.Outcome = None,
) -> b.op.Outcome:
""" #+begin_org
** [[elisp:(org-cycle)][| *DocStr | ] A Wrapped Operation with results being a dictionary of values.
if not ~parNamesList~, get all the values.
*** TODO --- NOTYET This needs to be moved to
#+end_org """
return b.fp.parsGetAsDictValue_wOp(parNamesList, configBaseDir, outcome=outcome)
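    # NOTE: the early return above makes the remainder of this function unreachable; the legacy implementation is kept below for reference.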
if not outcome:
outcome = b.op.Outcome()
FP_readTreeAtBaseDir_CmndOutput(
interactive=False,
fpBaseDir=configBaseDir,
cmndOutcome=outcome,
)
results = outcome.results
opResults = dict()
opErrors = ""
if parNamesList:
for each in parNamesList:
# NOTYET, If no results[each], we need to record it in opErrors
opResults[each] = results[each].parValueGet()
#print(f"{each} {eachFpValue}")
else:
for eachFpName in results:
opResults[eachFpName] = results[eachFpName].parValueGet()
#print(f"{eachFpName} {eachFpValue}")
return outcome.set(
opError=b.OpError.Success,
opResults=opResults,
)
####+BEGIN: b:py3:cs:cmnd/classHead :cmndName "usgCursParsGetK2" :comment "" :parsMand "" :parsOpt "configBaseDir" :argsMin 0 :argsMax 9999 :pyInv ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* CmndSvc- [[elisp:(outline-show-subtree+toggle)][||]] <<usgCursParsGetK2>> =verify= parsOpt=configBaseDir argsMax=9999 ro=cli [[elisp:(org-cycle)][| ]]
#+end_org """
class usgCursParsGetK2(cs.Cmnd):
cmndParamsMandatory = [ ]
cmndParamsOptional = [ 'configBaseDir', ]
cmndArgsLen = {'Min': 0, 'Max': 9999,}
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def cmnd(self,
rtInv: cs.RtInvoker,
cmndOutcome: b.op.Outcome,
configBaseDir: typing.Optional[str]=None, # Cs Optional Param
argsList: typing.Optional[list[str]]=None, # CsArgs
) -> b.op.Outcome:
callParamsDict = {'configBaseDir': configBaseDir, }
if self.invocationValidate(rtInv, cmndOutcome, callParamsDict, argsList).isProblematic():
return b_io.eh.badOutcome(cmndOutcome)
cmndArgsSpecDict = self.cmndArgsSpec()
####+END:
self.cmndDocStr(f""" #+begin_org
** [[elisp:(org-cycle)][| *CmndDesc:* | ]] it reads from ../usgCurs/fp.
#+end_org """)
if not configBaseDir:
configBaseDir = configUsgCursFpBaseDir_obtain(None)
cmndArgs = self.cmndArgsGet("0&-1", cmndArgsSpecDict, argsList)
# FP_readTreeAtBaseDir_CmndOutput(
# interactive=False,
# fpBaseDir=configBaseDir,
# cmndOutcome=cmndOutcome,
# )
b.fp.readTreeAtBaseDir_wOp(configBaseDir, cmndOutcome=cmndOutcome)
results = cmndOutcome.results
if len(cmndArgs) == 0:
for eachFpName in results:
eachFpValue = results[eachFpName].parValueGet()
print(f"{eachFpName} {eachFpValue}")
else:
for each in cmndArgs:
eachFpValue = results[each].parValueGet()
print(f"{each} {eachFpValue}")
return cmndOutcome
####+BEGIN: b:py3:cs:cmnd/classHead :cmndName "usgCursParsGet" :comment "" :parsMand "" :parsOpt "configBaseDir" :argsMin 0 :argsMax 9999 :pyInv ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* CmndSvc- [[elisp:(outline-show-subtree+toggle)][||]] <<usgCursParsGet>> =verify= parsOpt=configBaseDir argsMax=9999 ro=cli [[elisp:(org-cycle)][| ]]
#+end_org """
class usgCursParsGet(cs.Cmnd):
cmndParamsMandatory = [ ]
cmndParamsOptional = [ 'configBaseDir', ]
cmndArgsLen = {'Min': 0, 'Max': 9999,}
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def cmnd(self,
rtInv: cs.RtInvoker,
cmndOutcome: b.op.Outcome,
configBaseDir: typing.Optional[str]=None, # Cs Optional Param
argsList: typing.Optional[list[str]]=None, # CsArgs
) -> b.op.Outcome:
callParamsDict = {'configBaseDir': configBaseDir, }
if self.invocationValidate(rtInv, cmndOutcome, callParamsDict, argsList).isProblematic():
return b_io.eh.badOutcome(cmndOutcome)
cmndArgsSpecDict = self.cmndArgsSpec()
####+END:
self.cmndDocStr(f""" #+begin_org
** [[elisp:(org-cycle)][| *CmndDesc:* | ]] it reads from ../usgCurs/fp.
#+end_org """)
if not configBaseDir:
configBaseDir = configUsgCursFpBaseDir_obtain(None)
cmndArgs = self.cmndArgsGet("0&-1", cmndArgsSpecDict, argsList)
curParsGetAsDictValue_wOp(cmndArgs, cmndOutcome)
results = cmndOutcome.results
if rtInv.outs:
for eachKey in results:
print(f"{eachKey}: {results[eachKey]}")
return cmndOutcome
####+BEGIN: b:py3:cs:method/args :methodName "cmndArgsSpec" :methodType "anyOrNone" :retType "bool" :deco "default" :argsList "self"
""" #+begin_org
** _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* Mtd-T-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /cmndArgsSpec/ deco=default deco=default [[elisp:(org-cycle)][| ]]
#+end_org """
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def cmndArgsSpec(self, ):
####+END:
"""
***** Cmnd Args Specification
"""
cmndArgsSpecDict = cs.CmndArgsSpecDict()
cmndArgsSpecDict.argsDictAdd(
argPosition="0&-1",
argName="cmndArgs",
argDefault=None,
argChoices='any',
argDescription="A sequence of parNames"
)
return cmndArgsSpecDict
####+BEGIN: b:py3:cs:cmnd/classHead :cmndName "usgCursParsSet" :comment "" :parsMand "" :parsOpt "configBaseDir bxoId sr" :argsMin 0 :argsMax 1000 :pyInv ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* CmndSvc- [[elisp:(outline-show-subtree+toggle)][||]] <<usgCursParsSet>> =verify= parsOpt=configBaseDir bxoId sr argsMax=1000 ro=cli [[elisp:(org-cycle)][| ]]
#+end_org """
class usgCursParsSet(cs.Cmnd):
cmndParamsMandatory = [ ]
cmndParamsOptional = [ 'configBaseDir', 'bxoId', 'sr', ]
cmndArgsLen = {'Min': 0, 'Max': 1000,}
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def cmnd(self,
rtInv: cs.RtInvoker,
cmndOutcome: b.op.Outcome,
configBaseDir: typing.Optional[str]=None, # Cs Optional Param
bxoId: typing.Optional[str]=None, # Cs Optional Param
sr: typing.Optional[str]=None, # Cs Optional Param
argsList: typing.Optional[list[str]]=None, # CsArgs
) -> b.op.Outcome:
callParamsDict = {'configBaseDir': configBaseDir, 'bxoId': bxoId, 'sr': sr, }
if self.invocationValidate(rtInv, cmndOutcome, callParamsDict, argsList).isProblematic():
return b_io.eh.badOutcome(cmndOutcome)
cmndArgsSpecDict = self.cmndArgsSpec()
####+END:
self.cmndDocStr(f""" #+begin_org
** [[elisp:(org-cycle)][| *CmndDesc:* | ]] Args are in the form of a list of varName=varValue. Well known pars can also be set.
=configBaseDir= defaults to ~configBaseDir_obtain()~
#+end_org """)
if not configBaseDir:
configBaseDir = configBaseDir_obtain()
cmndArgs = self.cmndArgsGet("0&-1", cmndArgsSpecDict, argsList)
parNameFullPath = ""
def createPathAndFpWrite(
fpPath,
valuePath,
):
valuePath = os.path.abspath(valuePath)
try:
os.makedirs(valuePath)
except OSError:
if not os.path.isdir(valuePath):
raise
            b.fp.FileParamWriteToPath(
parNameFullPath=fpPath,
parValue=valuePath,
)
parNameFullPath = fpPath
# Any number of Name=Value can be passed as args
for each in cmndArgs:
varNameValue = each.split('=')
parNameFullPath = os.path.join(
configUsgCursFpBaseDir_obtain(configBaseDir=configBaseDir),
varNameValue[0],
)
            b.fp.FileParamWriteToPath(
parNameFullPath=parNameFullPath,
parValue=varNameValue[1],
)
if bxoId:
            parNameFullPath = b.fp.FileParamWriteToPath(
parNameFullPath=os.path.join(
configUsgCursFpBaseDir_obtain(configBaseDir=configBaseDir),
"bxoId",
),
parValue=bxoId,
)
if sr:
            parNameFullPath = b.fp.FileParamWriteToPath(
parNameFullPath=os.path.join(configUsgCursFpBaseDir_obtain(configBaseDir=configBaseDir),
"sr",
),
parValue=sr,
)
if rtInv.outs:
parValue = b.fp.FileParamValueReadFromPath(parNameFullPath)
b_io.ann.here("usgCursParsSet: {parValue} at {parNameFullPath}".
format(parValue=parValue, parNameFullPath=parNameFullPath))
return cmndOutcome.set(
opError=b.OpError.Success,
opResults=True,
)
####+BEGIN: b:py3:cs:method/args :methodName "cmndArgsSpec" :methodType "anyOrNone" :retType "bool" :deco "default" :argsList "self"
""" #+begin_org
** _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* Mtd-T-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /cmndArgsSpec/ deco=default deco=default [[elisp:(org-cycle)][| ]]
#+end_org """
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def cmndArgsSpec(self, ):
####+END:
"""
***** Cmnd Args Specification
"""
cmndArgsSpecDict = cs.CmndArgsSpecDict()
cmndArgsSpecDict.argsDictAdd(
argPosition="0&-1",
argName="cmndArgs",
argDefault=None,
argChoices='any',
argDescription="A sequence of varName=varValue"
)
return cmndArgsSpecDict
####+BEGIN: b:prog:file/endOfFile :extraParams nil
""" #+begin_org
* *[[elisp:(org-cycle)][| END-OF-FILE |]]* :: emacs and org variables and control parameters
#+end_org """
### local variables:
### no-byte-compile: t
### end:
####+END:
|
bisos-pip/currents
|
py3/bisos/currents/currentsConfig.py
|
currentsConfig.py
|
py
| 33,875 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26057428953
|
import os
import re
import requests
from bs4 import BeautifulSoup
URL = "https://sourcesup.renater.fr/scm/viewvc.php/rec/2019-CONVECS/REC/"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
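# For every anchor that links to a <name>.rec file, fetch revision 3 and save it under rec/.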
for link in soup.find_all('a', href=True):
print(link['href'])
if 'name' in link:
print(link['name'])
m = re.search(r"/(\w+)\.rec", link["href"])
if m is not None:
print(m.group(1))
name = m.group(1)
URL = f"https://sourcesup.renater.fr/scm/viewvc.php/rec/2019-CONVECS/REC/{name}.rec?revision=3&view=co"
page = requests.get(URL)
print(page.content)
        os.makedirs("rec", exist_ok=True)  # make sure the output directory exists
        with open(f"rec/{name}.rec", "wb") as f:
            f.write(page.content)
|
philzook58/egglog-rec
|
scraper.py
|
scraper.py
|
py
| 711 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40327690661
|
from pyecharts import options as opts
from typing import Optional
from pyecharts.charts import Radar
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from easy_pyechart import constants,baseParams,radar_base_config,round_radar_base_config
class eRadar():
def __init__(
self,
lableList:Optional[list] = [],
valueList:Optional[list] = [],
):
self.opts: dict = {
"lengend":Radar,
"lableList":lableList,
"valueList":valueList,
}
    # Basic radar chart
def basic_radar_chart(self,baseParams):
self.opts.update(baseParams.opts)
return radar_base_config(self)
    # Single-selection legend mode
def radar_selected_mode(self,baseParams):
self.opts.update(baseParams.opts)
c=radar_base_config(self)
c.set_global_opts(
legend_opts=opts.LegendOpts(selected_mode="single"),
title_opts=opts.TitleOpts(title=self.opts['title'],subtitle=self.opts['subTitle'],))
return c
    # Air-quality style radar chart
def radar_air_quality(self,baseParams):
self.opts.update(baseParams.opts)
return radar_base_config(self)
    # Radar chart with a shaded angle/radius axis area
def radar_angle_radius_axis(self,baseParams):
self.opts.update(baseParams.opts)
return round_radar_base_config(self)
|
jayz2017/easy_pyechart.py
|
easy_pyechart/easy_radar.py
|
easy_radar.py
|
py
| 1,470 |
python
|
en
|
code
| 1 |
github-code
|
6
|
44407917630
|
'''
Created on 16/Aug/2011
@author: Marco
'''
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm
from math import sqrt
import ModelsCache
import Configuration
class PdfStructure(object):
'''
classdocs
'''
__markerList = []
__modelsCache = ModelsCache.ModelsCache()
@staticmethod
def AddMarker(digitalMarker):
PdfStructure.__markerList.append(digitalMarker)
@staticmethod
def RemoveMarker(tagName):
for tag in PdfStructure.__markerList:
if tag.name == tagName:
PdfStructure.__markerList.remove(tag)
@staticmethod
def GeneratePDF(fileName):
        c = canvas.Canvas(fileName)
for digitalMarker in PdfStructure.__markerList:
inputFile = open(Configuration.TAG_DIR()+digitalMarker.name+".txt","r")
tagDefinition = inputFile.read()
lines = tagDefinition.split("\n")
            (x,y) = digitalMarker.GetCenter()
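            # Map the marker centre from the 424x600 editor grid onto an A4 page (21 x 29.7 cm).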
tX = (float(x)/424)*21
tY = (float(y)/600)*29.7
for line in lines:
ellipse = line.split(" ")
if len(ellipse) == 10:
xCenter = -1*float(ellipse[3])
xCenter = (float(xCenter)/digitalMarker.defaultSize)*digitalMarker.size
yCenter = -1*float(ellipse[6])
yCenter = (float(yCenter)/digitalMarker.defaultSize)*digitalMarker.size
radius = ((0.5*sqrt((float(ellipse[3])*2)*(float(ellipse[3])*2)+(float(ellipse[6])*2)*(float(ellipse[6])*2)-4*float(ellipse[9])))/224)*digitalMarker.size
c.circle(xCenter/10*cm+tX*cm, yCenter/10*cm+tY*cm, radius/10*cm, fill=True)
c.save()
@staticmethod
def SaveModel(modelName):
out_file = open(Configuration.MODEL_DIR()+modelName+".model","a")
if not modelName:
raise Exception("ERROR: name is empty")
if not PdfStructure.__markerList:
raise Exception("ERROR: nothing to save as model")
for model in PdfStructure.__modelsCache.models:
if modelName == model.name:
raise Exception("ERROR: duplicated name")
model_names_file = open("ModelNames","a")
model_names_file.write(modelName+"\n")
model_names_file.close()
runeNames = []
runePositions = []
runeSizes = []
runeDefaultSizes = []
for rune in PdfStructure.__markerList:
runeNames.append(rune.name)
runePositions.append((rune.x, rune.y))
runeSizes.append(rune.size)
runeDefaultSizes.append(rune.defaultSize)
out_file.write(rune.name+" "+str(rune.x)+" "+str(rune.y)+" "+str(rune.size)+" "+str(rune.defaultSize)+"\n")
out_file.close()
PdfStructure.__modelsCache.AddModel(modelName, runeNames, runePositions, runeSizes, runeDefaultSizes)
@staticmethod
def GetModelNames():
modelNames = []
for model in PdfStructure.__modelsCache.models:
modelNames.append(model.name)
return modelNames
@staticmethod
def GetModel(modelName):
return PdfStructure.__modelsCache.GetModel(modelName)
@staticmethod
def DeleteModel(name):
model_names_file = open("ModelNames","r")
modelNames = model_names_file.read()
model_names_file.close()
model_names_file = open("ModelNames","w")
modelNames = modelNames.replace(name, "")
model_names_file.write(modelNames)
model_names_file.close()
for model in PdfStructure.__modelsCache.models:
if model.name == name:
PdfStructure.__modelsCache.models.remove(model)
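# Editor's note: a hedged usage sketch. The real DigitalMarker class is not
# in this file, so a minimal stand-in exposing the attributes the code above
# reads (name, x, y, size, defaultSize, GetCenter) is used here.
class _FakeMarker:
    def __init__(self, name, x, y, size=224, defaultSize=224):
        self.name, self.x, self.y = name, x, y
        self.size, self.defaultSize = size, defaultSize

    def GetCenter(self):
        return (self.x, self.y)

# PdfStructure.AddMarker(_FakeMarker("rune1", 100, 200))
# PdfStructure.GeneratePDF("markers.pdf")  # needs rune1.txt under TAG_DIR()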
| repo_name: mziccard/RuneTagDrawer | sub_path: PdfStructure.py | file_name: PdfStructure.py | file_ext: py | file_size_in_byte: 3,883 | program_lang: python | lang: en | doc_type: code | stars: 3 | dataset: github-code | pt: 6 |

seq_id: 23775563757
import flask
import grpc
import search_pb2_grpc as pb2_grpc
import search_pb2 as pb2
import redis
import json
from google.protobuf.json_format import MessageToJson
from flask import request, jsonify

app = flask.Flask(__name__)
app.config["DEBUG"] = True


class SearchClient(object):
    """
    Client for gRPC functionality
    """

    def __init__(self):
        self.host = 'localhost'
        self.server_port = 50051
        self.channel = grpc.insecure_channel(
            '{}:{}'.format(self.host, self.server_port))
        self.stub = pb2_grpc.SearchStub(self.channel)

    def get_results(self, message):
        """
        Client function to call the rpc for GetServerResponse
        """
        message = pb2.Message(message=message)
        print(f'{message}')
        return self.stub.GetServerResponse(message)


@app.route('/inventory/search', methods=['GET'])
def busqueda():
    if 'q' in request.args:
        busqueda = request.args['q']
        r = redis.Redis(host='localhost', port=6379, db=0)
        resultado = r.get(busqueda)
        if resultado is not None:
            # cache hit: return the JSON stored in Redis
            products = json.loads(resultado)
            return jsonify(products)
        else:
            # cache miss: query the gRPC backend and cache the serialized reply
            client = SearchClient()
            result = client.get_results(busqueda)
            print(result.product[0].name + "*******")
            serialized = MessageToJson(result)
            r.set(busqueda, serialized)
            return serialized
    else:
        return "Error: please specify the search to perform"


app.run()
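# Editor's note: a hedged sketch of exercising the endpoint from a separate
# process while the service is running; the host/port are Flask's defaults
# and the 'q' parameter is the one the route above reads.
def demo_query(term="laptop"):
    import requests
    resp = requests.get("http://127.0.0.1:5000/inventory/search", params={"q": term})
    return resp.status_code, resp.text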
| repo_name: manfruta/Sistemas-Tarea1 | sub_path: cliente_app.py | file_name: cliente_app.py | file_ext: py | file_size_in_byte: 1,587 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 71608254269
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

from djtransgan.dataset import batchlize


class DataLoaderSampler():
    def __init__(self, dataset, batch_size, drop_last=True, shuffle=True):
        self.count = 0
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.shuffle = shuffle
        self.current = self.get_new_dataloader()
        self.length = len(self.current)

    def get_new_dataloader(self):
        return iter(batchlize(self.dataset, self.batch_size, shuffle=self.shuffle))

    def __call__(self):
        self.count += 1
        if self.count > self.length:
            # exhausted: build a fresh dataloader and restart the counter
            self.current = self.get_new_dataloader()
            self.length = len(self.current)
            self.count = 1
        return next(self.current)
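# Editor's note: a self-contained sketch of the same endless-batch pattern
# using a plain torch DataLoader in place of the project's batchlize helper
# (an assumption; batchlize's real signature may differ).
import torch
from torch.utils.data import DataLoader, TensorDataset

_demo_dataset = TensorDataset(torch.randn(100, 8))
_demo_loader = DataLoader(_demo_dataset, batch_size=16, shuffle=True, drop_last=True)
_demo_iter = iter(_demo_loader)

def next_batch():
    global _demo_iter
    try:
        return next(_demo_iter)
    except StopIteration:
        _demo_iter = iter(_demo_loader)  # reshuffle and restart, like DataLoaderSampler
        return next(_demo_iter)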
| repo_name: ChenPaulYu/DJtransGAN | sub_path: djtransgan/dataset/datasampler.py | file_name: datasampler.py | file_ext: py | file_size_in_byte: 911 | program_lang: python | lang: en | doc_type: code | stars: 86 | dataset: github-code | pt: 6 |

seq_id: 35841523790
from .player import Player1
from .enemy import (
    Enemy1, Enemy2, Boss1, Boss2
)
from .asteroids import Asteroid1
from .background import (
    Backgr_lev1, Backgr_lev1a, Backgr_lev2, Backgr_lev2a, Backgr_lev3, Backgr_lev3a,
    Backgr_lev4, Backgr_lev4a, Backgr_lev5, Backgr_lev5a
)
from .startboard import StartBoard
from .explosion import (
    ExplBig, ExplSmall)
from .shootall import (
    ShootPlayer, ShootEnemy)
from .staticobjects import (
    Arrow_left, Arrow_right, Shoot_label)
from .labels import (
    Label1, Label2, Label3, Label4, Label5)


class UnitFactory():
    units_dict = {
        'startboard': StartBoard,
        'player1': Player1,
        'asteroid1': Asteroid1,
        'enemy1': Enemy1,
        'enemy2': Enemy2,
        'boss1': Boss1,
        'boss2': Boss2,
        'asteroids': Asteroid1,
        'explbig': ExplBig,
        'explsmall': ExplSmall,
        'shootplayer': ShootPlayer,
        'shootenemy': ShootEnemy,
        'backgrlev1': Backgr_lev1,
        'backgrlev1a': Backgr_lev1a,
        'backgrlev2': Backgr_lev2,
        'backgrlev2a': Backgr_lev2a,
        'backgrlev3': Backgr_lev3,
        'backgrlev3a': Backgr_lev3a,
        'backgrlev4': Backgr_lev4,
        'backgrlev4a': Backgr_lev4a,
        'backgrlev5': Backgr_lev5,
        'backgrlev5a': Backgr_lev5a,
        'arrowleft': Arrow_left,
        'arrowright': Arrow_right,
        'shootlabel': Shoot_label,
        'label1': Label1,
        'label2': Label2,
        'label3': Label3,
        'label4': Label4,
        'label5': Label5
    }

    @classmethod
    def create_unit(cls, unit_type, *a, **kwa):
        return cls.units_dict.get(unit_type)(*a, **kwa)


class Units():
    factory = UnitFactory

    def build_unit(self, unit_type, *a, **kwa):
        unit = self.__class__.factory.create_unit(unit_type, *a, **kwa)
        # self.update_records(unit_type)
        return unit

    # def update_records(self, unit_type):
    #     pass

    def cr_sources(self):
        sources = [
            'startboard',
            'player1',
            'asteroid1',
            'enemy1',
            'enemy2',
            'boss1',
            'boss2',
            'asteroids',
            'explbig',
            'explsmall',
            'shootplayer',
            'shootenemy',
            'backgrlev1',
            'backgrlev1a',
            'backgrlev2',
            'backgrlev2a',
            'backgrlev3',
            'backgrlev3a',
            'backgrlev4',
            'backgrlev4a',
            'backgrlev5',
            'backgrlev5a',
            'arrowleft',
            'arrowright',
            'shootlabel',
            'label1',
            'label2',
            'label3',
            'label4',
            'label5'
        ]
        for n in sources:
            self.build_unit(n)
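# Editor's note: the registry-factory pattern above, demonstrated
# self-contained with stand-in classes (the real unit classes take
# unknown constructor arguments, so they are not instantiated here).
class _Demo:
    def __init__(self, *a, **kwa):
        self.args = a, kwa

class _DemoFactory(UnitFactory):
    units_dict = {'demo': _Demo}

class _DemoUnits(Units):
    factory = _DemoFactory

# unit = _DemoUnits().build_unit('demo', 1, power=2)
# print(unit.args)   # ((1,), {'power': 2})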
| repo_name: MMiirrkk/Galaxy_Shooter_I | sub_path: objects/unitfactory.py | file_name: unitfactory.py | file_ext: py | file_size_in_byte: 2,700 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 21546390354
import re
from collections import Counter, defaultdict
from itertools import combinations
from typing import Dict, List, Tuple, Set

import numpy as np

from helper import load_input


def create_input():
    '''Extract puzzle input and transform'''
    # pattern for extracting replacements
    pattern = r"(\w+) => (\w+)"
    # split puzzle input into replacements and molecule
    replacements, molecule = load_input(day=19).read().strip("\n").split("\n\n")
    # regex and init empty dict of lists
    matches = re.findall(pattern, replacements)
    replacements_dict = defaultdict(list)
    # convert the replacements into a dictionary of lists
    for match in matches:
        replacements_dict[match[0]].append(match[1])
    return replacements_dict, molecule


def insert_replacements(start: str, end: str, replacements: List[str]) -> List[str]:
    '''
    Given the start & end of a molecule and a list of replacements,
    inserts each replacement between start and end to create new molecules.
    Returns these as a list.
    '''
    return [
        start + replacement + end
        for replacement in replacements
    ]


def generate_molecules(replacements_dict: Dict[str, List[str]], molecule: str) -> Set[str]:
    '''
    Given the replacements and starting molecule,
    generates all the possible molecules after one replacement,
    and returns them as a set.
    '''
    # prep storage for generated molecules
    generated_molecules = set()
    # loop through each element in the starting molecule
    for i, element in enumerate(molecule):
        # extract replacements for one- and two-character element names
        replacement1 = replacements_dict.get(element, None)
        replacement2 = replacements_dict.get(molecule[i:i+2], None)
        # slice the correct end of the molecule, dependent on match length
        if replacement1:
            end = molecule[i+1:]
        elif replacement2:
            end = molecule[i+2:]
        else:
            continue
        # update the generated set with new molecules after replacement
        generated_molecules.update(insert_replacements(
            start=molecule[:i],
            end=end,
            replacements=replacement1 or replacement2)
        )
    return generated_molecules


def part1():
    '''
    How many distinct molecules can be created
    after all the different ways you can do one replacement on the medicine molecule?
    '''
    replacements_dict, molecule = create_input()
    return len(generate_molecules(replacements_dict, molecule))


def part2():
    ...


if __name__ == '__main__':
    print(part1())
    print(part2())
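# Editor's note: a quick self-check (not from the source) against the worked
# example in the puzzle statement: H => HO, H => OH, O => HH applied to "HOH"
# should yield exactly 4 distinct molecules: HOOH, HOHO, OHOH, HHHH.
if __name__ == '__main__':
    sample = {'H': ['HO', 'OH'], 'O': ['HH']}
    assert generate_molecules(sample, 'HOH') == {'HOOH', 'HOHO', 'OHOH', 'HHHH'}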
| repo_name: rick-62/advent-of-code | sub_path: advent_of_code_2015/solutions/day19.py | file_name: day19.py | file_ext: py | file_size_in_byte: 2,647 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 32802770666
from elasticsearch import Elasticsearch, helpers
import csv
import json
import time

mvar = "clara"
matching_query = {
    "query_string": {
        "query": mvar
    }
}


def main():
    # connect to Elasticsearch
    es = Elasticsearch(host="localhost", port=9200)
    # read the CSV file
    with open('BX-Books.csv', "r", encoding="utf8") as f:
        reader = csv.DictReader(f)
        # take whatever reader yields as a list
        # lst = list(reader)
        # bulk-index the rows; the index is created if it does not exist yet
        helpers.bulk(es, reader, index="bx_books_2")


if __name__ == "__main__":
    main()
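# Editor's note: matching_query is defined above but never executed; this is
# a hedged sketch of how it would be run against the index created by main().
# The body-style search call matches the 7.x Python client used here.
def search_books():
    es = Elasticsearch(host="localhost", port=9200)
    result = es.search(index="bx_books_2", body={"query": matching_query})
    for hit in result["hits"]["hits"]:
        print(hit["_source"])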
| repo_name: d4g10ur0s/InformationRetrieval_21_22 | sub_path: save_books.py | file_name: save_books.py | file_ext: py | file_size_in_byte: 627 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 38649682103
import http

import requests
import telegram
from flask import Blueprint, Response, request
from sqlalchemy_utils import create_database, database_exists

from config import BUILD_NUMBER, DATABASE_URL, REBASE_URL, VERSION

from .bot import dispatcher
from .db import db, test_db
from .utils import log

routes = Blueprint('routes', __name__, url_prefix='/')


@routes.get('/health')
def health_check() -> Response:
    try:
        if not database_exists(DATABASE_URL):
            create_database(DATABASE_URL)
        db.create_all()
    except Exception as exc:
        log.exception('Health checking database... %s: %s', 'ERR', exc)
    return {
        'bot': 'up' if dispatcher is not None else 'down',
        'version': f'{VERSION}-{BUILD_NUMBER}',
        'db': 'up' if test_db() else 'down',
    }, http.HTTPStatus.OK


@routes.get('/rebase')
def reset() -> Response:
    if REBASE_URL is None:
        return {'error': 'No rebase URL provided'}, http.HTTPStatus.INTERNAL_SERVER_ERROR
    return requests.get(
        f'https://api.telegram.org/bot{dispatcher.bot.token}/setWebhook?url={REBASE_URL}'
    ).content


@routes.post('/')
def index() -> Response:
    if dispatcher is None:
        return 'Bot is inactive', http.HTTPStatus.INTERNAL_SERVER_ERROR
    update = telegram.Update.de_json(request.get_json(force=True), dispatcher.bot)
    dispatcher.process_update(update)
    return '', http.HTTPStatus.NO_CONTENT
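# Editor's note: a hedged sketch of mounting this blueprint in an app
# factory; the factory itself is an assumption, only the `routes` blueprint
# comes from the file above.
from flask import Flask

def create_app() -> Flask:
    app = Flask(__name__)
    app.register_blueprint(routes)  # exposes /health, /rebase, and the webhook
    return app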
| repo_name: andrewscwei/python-telegram-bot-starter-kit | sub_path: app/routes.py | file_name: routes.py | file_ext: py | file_size_in_byte: 1,370 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |

seq_id: 39697340199
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d


# plots intensity time series for the MDRE model
def plotIntensity():

    # index boundaries for the plotted time window
    nStart = 0
    nEnd = 10000

    with open("time_series.txt", "r") as file:
        lines = file.readlines()

    time = []
    intensity = []
    rho_GS_e_act = []
    rho_GS_h_act = []
    rho_GS_e_inact = []
    rho_GS_h_inact = []
    rho_ES_e = []
    rho_ES_h = []
    E_real = []
    E_imag = []

    for line in lines:
        cols = line.split(' ')  # split each line once instead of per column
        time.append(float(cols[0]))
        intensity.append(float(cols[1]))
        E_real.append(float(cols[2]))
        E_imag.append(float(cols[3]))
        rho_GS_e_act.append(float(cols[6]))
        rho_GS_h_act.append(float(cols[7]))
        rho_GS_e_inact.append(float(cols[8]))
        rho_GS_h_inact.append(float(cols[9]))
        rho_ES_e.append(float(cols[10]))
        rho_ES_h.append(float(cols[11]))

    time = np.array(time)
    intensity = np.array(intensity)
    E_real = np.array(E_real)
    E_imag = np.array(E_imag)
    rho_GS_e_act = np.array(rho_GS_e_act)
    rho_GS_h_act = np.array(rho_GS_h_act)
    rho_GS_e_inact = np.array(rho_GS_e_inact)
    rho_GS_h_inact = np.array(rho_GS_h_inact)
    rho_ES_e = np.array(rho_ES_e)
    rho_ES_h = np.array(rho_ES_h)

    # calculation of the population inversion
    inversion_GS_act = rho_GS_e_act + rho_GS_h_act - 1.0
    inversion_GS_inact = rho_GS_e_inact + rho_GS_h_inact - 1.0
    inversion_ES = rho_ES_e + rho_ES_h - 1.0

    fig, (ax1, ax2) = plt.subplots(1, 2)  # sharey=True
    ax12 = ax1.twinx()
    fig.set_size_inches(5.9, 3.2)
    plt.rcParams.update({"font.size": 9})
    fig.subplots_adjust(wspace=0.7, top=0.99, bottom=0.22, left=0.08, right=0.99)
    fig.text(0.005, 0.93, "a)")

    ax1.plot(time[nStart:nEnd], intensity[nStart:nEnd], color="crimson")
    ax1.set_xlabel(r"time $t$ / ns", size=9.0)
    ax1.set_ylabel(r"intensity $|E|^2$", color="crimson", size=9.0)
    ax1.set_ylim(np.min(intensity) - 0.1, np.max(intensity) + 0.3)
    ax1.set_xticks([0.0, 5.0, 10.0])
    ax1.set_yticks([0.0, 1.0, 2.0, 3.0])
    ax1.tick_params(axis="x", labelsize=9.0)
    ax1.tick_params(axis="y", colors="crimson", labelsize=9.0)
    ax1.set_zorder(1)
    ax1.set_facecolor("none")

    ax12.plot(time[nStart:nEnd], inversion_GS_act[nStart:nEnd], color="orange", label="GS act")
    ax12.plot(time[nStart:nEnd], inversion_GS_inact[nStart:nEnd], color="gray", linestyle="--", label="GS inact")
    ax12.plot(time[nStart:nEnd], inversion_ES[nStart:nEnd], color="cornflowerblue", label="ES")
    ax12.set_ylabel(r"population inversion" + "\n" + r"$\rho_{m,e}^{(in)act} + \rho_{m,h}^{(in)act} - 1$", size=9.0)
    ax12.set_ylim(-1.075, 1.075)
    ax12.set_yticks([-1.0, 0.0, 1.0])
    ax12.tick_params(axis="y", labelsize=9.0)
    ax12.set_zorder(2)
    ax12.legend(bbox_to_anchor=(0.44, 0.33))

    # ~ fig, ax = plt.subplots()
    # ~ fig.set_size_inches(5.9, 4.8)
    # ~ fig.subplots_adjust(top=0.99, bottom=0.15, left=0.10, right=0.99)

    fig.text(0.575, 0.93, "b)")
    ax2.plot(inversion_GS_act, intensity, color="orange", label="GS act")
    ax2.plot(inversion_GS_inact, intensity, color="gray", linestyle="--", label="GS inact")
    ax2.plot(inversion_ES, intensity, color="cornflowerblue", label="ES")
    ax2.set_xlabel(r"population inversion" + "\n" + r"$\rho_{m,e}^{(in)act} + \rho_{m,h}^{(in)act} - 1$", size=9.0)
    ax2.set_ylabel(r"intensity $|E|^2$", color="crimson", size=9.0)
    ax2.set_xlim(-1.075, 1.075)
    ax2.set_ylim(-0.15, 3.15)
    ax2.set_xticks([-1.0, 0.0, 1.0])
    ax2.set_yticks([0.0, 1.0, 2.0, 3.0])
    ax2.tick_params(axis="x", labelsize=9.0)
    ax2.tick_params(axis="y", colors="crimson", labelsize=9.0)
    ax2.grid(color="lightgray")
    ax2.legend(loc="upper left")

    plt.show()


plotIntensity()
| repo_name: sir-aak/microscopically-derived-rate-equations | sub_path: plotscripts/mdre_plotscript_intensity_inversion.py | file_name: mdre_plotscript_intensity_inversion.py | file_ext: py | file_size_in_byte: 3,810 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |

seq_id: 34652323206
# Subgroup enumeration for cyclic, dicyclic, and tricyclic integer groups.
# PM Larsen, 2019
#
# The theory implemented here is described for two-dimensional groups in:
#     Representing and counting the subgroups of the group Z_m x Z_n
#     Mario Hampejs, Nicki Holighaus, László Tóth, and Christoph Wiesmeyr
#     Journal of Numbers, vol. 2014, Article ID 491428
#     http://dx.doi.org./10.1155/2014/491428
#     https://arxiv.org/abs/1211.1797
#
# and for three-dimensional groups in:
#     On the subgroups of finite Abelian groups of rank three
#     Mario Hampejs and László Tóth
#     Annales Univ. Sci. Budapest., Sect. Comp. 39 (2013), 111–124
#     https://arxiv.org/abs/1304.2961

import itertools
import numpy as np
from math import gcd


def get_divisors(n):
    return [i for i in range(1, n + 1) if n % i == 0]


def get_subgroup_elements(orders, H):
    size = 1
    for e, x in zip(np.diag(H), orders):
        if e != 0:
            size *= x // e

    dimension = len(orders)
    indices = np.zeros((size, dimension), dtype=int)
    indices[:, 0] = H[0, 0] * np.arange(size)

    for i, order in enumerate(orders):
        if i > 0 and H[i, i] != 0:
            k = np.prod(orders[:i]) // np.prod(np.diag(H)[:i])
            p = np.arange(size) // k
            for j in range(i + 1):
                indices[:, j] += H[i, j] * p
    return indices % orders


def consistent_first_rows(dimension, dm, ffilter):
    for a in dm:
        H = np.zeros((dimension, dimension), dtype=int)
        H[0, 0] = a
        if ffilter is None or ffilter(H):
            yield a


def solve_linear_congruence(r, a, b, c, s, v):
    for u in range(a + 1):
        if (r // c * u) % a == (r * v * s // (b * c)) % a:
            return u
    raise Exception("u not found")


def enumerate_subgroup_bases(orders, ffilter=None,
                             min_index=1, max_index=float("inf")):
    """Get the subgroup bases of a cyclic/dicyclic/tricyclic integer group.

    Parameters:

    orders: list-like integer object
        Orders of the constituent groups.
        [m] if the group is a cyclic group Zm
        [m, n] if the group is a dicyclic group Zm x Zn
        [m, n, r] if the group is a tricyclic group Zm x Zn x Zr

    ffilter: function, optional
        A boolean filter function. Avoids generation of unwanted subgroups by
        rejecting partial bases.

    Returns iterator object yielding:

    H: integer ndarray
        Subgroup basis.
    """
    dimension = len(orders)
    assert dimension in [1, 2, 3]

    if dimension == 1:
        m = orders[0]
    elif dimension == 2:
        m, n = orders
    else:
        m, n, r = orders

    dm = get_divisors(m)

    if dimension == 1:
        for d in consistent_first_rows(dimension, dm, ffilter):
            group_index = m // d
            if group_index >= min_index and group_index <= max_index:
                yield np.array([[d]])

    elif dimension == 2:
        dn = get_divisors(n)
        for a in consistent_first_rows(dimension, dm, ffilter):
            for b in dn:
                group_index = m * n // (a * b)
                if group_index < min_index or group_index > max_index:
                    continue
                for t in range(gcd(a, n // b)):
                    s = t * a // gcd(a, n // b)
                    H = np.array([[a, 0], [s, b]])
                    if ffilter is None or ffilter(H):
                        yield H

    elif dimension == 3:
        dn = get_divisors(n)
        dr = get_divisors(r)
        for a in consistent_first_rows(dimension, dm, ffilter):
            for b, c in itertools.product(dn, dr):
                group_index = m * n * r // (a * b * c)
                if group_index < min_index or group_index > max_index:
                    continue
                A = gcd(a, n // b)
                B = gcd(b, r // c)
                C = gcd(a, r // c)
                ABC = A * B * C
                X = ABC // gcd(a * r // c, ABC)
                for t in range(A):
                    s = a * t // A
                    H = np.zeros((dimension, dimension), dtype=int)
                    H[0] = [a, 0, 0]
                    H[1] = [s, b, 0]
                    H[2, 2] = r
                    if ffilter is not None and not ffilter(H):
                        continue
                    for w in range(B * gcd(t, X) // X):
                        v = b * X * w // (B * gcd(t, X))
                        u0 = solve_linear_congruence(r, a, b, c, s, v)
                        for z in range(C):
                            u = u0 + a * z // C
                            H = np.array([[a, 0, 0], [s, b, 0], [u, v, c]])
                            if ffilter is None or ffilter(H):
                                yield H


def count_subgroups(orders):
    """Count the number of subgroups of a cyclic/dicyclic/tricyclic integer
    group.

    Parameters:

    orders: list-like integer object
        Orders of the constituent groups.
        [m] if the group is a cyclic group Zm
        [m, n] if the group is a dicyclic group Zm x Zn
        [m, n, r] if the group is a tricyclic group Zm x Zn x Zr

    Returns:

    n: integer
        Number of subgroups.
    """
    def P(n):
        return sum([gcd(k, n) for k in range(1, n + 1)])

    dimension = len(orders)
    assert dimension in [1, 2, 3]

    if dimension == 1:
        m = orders[0]
    elif dimension == 2:
        m, n = orders
    else:
        m, n, r = orders

    dm = get_divisors(m)
    if dimension == 1:
        return len(dm)
    elif dimension == 2:
        dn = get_divisors(n)
        return sum([gcd(a, b) for a in dm for b in dn])
    else:
        dn = get_divisors(n)
        dr = get_divisors(r)
        total = 0
        for a, b, c in itertools.product(dm, dn, dr):
            A = gcd(a, n // b)
            B = gcd(b, r // c)
            C = gcd(a, r // c)
            ABC = A * B * C
            X = ABC // gcd(a * r // c, ABC)
            total += ABC // X**2 * P(X)
        return total
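# Editor's note: a small self-check (not from the source). Z4 x Z2 has eight
# subgroups, so the closed-form count sum(gcd(a, b)) over divisor pairs
# should agree with explicit enumeration of the bases.
if __name__ == "__main__":
    orders = [4, 2]
    bases = list(enumerate_subgroup_bases(orders))
    assert len(bases) == count_subgroups(orders) == 8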
| repo_name: pmla/evgraf | sub_path: evgraf/subgroup_enumeration.py | file_name: subgroup_enumeration.py | file_ext: py | file_size_in_byte: 6,050 | program_lang: python | lang: en | doc_type: code | stars: 13 | dataset: github-code | pt: 6 |