seq_id
stringlengths 7
11
| text
stringlengths 156
1.7M
| repo_name
stringlengths 7
125
| sub_path
stringlengths 4
132
| file_name
stringlengths 4
77
| file_ext
stringclasses 6
values | file_size_in_byte
int64 156
1.7M
| program_lang
stringclasses 1
value | lang
stringclasses 38
values | doc_type
stringclasses 1
value | stars
int64 0
24.2k
⌀ | dataset
stringclasses 1
value | pt
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|---|
5292164947
|
#right now, I am using this script to play around w/ different definitions of signal and control regions
import ROOT
from TIMBER.Analyzer import HistGroup, CutGroup
from TIMBER.Tools.Common import CompileCpp
from argparse import ArgumentParser
from XHYbbWW_class import XHYbbWW
from collections import OrderedDict
def KinematicLepton(self): #bringing this function in here so I can select lepton w/o quality/isolation cuts
    """Select the highest-pt kinematic lepton (electron or muon) for the event.

    Standalone variant of the class method: applies only kinematic selection
    (via the compiled kinElectron/kinMuon helpers), no quality/isolation cuts.
    Defines merged Lepton_{pt,eta,phi,mass} columns and records the SIGLEP
    cutflow count. Returns the active analyzer node after the cut.
    NOTE(review): assumes kinElectron/kinMuon/LeptonIdx were compiled into
    ROOT earlier in the job — confirm against the snapshot step.
    """
    # Indices of the best kinematic electron/muon candidate (-1 if none).
    self.a.Define('kinEleIdx','kinElectron(Electron_pt,Electron_eta,Electron_phi,Higgs_phi,Wqq_phi)')
    self.a.Define('kinMuIdx','kinMuon(Muon_pt,Muon_eta,Muon_phi,Higgs_phi,Wqq_phi)')
    self.a.Cut('kinLepton_cut','kinEleIdx != -1 || kinMuIdx != -1') #at least one good lepton
    self.a.Define('LeptonType','LeptonIdx(kinEleIdx,kinMuIdx,Electron_pt,Muon_pt)') #picks higher pt signal lepton - output = 0 (lepton is electron) or 1 (lepton is muon)
    # Record weighted event count after the lepton requirement for the cutflow table.
    self.SIGLEP = self.getNweighted()
    self.AddCutflowColumn(self.SIGLEP,'SIGLEP')
    #For ease, merge some lepton columns that will be useful later (for lepton-type specific variables, use LeptonType to determine if electron or muon)
    self.a.Define('Lepton_pt','LeptonType == 1 ? Muon_pt[kinMuIdx] : Electron_pt[kinEleIdx]')
    self.a.Define('Lepton_eta','LeptonType == 1 ? Muon_eta[kinMuIdx] : Electron_eta[kinEleIdx]')
    self.a.Define('Lepton_phi','LeptonType == 1 ? Muon_phi[kinMuIdx] : Electron_phi[kinEleIdx]')
    self.a.Define('Lepton_mass','LeptonType == 1 ? Muon_mass[kinMuIdx] : Electron_mass[kinEleIdx]')
    return self.a.GetActiveNode()
def MXvsMY_studies(self):
    """Produce study histograms: lepton-ID distributions and MX-vs-MY 2D plots.

    Defines W/Y/X mass variables, books electron-MVA histograms in three
    ECAL eta regions, then (per tagger) applies mass cuts and SR/CR x
    pass/fail categorization and books an MX-vs-MY histogram per region.
    Writes everything to <setname>_<year>_<ijob>_MXvsMYstudies.root.
    NOTE(review): indentation reconstructed from a whitespace-stripped dump —
    confirm which trailing statements sit inside the tagger loop.
    """
    ##### NEW VARIABLES FOR LATER USE #####
    #W_leptonic transverse mass
    self.a.Define('W_massTran','TransverseMass(MET_pt,Lepton_pt,MET_phi,Lepton_phi)') #Transverse W mass
    # self.a.Define('W_massTran_genMET','TransverseMass(MET_fiducialGenPt,Lepton_pt,MET_fiducialGenPhi,Lepton_phi)') #using generator-level MET variables
    #Lorentz 4-vectors
    self.a.Define('MET_vect','hardware::TLvector(MET_pt,0,MET_phi,0)') #neutrino mass negligable, for now assuming MET_eta = 0 (p_z = 0)
    self.a.Define('Lepton_vect','hardware::TLvector(Lepton_pt,Lepton_eta,Lepton_phi,Lepton_mass)')
    self.a.Define('Wqq_vect','hardware::TLvector(Wqq_pt,Wqq_eta,Wqq_phi,Wqq_msoftdrop)')
    self.a.Define('Hbb_vect','hardware::TLvector(Higgs_pt,Higgs_eta,Higgs_phi,Higgs_msoftdrop)')
    #Invariant masses of W/Y/X
    self.a.Define('W_massInv','hardware::InvariantMass({MET_vect,Lepton_vect})') #full invariant mass
    self.a.Define('Y_mass','hardware::InvariantMass({Lepton_vect,MET_vect,Wqq_vect})')
    self.a.Define('X_mass','hardware::InvariantMass({Lepton_vect,MET_vect,Wqq_vect,Hbb_vect})')
    studiesPlots = HistGroup('studiesPlots')
    #######################################
    #First lets make some plots examining the lepton quality cuts in the different MC samples
    #Muon_mediumId, Electron_mvaFall17V2noIso vs eta, Electron_mvaFall17V2noIso_WP80, Electron_mvaFall17V2noIso_WP90, Electron_mvaFall17V2noIso_WPL
    # Save the pre-split node so we can return to it after each lepton-flavor branch.
    start=self.a.GetActiveNode()
    muonEvents=self.a.Cut('Muon_events','LeptonType == 1')
    self.a.SetActiveNode(muonEvents)
    self.a.ObjectFromCollection('kinMu','Muon','kinMuIdx')
    #studiesPlots.Add('kinMu_mediumId',self.a.GetActiveNode().DataFrame.Histo1D(('kinMu_mediumId','kinMu_mediumId',2,0,2),'kinMu_mediumId','weight__nominal')) #bins may not work
    self.a.SetActiveNode(start)
    electronEvents=self.a.Cut('Electron_events','LeptonType == 0')
    self.a.SetActiveNode(electronEvents)
    self.a.ObjectFromCollection('kinEle','Electron','kinEleIdx')
    #studiesPlots.Add('kinEle_mvaFall17V2noIso vs eta',self.a.DataFrame.Histo2D(('kinEle_mvaFall17V2noIso vs eta','kinEle_mvaFall17V2noIso vs eta',1000,0,1,250,0,2.5),'kinEle_mvaFall17V2noIso', 'kinEle_eta','weight__nominal'))
    #Make three plots for electron mva (for different etas/ECAL regions)
    no_eta = self.a.GetActiveNode()
    # Inner barrel: |eta| < 0.8
    inner_barrel = self.a.Cut('inner_barrel','abs(kinEle_eta) < 0.8')
    self.a.SetActiveNode(inner_barrel)
    studiesPlots.Add('kinEle_mvaFall17V2noIso (inner barrel)',self.a.DataFrame.Histo1D(('kinEle_mvaFall17V2noIso (|eta| < 0.8)','kinEle_mvaFall17V2noIso (inner barrel - |eta| < 0.8)',100,0,1),'kinEle_mvaFall17V2noIso', 'weight__nominal'))
    self.a.SetActiveNode(no_eta)
    # Outer barrel: 0.8 < |eta| < 1.479
    outer_barrel = self.a.Cut('outer_barrel','abs(kinEle_eta) > 0.8 && abs(kinEle_eta) < 1.479')
    self.a.SetActiveNode(outer_barrel)
    studiesPlots.Add('kinEle_mvaFall17V2noIso (outer barrel)',self.a.DataFrame.Histo1D(('kinEle_mvaFall17V2noIso (0.8 < |eta| < 1.479)','kinEle_mvaFall17V2noIso (outer barrel - 0.8 < |eta| < 1.479)',100,0,1),'kinEle_mvaFall17V2noIso', 'weight__nominal'))
    self.a.SetActiveNode(no_eta)
    # Endcap: 1.479 < |eta| < 2.5
    endcap = self.a.Cut('endcap','abs(kinEle_eta) > 1.479 && abs(kinEle_eta) < 2.5')
    self.a.SetActiveNode(endcap)
    studiesPlots.Add('kinEle_mvaFall17V2noIso (endcap)',self.a.DataFrame.Histo1D(('kinEle_mvaFall17V2noIso (1.479 < |eta| < 2.5)','kinEle_mvaFall17V2noIso (endcap - 1.479 < |eta| < 2.5)',100,0,1),'kinEle_mvaFall17V2noIso', 'weight__nominal'))
    '''
    studiesPlots.Add('kinEle_mvaFall17V2noIso_WP80',self.a.GetActiveNode().DataFrame.Histo1D(('kinEle_mvaFall17V2noIso_WP80','kinEle_mvaFall17V2noIso_WP80',2,0,2),'kinEle_mvaFall17V2noIso_WP80','weight__nominal').GetValue())
    print('kinele_mvaWP80 plot made')
    studiesPlots.Add('kinEle_mvaFall17V2noIso_WP90',self.a.GetActiveNode().DataFrame.Histo1D(('kinEle_mvaFall17V2noIso_WP90','kinEle_mvaFall17V2noIso_WP90',2,0,2),'kinEle_mvaFall17V2noIso_WP90','weight__nominal').GetValue())
    print('kinele_mvaWP90 plot made')
    studiesPlots.Add('kinEle_mvaFall17V2noIso_WPL',self.a.GetActiveNode().DataFrame.Histo1D(('kinEle_mvaFall17V2noIso_WPL','kinEle_mvaFall17V2noIso_WPL',2,0,2),'kinEle_mvaFall17V2noIso_WPL','weight__nominal').GetValue())
    print('kinele_mvaWPL plot made')
    '''
    self.a.SetActiveNode(start)
    taggers = ['particleNetMD']
    # now we want to plot mX vs mY for QCD, ttbar, and signal
    for t in taggers:
        self.ApplyMassCuts()
        start=self.a.GetActiveNode()
        # We use Wqq tagging scores to divide data into two regions: signal (enriched in signal) and control (enriched in background)
        # - Signal: Wqq > 0.8, pass lepton medium ID
        # - Control: Wqq < 0.8, fail lepton medium ID
        # We define a pass/fail criteria for the Hbb score within each region
        # - Region 1 (fail): Hbb < 0.94
        # - Region 2 (pass): Hbb > 0.94
        SR=self.ApplySRorCR('SR',t)
        SR_FP=self.ApplyPassFail('SR',t)
        self.a.SetActiveNode(start)
        CR=self.ApplySRorCR('CR',t)
        CR_FP=self.ApplyPassFail('CR',t)
        # Collect the four analysis-region nodes (SR/CR x fail/pass) for plotting.
        nodes=OrderedDict()
        nodes.update(SR_FP)
        nodes.update(CR_FP)
        bins = [80,0,4500]  # nbins, low, high — shared for both axes
        for node in nodes.keys():
            self.a.SetActiveNode(nodes[node])
            print('MX vs MY: Plotting for {}'.format(node))
            studiesPlots.Add('MXvsMY_{}'.format(node), self.a.DataFrame.Histo2D(('MXvsMY_{}'.format(node), 'X vs Y Invariant Mass - {} {}'.format(node.split('_')[1],node.split('_')[0]), bins[0], bins[1], bins[2], bins[0], bins[1], bins[2]), 'X_mass', 'Y_mass', 'weight__nominal'))
    # Write all booked histograms to the output file.
    outFile = ROOT.TFile.Open('{}_{}_{}_MXvsMYstudies.root'.format(self.setname,self.year,self.ijob),'RECREATE')
    outFile.cd()
    studiesPlots.Do('Write')
    #self.a.PrintNodeTree('NodeTree.pdf',verbose=True)
    outFile.Close()
if __name__ == "__main__":
    # Command-line driver: build the analyzer from a snapshot file list and
    # run the lepton selection + MX/MY study chain.
    parser = ArgumentParser()
    parser.add_argument('-s', type=str, dest='setname',
                        action='store',help='name of data set to run on')
    parser.add_argument('-y', type=str, dest='year',
                        action='store', help='year',required=False)
    parser.add_argument('-j', type=int, dest='ijob',required=False,
                        action='store', help='current job')
    parser.add_argument('-n', type=int, dest='njobs',required=False,
                        action='store', help='number of jobs')
    args = parser.parse_args()
    setname=args.setname
    year=args.year
    ijob=args.ijob
    njobs=args.njobs
    # Snapshot list produced by an earlier stage of the workflow.
    filename='snapshots/{}_{}_snapshot.txt'.format(setname,year)
    ana = XHYbbWW(filename,ijob,njobs)
    # ana.ApplyStandardCorrections(post_snapshot=True)
    ana.Dijets()
    # Module-level functions are called with the analyzer instance explicitly
    # (they are written as unbound methods taking `self`).
    KinematicLepton(ana)
    MXvsMY_studies(ana)
|
michaelhesford/XHYbbWW_semileptonic
|
MXvsMY_studies.py
|
MXvsMY_studies.py
|
py
| 8,394 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33186103812
|
from simplejson import dumps
from webob import Response
from pycurl import Curl
from subprocess import Popen, PIPE
from multiprocessing import Queue
from traceback import format_exc
from time import sleep
import logging
import tarfile
import os
import os.path
import urllib
import uuid
import sys
import os
from config import conf
from common import RequestHandler
class GitRepository(object):
    """Thin wrapper around git (and pbuilder) command-line invocations.

    All commands run with the repository path as the working directory.
    Python 2 codebase: subprocess output is handled as str.
    """
    def __init__(self, path=None):
        # path: working tree directory; may be None for remote-only
        # operations such as ls_remote.
        self.path = path
    def _cmd(self, args, shell=False):
        """Run a command, returning ((stdout, stderr), returncode)."""
        try:
            os.chdir(self.path)
        except: pass
        # NOTE(review): bare except silently ignores a bad/None path, so the
        # command then runs in whatever the current directory happens to be.
        logging.debug('cwd: %s exec: %s' % (os.getcwd(), ' '.join(args)))
        p = Popen(args, stdout=PIPE, stderr=PIPE, shell=shell)
        ret = (p.communicate(), p.returncode)
        if ret[0][0]:
            logging.debug('\n'.join(ret[0]))
        return ret
    def _git(self, args):
        # All git subcommands funnel through _cmd with the fixed binary path.
        return self._cmd(['/usr/bin/git'] + args)
    def clone(self, gitpath):
        """Clone gitpath into self.path."""
        return self._git(['clone', gitpath, self.path])
    def checkout(self, ref):
        """Check out a tag, branch, or commit."""
        return self._git(['checkout', ref])
    def submodule_init(self):
        return self._git(['submodule', 'init'])
    def submodule_update(self):
        return self._git(['submodule', 'update'])
    def ls_remote(self, gitpath):
        """Return [[sha, refname], ...] for heads and tags of a remote."""
        output, retcode = self._git(['ls-remote', '--heads', '--tags', gitpath])
        stdout, stderr = output
        return [x.split('\t') for x in stdout.split('\n') if x]
    def show_ref(self):
        """Return [[sha, refname], ...] for local heads and tags."""
        output, retcode = self._git(['show-ref', '--heads', '--tags'])
        stdout, stderr = output
        return [x.split(' ', 1) for x in stdout.split('\n') if x]
    def build(self, signkey, pbuilderrc, resultsdir):
        """Build the package: git-buildpackage if an 'upstream' branch exists,
        otherwise plain pdebuild. Results land in resultsdir.
        NOTE(review): the embedded quoting in the git-buildpackage argument
        list looks unbalanced ('--git-builder="pdebuild ...' is never closed)
        — confirm against a working deployment before touching it.
        """
        if 'refs/heads/upstream' in [x[1] for x in self.show_ref()]:
            cmd = ['/usr/bin/git-buildpackage', '--git-sign', '--git-cleaner="fakeroot debian/rules clean"', '--git-keyid="%s"' % signkey, '--git-builder="pdebuild --debsign-k %s --auto-debsign --configfile %s --debbuildopts "-i.git -I.git -sa" --buildresult %s' % (signkey, pbuilderrc, resultsdir)]
        else:
            cmd = ['/usr/bin/pdebuild', '--debsign-k', signkey, '--auto-debsign', '--debbuildopts', '-i.git -I.git -sa', '--configfile', pbuilderrc, '--buildresult', resultsdir]
        return self._cmd(cmd)
class PackageHandler(RequestHandler):
    """HTTP handler for listing refs of, and triggering builds for, a repo."""
    def get(self, gitpath, gitrepo):
        """Return the remote's heads/tags as JSON."""
        gitpath = os.path.join(conf('buildbot.gitpath.%s' % gitpath), gitrepo)
        repo = GitRepository()
        refs = repo.ls_remote(gitpath)
        return Response(status=200, body=dumps(refs))
    def post(self, gitpath, gitrepo):
        """Kick off an asynchronous build of 'ref'; respond with the build ID.

        Optional params: cburl (callback URL posted the result tarball),
        submodules (truthy to init/update submodules before building).
        """
        if not 'ref' in self.request.params:
            return Response(status=400, body='Required parameter "ref" is missing. You must pass a git tag, branch, or commit ID to be built.\n')
        gitpath = os.path.join(conf('buildbot.gitpath.%s' % gitpath), gitrepo)
        ref = self.request.params['ref']
        cburl = self.request.params.get('cburl', None)
        submodules = self.request.params.get('submodules', None)
        buildid = uuid.uuid4().hex
        # Forks a worker; the response returns immediately with the ID the
        # client can poll via StatusHandler/TarballHandler.
        build_worker(gitpath, ref, buildid, cburl, submodules)
        return Response(status=200, body=buildid + '\n')
class RepoListHandler(RequestHandler):
    """HTTP handler returning the list of repositories in a git index."""
    def get(self, gitpath):
        """Fetch the configured index URL and return repo names as JSON."""
        try:
            gitindex = conf('buildbot.gitindex.%s' % gitpath)
        except KeyError:
            return Response(status=404, body='Unknown git path')
        response = urllib.urlopen(gitindex)
        index = response.read()
        # Each non-blank line: take the first space-separated field and strip
        # its extension (e.g. "name.git ..." -> "name").
        index = [x.strip('\r\n ').split(' ')[0].rsplit('.')[0] for x in index.split('\n') if x.strip('\r\n ')]
        return Response(status=200, body=dumps(index))
class TarballHandler(RequestHandler):
    """HTTP handler serving the finished build-result tarball."""
    def get(self, buildid):
        """Return package.tar.gz for buildid.

        404 if the build ID is unknown, 400 if the build has not finished,
        otherwise 200 with the tarball bytes.
        """
        builddir = os.path.join(conf('buildbot.buildpath'), buildid)
        if not os.path.exists(builddir):
            return Response(status=404, body='The build ID does not exist.\n')
        tarpath = os.path.join(builddir, 'package.tar.gz')
        if not os.path.exists(tarpath):
            return Response(status=400, body='The build is not done yet.\n')
        else:
            # open() + context manager instead of py2-only file(): the handle
            # is closed even if read() raises, fixing a descriptor leak.
            with open(tarpath, 'rb') as fd:
                data = fd.read()
            return Response(status=200, body=data, content_type='application/x-tar-gz')
class StatusHandler(RequestHandler):
    """HTTP handler reporting build progress for a build ID."""
    def get(self, buildid):
        """Return 404 for unknown IDs, 400 + log while building, 200 when done."""
        builddir = os.path.join(conf('buildbot.buildpath'), buildid)
        if not os.path.exists(builddir):
            return Response(status=404, body='The build ID does not exist.\n')
        try:
            # open() + context manager instead of py2-only file(): avoids
            # leaking the handle; catch only I/O errors (a missing/unreadable
            # log is expected early in the build), not everything.
            with open('%s/build.log' % builddir, 'r') as fd:
                log = fd.read()
        except IOError:
            log = ''
        if not os.path.exists(builddir + '/package.tar.gz'):
            return Response(status=400, body='The build is not done yet.\n' + log)
        else:
            return Response(status=200, body='Build complete.\n' + log)
def buildlog(buildid, message):
    """Append message to the build's log file and echo it to the debug log.

    The log lives at <buildbot.buildpath>/<buildid>/build.log.
    """
    filename = os.path.join(conf('buildbot.buildpath'), '%s/build.log' % buildid)
    # open() + context manager instead of py2-only file(): guarantees the
    # handle is flushed and closed even if write() raises.
    with open(filename, 'a+') as fd:
        fd.write(message + '\n')
    logging.debug(message)
def build_thread(gitpath, ref, buildid, cburl=None, submodules=False):
    """Clone, check out, build, tar the results, and optionally POST them back.

    Runs in a forked child (see build_worker). All progress and errors are
    appended to the per-build log via buildlog(); the function returns early
    (after logging) on any failure.
    """
    tmpdir = os.path.join(conf('buildbot.buildpath'), buildid)
    repo = GitRepository(tmpdir)
    output, retcode = repo.clone(gitpath)
    if retcode:
        buildlog(buildid, 'Unable to clone %s. %s\n' % (gitpath, '\n'.join(output)))
        return
    output, retcode = repo.checkout(ref)
    if retcode:
        buildlog(buildid, 'Unable to checkout %s. %s\n' % (ref, '\n'.join(output)))
        return
    if submodules:
        # Submodule failures are logged but do not abort the build.
        output, retcode = repo.submodule_init()
        buildlog(buildid, output[0])
        buildlog(buildid, output[1])
        output, retcode = repo.submodule_update()
        buildlog(buildid, output[0])
        buildlog(buildid, output[1])
    resultsdir = os.path.join(tmpdir, '.build_results')
    os.makedirs(resultsdir)
    output, retcode = repo.build(conf('buildbot.signkey'), conf('buildbot.pbuilderrc'), resultsdir)
    buildlog(buildid, output[0])
    buildlog(buildid, output[1])
    #logging.debug(output[0])
    #logging.debug(output[1])
    os.chdir(resultsdir)
    if not os.listdir(resultsdir) or retcode != 0:
        buildlog(buildid, 'Nothing in results directory. Giving up.')
        return
    # Bundle every build artifact into package.tar.gz inside tmpdir.
    # (Relies on the os.chdir above: names are added relative to resultsdir.)
    tarpath = os.path.join(tmpdir, 'package.tar.gz')
    tar = tarfile.open(tarpath, 'w:gz')
    for name in os.listdir(resultsdir):
        tar.add(name)
    tar.close()
    buildlog(buildid, 'Build complete. Results in %s\n' % tarpath)
    data = file(tarpath, 'rb').read()
    buildlog(buildid, 'Built %i byte tarball' % len(data))
    if cburl:
        # Callback: multipart POST of the tarball; curl's response body is
        # appended to the build log.
        buildlog(buildid, 'Performing callback: %s' % cburl)
        req = Curl()
        req.setopt(req.POST, 1)
        req.setopt(req.URL, str(cburl))
        req.setopt(req.HTTPPOST, [('package', (req.FORM_FILE, str(tarpath)))])
        req.setopt(req.WRITEDATA, file('%s/build.log' % tmpdir, 'a+'))
        req.perform()
        req.close()
def build_worker(gitpath, ref, buildid, cburl, submodules):
    """Run build_thread asynchronously in a forked child process.

    Bug fix: the original child never exited after the build, so it fell
    back into the parent's (server's) code path — a duplicated process
    handling requests. os._exit(0) terminates the child immediately without
    running the parent's atexit/cleanup handlers twice.
    """
    if os.fork() == 0:
        try:
            build_thread(gitpath, ref, buildid, cburl, submodules)
        finally:
            os._exit(0)
|
JeremyGrosser/repoman
|
repoman/buildbot.py
|
buildbot.py
|
py
| 7,178 |
python
|
en
|
code
| 84 |
github-code
|
6
|
9345182500
|
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import lib.db
from lib.helper import remove_tags, open_selenium
from lib.log import log_text as log
url = "https://2e.aonprd.com/Ancestries.aspx"
def upload_heritage_data():
    """Replace the official_heritages table with freshly scraped data.

    Clears the table, inserts one row per scraped heritage, then commits
    and closes the shared connection.
    WARNING(review): values are spliced into the SQL by string concatenation
    (see organize_heritage_data) — a quote in any scraped summary will break
    or inject into the statement. Parameterized queries would be safer.
    """
    log("Starting Heritage Upload Preperation")
    heritage_data = organize_heritage_data()
    log("Preparation Done")
    log("Clearing Table")
    # Keep the connection open across the DELETE and all INSERTs so a single
    # commit covers the whole refresh.
    conn, row_count, result = lib.db.query_database("DELETE FROM official_heritages;", get_result=True, close_conn=False)
    log("Starting INSERT Process")
    for heritage in heritage_data:
        # `heritage` is a pre-quoted "\"name\", \"link\", ..." value string.
        log("Inserting " + heritage + " Into Database")
        conn = lib.db.query_database("INSERT INTO official_heritages VALUES (" + heritage + ");", connection=conn, close_conn=False)[0]
    log("Commiting Database Changes")
    conn.commit()
    log("Closing Connection")
    conn.close()
def grab_heritage_data():
    """Scrape 2e.aonprd.com and return heritage rows.

    Returns a list of [name, link, ancestry, summary] lists covering every
    ancestry-specific heritage plus the versatile heritages. Summaries are
    extracted by raw string-position arithmetic on the page source (not the
    parsed tree), then stripped of markup via remove_tags.
    NOTE(review): indentation was reconstructed from a whitespace-stripped
    dump — confirm the nesting of the versatile-heritage section upstream.
    """
    heritage_output = []
    log("Opening Browser")
    driver = open_selenium()
    log("Going to Page: " + url)
    driver.get(url)
    log("Waiting for Page to Load")
    time.sleep(5)  # fixed wait; assumes the page renders within 5s
    log("Getting Page Source")
    html = driver.page_source
    log("Setting up BeautifulSoup with Source")
    soup = BeautifulSoup(html, "html.parser")
    log("Finding Initial HTML Container")
    container = soup.find(id="ctl00_RadDrawer1_Content_MainContent_DetailedOutput")
    log("Finding All Categories in Container")
    # One <h2> per ancestry category on the index page.
    name_list = container.find_all("h2")
    for item in name_list:
        log("Grabbing Name in Category")
        elements = item.text.split("\n")  # elements[0] is the ancestry name
        log("Found: " + elements[0])
        log("Getting All Links in Category")
        links = item.find_all("a")
        output_link = ""
        log("Finding Ancestry Page Link")
        for link in links:
            if link.get("href").startswith("Ancestries.aspx"):
                output_link = "https://2e.aonprd.com/" + link.get("href")
                log("Found: " + output_link)
                break
        log("Opening Ancestry Page")
        ancestry_driver = open_selenium()
        ancestry_driver.get(output_link)
        log("Waiting for Page to Load")
        time.sleep(5)
        log("Getting Ancestry Page Source")
        ancestry_html = ancestry_driver.page_source
        log("Setting up BeautifulSoup with Page Source")
        ancestry_soup = BeautifulSoup(ancestry_html, "html.parser")
        log("Finding Sub Navigation")
        sub_nav_container = ancestry_soup.find(id="ctl00_RadDrawer1_Content_MainContent_SubNavigation")
        sub_nav_list = sub_nav_container.find_all("h2")
        log("Getting All Sub Navigation Headings")
        heritage_list_link = ""
        log("Searching Headings for Heritage Link")
        # The last matching link wins (no break here, unlike the loop above).
        for nav in sub_nav_list:
            nav_links = nav.find_all("a")
            for n in nav_links:
                if n.get("href").startswith("Heritages.aspx"):
                    heritage_list_link = "https://2e.aonprd.com/" + n.get("href")
                    log(f"Found Heritage Link for {elements[0]}: {heritage_list_link}")
        log("Closing Ancestry Browser. Opening Heritage Browser")
        ancestry_driver.close()
        heritage_driver = open_selenium()
        # NOTE(review): heritage_driver is never closed — browser instances
        # accumulate, one per ancestry.
        heritage_driver.get(heritage_list_link)
        log("Waiting for Page to Load")
        time.sleep(5)
        log("Setting up BeautifulSoup with Page Source")
        heritage_html = heritage_driver.page_source
        heritage_soup = BeautifulSoup(heritage_html, "html.parser")
        log("Getting Heritage List Container")
        heritage_container = heritage_soup.find(id="ctl00_RadDrawer1_Content_MainContent_DetailedOutput")
        log("Getting All Headings")
        heritage_list = heritage_container.find_all("h2")
        heritage_name = ""
        heritage_link = ""
        heritage_summary = ""
        log("Starting Search for Heritages")
        i = 0  # NOTE(review): never used below
        for heritage in heritage_list:
            heritage_links = heritage.find_all("a")
            for l in heritage_links:
                if l.get("href").startswith("Heritages.aspx"):
                    heritage_name = l.text.split("\n")[0]
                    log("Found Heritage: " + heritage_name)
                    heritage_link = "https://2e.aonprd.com/" + l.get("href")
                    # Locate the raw-source position of this heritage's link;
                    # the summary text follows the next <br>.
                    link_pos = heritage_html.find(l.get("href"))
                    print(f"Link Pos: {link_pos}")
                    versatile_heritage_pos = heritage_html.index("<h1 class=\"title\">Versatile Heritages</h1>")
                    half_human_heritage_pos = heritage_html.find("<h1 class=\"title\">Half-Human Heritages")
                    # Half-human entries carry one extra <br> before the prose.
                    if half_human_heritage_pos == -1 or link_pos < half_human_heritage_pos:
                        start_pos = heritage_html.index("<br>", link_pos) + len("<br>")
                    else:
                        first_break_pos = heritage_html.index("<br>", link_pos) + len("<br>")
                        start_pos = heritage_html.index("<br>", first_break_pos) + len("<br>")
                    # Summary ends at whichever of the next <h3> or <br> comes first.
                    h3_pos = heritage_html.find("<h3", start_pos)
                    br_pos = heritage_html.find("<br>", start_pos)
                    end_pos = 0
                    print(f"H3 Pos: {h3_pos}; BR Pos: {br_pos}")
                    if h3_pos < br_pos and h3_pos != -1:
                        end_pos = h3_pos
                    elif br_pos < h3_pos and br_pos != -1:
                        end_pos = br_pos
                    elif br_pos != -1 and h3_pos == -1:
                        end_pos = br_pos
                    elif h3_pos != -1 and br_pos == -1:
                        end_pos = h3_pos
                    # Never read into the "Versatile Heritages" section; once
                    # start_pos passes it we are done with this page.
                    if end_pos > versatile_heritage_pos:
                        end_pos = versatile_heritage_pos
                    if start_pos > versatile_heritage_pos:
                        break
                    print(f"End Pos: {end_pos}; Next 50 Characters: {heritage_html[end_pos: end_pos + 50]}")
                    heritage_summary = heritage_html[start_pos:end_pos].strip()
                    print(heritage_summary)
                    # If we captured the "Source" citation line instead of
                    # prose, skip past it and re-extract with the same rules.
                    if heritage_summary.find("<b>Source</b>") > -1:
                        end_pos += 3
                        temp_pos = heritage_html.find("<b>Source</b>", start_pos)
                        start_pos = heritage_html.find("<br>", temp_pos)
                        h3_pos = heritage_html.find("<h3", end_pos)
                        br_pos = heritage_html.find("<br>", end_pos)
                        if h3_pos < br_pos and h3_pos != -1:
                            end_pos = h3_pos
                        elif br_pos < h3_pos and br_pos != -1:
                            end_pos = br_pos
                        if end_pos > versatile_heritage_pos:
                            end_pos = versatile_heritage_pos
                        if start_pos > versatile_heritage_pos:
                            break
                        heritage_summary = heritage_html[start_pos:end_pos].strip()
                    # Same skip-and-re-extract for an optional "PFS Note" block.
                    if heritage_summary.find("PFS Note") > -1:
                        end_pos += 3
                        temp_pos = heritage_html.find("PFS Note", start_pos)
                        start_pos = heritage_html.find("<br>", temp_pos)
                        h3_pos = heritage_html.find("<h3", end_pos)
                        br_pos = heritage_html.find("<br>", end_pos)
                        if h3_pos < br_pos and h3_pos != -1:
                            end_pos = h3_pos
                        elif br_pos < h3_pos and br_pos != -1:
                            end_pos = br_pos
                        if end_pos > versatile_heritage_pos:
                            end_pos = versatile_heritage_pos
                        if start_pos > versatile_heritage_pos:
                            break
                        heritage_summary = heritage_html[start_pos:end_pos].strip()
                    # Strip markup: drop h2/table blocks entirely, unwrap the rest.
                    heritage_summary = remove_tags(heritage_summary, tag_to_remove="h2", remove_inside=True)
                    heritage_summary = remove_tags(heritage_summary, tag_to_remove="table", remove_inside=True)
                    heritage_summary = remove_tags(heritage_summary, tag_to_remove="i")
                    heritage_summary = remove_tags(heritage_summary, tag_to_remove="u")
                    heritage_summary = remove_tags(heritage_summary, tag_to_remove="b")
                    heritage_summary = remove_tags(heritage_summary, tag_to_remove="a")
                    log(str([heritage_name, heritage_link, elements[0], heritage_summary]))
                    heritage_output.append([heritage_name, heritage_link, elements[0], heritage_summary])
    # --- Versatile heritages, linked from the index page's main navigation ---
    nav_container = soup.find(id="ctl00_RadDrawer1_Content_MainContent_Navigation")
    nav_links = nav_container.find_all("a")
    for link in nav_links:
        if link.get("href").endswith("Versatile=true"):
            versatile_heritage_link = "https://2e.aonprd.com/" + link.get("href")
    log("Opening Versatile Heritage Browser")
    versatile_heritage_driver = open_selenium()
    versatile_heritage_driver.get(versatile_heritage_link)
    log("Waiting for Page to Load")
    time.sleep(5)
    log("Setting up BeautifulSoup with Page Source")
    versatile_heritage_html = versatile_heritage_driver.page_source
    versatile_heritage_soup = BeautifulSoup(versatile_heritage_html, "html.parser")
    log("Getting Heritage List Container")
    versatile_heritage_container = versatile_heritage_soup.find(id="ctl00_RadDrawer1_Content_MainContent_DetailedOutput")
    log("Getting All Headings")
    versatile_heritage_list = versatile_heritage_container.find_all("h2")
    versatile_heritage_name = ""
    versatile_heritage_link = ""
    versatile_heritage_summary = ""
    log("Searching For Versatile Heritages")
    for heritage in versatile_heritage_list:
        vh_links = heritage.find_all("a")
        for l in vh_links:
            if l.get("href").startswith("Ancestries.aspx"):
                versatile_heritage_name = l.text.split("\n")[0]
                log("Found Heritage: " + versatile_heritage_name)
                vh_ancestry_link = "https://2e.aonprd.com/" + l.get("href")
                log("Opening Versatile Heritage Ancestry Browser")
                vh_ancestry_driver = open_selenium()
                vh_ancestry_driver.get(vh_ancestry_link)
                log("Waiting for Page to Load")
                time.sleep(5)
                log("Setting up BeautifulSoup with Page Source")
                vh_ancestry_html = vh_ancestry_driver.page_source
                vh_ancestry_soup = BeautifulSoup(vh_ancestry_html, "html.parser")
                # Summary: the text after the third <br> following the
                # "Versatile Heritage" <h1>, up to the next </span> or <h3>.
                content_pos = vh_ancestry_soup.find(id="ctl00_RadDrawer1_Content_MainContent_DetailedOutput").sourcepos
                vh_h1_pos = vh_ancestry_html.index("<h1 class=\"title\">Versatile Heritage</h1>", content_pos)
                vh_h2_pos = vh_ancestry_html.index("</h2>", vh_h1_pos) + len("</h2>")
                break_pos_1 = vh_ancestry_html.index("<br>", vh_h2_pos) + len("<br>")
                break_pos_2 = vh_ancestry_html.index("<br>", break_pos_1) + len("<br>")
                break_pos_3 = vh_ancestry_html.index("<br>", break_pos_2) + len("<br>")
                end_pos = 0
                span_pos = vh_ancestry_html.find("</span>", break_pos_3)
                h3_pos = vh_ancestry_html.find("<h3 class", break_pos_3)
                if h3_pos == -1:
                    end_pos = span_pos
                else:
                    if span_pos < h3_pos and span_pos != -1:
                        end_pos = span_pos
                    elif h3_pos < span_pos and h3_pos != -1:
                        end_pos = h3_pos
                versatile_heritage_summary = vh_ancestry_html[break_pos_3:end_pos]
                versatile_heritage_summary = remove_tags(versatile_heritage_summary, tag_to_remove="h2", remove_inside=True)
                versatile_heritage_summary = remove_tags(versatile_heritage_summary, tag_to_remove="table", remove_inside=True)
                versatile_heritage_summary = remove_tags(versatile_heritage_summary, tag_to_remove="i")
                versatile_heritage_summary = remove_tags(versatile_heritage_summary, tag_to_remove="a")
                log(str([versatile_heritage_name, vh_ancestry_link, "Versatile", versatile_heritage_summary]))
                heritage_output.append([versatile_heritage_name, vh_ancestry_link, "Versatile", versatile_heritage_summary])
    return heritage_output
def organize_heritage_data():
    """Fetch scraped heritage rows and format each as a quoted SQL value list.

    Returns a list of strings of the form '"name", "link", "ancestry",
    "summary"', ready to be spliced into an INSERT statement.
    """
    log("Getting Heritage Data")
    rows = grab_heritage_data()
    formatted = []
    log("Starting to Organize Heritage Data")
    for name, link, ancestry, summary in rows:
        entry = f"\"{name}\", \"{link}\", \"{ancestry}\", \"{summary}\""
        formatted.append(entry)
        log(f"Added {entry} to Organized Data")
    return formatted
|
sean-francis113/pf2edatascraper
|
lib/heritages.py
|
heritages.py
|
py
| 13,500 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7002219401
|
import sendgrid
from ...common import config
sg = sendgrid.SendGridClient(config.sendgrid_api_key)
def send(name, email, subject, html):
    """Send an HTML email via the module-level SendGrid client.

    Args:
        name: recipient name — accepted for interface compatibility but not
            used by this implementation.
        email: recipient address.
        subject: message subject line.
        html: HTML body.

    Returns:
        The (status, message) pair from SendGrid so callers can detect
        failures (the original silently discarded it).
    """
    message = sendgrid.Mail()
    message.add_to('{}'.format(email))
    message.set_subject(subject)
    message.set_html(html)
    message.set_from(config.from_header)
    status, msg = sg.send(message)
    return status, msg
|
minupalaniappan/gradfire
|
daviscoursesearch/flaskapp/service/email.py
|
email.py
|
py
| 330 |
python
|
en
|
code
| 12 |
github-code
|
6
|
28128126397
|
'''
Count the nodes in the global phylogeny
python3 count_nodes.py after_usher_optimized_fasttree_iter6.tree
'''
import sys
from ete3 import Tree

# Parse the tree named on the command line and print its leaf count.
tree = Tree(sys.argv[1])
leaf_total = sum(1 for node in tree.traverse('postorder') if node.is_leaf())
print(leaf_total)
|
bpt26/parsimony
|
2_optimize_starting_tree/results/2.3.5/count_nodes.py
|
count_nodes.py
|
py
| 270 |
python
|
en
|
code
| 2 |
github-code
|
6
|
436459396
|
from gensim.corpora import TextCorpus, TextDirectoryCorpus
from gensim.models.doc2vec import TaggedDocument
from trec.treccorpus import TrecCorpus
def test_get_texts():
    """Smoke-test TrecCorpus.get_texts() against the local test corpus."""
    path = "F:/Corpus/trectest/"
    datafile = path + "fr881.dat"  # kept for reference; not read here
    # with open(datafile, 'r') as fp:
    #     print(fp.read())
    corpus = TrecCorpus(path, dictionary={})
    for text, docno in corpus.get_texts():
        print(docno, text)
def test_parse_file():
    """Demonstrate that each call to a generator function yields a fresh iterator."""
    def sequence():
        # Simple generator over 0..9.
        for value in range(0, 10):
            yield value

    # Both loops print 0: every call to sequence() restarts from the top.
    for value in sequence():
        print(value)
        break
    for value in sequence():
        print(value)
        break
def test_read_doc():
    """Placeholder test — not yet implemented.

    The original body only bound two unused literals (a string and a list);
    those dead locals were removed.
    """
class TaggedTrecDocument(object):
    """Iterable adapter turning a TREC corpus into gensim TaggedDocument objects."""
    def __init__(self, trec):
        # trec: corpus exposing get_texts(); enabling metadata makes it
        # yield (content, (doc_id, title)) tuples.
        self.trec = trec
        self.trec.metadata = True
    def __iter__(self):
        # One TaggedDocument per corpus document, tagged with its doc_id.
        for content, (doc_id, title) in self.trec.get_texts():
            yield TaggedDocument(content, [doc_id])
def test_parse_text2222():
    """Count documents in a TextDirectoryCorpus-backed adapter, three passes.

    Printing the count three times verifies the adapter is re-iterable.
    """
    corpus_dir = "f:/Corpus/trectest/"
    text_corpus = TextDirectoryCorpus(corpus_dir, dictionary={}, metadata=True, lines_are_documents=True)
    tagged = TaggedTrecDocument(text_corpus)
    for _ in range(3):
        print(sum(1 for _ in tagged))
def test_parse_text():
    """Count documents in a TrecCorpus-backed adapter, three passes.

    Printing the count three times verifies the adapter is re-iterable.
    """
    corpus_dir = "f:/Corpus/trectest/"
    trec_corpus = TrecCorpus(corpus_dir, dictionary={}, metadata=True)
    tagged = TaggedTrecDocument(trec_corpus)
    for _ in range(3):
        print(sum(1 for _ in tagged))
def test_traverse_all_docs():
    """Walk the corpus and print the document at the first multiple of 1000."""
    corpus = TrecCorpus("f:/Corpus/trectest/", dictionary={})
    seen = 0
    for text, docno in corpus.get_texts():
        seen += 1
        if seen % 1000 == 0:
            print(docno, text)
            break
def test_save_to_file():
    """Export the test corpus to a CSV file on disk."""
    corpus = TrecCorpus("f:/Corpus/trectest/", dictionary={})
    corpus.save_to_file("f:/Corpus/savetest.csv")
|
kongyq/Project-Arcs
|
trec/test_treccorpus.py
|
test_treccorpus.py
|
py
| 2,325 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30754133985
|
def max_alternating_subsequence_sum(a):
    """Return the maximum sum of an alternating-sign subsequence of `a`.

    Partitions `a` into maximal runs of same-signed elements and sums the
    maximum of each run (an optimal alternating subsequence takes exactly
    one element per run). Elements are assumed nonzero, and the list
    non-empty, per the problem's input guarantees.
    """
    best = a[0]          # best element of the current same-sign run
    negative = a[0] < 0  # sign of the current run
    total = 0
    for value in a[1:]:
        if (value < 0) == negative:
            # Run continues: keep the largest element seen in it.
            best = max(best, value)
        else:
            # Run ended: bank its maximum and start a new run.
            total += best
            negative = value < 0
            best = value
    return total + best


def main():
    """Read t test cases from stdin and print one answer per line.

    Each case: a line with n (implied by the list length) followed by the
    n space-separated integers.
    """
    for _ in range(int(input())):
        input()  # n — redundant, the list length carries it
        a = list(map(int, input().split(' ')))
        print(max_alternating_subsequence_sum(a))


if __name__ == "__main__":
    # Guard added so the module can be imported without consuming stdin.
    main()
|
Tanguyvans/Codeforces
|
636/C.py
|
C.py
|
py
| 607 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24082328734
|
#!/usr/bin/python3
"""
Make petitions to the Reddit API
"""
from requests import get
def number_of_subscribers(subreddit):
    """Query the Reddit API for a subreddit's subscriber count.

    Returns 0 on any non-200 response (redirects are not followed, so an
    invalid subreddit's 302 also yields 0).
    """
    endpoint = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
    headers = {
        'User-Agent': 'Linux:api_advanced:v0.0.0 (by /u/ElEnriquez)'
    }
    resp = get(endpoint, headers=headers, allow_redirects=False)
    if resp.status_code != 200:
        return 0
    payload = resp.json()
    return payload.get('data').get('subscribers')
|
WardenCode/holbertonschool-system_engineering-devops
|
0x16-api_advanced/0-subs.py
|
0-subs.py
|
py
| 584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24025809080
|
__author__ = 'sivvaidyanathan'
from urllib2 import urlopen
from bs4 import BeautifulSoup
import codecs, sys


def main():
    """Read URLs (one per line) from the file named by the first CLI argument
    and write "<url>\t<canonical href>" lines to <input>_canonical.

    Bug fix: the original read sys.argv[0] — this script's own path — as the
    URL list; the input file is argv[1]. Files are now opened with context
    managers so both handles are closed even if a fetch raises.
    """
    filename = sys.argv[1]
    with open(filename, 'r') as reader, \
            codecs.open(filename + "_canonical", 'w', 'utf-8') as writer:
        for line in reader:
            url = line.strip()
            # Prepend a scheme when the line lacks one.
            if url.find("http") == -1:
                url = "http://" + url
            data = urlopen(url).read()
            soup = BeautifulSoup(data)
            links = soup.findAll('link', rel="canonical")
            for link in links:
                writer.write(url + "\t" + link["href"] + "\n")


if __name__ == "__main__":
    main()
|
sivaramakrishnanvaidyanathan/crawler
|
histogram/link_canonical.py
|
link_canonical.py
|
py
| 520 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26538911991
|
import hashlib
import os.path
from typing import List, Optional
import requests
from connectors.Triage.const import TRIAGE_URL, TRIAGE_LAST_100_RESULTS_FROM_NOW, TRIAGE_HEADER, OUTPUT_FOLDER
from connectors.utils import upload_file_to_malstream
def get_last_100_analysis() -> List:
    """Fetch the 100 most recent Triage analyses.

    Returns the API's 'data' list, or an empty list on any non-200 response.
    """
    resp = requests.get(f"{TRIAGE_URL}{TRIAGE_LAST_100_RESULTS_FROM_NOW}", headers=TRIAGE_HEADER)
    return resp.json()['data'] if resp.status_code == 200 else []
def download_file(_id: str) -> Optional[str]:
    """Download sample `_id` from Triage into OUTPUT_FOLDER.

    The file is named by the SHA-256 of its contents. Returns the saved
    path, or None on any non-200 response.
    """
    resp = requests.get(f"{TRIAGE_URL}/samples/{_id}/sample", headers=TRIAGE_HEADER)
    if resp.status_code != 200:
        return None
    digest = hashlib.sha256(resp.content).hexdigest()
    target = os.path.join(OUTPUT_FOLDER, digest)
    with open(target, 'wb') as out:
        out.write(resp.content)
    return target
def main():
    """Mirror the latest Triage samples into malstream, then clean up.

    Downloads each of the last 100 samples, uploads it (409 = already
    present, treated as success), and finally empties OUTPUT_FOLDER.
    """
    for analysis in get_last_100_analysis():
        file_path = download_file(analysis['id'])
        if not file_path:
            print(f'Error while download sample {analysis["id"]}')
            continue
        status_code = upload_file_to_malstream(file_path)
        if status_code != 200 and status_code != 409:
            print(f'Error on upload {file_path}')
    print(f"Cleaning extracted file {OUTPUT_FOLDER}")
    for leftover in os.listdir(OUTPUT_FOLDER):
        os.remove(os.path.join(OUTPUT_FOLDER, leftover))


if __name__ == '__main__':
    main()
|
CorraMatte/malstream
|
connectors/Triage/connector.py
|
connector.py
|
py
| 1,374 |
python
|
en
|
code
| 3 |
github-code
|
6
|
15260123974
|
import datetime
import hashlib
import json
from flask import Flask, jsonify
# Building a Blockchain
class Blockchain:
    """A minimal proof-of-work blockchain (blocks carry no transactions)."""

    def __init__(self):
        """
        Create Blockchain and a genesis block (proof=1, previous_hash='0')
        """
        self.chain = []
        self.create_block(proof=1, previous_hash='0')

    @staticmethod
    def _proof_hash(new_proof, previous_proof):
        """Hex SHA-256 linking a candidate proof to the previous proof.

        Extracted so the puzzle definition (hash of new_proof² − prev_proof²)
        lives in one place instead of being duplicated in proof_of_work and
        is_chain_valid, which previously could drift apart.
        """
        return hashlib.sha256(
            str((new_proof ** 2) - (previous_proof ** 2)).encode()).hexdigest()

    def create_block(self, proof, previous_hash):
        """
        :param proof: Proof of new block
        :param previous_hash: hash of the previous block in Blockchain
        :return: newly created block
        """
        block = {'index': len(self.chain) + 1,
                 'timestamp': str(datetime.datetime.now()),
                 'proof': proof,
                 'previous_hash': previous_hash}
        self.chain.append(block)
        return block

    def get_previous_block(self):
        """
        :return: Last block of Blockchain
        """
        return self.chain[-1]

    def proof_of_work(self, previous_proof):
        """
        :param previous_proof: proof of the previous block in Blockchain
        :return: smallest proof whose linking hash starts with '0000'
        """
        # Idiom fix: loop directly on the condition instead of
        # `while check_proof is False` with a flag variable.
        new_proof = 1
        while self._proof_hash(new_proof, previous_proof)[:4] != '0000':
            new_proof += 1
        return new_proof

    def hash(self, block):
        """
        :param block: A block in a Blockchain
        :return: SHA-256 of the block (keys sorted so the hash is stable)
        """
        encoded_block = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(encoded_block).hexdigest()

    def is_chain_valid(self, chain):
        """
        :param chain: list of blocks in Blockchain
        :return: True if chain is valid, otherwise False
        """
        previous_block = chain[0]
        for block in chain[1:]:
            # Each block must reference the actual hash of its predecessor...
            if block['previous_hash'] != self.hash(previous_block):
                return False
            # ...and its proof must satisfy the 4-leading-zeroes puzzle.
            if self._proof_hash(block['proof'], previous_block['proof'])[:4] != '0000':
                return False
            previous_block = block
        return True
# Creating a web app
app = Flask(__name__)
# Creating a Blockchain (one global instance shared by all routes)
blockchain = Blockchain()
# Mining a new block
@app.route('/mine_block', methods=['GET'])
def mine_block():
    """Mine one block: solve proof-of-work against the last block, link it
    via the previous block's hash, and return the new block as JSON."""
    previous_block = blockchain.get_previous_block()
    previous_proof = previous_block['proof']
    proof = blockchain.proof_of_work(previous_proof)
    previous_hash = blockchain.hash(previous_block)
    block = blockchain.create_block(proof, previous_hash)
    response = {'message': 'Congratulations, you just mined a block!',
                'index': block['index'],
                'timestamp': block['timestamp'],
                'proof': block['proof'],
                'previous_hash': block['previous_hash']}
    return jsonify(response), 200
# Getting the full Blockchain
@app.route('/get_chain', methods=['GET'])
def get_chain():
    """Return the whole chain and its length as JSON."""
    response = {'chain': blockchain.chain,
                'length': len(blockchain.chain)}
    return jsonify(response), 200
# Checking if the Blockchain is valid
@app.route('/is_valid', methods=['GET'])
def is_valid():
    """Report whether the current chain passes validation."""
    if blockchain.is_chain_valid(blockchain.chain):
        response = {'message': 'All good. The Blockchain is valid.'}
    else:
        response = {'message': 'We have a problem. The Blockchain is not valid'}
    return jsonify(response), 200
# Running the app (blocking call; listens on all interfaces, port 1710)
app.run(host='0.0.0.0', port=1710)
|
imnishant/Blockchain
|
main.py
|
main.py
|
py
| 3,966 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12701283102
|
import sys
ground = []                           # flattened list of every column height
ground_data = dict()                  # histogram: height -> number of columns
TIME_BY_DIG, TIME_BY_PUT = 2, 1       # seconds to remove / place one block
min_time, that_height = 128000000, 0  # best (time, height) found so far
def try_to_make(ground_data, trying_height, time_by_dig=2, time_by_put=1):
    """Cost of flattening the terrain to `trying_height`.

    Generalized: the per-block dig/put costs are now parameters whose
    defaults equal the module constants TIME_BY_DIG / TIME_BY_PUT, so
    existing callers are unaffected.

    :param ground_data: histogram mapping column height -> column count
    :param trying_height: target uniform height
    :param time_by_dig: seconds to remove one block (default 2)
    :param time_by_put: seconds to place one block (default 1)
    :return: (total_time, trying_height)
    """
    time = 0
    for height, count in ground_data.items():
        if height < trying_height:
            # Columns below target need blocks added.
            time += time_by_put * count * (trying_height - height)
        elif height > trying_height:
            # Columns above target need blocks dug out.
            time += time_by_dig * count * (height - trying_height)
    return time, trying_height
# BOJ 18111: read the N x M terrain and the inventory block count B.
n, m, b = map(int, sys.stdin.readline().rstrip().split())
for i in range(n):
    ground += map(int, sys.stdin.readline().rstrip().split())
# Build the height histogram so each distinct height is costed once.
for i in ground:
    if i not in ground_data:
        ground_data[i] = 1
    else:
        ground_data[i] += 1
existing_blocks = sum(ground) + b
# Try every legal height 0..256; keep the cheapest, breaking ties by
# preferring the greater height.
for height in range(257):
    if n * m * height > existing_blocks:
        continue  # not enough blocks available to reach this height
    temp_time, temp_that_height = try_to_make(ground_data, height)
    if temp_time < min_time:
        min_time, that_height = temp_time, temp_that_height
    elif temp_time == min_time:
        if that_height < temp_that_height:
            min_time, that_height = temp_time, temp_that_height
print(min_time, that_height)
|
MinChoi0129/Algorithm_Problems
|
BOJ_Problems/18111.py
|
18111.py
|
py
| 1,158 |
python
|
en
|
code
| 2 |
github-code
|
6
|
18464680736
|
#!/usr/bin/python3
#encoding: utf-8
import requests
import re
from bs4 import BeautifulSoup
import json
# Log in to obtain a session cookie
login_url = "http://210.30.1.140/index.php/Public/checkLogin"
# Login credentials
logindata = {
    "txtName": "2015083216",
    "txtPass": "2015083216",
    "txtCheck": "no",
}
# Fetch the cookie from the login response
logind = requests.post(login_url, data=logindata)
cookie = logind.cookies
# Payload for submitting a solution to the judge
d = {
    "submit_language": "1",
    "submit_code": "#include <iostream> \n using namespace std;\n int main()\n{int a,b;cin>>a>>b; cout<<a+b<<endl; return 0;}",
    "problem_id": "303",
    "test_id": "",
    "__hash__": "a8edbf0347b55fdb7b7567c1505c15b1_d0ad44986cc057b42f6762993b550404"
}
url = "http://210.30.1.140/index.php/Problems/saveCode"
for i in range(1, 3):  # number of times to repeat the submission request
    r = requests.post(url, data=d, cookies=cookie)
    print(r.text)  # print the response body
'''
requests post请求参考资料:http://blog.csdn.net/junli_chen/article/details/53670887
form形式
json形式
multipat形式
'''
|
chinazhenzhen/PythonLearn
|
RE4/5+.py
|
5+.py
|
py
| 1,019 |
python
|
en
|
code
| 0 |
github-code
|
6
|
51412731
|
from typing import *
class Solution:
    def countTriplets(self, nums: List[int]) -> int:
        """Count ordered triplets (i, j, k) with nums[i] & nums[j] & nums[k] == 0.

        First tabulate the frequency of every pairwise AND value, then for
        each element add the frequencies of the AND values it zeroes out.
        """
        pair_and_freq = {}
        for a in nums:
            for b in nums:
                key = a & b  # AND can only clear bits, never set them
                pair_and_freq[key] = pair_and_freq.get(key, 0) + 1
        total = 0
        for x in nums:
            for value, freq in pair_and_freq.items():
                if x & value == 0:
                    total += freq
        return total
|
code-cp/leetcode
|
solutions/982/main.py
|
main.py
|
py
| 436 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42366132351
|
import tensorflow as tf
import numpy as np
IMG_MEAN = np.array((103.939, 116.779, 123.68), dtype=np.float32)
def read_image_label_list(data_dir, data_list):
    """Reads txt file containing paths to images and ground truth masks.

    Args:
      data_dir: path to the directory with images and masks.
      data_list: path to the file whose lines are either
        'key_image current_image mask' (3 fields) or a single image path.

    Returns:
      Three lists: key image paths, current image paths, and label paths
      (the latter two stay empty for single-path lines).
    """
    key_images = []
    current_images = []
    labels = []
    # 'with' guarantees the list file is closed even when a check raises
    # (the original left it open on error).
    with open(data_list, 'r') as f:
        for line in f:
            # strip('\n') instead of line[:-1]: the original chopped the last
            # character off a final line that lacks a trailing newline.
            image_line = line.strip('\n')
            if image_line == '':
                continue
            parts = image_line.split(' ')
            if len(parts) == 3:
                key_image_path, current_image_path, label_path = parts
                key_image = data_dir + key_image_path
                current_image = data_dir + current_image_path
                label = data_dir + label_path
                if not tf.gfile.Exists(key_image):
                    raise ValueError('Failed to find file: ' + key_image)
                if not tf.gfile.Exists(label):
                    raise ValueError('Failed to find file: ' + label)
                key_images.append(key_image)
                current_images.append(current_image)
                labels.append(label)
            else:
                # Bug fix: the original concatenated data_dir with the *list*
                # returned by split(' '), raising TypeError; use the path string.
                key_image = data_dir + parts[0]
                if not tf.gfile.Exists(key_image):
                    raise ValueError('Failed to find file: ' + key_image)
                key_images.append(key_image)
    return key_images, current_images, labels
def read_labeled_image_list(data_dir, data_list):
    """Reads txt file containing paths to images.

    Args:
      data_dir: path to the directory with images.
      data_list: path to the file with one image path per line.

    Returns:
      A list with all image file names, each prefixed with data_dir.
    """
    images = []
    # 'with' guarantees the list file is closed even when a check raises.
    with open(data_list, 'r') as f:
        for line in f:
            # strip('\n') instead of line[:-1]: the original chopped the last
            # character off a final line that lacks a trailing newline (and
            # its try/except ValueError could never fire — split never raises).
            image = line.strip('\n')
            image = data_dir + image
            if not tf.gfile.Exists(image):
                raise ValueError('Failed to find file: ' + image)
            images.append(image)
    return images
def resizer(raw_image, input_size):
    """Center-crop or zero-pad raw_image to input_size = (height, width)."""
    return tf.image.resize_image_with_crop_or_pad(raw_image, input_size[0], input_size[1])
def read_images_from_disk(input_queue, input_size, overlap, img_mean=IMG_MEAN):
    """Decode one image, split it into four overlapping quadrants, preprocess.

    Args:
      input_queue: filename queue; element 0 is the image path.
      input_size: (height, width) the decoded image is resized to.
      overlap: pixels each quadrant extends past the half-way lines.
      img_mean: per-channel BGR mean subtracted for the segmentation branch.

    Returns:
      image_s: mean-subtracted quadrants (batch of 4) for segmentation.
      image_f: quadrants scaled to [0, 1] at half resolution for the flow net.
    """
    height = input_size[0]//2
    height_overlap = height+overlap
    width = input_size[1]//2
    width_overlap = width+overlap
    image_file = tf.read_file(input_queue[0])
    image = tf.image.decode_image(image_file)
    image = tf.cast(image, tf.float32)
    # RGB -> BGR so the channels line up with the Caffe-style IMG_MEAN.
    img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=image)
    image_bgr = tf.concat(axis=2, values=[img_b, img_g, img_r])
    image_bgr.set_shape((None, None, 3))
    image_bgr = tf.expand_dims(tf.image.resize_images(image_bgr, input_size), 0)
    print(' before spliting ', image_bgr.shape)
    # Four overlapping quadrants stacked along the batch dimension.
    images = tf.concat([image_bgr[:, :height+overlap, :width+overlap, :],
                        image_bgr[:, :height+overlap, width-overlap:, :],
                        image_bgr[:, height-overlap:, :width+overlap, :],
                        image_bgr[:, height-overlap:, width-overlap:, :]], 0)
    print(' after spliting ', images.shape)
    # Preprocess.
    image_s = images-img_mean
    image_f = tf.image.resize_images(images/255.0, [(height_overlap)//2, (width_overlap)//2])
    return image_s, image_f
def to_bgr(image):
    """Swap an image's channel order RGB -> BGR along the last axis."""
    img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=image)
    image_bgr = tf.concat(axis=2, values=[img_b, img_g, img_r])
    return image_bgr
def crop_and_upsample(prob, resized_image, raw_image, mask, num_classes):
    """Crop padded probabilities back to the resized area and upsample to the
    raw image size, returning the per-pixel argmax class map.

    mask == 0 marks the real (non-padding) pixels produced by the crop/pad
    step; everything else is discarded before upsampling.
    """
    resized_h = tf.shape(resized_image)[1]
    resized_w = tf.shape(resized_image)[2]
    resized_shape = tf.stack([1, resized_h, resized_w, num_classes])
    raw_shape = tf.shape(raw_image)[:2]
    # Keep only the probabilities over genuine image pixels.
    cropped_prob = tf.boolean_mask(
        tf.squeeze(prob), tf.squeeze(tf.equal(mask, 0)))
    reshaped_prob = tf.reshape(cropped_prob, resized_shape)
    upsampled_prob = tf.image.resize_bilinear(reshaped_prob, raw_shape)
    return tf.squeeze(tf.cast(tf.argmax(upsampled_prob, axis=-1), tf.int32))
def read_image_from_filename(data_dir, data_list, batch_size, input_size_to_rescale):
    """Build a non-shuffling input pipeline decoding (key, current, label) PNGs.

    NOTE(review): batch_size and input_size_to_rescale are accepted but unused
    here — presumably consumed by a later batching step; confirm with callers.
    """
    key_image_list, current_image_list, label_list = read_image_label_list(data_dir, data_list)
    key_image_tensor = tf.convert_to_tensor(key_image_list, dtype=tf.string)
    current_image_tensor = tf.convert_to_tensor(current_image_list, dtype=tf.string)
    label_tensor = tf.convert_to_tensor(label_list, dtype=tf.string)
    queue = tf.train.slice_input_producer(
        [key_image_tensor, current_image_tensor, label_tensor], shuffle=False)
    key_image_contents = tf.read_file(queue[0])
    current_image_contents = tf.read_file(queue[1])
    label_contents = tf.read_file(queue[2])
    key_images = tf.image.decode_png(key_image_contents, channels=3)
    current_images = tf.image.decode_png(current_image_contents, channels=3)
    labels = tf.image.decode_png(label_contents, channels=1)  # single-channel mask
    return key_images, current_images, labels
def scale_and_mask(key_image, current_image, labels, input_size_to_rescale):
    """Thin pass-through around scale_fixed_size (batching call kept disabled)."""
    cropped_key_image, cropped_current_image, resized_image, mask = scale_fixed_size(key_image, current_image, labels, input_size_to_rescale)
    return cropped_key_image, cropped_current_image, resized_image, mask
    # return _generate_image_and_label_batch_with_mask(cropped_image, cropped_f_image, mask, batch_size)
def read_segment_flownet_images(input_queue, input_size, overlap):
    """Read one image and produce the two network inputs.

    Returns:
      image_s: RGB image scaled to [0, 1] at (height+overlap, width+overlap)
        for the segmentation branch.
      image_f: BGR image scaled to [0, 1] at half that resolution for the
        flow branch.
    """
    height = input_size[0]
    width = input_size[1]
    image_file = tf.read_file(input_queue[0])
    image = tf.image.decode_image(image_file)
    image = resizer(image, [height, width])
    image_s = image
    image_f = to_bgr(image)  # flow branch expects BGR channel order
    image_s = tf.cast(image_s, tf.float32)
    image_f = tf.cast(image_f, tf.float32)
    image_s.set_shape([None, None, 3])
    image_f.set_shape([None, None, 3])
    height = height + overlap
    width = width + overlap
    image_s = tf.image.resize_images((image_s) / 255.0, (height // 1, width // 1))
    image_f = tf.image.resize_images((image_f) / 255.0, (height // 2, width // 2))
    return image_s, image_f
def _generate_image_and_label_batch_with_mask(image_s, image_f, mask, batch_size):
    """Construct a queued batch of images and labels.

    Args:
      image_s, image_f: 3-D Tensor of input image of type.float32.
      mask: padding mask tensor batched alongside the images.
      batch_size: Number of images per batch.
    Returns:
      bimage_s, bimage_f, bi_mask: batched tensors (batch dim prepended).
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 4
    bimage_s, bimage_f, bi_mask = tf.train.batch(
        [image_s, image_f, mask],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=1)
    return bimage_s, bimage_f, bi_mask
def _generate_image_and_label_batch(image_s, image_f, batch_size):
    """Construct a queued batch of images and labels.

    Args:
      image_s, image_f: 3-D Tensor of input image of type.float32.
      batch_size: Number of images per batch.
    Returns:
      bimage_s, bimage_f: batched tensors (batch dim prepended).
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 4
    bimage_s, bimage_f = tf.train.batch(
        [image_s, image_f],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=1)
    return bimage_s, bimage_f
def scale_fixed_size(key_image, current_image, raw_label, output_shape, ignore_label=255):
    """Scale images/labels to fit output_shape, crop/pad, and build a mask.

    The label is shifted by +1 before crop/pad so genuine class-0 pixels can
    be distinguished from the zero padding; the padding then gets
    ignore_label and the shift is undone.

    Returns:
      cropped_key_image: key image cropped/padded to output_shape // 2.
      cropped_current_f_image: BGR current image, same size.
      resized_image: key image after aspect-preserving resize (pre-crop).
      mask: (ignore_label + 1) on padded pixels, 0 elsewhere.
    """
    current_f = to_bgr(current_image)
    key_image = tf.cast(key_image, tf.float32) / 255.
    current_f = tf.cast(current_f, tf.float32) / 255.
    raw_label = tf.cast(raw_label, tf.int32)
    raw_height = tf.shape(key_image)[0]
    raw_width = tf.shape(key_image)[1]
    image_batch = tf.expand_dims(key_image, 0)
    current_f_batch = tf.expand_dims(current_f, 0)
    label_batch = tf.expand_dims(raw_label, 0)
    raw_label_size = tf.shape(image_batch)
    raw_image_size = tf.shape(label_batch)
    image_f_size = tf.shape(current_f_batch)
    # Uniform scale: the smaller ratio keeps everything inside output_shape.
    input_shape = tf.to_float(raw_image_size[1:3])
    scale_shape = output_shape / input_shape
    scale = tf.reduce_min(scale_shape)
    scaled_input_shape = tf.to_int32(tf.round(input_shape * scale))
    # Nearest neighbour keeps label values discrete.
    resized_image = tf.image.resize_nearest_neighbor(
        image_batch, scaled_input_shape)
    resized_current_f_image = tf.image.resize_nearest_neighbor(
        current_f_batch, scaled_input_shape)
    resized_label = tf.image.resize_nearest_neighbor(
        label_batch, scaled_input_shape)
    shifted_classes = resized_label + 1  # reserve 0 for padding
    cropped_key_image = tf.image.resize_image_with_crop_or_pad(
        resized_image, output_shape[0] // 2, output_shape[1] // 2)
    cropped_current_f_image = tf.image.resize_image_with_crop_or_pad(
        resized_current_f_image, output_shape[0] // 2, output_shape[1] // 2)
    cropped_label = tf.image.resize_image_with_crop_or_pad(
        shifted_classes, output_shape[0], output_shape[1])
    # Padded pixels (still 0) become ignore_label after the -1 unshift.
    mask = tf.to_int32(tf.equal(cropped_label, 0)) * (ignore_label + 1)
    cropped_label = cropped_label + mask - 1
    return cropped_key_image, cropped_current_f_image, resized_image, mask
def input_images(data_dir, data_list, batch_size, input_size, overlap):
    """Full input pipeline: file list -> decode/preprocess -> batched tensors."""
    image_list = read_labeled_image_list(data_dir, data_list)
    images = tf.convert_to_tensor(image_list, dtype=tf.string)
    input_queue = tf.train.slice_input_producer([images], shuffle=False)
    image_s, image_f = read_segment_flownet_images(input_queue=input_queue, input_size=input_size, overlap=overlap)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(image_s, image_f, batch_size)
def inputs(data_dir, data_list, batch_size, input_size, overlap, img_mean=IMG_MEAN):
    """Construct input for evaluation using the Reader ops.

    Args:
      data_dir: Path to the data directory.
      data_list: Path to the file listing one image path per line.
      batch_size: Number of images per batch.
      input_size: (height, width) target size for decoding.
      overlap: quadrant overlap in pixels (see read_images_from_disk).
      img_mean: per-channel BGR mean to subtract.
    Returns:
      Batched (image_s, image_f) tensors of quadrant crops.
    """
    image_list = read_labeled_image_list(data_dir, data_list)
    images = tf.convert_to_tensor(image_list, dtype=tf.string)
    input_queue = tf.train.slice_input_producer([images], shuffle=False)
    image_s, image_f = read_images_from_disk(input_queue, input_size, overlap, img_mean)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(image_s, image_f, batch_size)
|
rashmi-patil-1492/video-semantic-segmentation-network
|
tools/image_reader.py
|
image_reader.py
|
py
| 11,641 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38544511106
|
import cv2
import mss
from PIL import Image
import numpy as np
import time
import json
import math
# Map marker data (per-map point-of-interest entries) and item groupings.
with open('Crypt.json', 'r') as json_file:
    data = json.load(json_file)
with open('ItemGroups.json', 'r') as json_file:
    item_data = json.load(json_file)
# record video of screen using cv2
fps = 30
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('output.mp4', fourcc, fps, (2560, 1440))
# Full-screen capture region (1440p).
mon = {'left': 0, 'top': 0, 'width': 2560, 'height': 1440}
map_unfound = cv2.imread('Crypt_06.png')
map_found = map_unfound  # Assign default value
map_unfound_grey = cv2.cvtColor(map_found, cv2.COLOR_BGR2GRAY)
# Template-match threshold; lowered once the map has been identified.
MIN_CONFIDENCE = 0.55
map_count = 1
resized = False
def click_event(event, x, y, flags, param):
    """Mouse callback: mark and log left-clicks on the displayed map."""
    if event == cv2.EVENT_LBUTTONDOWN:
        # Draw a blue dot at the clicked location
        cv2.circle(map_found, (x, y), 5, (255, 0, 0), -1)
        # Log the coordinates of the click
        print(f'Clicked at ({x}, {y})')
def transform(point, map, scale=1):
    """Map a (lat, lng)-style point onto pixel coordinates of *map*.

    Keeps the original's exact arithmetic: x -> w - 2*scale*x, and the y
    axis is flipped twice, netting out to y -> 2*scale*y.
    """
    rows, cols, _ = map.shape
    px, py = point
    px = scale * (1 * px + 0)
    py = scale * (1 * py + 0)
    px = cols - px * 2
    py = rows - py * 2
    py = rows - py
    return (px, py)
# Main loop: capture the screen, record it, and template-match the minimap
# corner against the reference maps until the player's map is identified.
with mss.mss() as sct:
    detected_location = False
    while True:
        img = sct.grab(mon)
        frame = Image.frombytes(
            'RGB',
            (img.width, img.height),
            img.rgb,
        )
        frame = np.array( frame)
        out.write(frame)  # append the raw frame to the recording
        # Resize the frame, Convert to grayscale. 1440p
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = frame[1160:1380, 2240:2460]  # crop the in-game minimap region
        frame = cv2.resize(frame, (int(frame.shape[1] * 0.8), int(frame.shape[0] * 0.8)))
        if not(detected_location):
            # Not located yet: cycle through the six candidate map images.
            if(map_count < 6):
                map_count += 1
            else:
                map_count = 1
            map_unfound = cv2.imread(f'Crypt_0{map_count}.png')
            map_unfound_grey = cv2.cvtColor(map_unfound, cv2.COLOR_BGR2GRAY)
            map_unfound = cv2.resize(map_unfound, (1100,1100))
            map_unfound = map_unfound[86:1010, 87:1002]  # trim the map border
            map_unfound = cv2.resize(map_unfound, (690,690))
            map_unfound_grey = cv2.cvtColor(map_unfound, cv2.COLOR_BGR2GRAY)
            resized = True
        else:
            # Once located, lower the threshold so tracking is not lost.
            MIN_CONFIDENCE = 0.32
        map_found = map_unfound
        cv2.imshow('map ' + str(map_count), map_found)
        # Overlay the known points of interest for the current map.
        if "map" + str(map_count) in data:
            for entry in data["map" + str(map_count)]:
                entry_id = entry.get("id")
                coordinates = entry.get("coordinates")
                lat, lng = transform((coordinates["lat"], coordinates["lng"]), map_found)
                lat += 50; lng -= 55  # empirical offset onto the resized map
                for item in item_data["Golden Chest"]:
                    if(entry_id == item):
                        cv2.circle(map_found, (int(lng), int(lat)), 5, (23, 229, 232), -1)
                        break
                if(entry_id == "Id_Spawner_Props_Statue01"):
                    cv2.circle(map_found, (int(lng), int(lat)), 5, (65, 232, 23), -1)
                if(entry_id == "BP_CryptEscape"):
                    cv2.circle(map_found, (int(lng), int(lat)), 5, (232, 159, 23), -1)
                #if(entry_id == "SpawnPoint"):
                    #cv2.circle(map_found, (int(lng), int(lat)), 5, (245, 27, 238), -1)
        cv2.setMouseCallback('map ' + str(map_count), click_event)
        # Match the live minimap crop against the reference map.
        result = cv2.matchTemplate(map_unfound_grey, frame, cv2.TM_CCOEFF_NORMED)
        if (result.max() > MIN_CONFIDENCE):
            detected_location = True
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            # Draw player's location on reference map
            cv2.circle(
                map_found,
                (int(max_loc[0] + 25 + frame.shape[1] / 2),
                 int(max_loc[1] - 25 + frame.shape[0] / 2)),
                5, (0, 0, 255), -1)
        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        time.sleep(1/fps)
out.release()
|
debug-it/DarkAndDarker-MapHelper
|
record.py
|
record.py
|
py
| 4,278 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22003370311
|
from reviewminer.core import *
import pandas as pd
import reviewminer as rm
# Smoke-test script for the ReviewMiner pipeline on a local CSV.
reviews_df = pd.read_csv("./reviews.csv")
print("comment" in reviews_df.columns)
# NOTE(review): this rebinds the module alias `rm` imported above — the
# `reviewminer` module is no longer reachable after this line.
rm = ReviewMiner(reviews_df.head(100), 'id', 'comments')
rm.aspect_opinon_for_all_comments()
rm.popular_aspects_view(_testing=True)
print(rm.top_aspects)
# Muting an aspect should drop it from the top-aspect list.
rm.aspect_mute_list = ['room']
print('room' not in rm.top_aspects)
# rm.aspect_opinon_for_all_comments()
# rm.overall_sentiment(_testing=True)
#rm.id_column = 1
#rm._examine_id_column(1)
# rm.one_time_analysis()
# rm.aspect_opinon_for_all_comments()
# print(rm.top_aspects)
#print(ss.sentiment_for_one_comment(ss.df.iloc[10,1]))
# aoe = AspectOpinionExtractor(reviews_df.head(100), 'id', 'comments')
# aoe.aspect_opinon_for_all_comments()
# aoe.popular_aspects_view()
# #aoe.single_aspect_view("room")
#aoe.single_aspect_view("room", num_top_words=5, xticks_rotation=30)
# print(aoe.most_popular_opinions("room", 5))
#
# sample_df = pd.DataFrame({
# 'id': [100, 101, 102, 103],
# 'comments': ['The room is comfortable. The room is spacious.',
# 'The sunny room is very spacious.',
# 'The spacious room is sunny',
# 'The spacious room is sunny. The beautiful room is comfortable']})
#
# aoe = AspectOpinionExtractor(sample_df, 'id', 'comments')
# aoe.aspect_opinon_for_all_comments()
# print(len(aoe.df_with_aspects_opinions.loc[0, "aspects_opinions"]))
# print(aoe.df_with_aspects_opinions)
#
# aoe.aspect_opinon_for_all_comments()
# print(aoe.most_popular_opinions("room"))
|
tianyiwangnova/2021_project__ReviewMiner
|
sample_data/work_on_sample_data.py
|
work_on_sample_data.py
|
py
| 1,668 |
python
|
en
|
code
| 5 |
github-code
|
6
|
926386452
|
import os
from unittest.mock import patch
import pytest
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.externalapis.etherscan import Etherscan
from rotkehlchen.tests.utils.mock import MockResponse
from rotkehlchen.typing import ExternalService, ExternalServiceApiCredentials
@pytest.fixture(scope='function')
def temp_etherscan(function_scope_messages_aggregator, tmpdir_factory):
    """Fresh Etherscan instance backed by a temporary DB; registers the real
    ETHERSCAN_API_KEY from the environment when one is set."""
    directory = tmpdir_factory.mktemp('data')
    db = DBHandler(
        user_data_dir=directory,
        password='123',
        msg_aggregator=function_scope_messages_aggregator,
        initial_settings=None,
    )
    # Test with etherscan API key
    api_key = os.environ.get('ETHERSCAN_API_KEY', None)
    if api_key:
        db.add_external_service_credentials(credentials=[
            ExternalServiceApiCredentials(service=ExternalService.ETHERSCAN, api_key=api_key),
        ])
    etherscan = Etherscan(database=db, msg_aggregator=function_scope_messages_aggregator)
    return etherscan
def patch_etherscan(etherscan):
    """Patch etherscan's HTTP session so the first GET returns the rate-limit
    error payload and every subsequent GET a successful eth_call result."""
    call_number = 0

    def mock_requests_get(_url):
        nonlocal call_number
        rate_limited = (
            '{"status":"0","message":"NOTOK",'
            '"result":"Max rate limit reached, please use API Key for higher rate limit"}'
        )
        success = '{"jsonrpc":"2.0","id":1,"result":"0x1337"}'
        body = rate_limited if call_number == 0 else success
        call_number += 1
        return MockResponse(200, body)

    return patch.object(etherscan.session, 'get', wraps=mock_requests_get)
def test_maximum_rate_limit_reached(temp_etherscan):
    """
    Test that we can handle etherscan's rate limit response properly

    The first mocked reply is the rate-limit error; the retry must succeed.
    Regression test for https://github.com/rotki/rotki/issues/772"
    """
    etherscan = temp_etherscan
    etherscan_patch = patch_etherscan(etherscan)
    with etherscan_patch:
        result = etherscan.eth_call(
            '0x4678f0a6958e4D2Bc4F1BAF7Bc52E8F3564f3fE4',
            '0xc455279100000000000000000000000027a2eaaa8bebea8d23db486fb49627c165baacb5',
        )
    assert result == '0x1337'
|
fakecoinbase/rotkislashrotki
|
rotkehlchen/tests/external_apis/test_etherscan.py
|
test_etherscan.py
|
py
| 2,080 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3889512912
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 11:25:11 2017
Dempster-Shafer Combination rule
@author: Zhiming
"""
from numpy import *
def DSCombination(Dic1, Dic2):
    """Combine two basic-probability-assignment dicts with Dempster's rule.

    Hypothesis keys are treated as sets of their characters; mass from each
    pair is transferred to whichever hypothesis is a subset of the other,
    and the result is normalised so the retained masses sum to one.
    """
    ## extract the frame of discernment
    frame = set(Dic1.keys()) | set(Dic2.keys())
    Result = {hypothesis: 0 for hypothesis in frame}
    ## Combination process
    for i in Dic1.keys():
        for j in Dic2.keys():
            overlap = set(str(i)).intersection(set(str(j)))
            if overlap == set(str(i)):
                Result[i] += Dic1[i] * Dic2[j]
            elif overlap == set(str(j)):
                Result[j] += Dic1[i] * Dic2[j]
    ## normalize the results
    total = sum(list(Result.values()))
    for key in Result.keys():
        Result[key] /= total
    return Result
|
Zhiming-Huang/Dempster-shafer-combination-rules
|
DS.py
|
DS.py
|
py
| 784 |
python
|
en
|
code
| 16 |
github-code
|
6
|
16312524711
|
import jax.numpy as np
from jax import grad, nn, random, jit
from jax.experimental import stax, optimizers
from jax.experimental.optimizers import l2_norm
from jax.numpy import linalg
from jax.experimental.stax import Dense, Relu, Tanh, Conv, MaxPool, Flatten, Softmax, LogSoftmax, Sigmoid
from jax.tree_util import tree_flatten, tree_unflatten, tree_map
from jax.nn import log_sigmoid
from mnist import mnist
from tqdm import tqdm
import itertools
import pickle
# Fix: `elementwise` was never imported, so this line raised NameError at
# module load; the constructor lives in jax.experimental.stax (already
# imported above) alongside the other layer builders.
LogSigmoid = stax.elementwise(log_sigmoid)
def model(rng):
    """Feature extraction network.

    Returns (params, forward).  The net ends in a single LogSigmoid unit,
    so `forward` emits log-probabilities, not probabilities.
    """
    init_params, forward = stax.serial(
        Conv(16, (8, 8), padding='SAME', strides=(2, 2)),
        Relu,
        MaxPool((2, 2), (1, 1)),
        Conv(32, (4, 4), padding='VALID', strides=(2, 2)),
        Relu,
        MaxPool((2, 2), (1, 1)),
        Flatten,
        Dense(32),
        Relu,
        Dense(1),
        LogSigmoid,
    )
    temp, rng = random.split(rng)
    # Input spec: any batch size, 28x28 single-channel images; init_params
    # returns (output_shape, params) — keep only the params.
    params = init_params(temp, (-1, 28, 28, 1))[1]
    return params, forward
def data_stream(rng, batch_size, X, y):
    """Yield an endless stream of shuffled (X, y) minibatches.

    Every pass over the data reshuffles with a fresh key split from *rng*;
    the final batch of a pass may be smaller when batch_size does not
    divide len(X).
    """
    full_batches, remainder = divmod(X.shape[0], batch_size)
    batches_per_epoch = full_batches + bool(remainder)
    while True:
        subkey, rng = random.split(rng)
        order = random.permutation(subkey, X.shape[0])
        for b in range(batches_per_epoch):
            chosen = order[b * batch_size:(b + 1) * batch_size]
            yield X[chosen], y[chosen]
if __name__ == "__main__":
    rng = random.PRNGKey(0)
    X, y, X_test, y_test = mnist()
    X, X_test = X.reshape(-1, 28, 28, 1), X_test.reshape(-1, 28, 28, 1)
    # Binary labels: odd digit -> 1.  NOTE(review): y_test uses `% 1 == 1`,
    # which is always False — presumably `% 2` was intended; confirm.
    y, y_test = (np.argmax(y, 1) % 2 == 1).astype(np.float32), (np.argmax(y_test, 1) % 1 == 1).astype(np.float32)
    temp, rng = random.split(rng)
    params, predict = model(temp)
    def loss(params, batch, l2=0.05):
        """Binary cross-entropy.  NOTE(review): `predict` ends in LogSigmoid,
        so y_hat is already a log-probability — applying np.log to it again
        looks wrong; confirm.  The l2 parameter is unused."""
        X, y = batch
        y_hat = predict(params, X).reshape(-1)
        return -np.mean(y * np.log(y_hat) + (1. - y) * np.log(1. - y_hat))
    @jit
    def update(i, opt_state, batch):
        # One Adam step on a single minibatch.
        params = get_params(opt_state)
        return opt_update(i, grad(loss)(params, batch), opt_state)
    iterations = 5000
    batch_size = 64
    step_size = 0.001
    opt_init, opt_update, get_params = optimizers.adam(step_size)
    opt_state = opt_init(params)
    temp, rng = random.split(rng)
    batches = data_stream(temp, batch_size, X, y)
    for i in tqdm(range(iterations)):
        opt_state = update(i, opt_state, next(batches))
        if i % 1000 == 0:
            params = get_params(opt_state)
            print('Loss: {:.4f}'.format(loss(params, (X, y))))
    params = get_params(opt_state)
    exit()
    # NOTE(review): everything below is unreachable (after exit()) and refers
    # to names (lr_params, logistic_regression, fe_params, feature_extractor)
    # never defined in this file.
    pickle.dump(lr_params, open('logistic_regression_params.pkl', 'wb'))
    pickle.dump(logistic_regression, open('logistic_regression.pkl', 'wb'))
    pickle.dump(fe_params, open('feature_extractor_params.pkl', 'wb'))
    pickle.dump(feature_extractor, open('feature_extractor.pkl', 'wb'))
|
ChrisWaites/data-deletion
|
src/d2d/projected_mnist/debug_for_seth.py
|
debug_for_seth.py
|
py
| 2,756 |
python
|
en
|
code
| 5 |
github-code
|
6
|
34197263982
|
import sys, math
from math import pi as pi
import numpy as np
import cv2
from PyQt5.QtCore import QPoint, QRect, QSize, Qt, QPointF, QRectF, pyqtSignal, QTimer
from PyQt5.QtGui import (QBrush, QConicalGradient, QLinearGradient, QPainter, QPainterPath, QPalette, QPen, QPixmap, QPolygon, QRadialGradient, QColor, QTransform, QPolygonF, QKeySequence, QIcon)
from PyQt5.QtWidgets import (QApplication, QProgressBar, QCheckBox, QComboBox, QVBoxLayout, QHBoxLayout, QGridLayout, QLabel, QSpinBox, QWidget, QPushButton, QSpacerItem, QSizePolicy, QLCDNumber )
from PyQt5 import QtGui, QtCore
from parallelIce.pose3dClient import Pose3DClient
from parallelIce.laserClient import LaserClient
import easyiceconfig as EasyIce
from gui.threadGUI import ThreadGUI
class MainWindow(QWidget):
    """Top-level referee window laying out the score/time/quality widgets."""
    # Emitted by the GUI thread to trigger a refresh of all child widgets.
    updGUI=pyqtSignal()
    def __init__(self, pose3d, laser1, laser2, laser3, parent=None):
        """Build the grid of scoring widgets fed by the robot's sensors.

        pose3d: robot pose client; laser1/2/3: front, rear and right lasers.
        """
        super(MainWindow, self).__init__(parent)
        layout = QGridLayout()
        self.quesito = quesoWidget(self, pose3d)
        self.tiempo = tiempoWidget(self)
        self.calidad = calidadWidget(self, laser1, laser2, laser3)
        self.distancia = distanciaWidget(self, pose3d)
        self.nota = notaWidget(self, pose3d, self.tiempo, self.calidad, self.distancia)
        self.logo = logoWidget(self)
        layout.addWidget(self.quesito, 1, 0)
        layout.addWidget(self.tiempo, 0, 0)
        layout.addWidget(self.distancia, 0, 2)
        layout.addWidget(self.calidad, 1, 2)
        layout.addWidget(self.nota, 0, 1)
        layout.addWidget(self.logo, 2, 2)
        vSpacer = QSpacerItem(30, 50, QSizePolicy.Ignored, QSizePolicy.Ignored)
        layout.addItem(vSpacer, 1, 0)
        self.setFixedSize(940, 640);
        self.setLayout(layout)
        self.updGUI.connect(self.update)
    def update(self):
        """Refresh every child widget that shows live sensor data."""
        self.quesito.updateG()
        self.distancia.updateG()
        self.calidad.updateG()
        self.nota.updateG()
class logoWidget(QWidget):
    """Static widget showing the JdeRobot logo, resized to 100x100."""
    def __init__(self, winParent):
        super(logoWidget, self).__init__()
        self.winParent=winParent
        # IMREAD_UNCHANGED keeps the alpha channel for Format_ARGB32 below.
        self.logo = cv2.imread("resources/logo_jderobot1.png", cv2.IMREAD_UNCHANGED)
        self.logo = cv2.resize(self.logo, (100, 100))
        image = QtGui.QImage(self.logo.data, self.logo.shape[1], self.logo.shape[0], QtGui.QImage.Format_ARGB32);
        self.pixmap = QtGui.QPixmap.fromImage(image)
        self.height = self.pixmap.height()
        self.width = self.pixmap.width()
        self.mapWidget = QLabel(self)
        self.mapWidget.setPixmap(self.pixmap)
        self.mapWidget.resize(self.width, self.height)
        self.setMinimumSize(100, 100)
class calidadWidget(QWidget):
    """Progress bar counting how often any laser reports an imminent crash."""
    def __init__(self, winParent, laser1, laser2, laser3):
        """laser1/2/3: front, rear and right laser clients."""
        super(calidadWidget, self).__init__()
        self.winParent=winParent
        self.laser1 = laser1
        self.laser2 = laser2
        self.laser3 = laser3
        self.numCrash = 0
        self.MAX_CRASH = 1000  # bar saturates at this many crash ticks
        vLayout = QVBoxLayout()
        choquesLabel = QLabel("Choques:")
        self.bar = QProgressBar()
        self.bar.setValue(self.numCrash)
        st = "QProgressBar::chunk {background-color: #ff0000;}\n QProgressBar {border: 1px solid grey;border-radius: 2px;text-align: center;background: #eeeeee;}"
        self.bar.setStyleSheet(st)
        self.bar.setTextVisible(False)
        vLayout.addWidget(choquesLabel, 0)
        vLayout.addWidget(self.bar, 0)
        vSpacer = QSpacerItem(30, 80, QSizePolicy.Ignored, QSizePolicy.Ignored)
        vLayout.addItem(vSpacer)
        self.setLayout(vLayout)
    def get_laser_distance(self, laser):
        """True when any reading across the 180° sweep (excluding the two
        edge angles) is within DIST centimetres."""
        DIST = 15
        maxAngle = 180
        crash = False
        for i in range(0, maxAngle+1):
            # Distance in millimeters, we change to cm
            laserI = float(laser.distanceData[i])/float(10)
            if i != 0 and i != 180:
                if laserI <= DIST:
                    crash = True
        return crash
    def updateG(self):
        """Poll the three lasers; bump the crash counter if any is too close.

        NOTE(review): percentajeCrash is computed but never used — the bar is
        fed the raw count; confirm whether the percentage was intended.
        """
        laser_data_Front = self.laser1.getLaserData()
        laser_data_Rear = self.laser2.getLaserData()
        laser_data_Right = self.laser3.getLaserData()
        crashFront = self.get_laser_distance(laser_data_Front)
        crashRear = self.get_laser_distance(laser_data_Rear)
        crashRight = self.get_laser_distance(laser_data_Right)
        if crashFront or crashRear or crashRight:
            self.numCrash = self.numCrash + 1
        percentajeCrash = self.numCrash * 100/self.MAX_CRASH
        self.bar.setValue(self.numCrash)
        self.update()
class distanciaWidget(QWidget):
    def __init__(self, winParent, pose3d):
        """Labels showing front/rear/sidewalk distances from the car's pose."""
        super(distanciaWidget, self).__init__()
        self.winParent=winParent
        self.pose3d = pose3d
        self.distFrontFinal = 0
        self.distRearFinal = 0
        self.distanceSidewalk = 0
        vLayout = QVBoxLayout()
        # distances() populates the three fields above before the labels read them.
        self.distances()
        distancesLabel = QLabel("Distancias:")
        self.distanceFrontalLabel = QLabel("Distancia frontal: " + str(round(self.distFrontFinal, 3)) + ' m')
        self.distanceRearLabel = QLabel("Distancia trasera: " + str(round(self.distRearFinal, 3)) + ' m')
        self.distanceSidewalkLabel = QLabel("Distancia a la acera: " + str(round(self.distanceSidewalk, 3)) + ' m')
        vLayout.addWidget(distancesLabel, 0)
        vLayout.addWidget(self.distanceFrontalLabel, 0)
        vLayout.addWidget(self.distanceRearLabel, 0)
        vLayout.addWidget(self.distanceSidewalkLabel, 0)
        self.setLayout(vLayout)
def RTx(self, angle, tx, ty, tz):
RT = np.matrix([[1, 0, 0, tx], [0, math.cos(angle), -math.sin(angle), ty], [0, math.sin(angle), math.cos(angle), tz], [0,0,0,1]])
return RT
def RTy(self, angle, tx, ty, tz):
RT = np.matrix([[math.cos(angle), 0, math.sin(angle), tx], [0, 1, 0, ty], [-math.sin(angle), 0, math.cos(angle), tz], [0,0,0,1]])
return RT
def RTz(self, angle, tx, ty, tz):
RT = np.matrix([[math.cos(angle), -math.sin(angle), 0, tx], [math.sin(angle), math.cos(angle),0, ty], [0, 0, 1, tz], [0,0,0,1]])
return RT
    def RTCar(self):
        """Car-frame rotation: yaw about Z with zero translation."""
        yaw = self.pose3d.getYaw()
        RTz = self.RTz(yaw, 0, 0, 0)
        return RTz
def distancePoint2Segment(self, A, B, C):
# Segment: A[ax,ay] ; B[bx,by]
# Point: C[cx, cy]
# Calculate U parameter
u = self.parameterU(A, B, C)
if u < 0:
distance = self.distancePoint2Point(A, C)
elif u > 1:
distance = self.distancePoint2Point(B, C)
else:
distance = self.distancePoint2Rect(A, B, C)
return distance
def parameterU(self, A, B, C):
# Point A: [ax, ay]
# Point B: [bx, by]
# Point C: [cx, cy]
# Parameter U of equations: Px = ax + u*(bx-ax); and Py = ay + u*(by-ay)
u = ((C[0] - A[0])*(B[0] - A[0]) + (C[1] - A[1])*(B[1] - A[1])) / (pow((B[0] - A[0]),2) + pow((B[1] - A[1]),2))
return u
def distancePoint2Point(self, Point1, Point2):
# Point: 1[x1,y1]
# Point: 2[x2,y2]
return math.sqrt(pow((Point2[0]-Point1[0]),2) + pow((Point2[1]-Point1[1]),2))
def distancePoint2Rect(self, A, B, C):
# Rect: A[ax,ay] ; B[bx,by]
# Point: C[cx,cy]
distance = abs((B[0] - A[0])*(C[1] - A[1]) - (B[1] - A[1])*(C[0] - A[0])) / (math.sqrt(pow((B[0]-A[0]),2) + pow((B[1]-A[1]),2)))
return distance
def distanceCar2Car(self, pointCarLeft, pointCarRight, pointFrontLeft, pointFrontRight, pointRearLeft, pointRearRight):
# Mide la minima distancia desde los 4 vertices de un coche a la parte delantera o trasera de otro coche (segmento)
# Segment: pointCarLeft[x,y] ; pointCarRight[x,y]
# Point 1: pointFrontLeft[x,y]
# Point 2: pointFrontRight[x,y]
# Poitn 3: pointRearLeft[x,y]
# Point 4: pointRearRight[x,y]
distance = self.distancePoint2Segment(pointCarLeft, pointCarRight, pointFrontLeft)
if (self.distancePoint2Segment(pointCarLeft, pointCarRight, pointFrontRight) < distance):
distance = self.distancePoint2Segment(pointCarLeft, pointCarRight, pointFrontRight)
if (self.distancePoint2Segment(pointCarLeft, pointCarRight, pointRearLeft) < distance):
distance = self.distancePoint2Segment(pointCarLeft, pointCarRight, pointRearLeft)
if (self.distancePoint2Segment(pointCarLeft, pointCarRight, pointRearRight) < distance):
distance = self.distancePoint2Segment(pointCarLeft, pointCarRight, pointRearRight)
return distance
    def distances(self):
        """Compute the taxi's distances to the two parked cars and to the
        sidewalk; results are stored in self.distFrontFinal,
        self.distRearFinal and self.distanceSidewalk (metres)."""
        # Footprints [length, width] in metres
        carSize = [5.75, 2.5]
        carSizeTaxi = [4, 2]
        # Sidewalk modelled as a segment at y = -4.25
        positionSideWalk_start = [-25, -4.25]
        positionSideWalk_final = [35, -4.25]
        # Corner points of the parked cars (world frame, centred at y = -3)
        # Front parked car (centre x = 14)
        pointCarFrontal_RearLeft = [14 - carSize[0]/2, -3+carSize[1]/2]
        pointCarFrontal_RearRight = [14 - carSize[0]/2, -3-carSize[1]/2]
        pointCarFrontal_FrontLeft = [14 + carSize[0]/2, -3+carSize[1]/2]
        pointCarFrontal_FrontRight = [14 + carSize[0]/2, -3-carSize[1]/2]
        # Rear parked car (centre x = 0.5)
        pointCarRear_FrontLeft = [0.5 + carSize[0]/2, -3+carSize[1]/2]
        pointCarRear_FrontRight = [0.5 + carSize[0]/2, -3-carSize[1]/2]
        pointCarRear_RearLeft = [0.5 - carSize[0]/2, -3+carSize[1]/2]
        pointCarRear_RearRight = [0.5 - carSize[0]/2, -3-carSize[1]/2]
        # Taxi corner coordinates before rotation (axis-aligned box around the pose)
        xFront = self.pose3d.getX() + carSizeTaxi[0]/2
        xRear = self.pose3d.getX() - carSizeTaxi[0]/2
        yLeft = self.pose3d.getY() + carSizeTaxi[1]/2
        yRight = self.pose3d.getY() - carSizeTaxi[1]/2
        # Rotate the taxi corners by the current yaw. NOTE(review): the third
        # homogeneous component is 1 rather than z, but RTz has a zero z
        # coefficient for x and y, so the planar result is unaffected.
        pointFrontLeft = self.RTCar() * np.matrix([[xFront], [yLeft], [1], [1]])
        pointFrontLeft = [pointFrontLeft.flat[0],pointFrontLeft.flat[1]]
        pointFrontRight = self.RTCar() * np.matrix([[xFront], [yRight], [1], [1]])
        pointFrontRight = [pointFrontRight.flat[0], pointFrontRight.flat[1]]
        pointRearLeft = self.RTCar() * np.matrix([[xRear], [yLeft], [1], [1]])
        pointRearLeft = [pointRearLeft.flat[0],pointRearLeft.flat[1]]
        pointRearRight = self.RTCar() * np.matrix([[xRear], [yRight], [1], [1]])
        pointRearRight = [pointRearRight.flat[0],pointRearRight.flat[1]]
        # Taxi corners vs. rear side of the front parked car
        distFrontFinal_1 = self.distanceCar2Car(pointCarFrontal_RearLeft, pointCarFrontal_RearRight, pointFrontLeft, pointFrontRight, pointRearLeft, pointRearRight)
        # Front parked car corners vs. front side of the taxi
        distFrontFinal_2 = self.distanceCar2Car(pointFrontLeft, pointFrontRight, pointCarFrontal_RearLeft, pointCarFrontal_RearRight, pointCarFrontal_FrontLeft , pointCarFrontal_FrontRight)
        # Taxi corners vs. front side of the rear parked car
        distRearFinal_1 = self.distanceCar2Car(pointCarRear_FrontLeft, pointCarRear_FrontRight, pointFrontLeft, pointFrontRight, pointRearLeft, pointRearRight)
        # Rear parked car corners vs. rear side of the taxi
        distRearFinal_2 = self.distanceCar2Car(pointRearLeft, pointRearRight, pointCarRear_FrontLeft , pointCarRear_FrontRight, pointCarRear_RearLeft , pointCarRear_RearRight)
        # Keep the LARGER of the two measurements for each neighbour.
        # NOTE(review): the original comment said "minimal distance" but the
        # code selects the maximum — confirm which is intended.
        if distFrontFinal_1 > distFrontFinal_2:
            self.distFrontFinal = distFrontFinal_1
        else:
            self.distFrontFinal = distFrontFinal_2
        if distRearFinal_1 > distRearFinal_2:
            self.distRearFinal = distRearFinal_1
        else:
            self.distRearFinal = distRearFinal_2
        # Distance from the taxi corners to the sidewalk segment
        self.distanceSidewalk = self.distanceCar2Car(positionSideWalk_start, positionSideWalk_final, pointFrontLeft, pointFrontRight, pointRearLeft, pointRearRight)
    def updateG(self):
        """GUI refresh: recompute the distances and update the three labels."""
        self.distances()
        self.distanceFrontalLabel.setText("Distancia frontal: " + str(round(self.distFrontFinal, 3)) + ' m')
        self.distanceRearLabel.setText("Distancia trasera: " + str(round(self.distRearFinal, 3)) + ' m')
        self.distanceSidewalkLabel.setText("Distancia a la acera: " + str(round(self.distanceSidewalk, 3)) + ' m')
        self.update()
class notaWidget(QWidget):
    """Widget that computes and shows the final parking mark (out of 10) from
    four 0-100 sub-scores: final angle, elapsed time, distances, collisions."""

    def __init__(self,winParent,pose3d, tiempo, calidad, distancia):
        super(notaWidget, self).__init__()
        self.winParent=winParent
        self.pose3d = pose3d
        # tiempo must expose .seconds, calidad must expose .numCrash, and
        # distancia must expose distFrontFinal/distRearFinal/distanceSidewalk
        # (see the test* methods below).
        self.time = tiempo
        self.calidad = calidad
        self.distancia = distancia
        self.hLayout = QHBoxLayout()
        self.button = QPushButton('Show me my mark')
        self.button.clicked.connect(self.notaFinal)
        self.hLayout.addWidget(self.button, 0)
        self.setLayout(self.hLayout)

    def notaFinal(self):
        """Combine the four sub-scores with equal weight (x0.025 each, so the
        maximum total is 10) and append the result as a label."""
        notaAngle = self.testAngle() * 0.025
        notaTime = self.testTime() * 0.025
        notaDist = self.testDistance() * 0.025
        notaCol = self.testCollision() * 0.025
        nota = notaAngle + notaTime + notaDist + notaCol
        notaLabel = QLabel('Nota final: ' + str(nota))
        self.hLayout.addWidget(notaLabel, 0)

    def testAngle(self):
        """Score the final orientation: 100 in the 85-105 degree band
        (yaw in degrees plus 90), degrading in bands down to 0."""
        yawRad = self.pose3d.getYaw()
        angle = math.degrees(yawRad) + 90
        if (angle >= 85 and angle <= 105):
            notaAngle = 100
        elif (angle < 85 and angle >= 70 or angle > 105 and angle <= 120):
            notaAngle = 80
        elif (angle < 70 and angle >= 60 or angle > 120 and angle <= 130):
            notaAngle = 50
        else:
            notaAngle = 0
        return notaAngle

    def testTime(self):
        """Score the elapsed time against a 170 s reference: 100 when faster,
        otherwise 170*100/elapsed (decreasing with elapsed time)."""
        minTime = 170
        myTime = self.time.seconds
        notaTime = float(minTime*100)/float(myTime)
        if myTime < 170:
            notaTime = 100
        return notaTime

    def testDistance(self):
        """Score the three final distances (front car, rear car, sidewalk)
        in bands of 100/50/0 and return their average."""
        MyDistFront = self.distancia.distFrontFinal
        MyDistRear = self.distancia.distRearFinal
        MyDistSidewalk = self.distancia.distanceSidewalk
        if MyDistFront >= 1.5 and MyDistFront < 3.5:
            notaDistFront = 100
        elif MyDistFront < 1.5 and MyDistFront >= 1:
            notaDistFront = 50
        else:
            notaDistFront = 0
        if MyDistRear >= 1.5 and MyDistRear < 3.5:
            notaDistRear = 100
        elif MyDistRear < 1.5 and MyDistRear >= 1:
            notaDistRear = 50
        else:
            notaDistRear = 0
        if MyDistSidewalk > 0 and MyDistSidewalk <= 0.75:
            notaDistSidewalk = 100
        elif MyDistSidewalk > 0.75 and MyDistSidewalk < 1.5:
            notaDistSidewalk = 50
        else:
            notaDistSidewalk = 0
        notaDist = float(notaDistFront+notaDistRear+notaDistSidewalk)/float(3)
        return notaDist

    def testCollision(self):
        """Score collisions: 100 with no crashes; since minCrash is 0,
        any crash yields 0 (0*100/numCrash)."""
        minCrash = 0
        if self.calidad.numCrash == 0:
            notaCol = 100
        else:
            notaCol = float(minCrash*100)/float(self.calidad.numCrash)
        return notaCol

    def updateG(self):
        """Periodic GUI refresh hook."""
        self.update()
class tiempoWidget(QWidget):
    """Elapsed-time display: an LCD counter incremented once per second."""
    # NOTE(review): this signal appears unused within the visible code.
    time = pyqtSignal()

    def __init__(self,winParent):
        super(tiempoWidget, self).__init__()
        self.winParent=winParent
        self.seconds = 0
        hLayout = QHBoxLayout()
        tiempoLabel = QLabel("Tiempo")
        self.lcd = QLCDNumber(self)
        self.lcd.setMaximumSize(100,50)
        hLayout.addWidget(tiempoLabel,0)
        hLayout.addWidget(self.lcd, 1)
        hSpacer = QSpacerItem(300, 30, QSizePolicy.Ignored, QSizePolicy.Ignored)
        hLayout.addItem(hSpacer)
        self.setLayout(hLayout)
        # 1 Hz timer driving the counter
        timer = QTimer(self)
        timer.start(1000)
        timer.timeout.connect(self.printTime)
        # get the palette
        palette = self.lcd.palette()
        # foreground color
        palette.setColor(palette.WindowText, QColor(85, 85, 255))
        # background color
        palette.setColor(palette.Background, QColor(0, 170, 255))
        # "light" border
        palette.setColor(palette.Light, QColor(255, 0, 0))
        # "dark" border
        palette.setColor(palette.Dark, QColor(0, 255, 0))
        # set the palette
        self.lcd.setPalette(palette)

    def printTime(self):
        """Timer slot: advance the counter one second and refresh the LCD."""
        self.seconds += 1
        self.lcd.display(self.seconds)
class quesoWidget(QWidget):
    """'Cheese wheel' gauge: a pie of red/orange/green sectors with a needle
    showing the car's current yaw."""

    def __init__(self,winParent, pose3d):
        super(quesoWidget, self).__init__()
        self.winParent=winParent
        self.rectangle = QRectF(0.0, 0.0, 300.0, 300.0)
        self.pose3d = pose3d

    def drawRedZones(self, painter):
        """Paint the red (worst-orientation) sectors.

        Qt pie angles are expressed in 1/16 of a degree."""
        self.setStyle(painter, QColor(255,70,70),QColor(255,70,70),1)
        startAngle = 0 * 16
        spanAngle = 45 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
        startAngle = 135 * 16
        spanAngle = 45 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
        startAngle = 180 * 16
        spanAngle = 180 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)

    def drawOrangeZones(self, painter):
        """Paint the orange (borderline) sectors."""
        self.setStyle(painter, QColor(255,220,23),QColor(255,220,23),1)
        startAngle = 45 * 16
        spanAngle = 30 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
        startAngle = 105 * 16
        spanAngle = 30 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)

    def drawGreenZones(self, painter):
        """Paint the green (good-orientation) sectors around 90 degrees."""
        self.setStyle(painter, QColor(117,240,154),QColor(117,240,154),1)
        startAngle = 75 * 16
        spanAngle = 15 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
        startAngle = 90 * 16
        spanAngle = 15 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)

    def drawArrow(self, painter, angle=90):
        """Draw the needle for the current yaw.

        NOTE(review): the *angle* parameter is immediately overwritten from
        pose3d and is effectively unused."""
        radius = 130
        yawRad = self.pose3d.getYaw()
        angle = -(yawRad + pi/2) # pi/2 to centre the needle
        origx = self.rectangle.width() / 2
        origy = self.rectangle.height() / 2
        finx = radius * math.cos(angle) + origx
        finy = radius * math.sin(angle) + origy
        self.setStyle(painter, Qt.black,Qt.black,3)
        painter.drawLine(QPoint(origx,origy), QPoint(finx,finy))
        painter.drawEllipse(145,145, 10, 10)

    def resetPen(self, painter):
        """Restore a plain black pen and empty brush."""
        pen = QPen(Qt.black, 1)
        brush = QBrush()
        painter.setPen(pen)
        painter.setBrush(brush)

    def setStyle(self, painter, fillColor, penColor, stroke):
        """Configure brush/pen colours and stroke width, with antialiasing."""
        brush = QBrush()
        pen = QPen(penColor, stroke)
        brush.setColor(fillColor)
        brush.setStyle(Qt.SolidPattern)
        painter.setBrush(brush)
        painter.setPen(pen)
        painter.setRenderHint(QPainter.Antialiasing)

    def paintEvent(self, event):
        """Paint zones back-to-front, then the needle on top."""
        painter = QPainter(self)
        self.drawRedZones(painter)
        self.drawOrangeZones(painter)
        self.drawGreenZones(painter)
        self.drawArrow(painter,120)

    def updateG(self):
        """Periodic GUI refresh hook."""
        self.update()
if __name__ == "__main__":
    # Entry point: connect to the simulator via ICE proxies (pose + three
    # lasers), build the main window and run the GUI-update thread alongside
    # the Qt event loop.
    app = QApplication(sys.argv)
    ic = EasyIce.initialize(sys.argv)
    pose3d = Pose3DClient(ic, "Autopark.Pose3D", True)
    laser1 = LaserClient(ic, "Autopark.Laser1", True)
    laser2 = LaserClient(ic, "Autopark.Laser2", True)
    laser3 = LaserClient(ic, "Autopark.Laser3", True)
    myGUI = MainWindow(pose3d, laser1, laser2, laser3)
    myGUI.show()
    t2 = ThreadGUI(myGUI)
    t2.daemon=True
    t2.start()
    sys.exit(app.exec_())
|
RoboticsLabURJC/2016-tfg-irene-lope
|
AutoPark_Practice/referee.py
|
referee.py
|
py
| 19,643 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39610762661
|
from nltk.corpus import brown
import nltk
# Conditional frequency distribution of words per Brown-corpus genre.
# NOTE(review): cfd is built but not used further in this script —
# presumably kept for interactive exploration.
cfd = nltk.ConditionalFreqDist(
    (genre,word)
    for genre in brown.categories()
    for word in brown.words(categories=genre))
# (genre, word) pairs restricted to the 'news' genre only.
genre_word = [(genre, word)
    for genre in ['news']
    for word in brown.words(categories=genre)]
print(len(genre_word))
print(genre_word[:5])
|
milliongashawbeza/PublicNLPA
|
counting_words.py
|
counting_words.py
|
py
| 319 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38290564345
|
# Find the path with the maximum sum in a given binary tree.
# Write a function that returns the maximum sum.
# A path can be defined as a sequence of nodes between any two nodes and
# doesn’t necessarily pass through the root.
import math
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class MaxTreeNode():
    """Finds the maximum path sum in a binary tree; a path may start and end
    at any two nodes and need not pass through the root."""

    def find_maximum_path_sum(self, root):
        """Return the maximum path sum found anywhere in the tree at *root*."""
        self.maxPathSum = -math.inf
        self.findPathSum(root)
        return self.maxPathSum

    def findPathSum(self, currentNode):
        """Post-order walk: return the best downward path sum starting at
        *currentNode*, updating self.maxPathSum with the best path that
        bends through this node."""
        if currentNode is None:
            return 0
        # Negative child contributions are discarded (clamped to zero).
        best_left = max(self.findPathSum(currentNode.left), 0)
        best_right = max(self.findPathSum(currentNode.right), 0)
        through_here = currentNode.val + best_left + best_right
        self.maxPathSum = max(self.maxPathSum, through_here)
        # A path continuing upward may only use one child branch.
        return currentNode.val + max(best_left, best_right)
def main():
    """Exercise find_maximum_path_sum on three sample trees, printing each result."""
    maxTreeNode = MaxTreeNode()
    # Small tree: 1 with children 2 and 3.
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    print("Maximum Path Sum: " + str(maxTreeNode.find_maximum_path_sum(root)))
    # Grow the same tree two levels deeper and re-evaluate.
    root.left.left = TreeNode(1)
    root.left.right = TreeNode(3)
    root.right.left = TreeNode(5)
    root.right.right = TreeNode(6)
    root.right.left.left = TreeNode(7)
    root.right.left.right = TreeNode(8)
    root.right.right.left = TreeNode(9)
    print("Maximum Path Sum: " + str(maxTreeNode.find_maximum_path_sum(root)))
    # All-negative tree: the best path is the least-negative single node.
    root = TreeNode(-1)
    root.left = TreeNode(-3)
    print("Maximum Path Sum: " + str(maxTreeNode.find_maximum_path_sum(root)))
main()
# time complexity: O(N)
# space complexity: O(N)
|
nanup/DSA
|
8. Depth First Search Revisit I/124. Binary Tree Maximum Path Sum.py
|
124. Binary Tree Maximum Path Sum.py
|
py
| 1,614 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31877451015
|
import os
import math
import copy
import codecs
import numpy as np
import srt
import subprocess
import datetime
from utils import mkdir, basename_without_ext
from voice_detector import VoiceDetector
from tqdm import tqdm
def shift_by_delay(bin_arr2, delay_by_frames):
    """Shift a binary frame array in time.

    A positive delay pads that many zero frames at the front; a negative
    delay drops that many frames from the front.
    """
    if delay_by_frames >= 0:
        padding = np.zeros(delay_by_frames).astype(np.uint8)
        return np.concatenate([padding, bin_arr2])
    return bin_arr2[-delay_by_frames:]
def make_list_length_equal(lst1, lst2):
    """Zero-pad the shorter of two 1-D numpy arrays so both end up with the
    length of the longer one; returns the (possibly padded) pair."""
    target_len = max(lst1.shape[0], lst2.shape[0])

    def _pad(arr):
        tail = np.zeros(target_len - arr.shape[0]).astype(np.uint8)
        return np.concatenate([arr, tail])

    return _pad(lst1), _pad(lst2)
def error(bin_arr1, bin_arr2):
    """Mean absolute error between two equal-length binary arrays: the
    fraction of mismatched frames, computed via bitwise XOR."""
    mismatches = bin_arr1.astype(np.uint8) ^ bin_arr2.astype(np.uint8)
    return np.sum(mismatches) / float(len(bin_arr1))
def get_err(tmp_bin_arr1, tmp_bin_arr2, delay_by_frames):
    """Apply *delay_by_frames* to the subtitle mask, align both arrays to a
    common length, and return (delay_by_frames, MAE)."""
    shifted = shift_by_delay(tmp_bin_arr2, delay_by_frames)
    aligned1, aligned2 = make_list_length_equal(tmp_bin_arr1, shifted)
    return delay_by_frames, error(aligned1, aligned2)
class GetSub:
    """Downloads subtitles for a video and aligns them to the audio by
    comparing a voice-activity mask against the subtitle timing mask."""

    def __init__(self, aggressiveness, frame_duration_ms, padding_duration_ms):
        # VAD turns the video audio into a per-frame binary speech mask.
        self.vad = VoiceDetector(
            aggressiveness, frame_duration_ms, padding_duration_ms)

    def timedelta_to_frame(self, td):
        """Convert a timedelta into a VAD frame index (frame_duration_ms per frame)."""
        ms = float(td.seconds) * 1000.0 + float(td.microseconds) * 0.001
        return int(ms / self.vad.frame_duration_ms)

    def binary_array_from_srt(self, srt_path):
        """Parse an SRT file into a per-frame binary 'subtitle visible' array.

        Tries a list of common encodings until one parses. Returns
        (bin_array, delay_range_start, delay_range_end, subs), where the
        delay range bounds the alignment search.
        """
        common_encodings = ['latin1', 'utf-8', 'utf-16', 'cp1252']
        subs = []
        for encoding in common_encodings:
            try:
                srt_file = codecs.open(srt_path, 'r', encoding=encoding)
                srt_string = srt_file.read()
                srt_file.close()
                subs = np.array(list(srt.parse(srt_string)))
                break
            except BaseException as error:
                # Decoding/parsing failed: fall through to the next encoding.
                pass
                # print('An exception occurred: {}'.format(error))
        start_end_pairs = [(self.timedelta_to_frame(sub.start), self.timedelta_to_frame(sub.end)) for sub in subs]
        # convert seconds and microseconds to milliseconds
        first_sub_frame = start_end_pairs[0][0]
        last_sub_frame = start_end_pairs[-1][1]
        bin_array = np.zeros(last_sub_frame).astype(np.uint8)
        print('Creating Binary Array from SRT..')
        for start_frame, end_frame in tqdm(start_end_pairs):
            bin_array[start_frame:end_frame] = 1
        # TODO
        five_second_delay = int(5 * 1000 / self.vad.frame_duration_ms)
        # set max delay to 5% of video (at least 5 seconds)
        max_delay = max(five_second_delay, int(len(bin_array) * 0.05))
        return bin_array, -first_sub_frame, max_delay, subs

    def chunks(self, lst, n):
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(lst), n):
            yield lst[i:i + n]

    def find_best_delay_milliseconds(self, bin_arr1, bin_arr2, delay_range_start, delay_range_end, error_csv_out):
        """Scan the delay range and return the delay (in milliseconds) that
        minimises the MAE between the VAD mask and the shifted subtitle mask.

        Stops early once the error rises more than 10% above the best error
        seen so far. NOTE(review): error_csv_out is currently unused — the
        CSV output code is commented out.
        """
        err = math.inf
        best_delay = 0
        delay_range_len = delay_range_end - delay_range_start
        rows = np.zeros((delay_range_len, 2))
        early_stop = False
        print('Finding Best Delay..')
        #with Parallel(n_jobs=cpus, prefer="threads") as parallel:
        for i, delay_by_frames in tqdm(enumerate(range(delay_range_start, delay_range_end)), total=delay_range_len):
            delay_by_frames, tmp_err = get_err(
                bin_arr1,
                bin_arr2,
                delay_by_frames,
            )
            if tmp_err < err:
                err = tmp_err
                best_delay = delay_by_frames
            # Record (delay in seconds, error) for the commented-out CSV dump.
            rows[i][0] = delay_by_frames * self.vad.frame_duration_ms * 0.001
            rows[i][1] = tmp_err
            percent_change = (tmp_err - err) / err
            if percent_change > 0.1:
                early_stop = True
                rows = rows[:(i + 1)]
                break
        if early_stop:
            print('stopping early at', str(int(i / delay_range_len * 100.0)) + '%')
        #df = pd.DataFrame(rows, columns=["delay_in_seconds", "MAE"])
        #df.set_index("delay_in_seconds", inplace=True)
        #df.to_csv(error_csv_out)
        return best_delay * self.vad.frame_duration_ms

    def align(self, vid_file_path, srt_path, out_dir, original_name):
        """Find the best subtitle delay for the video, shift every subtitle
        by it, and write <original_name>_synced.srt into out_dir."""
        bin_arr1 = np.array(list(self.vad.detect(vid_file_path))).astype(np.uint8)
        bin_arr2, delay_range_start, delay_range_end, subs = self.binary_array_from_srt(srt_path)
        best_delay_ms = self.find_best_delay_milliseconds(
            bin_arr1,
            bin_arr2,
            delay_range_start,
            delay_range_end,
            os.path.join(out_dir, original_name + "_error.csv"),
        )
        best_delay_sec = best_delay_ms * 0.001
        print(f"best delay: {best_delay_sec}s")
        out_path = os.path.join(out_dir, original_name + "_synced.srt")
        td_to_shift = datetime.timedelta(seconds=best_delay_sec)
        print('Shifting Subtitles..')
        for subtitle in tqdm(subs):
            subtitle.start += td_to_shift
            subtitle.end += td_to_shift
        with open(out_path, 'w') as file:
            file.write(srt.compose(subs))
        print('output aligned subs to:', out_path)

    def download(self, vid_file_path, language):
        """Download subtitles via OpenSubtitlesDownload, keep an _unsynced
        copy next to the video, then align them to the video."""
        out_dir = os.path.dirname(vid_file_path)
        temp_dir = "/temp/"
        mkdir(out_dir)
        mkdir(temp_dir)
        command1 = "python OpenSubtitlesDownload.py --cli --auto {} --output {} --lang {}"
        command1_list = command1.format(vid_file_path, temp_dir, language).split(" ")
        subprocess.call(command1_list)
        original_name = basename_without_ext(vid_file_path)
        srt_path = os.path.join(temp_dir, original_name + ".srt")
        # save original file as 'filename_unsynced.srt'
        out_path_unsynced = os.path.join(out_dir, original_name + "_unsynced.srt")
        command2 = "cp {} {}"
        command2_list = command2.format(srt_path, out_path_unsynced).split(" ")
        subprocess.call(command2_list)
        print('downloaded subs:', srt_path)
        self.align(vid_file_path, srt_path, out_dir, original_name)
|
derrick56007/getsub
|
src/get_sub.py
|
get_sub.py
|
py
| 6,510 |
python
|
en
|
code
| 5 |
github-code
|
6
|
4397925600
|
from multiprocessing import Process,Array
from time import time
import sqlite3
from .config import KASTEN_ANZ,VOK_DIR
class vokabelKartei(Process):
    """SQLite-backed vocabulary flash-card store: languages, chapters and
    cards (vocabeln) distributed over up to KASTEN_ANZ boxes (kasten).

    NOTE(review): subclasses Process but never calls Process.__init__ and
    defines no run() — confirm the inheritance is actually needed.
    """

    def __init__(self):
        self.conn = sqlite3.connect(VOK_DIR+"kartei.sqlite")
        self.conn.text_factory = str
        self.c = self.conn.cursor()
        self.c.execute("""CREATE TABLE IF NOT EXISTS sprachen
            (id INTEGER PRIMARY KEY, name TEXT, spr1 TEXT,
             spr2 TEXT)""")
        self.c.execute("""CREATE TABLE IF NOT EXISTS kapitel
            (id INTEGER PRIMARY KEY, name TEXT, spr_id INT)""")
        self.c.execute("""CREATE TABLE IF NOT EXISTS vokabeln
            (id INTEGER PRIMARY KEY, spr1 TEXT, spr2 TEXT,
             kap_id INT, kasten INT, spr_id INT, last_date INT)""")
        # COMMIT_MODE False defers commits until re-enabled;
        # DEBUG_MODE echoes queries and suppresses commits entirely.
        self.COMMIT_MODE = True
        self.DEBUG_MODE = False

    def close(self):
        """Close the cursor (the connection stays open)."""
        self.c.close()

    def commit(self):
        """Commit pending changes unless commits are deferred or debugging."""
        if self.COMMIT_MODE == True and self.DEBUG_MODE == False:
            self.conn.commit()

    def execute(self,query_str,args=()):
        """Run a parameterised query; echo it first when DEBUG_MODE is on."""
        if self.DEBUG_MODE == True:
            print(query_str, args)
        self.c.execute(query_str,args)

    def set_commit_mode(self,mode):
        """Toggle deferred commits; re-enabling flushes pending changes."""
        if mode == True and self.COMMIT_MODE == False:
            self.COMMIT_MODE = True
            self.commit()
        elif mode == False and self.COMMIT_MODE == True:
            self.COMMIT_MODE = False

    def get_kapitel(self,sprache,kap_id=-1):
        """Return chapters for a language; a single chapter when kap_id is given."""
        if kap_id != -1:
            self.execute("SELECT * FROM kapitel WHERE spr_id=? AND id=?",
                (sprache,kap_id))
        else:
            self.execute("SELECT * FROM kapitel WHERE spr_id=?", (sprache,))
        return self.c.fetchall()

    def get_vok(self,vok_id):
        """Return a single card row (as a list) by id."""
        self.execute("SELECT * FROM vokabeln WHERE id=?", (vok_id,))
        return list(self.c.fetchall()[0])

    def get_sprachen(self,spr_id=None):
        """Return all languages (sorted by name) or one language by id."""
        if spr_id != None:
            self.execute("SELECT * FROM sprachen WHERE id=?", (spr_id,))
        else:
            self.execute("SELECT * FROM sprachen ORDER BY name ASC")
        return [list(x) for x in self.c.fetchall()]

    def get_stapel(self,sprache,kapitel=-1,kasten=0):
        """Return a stack of cards filtered by language and, optionally,
        chapter (-1 = any) and/or box (0 = any)."""
        if kapitel != -1 and kasten != 0:
            self.execute("""SELECT * FROM vokabeln
                WHERE spr_id=? AND kap_id=? AND kasten=?""",
                (sprache,kapitel,kasten))
        elif kapitel != -1:
            self.execute("""SELECT * FROM vokabeln
                WHERE spr_id=? AND kap_id=?""",
                (sprache,kapitel))
        elif kasten != 0:
            self.execute("""SELECT * FROM vokabeln
                WHERE spr_id=? AND kasten=?""",
                (sprache,kasten))
        else:
            self.execute("SELECT * FROM vokabeln WHERE spr_id=?", (sprache,))
        return self.c.fetchall()

    def rem_vok(self,vokids):
        """Delete one card id or a list of card ids."""
        if list != type(vokids):
            vokids = [vokids]
        for vok in vokids:
            self.execute("""DELETE FROM vokabeln WHERE id=?""", (vok,))
        self.commit()

    def rem_kap(self,kap_id):
        """Delete a chapter together with all of its cards."""
        self.execute("""DELETE FROM kapitel WHERE id=?""", (kap_id,))
        self.execute("""DELETE FROM vokabeln WHERE kap_id=?""", (kap_id,))
        self.commit()

    def rem_sprache(self,spr_id):
        """Delete a language together with all of its cards and chapters."""
        self.execute("""DELETE FROM sprachen WHERE id=?""", (spr_id,))
        self.execute("""DELETE FROM vokabeln WHERE spr_id=?""", (spr_id,))
        self.execute("""DELETE FROM kapitel WHERE spr_id=?""", (spr_id,))
        self.commit()

    def add_vok(self,*vok):
        """Insert a card (spr1, spr2, spr_id, kap_id); chapter -1 maps to 0,
        the new card starts in box 1. Returns the new row id."""
        kapitel = vok[3]
        if vok[3] == -1:
            kapitel = 0
        self.execute("""INSERT INTO vokabeln(spr1,spr2,kap_id,kasten,spr_id)
            VALUES (?,?,?,?,?)""",
            (vok[0],vok[1],kapitel,1,vok[2]))
        self.commit()
        return self.c.lastrowid

    def add_sprache(self,name,spr1,spr2):
        """Insert a language pair; returns the new row id."""
        self.execute("""INSERT INTO sprachen(name,spr1,spr2)
            VALUES (?,?,?)""",
            (name,spr1,spr2))
        self.commit()
        return self.c.lastrowid

    def add_kapitel(self,name,spr_id):
        """Insert a chapter for a language; returns the new row id."""
        self.execute("""INSERT INTO kapitel(name,spr_id)
            VALUES (?,?)""",
            (name,spr_id))
        self.commit()
        return self.c.lastrowid

    def edit_sprache(self,spr_id,name,spr1,spr2):
        """Update a language's name and both language labels."""
        self.execute("""UPDATE sprachen SET name=?,spr1=?,spr2=?
            WHERE id=?""",
            (name,spr1,spr2,spr_id))
        self.commit()

    def edit_kapitel(self,kap_id,name):
        """Rename a chapter."""
        self.execute("""UPDATE kapitel SET name=? WHERE id=?""", (name,kap_id))
        self.commit()

    def edit_vok(self,vok_id,spr1,spr2):
        """Update both sides of a card."""
        self.execute("""UPDATE vokabeln SET spr1=?,spr2=?
            WHERE id=?""",
            (spr1,spr2,vok_id))
        self.commit()

    def count_vok(self,sprache,kapitel=0,kasten=0):
        """Count cards for a language, optionally filtered by chapter and/or
        box (0 = no filter)."""
        if kapitel != 0 and kasten != 0:
            self.execute("""SELECT COUNT(*) FROM vokabeln
                WHERE spr_id=? AND kap_id=? AND kasten=?""",
                (sprache,kapitel,kasten))
        elif kasten != 0:
            self.execute("""SELECT COUNT(*) FROM vokabeln
                WHERE spr_id=? AND kasten=?""",
                (sprache,kasten))
        elif kapitel != 0:
            self.execute("""SELECT COUNT(*) FROM vokabeln
                WHERE spr_id=? AND kap_id=?""",
                (sprache,kapitel))
        else:
            self.execute("""SELECT COUNT(*) FROM vokabeln
                WHERE spr_id=?""",
                (sprache,))
        return self.c.fetchall()[0][0]

    def change_kasten(self,vok_id,kasten):
        """Move a card to another box; silently ignored beyond KASTEN_ANZ."""
        if kasten <= KASTEN_ANZ:
            self.execute("""UPDATE vokabeln SET kasten=?
                WHERE id=?""",
                (kasten,vok_id))
            self.commit()

    def touch_vok(self,vok_id,null=False):
        """Stamp a card's last_date with now, or reset it to 0 when null."""
        timestamp = int(time())
        if null:
            timestamp = 0
        self.execute("""UPDATE vokabeln SET last_date=?
            WHERE id=?""",
            (timestamp,vok_id))
        self.commit()

    def change_kap(self,vok_id,kapitel):
        """Move a card to another chapter."""
        self.execute("""UPDATE vokabeln SET kap_id=?
            WHERE id=?""",
            (kapitel,vok_id))
        self.commit()

    def get_duplicate(self,spr1,spr_id,kap_id=-1):
        """Return the first card with the same spr1 text in the language
        (optionally restricted to a chapter), or None if there is none."""
        if kap_id != -1:
            self.execute("""SELECT * FROM vokabeln
                WHERE spr1=? AND spr_id=? AND kap_id=?""",
                (spr1,spr_id,kap_id))
        else:
            self.execute("""SELECT * FROM vokabeln
                WHERE spr1=? AND spr_id=?""",
                (spr1,spr_id))
        ergebnis = self.c.fetchall()
        if len(ergebnis) == 0:
            return None
        else:
            return list(ergebnis[0])
|
tuxor1337/voktrainer
|
vok/core/kartei.py
|
kartei.py
|
py
| 7,065 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41195169929
|
import tensorflow.compat.v1 as tf
import pandas as pd
import numpy as np
import time
tf.disable_v2_behavior()
def filterData():
    """Load and preprocess the diabetic readmission dataset.

    Reads diabetic_data.csv, asks the user for the sample size ('all' or an
    integer), encodes categorical columns numerically / one-hot, and splits
    70/30 into training and verification sets.

    Returns:
        (data_train, label_train, data_verif, label_verif) as numpy arrays;
        labels are one-hot rows over the 3 'readmitted' classes (0/1/2).
    """
    df = pd.read_csv('diabetic_data.csv')
    print("how large the data should be?")
    data_size = input()
    # Drop identifier and sparsely-populated columns.
    data = df.drop(['encounter_id', 'patient_nbr', 'weight', 'payer_code', 'medical_specialty', 'number_outpatient',
                    'number_inpatient'], axis=1)
    # Age buckets -> bucket midpoints.
    data = data.replace(
        ["[0-10)", "[10-20)", "[20-30)", "[30-40)", "[40-50)", "[50-60)", "[60-70)", "[70-80)", "[80-90)", "[90-100)"],
        [5, 15, 25, 35, 45, 55, 65, 75, 85, 95])
    # Ordinal/binary encodings for medication and lab-result columns.
    data = data.replace(["Up", "Down", "Ch", "Steady", "Yes", "No"], [3, 0, 1, 2, 1, 0])
    data = data.replace(["None", "Normal", "Norm", ">200", ">300"], [0, 1, 1, 2, 3])
    data = data.replace([">7", ">8"], [2, 3])
    # Target: NO / <30 / >30 -> 0 / 1 / 2.
    data = data.replace(["NO", "<30", ">30"], [0, 1, 2])
    # NOTE(review): 'admission_source_id' appears twice in this column list —
    # confirm pandas handles the duplicate as intended.
    data = pd.get_dummies(data, columns=['race', 'gender', 'admission_source_id', 'discharge_disposition_id', 'admission_source_id', 'diag_1', 'diag_2', 'diag_3'])
    if data_size=="all":
        data_size=len(data)
    data_size = int(data_size)
    print(data_size)
    data = data[:data_size]
    print("done")
    # 70/30 train/verification split (no shuffling).
    data_train = data[:round(len(data)*7/10)]
    data_verif = data[round(len(data)*7/10):]
    print("training set length : "+str(len(data_train)))
    print("verification set length : "+str(len(data_verif)))
    label_train1 = data_train[['readmitted']]
    label_verif1 = data_verif[['readmitted']]
    data_train = data_train.drop(['readmitted'], axis=1)
    data_verif = data_verif.drop(['readmitted'], axis=1)
    data_train = data_train.to_numpy()
    data_verif = data_verif.to_numpy()
    label_train1 = label_train1.to_numpy()
    label_verif1 = label_verif1.to_numpy()
    # One-hot encode the labels. Vectorized replacement for the original
    # element-wise loops: rows with unexpected label values stay all-zero,
    # exactly as before.
    label_train = np.zeros((len(label_train1), 3))
    label_verif = np.zeros((len(label_verif1), 3))
    for cls in range(3):
        label_train[label_train1[:, 0] == cls, cls] = 1
        label_verif[label_verif1[:, 0] == cls, cls] = 1
    return data_train , label_train ,data_verif ,label_verif
def train_model():
    """Run 1000 full-batch gradient steps over the training set.

    Uses module-level globals: data_train/label_train (the data), sess,
    update (the optimizer op), x/y_ (placeholders) and W/b/loss for the
    final report.
    """
    data_x = data_train
    data_y = label_train
    print("start training the model")
    start_time = time.time()
    for i in range(0, 1000):
        sess.run(update, feed_dict={x: data_x, y_: data_y})  # BGD: the full batch every step
    print("finish training the model")
    print("--- %s seconds ---" % round(time.time() - start_time))
    print("w:", sess.run(W), " b:", sess.run(b), " loss:", loss.eval(session=sess, feed_dict={x: data_x, y_: data_y}))
def verification():
    """Print the accuracy of the trained model on the verification set
    (uses module-level sess, x, y, y_, data_verif, label_verif)."""
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("accuracy : " + str(sess.run(accuracy, feed_dict={x: data_verif, y_: label_verif})))
# Build and train a softmax (multinomial logistic regression) classifier.
data_train,label_train ,data_verif , label_verif = filterData()
features = len(data_train[0])
categories =3
# Placeholders for the inputs and the one-hot targets.
x = tf.placeholder(tf.float32, [None, features])
y_ = tf.placeholder(tf.float32, [None, categories])
W = tf.Variable(tf.zeros([features,categories]))
b = tf.Variable(tf.zeros([categories]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# NOTE(review): y is already softmax-ed yet is passed as `logits` below,
# which applies softmax twice — confirm whether the pre-softmax affine
# output was intended here.
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y)
cross_entropy = tf.reduce_mean(loss)
update = tf.train.AdamOptimizer().minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
train_model()
verification()
|
sschwarcz/Diabetic-Re-admission-prediction
|
Codes/SoftmaxTensorflow.py
|
SoftmaxTensorflow.py
|
py
| 4,003 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34493734325
|
from typing import Dict, List, Optional, Tuple, Union
from flask import (
abort, g, jsonify, render_template, request, make_response, Response
)
from werkzeug.exceptions import (
BadRequest, Forbidden, HTTPException, InternalServerError, NotFound
)
from plot_weather import (BAD_REQUEST_IMAGE_DATA,
INTERNAL_SERVER_ERROR_IMAGE_DATA, DebugOutRequest,
app, app_logger, app_logger_debug)
from plot_weather.dao.weathercommon import WEATHER_CONF
from plot_weather.dao.weatherdao import WeatherDao
from plot_weather.dao.devicedao import DeviceDao, DeviceRecord
from plot_weather.db.sqlite3conv import DateFormatError, strdate2timestamp
from plot_weather.plotter.plotterweather import (
ImageDateType, gen_plot_image, ImageDateParams, ParamKey
)
from werkzeug.datastructures import Headers, MultiDict
import psycopg2
from psycopg2.pool import SimpleConnectionPool
from psycopg2.extensions import connection
import plot_weather.util.dateutil as date_util
APP_ROOT: str = app.config["APPLICATION_ROOT"]
# Error message texts (defined in messages.conf)
MSG_REQUIRED: str = app.config["MSG_REQUIRED"]
MSG_INVALID: str = app.config["MSG_INVALID"]
MSG_NOT_FOUND: str = app.config["MSG_NOT_FOUND"]
# Headers
# Token: required for mobile clients and must match; absence counts as a
# mismatch (message already defined in messages.conf)
# Device screen info: required for mobile clients, format "width x height x density"
MSG_PHONE_IMG: str = "phone image size"
REQUIRED_PHONE_IMG: str = f"401,{MSG_PHONE_IMG} {MSG_REQUIRED}"
INVALID_PHONE_IMG: str = f"402,{MSG_PHONE_IMG} {MSG_INVALID}"
# Request parameter names
PARAM_DEVICE: str = "device_name"
PARAM_START_DAY: str = "start_day"
PARAM_BOFORE_DAYS: str = "before_days"
PARAM_YEAR_MONTH: str = "year_month"
# Request-parameter error codes: 421 and up
# Device name: required, length-checked (1-20 bytes), may be unregistered
DEVICE_LENGTH: int = 20
# Applies to every request except the device-list request
REQUIRED_DEVICE: str = f"421,{PARAM_DEVICE} {MSG_REQUIRED}"
INVALIDD_DEVICE: str = f"422,{PARAM_DEVICE} {MSG_INVALID}"
DEVICE_NOT_FOUND: str = f"423,{PARAM_DEVICE} {MSG_NOT_FOUND}"
# Period-image request
# (1) Search start day ["start_day"]: optional — defaults to the system date.
#     ISO 8601 date (YYYY-mm-dd), exactly 10 characters
INVALID_START_DAY: str = f"431,{PARAM_START_DAY} {MSG_INVALID}"
# (2) N days before the start day (1, 2, 3 or 7): required
REQUIRED_BOFORE_DAY: str = f"433,{PARAM_BOFORE_DAYS} {MSG_REQUIRED}"
INVALID_BOFORE_DAY: str = f"434,{PARAM_BOFORE_DAYS} {MSG_INVALID}"
# Monthly-image request
# Year-month: required, format YYYY-mm, exactly 7 characters
REQUIRED_YEAR_MONTH: str = f"435,{PARAM_YEAR_MONTH} {MSG_REQUIRED}"
INVALID_YEAR_MONTH: str = f"436,{PARAM_YEAR_MONTH} {MSG_INVALID}"
# Key under which error messages are stored in the response dicts
MSG_DESCRIPTION: str = "error_message"
# Fixed-message error dict
ABORT_DICT_UNMATCH_TOKEN: Dict[str, str] = {MSG_DESCRIPTION: app.config["UNMATCH_TOKEN"]}
# Variable-message error dict: the "" part is replaced at the use site
ABORT_DICT_BLANK_MESSAGE: Dict[str, str] = {MSG_DESCRIPTION: ""}
def get_connection() -> connection:
    """Return the request-scoped DB connection, borrowing one from the pool
    on first use and configuring it read-only with autocommit."""
    if 'db' not in g:
        conn_pool: SimpleConnectionPool = app.config["postgreSQL_pool"]
        borrowed: connection = conn_pool.getconn()
        borrowed.set_session(readonly=True, autocommit=True)
        g.db = borrowed
    if app_logger_debug:
        app_logger.debug(f"g.db:{g.db}")
    return g.db
@app.teardown_appcontext
def close_connection(exception=None) -> None:
    """On app-context teardown, return the request's DB connection (if one
    was borrowed) to the pool."""
    db: connection = g.pop('db', None)
    if app_logger_debug:
        app_logger.debug(f"db:{db}")
    if db is None:
        return
    app.config["postgreSQL_pool"].putconn(db)
@app.route(APP_ROOT, methods=["GET"])
def index() -> str:
    """Today-data page (first request only).

    :return: HTML page embedding the matplotlib plot of today's data as a
        base64 image, plus the list of available year-months.
    """
    if app_logger_debug:
        app_logger.debug(request.path)
    try:
        conn: connection = get_connection()
        # Year-month list for the month selector
        dao = WeatherDao(conn, logger=app_logger)
        yearMonthList: List[str] = dao.getGroupbyMonths(
            device_name=WEATHER_CONF["DEVICE_NAME"],
            start_date=WEATHER_CONF["STA_YEARMONTH"],
        )
        if app_logger_debug:
            app_logger.debug(f"yearMonthList:{yearMonthList}")
        # Plot image for today's data
        image_date_params = ImageDateParams(ImageDateType.TODAY)
        img_base64_encoded: str = gen_plot_image(
            conn, image_date_params=image_date_params, logger=app_logger
        )
    except Exception as exp:
        app_logger.error(exp)
        # BUGFIX: was InternalServerError.codde — a typo that raised
        # AttributeError on the error path instead of aborting with 500.
        abort(InternalServerError.code, InternalServerError(original_exception=exp))
    strToday: str = app.config.get("STR_TODAY", "")
    titleSuffix: str = app.config.get("TITLE_SUFFIX", "")
    defaultMainTitle: str = strToday + titleSuffix
    return render_template(
        "showplotweather.html",
        ip_host=app.config["SERVER_NAME"],
        app_root_url=APP_ROOT,
        path_get_today="/gettoday",
        path_get_month="/getmonth/",
        str_today=strToday,
        title_suffix=titleSuffix,
        info_today_update_interval=app.config.get("INFO_TODAY_UPDATE_INTERVAL"),
        default_main_title=defaultMainTitle,
        year_month_list=yearMonthList,
        img_src=img_base64_encoded,
    )
@app.route("/plot_weather/gettoday", methods=["GET"])
def getTodayImage() -> Response:
"""本日データ取得リクエスト(2回以降) JavaScriptからのリクエスト想定
:return: jSON形式(matplotlibでプロットした画像データ(形式: png)のbase64エンコード済み文字列)
(出力内容) jSON('data:image/png;base64,... base64encoded data ...')
"""
if app_logger_debug:
app_logger.debug(request.path)
try:
conn: connection = get_connection()
# 本日データプロット画像取得
image_date_params = ImageDateParams(ImageDateType.TODAY)
img_base64_encoded: str = gen_plot_image(
conn, image_date_params, logger=app_logger
)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
return _createErrorImageResponse(InternalServerError.code)
return _createImageResponse(img_base64_encoded)
@app.route("/plot_weather/getmonth/<yearmonth>", methods=["GET"])
def getMonthImage(yearmonth) -> Response:
"""要求された年月の月間データ取得
:param yearmonth str: 年月 (例) 2022-01
:return: jSON形式(matplotlibでプロットした画像データ(形式: png)のbase64エンコード済み文字列)
(出力内容) jSON('data:image/png;base64,... base64encoded data ...')
"""
if app_logger_debug:
app_logger.debug(request.path)
try:
# リクエストパラメータの妥当性チェック: "YYYY-mm" + "-01"
chk_yyyymmdd = yearmonth + "-01"
# 日付チェック(YYYY-mm-dd): 日付不正の場合例外スロー
strdate2timestamp(chk_yyyymmdd, raise_error=True)
conn: connection = get_connection()
# 指定年月(year_month)データプロット画像取得
image_date_params = ImageDateParams(ImageDateType.YEAR_MONTH)
param: Dict[ParamKey, str] = image_date_params.getParam()
param[ParamKey.YEAR_MONTH] = yearmonth
image_date_params.setParam(param)
img_base64_encoded: str = gen_plot_image(
conn, image_date_params, logger=app_logger
)
except DateFormatError as dfe:
# BAD Request
app_logger.warning(dfe)
return _createErrorImageResponse(BadRequest.code)
except psycopg2.Error as db_err:
# DBエラー
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
# バグ, DBサーバーダウンなど想定
app_logger.error(exp)
return _createErrorImageResponse(InternalServerError.code)
return _createImageResponse(img_base64_encoded)
@app.route("/plot_weather/getlastdataforphone", methods=["GET"])
def getLastDataForPhone() -> Response:
"""最新の気象データを取得する (スマートホン専用)
[仕様変更] 2023-09-09
(1) リクエストパラメータ追加
device_name: デバイス名 ※必須
:param: request parameter: device_name="xxxxx"
"""
if app_logger_debug:
app_logger.debug(request.path)
# Debug output request.headers or request.arg or both
_debugOutRequestObj(request, debugout=DebugOutRequest.HEADERS)
# トークン必須
headers: Headers = request.headers
if not _matchToken(headers):
abort(Forbidden.code, ABORT_DICT_UNMATCH_TOKEN)
# デバイス名必須
param_device_name: str = _checkDeviceName(request.args)
try:
conn: connection = get_connection()
# 現在時刻時点の最新の気象データ取得
dao = WeatherDao(conn, logger=app_logger)
rec_count: int
row: Optional[Tuple[str, float, float, float, float]]
# デバイス名に対応する最新のレコード取得
row = dao.getLastData(device_name=param_device_name)
if row:
rec_count = 1
measurement_time, temp_out, temp_in, humid, pressure = row
return _responseLastDataForPhone(
measurement_time, temp_out, temp_in, humid, pressure, rec_count)
else:
# デバイス名に対応するレコード無し
rec_count = 0
return _responseLastDataForPhone(None, None, None, None, None, rec_count)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
@app.route("/plot_weather/getfirstregisterdayforphone", methods=["GET"])
def getFirstRegisterDayForPhone() -> Response:
"""デバイスの観測データの初回登録日を取得する (スマートホン専用)
[仕様追加] 2023-09-13
:param: request parameter: device_name="xxxxx"
"""
if app_logger_debug:
app_logger.debug(request.path)
# Debug output request.headers or request.arg or both
_debugOutRequestObj(request, debugout=DebugOutRequest.HEADERS)
# トークン必須
headers: Headers = request.headers
if not _matchToken(headers):
abort(Forbidden.code, ABORT_DICT_UNMATCH_TOKEN)
# デバイス名必須
param_device_name: str = _checkDeviceName(request.args)
try:
conn: connection = get_connection()
dao = WeatherDao(conn, logger=app_logger)
# デバイス名に対応する初回登録日取得
first_register_day: Optional[str] = dao.getFisrtRegisterDay(param_device_name)
if app_logger_debug:
app_logger.debug(f"first_register_day[{type(first_register_day)}]: {first_register_day}")
if first_register_day:
return _responseFirstRegisterDayForPhone(first_register_day, 1)
else:
# デバイス名に対応するレコード無し
return _responseFirstRegisterDayForPhone(None, 0)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
@app.route("/plot_weather/gettodayimageforphone", methods=["GET"])
def getTodayImageForPhone() -> Response:
"""本日データ画像取得リクエスト (スマートホン専用)
[仕様変更] 2023-09-09
(1) リクエストパラメータ追加
device_name: デバイス名 ※必須
(2) レスポンスにレコード件数を追加 ※0件エラーの抑止
:param: request parameter: device_name="xxxxx"
:return: jSON形式(matplotlibでプロットした画像データ(形式: png)のbase64エンコード済み文字列)
(出力内容) jSON('data:': 'img_src':'image/png;base64,... base64encoded data ...',
'rec_count':xxx)
"""
if app_logger_debug:
app_logger.debug(request.path)
_debugOutRequestObj(request, debugout=DebugOutRequest.HEADERS)
# トークン必須
headers: Headers = request.headers
if not _matchToken(headers):
abort(Forbidden.code, ABORT_DICT_UNMATCH_TOKEN)
# デバイス名必須
param_device_name: str = _checkDeviceName(request.args)
# 表示領域サイズ+密度は必須: 形式(横x縦x密度)
str_img_size: str = _checkPhoneImageSize(headers)
try:
conn: connection = get_connection()
image_date_params = ImageDateParams(ImageDateType.TODAY)
param: Dict[ParamKey, str] = image_date_params.getParam()
param[ParamKey.PHONE_SIZE] = str_img_size
image_date_params.setParam(param)
rec_count: int
img_base64_encoded: str
rec_count, img_base64_encoded = gen_plot_image(
conn, param_device_name, image_date_params, logger=app_logger
)
return _responseImageForPhone(rec_count, img_base64_encoded)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
@app.route("/plot_weather/getbeforedaysimageforphone", methods=["GET"])
def getBeforeDateImageForPhone() -> Response:
"""過去経過日指定データ画像取得リクエスト (スマートホン専用)
[仕様変更] 2023-09-09
(1) リクエストパラメータ追加
device_name: デバイス名 ※必須
start_day: 検索開始日(iso8601形式) ※任意
(2) レスポンスにレコード件数を追加 ※0件エラーの抑止
:param: request parameter: ?device_name=xxxxx&start_day=2023-05-01&before_days=(2|3|7)
:return: jSON形式(matplotlibでプロットした画像データ(形式: png)のbase64エンコード済み文字列)
(出力内容) jSON('data:': 'img_src':'image/png;base64,... base64encoded data ...',
'rec_count':xxx)
"""
if app_logger_debug:
app_logger.debug(request.path)
_debugOutRequestObj(request, debugout=DebugOutRequest.BOTH)
# トークン必須
headers = request.headers
if not _matchToken(headers):
abort(Forbidden.code, ABORT_DICT_UNMATCH_TOKEN)
# デバイス名 ※必須チェック
param_device_name: str = _checkDeviceName(request.args)
# 検索開始日 ※任意、指定されている場合はISO8601形式チェック
str_start_day: Optional[str] = _checkStartDay(request.args)
if str_start_day is None:
# 検索開始日がない場合は当日を設定
str_start_day = date_util.getTodayIsoDate()
# Check before_days query parameter
str_before_days: str = _checkBeforeDays(request.args)
# 表示領域サイズ+密度は必須: 形式(横x縦x密度)
str_img_size: str = _checkPhoneImageSize(headers)
try:
conn: connection = get_connection()
image_date_params = ImageDateParams(ImageDateType.RANGE)
param: Dict[ParamKey, str] = image_date_params.getParam()
param[ParamKey.START_DAY] = str_start_day
param[ParamKey.BEFORE_DAYS] = str_before_days
param[ParamKey.PHONE_SIZE] = str_img_size
image_date_params.setParam(param)
rec_count: int
img_base64_encoded: str
rec_count, img_base64_encoded = gen_plot_image(
conn, param_device_name, image_date_params, logger=app_logger
)
return _responseImageForPhone(rec_count,img_base64_encoded)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
@app.route("/plot_weather/get_devices", methods=["GET"])
def getDevices() -> Response:
"""センサーディバイスリスト取得リクエスト
:return: JSON形式(idを除くセンサーディバイスリスト)
(出力内容) JSON({"data":{"devices":[...]}')
"""
if app_logger_debug:
app_logger.debug(request.path)
devices_with_dict: List[Dict]
try:
conn: connection = get_connection()
dao: DeviceDao = DeviceDao(conn, logger=app_logger)
devices: List[DeviceRecord] = dao.get_devices()
devices_with_dict = DeviceDao.to_dict_without_id(devices)
resp_obj: Dict[str, Dict] = {
"data": {"devices": devices_with_dict},
"status": {"code": 0, "message": "OK"}
}
return _make_respose(resp_obj, 200)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
def _debugOutRequestObj(request, debugout=DebugOutRequest.ARGS) -> None:
    """Dump the request's args and/or headers to the debug log.

    :param request: current Flask request object.
    :param debugout: which parts to dump (ARGS, HEADERS or BOTH).
    """
    if debugout in (DebugOutRequest.ARGS, DebugOutRequest.BOTH):
        # Fix: the log label was misspelled as "reqeust.args".
        app_logger.debug(f"request.args: {request.args}")
    if debugout in (DebugOutRequest.HEADERS, DebugOutRequest.BOTH):
        app_logger.debug(f"request.headers: {request.headers}")
def _matchToken(headers: Headers) -> bool:
    """Compare the phone request token header with the configured value.

    :param headers: request headers
    :return: True when the token matches, False otherwise.
    """
    expected: str = app.config.get("HEADER_REQUEST_PHONE_TOKEN_VALUE", "!")
    received: Optional[str] = headers.get(
        key=app.config.get("HEADER_REQUEST_PHONE_TOKEN_KEY", "!"),
        type=str,
        default=""
    )
    if received == expected:
        return True
    app_logger.warning("Invalid request token!")
    return False
def _checkPhoneImageSize(headers: Headers) -> str:
    """Validate the phone display-size header ("[width]x[height]x[density]").

    Note 1: the token check has already passed, so the header is assumed
    to be present.
    Note 2: a malformed value (an Android app bug) aborts with BadRequest,
    which the error handler turns into a JSON message.
    :param headers: request headers
    :return: the validated raw header string "WxHxD" (not a tuple).
    """
    img_size: str = headers.get(
        app.config.get("HEADER_REQUEST_IMAGE_SIZE_KEY", ""), type=str, default=""
    )
    if app_logger_debug:
        app_logger.debug(f"Phone imgSize: {img_size}")
    if not img_size:
        abort(BadRequest.code, _set_errormessage(REQUIRED_PHONE_IMG))
    parts: List[str] = img_size.split("x")
    try:
        img_wd: int = int(parts[0])
        img_ht: int = int(parts[1])
        density: float = float(parts[2])
    except Exception as exp:
        # The exception detail goes to the log only; the client gets a
        # generic invalid-size message.
        app_logger.warning(f"[phone image size] {exp}")
        abort(BadRequest.code, _set_errormessage(INVALID_PHONE_IMG))
    if app_logger_debug:
        app_logger.debug(f"imgWd: {img_wd}, imgHt: {img_ht}, density: {density}")
    return img_size
def _checkBeforeDays(args: MultiDict) -> str:
    """Validate the ``before_days`` query parameter (must be 1, 2, 3 or 7).

    ``args.get(key, type=int)`` alone cannot distinguish "missing" from
    "not a number" (both yield the default), so presence is checked first.
    :return: the validated value as a string.
    """
    if PARAM_BOFORE_DAYS not in args.keys():
        abort(BadRequest.code, _set_errormessage(REQUIRED_BOFORE_DAY))
    days: int = args.get(PARAM_BOFORE_DAYS, default=-1, type=int)
    if days not in (1, 2, 3, 7):
        abort(BadRequest.code, _set_errormessage(INVALID_BOFORE_DAY))
    return str(days)
def _checkDeviceName(args: MultiDict) -> str:
    """Validate the ``device_name`` query parameter.

    Missing parameter or bad length -> abort(BadRequest);
    device not registered -> abort(BadRequest, DEVICE_NOT_FOUND).
    :return: the validated device name.
    """
    # Presence check.
    if PARAM_DEVICE not in args.keys():
        abort(BadRequest.code, _set_errormessage(REQUIRED_DEVICE))
    # Length check: 1 - DEVICE_LENGTH characters.
    device_name: str = args.get(PARAM_DEVICE, default="", type=str)
    if not 1 <= len(device_name) <= DEVICE_LENGTH:
        abort(BadRequest.code, _set_errormessage(INVALIDD_DEVICE))
    if app_logger_debug:
        app_logger.debug("requestParam.device_name: " + device_name)
    # Existence check against the device table.
    try:
        db_conn: connection = get_connection()
        dao: DeviceDao = DeviceDao(db_conn, logger=app_logger)
        registered: bool = dao.exists(device_name)
    except Exception as exp:
        app_logger.error(exp)
        abort(InternalServerError.code, description=str(exp))
    if not registered:
        abort(BadRequest.code, _set_errormessage(DEVICE_NOT_FOUND))
    return device_name
def _checkStartDay(args: MultiDict) -> Optional[str]:
    """Validate the optional ``start_day`` query parameter (ISO 8601).

    :return: the start day when present and valid; None when absent.
        Aborts with BadRequest when present but malformed.
    """
    if PARAM_START_DAY not in args.keys():
        # Optional parameter: absence is fine.
        return None
    start_day: str = args.get(PARAM_START_DAY, default="", type=str)
    if app_logger_debug:
        app_logger.debug(f"start_day: {start_day}")
    if not date_util.checkIso8601Date(start_day):
        # Malformed date parameter.
        abort(BadRequest.code, _set_errormessage(INVALID_START_DAY))
    return start_day
def _createImageResponse(img_src: str) -> Response:
    """Wrap a base64 image string in the success JSON envelope (for JavaScript)."""
    payload = {"status": "success", "data": {"img_src": img_src}}
    return _make_respose(payload, 200)
def _createErrorImageResponse(err_code) -> Response:
    """Build the error JSON envelope with a placeholder image (for JavaScript)."""
    payload = {"status": "error", "code": err_code}
    # Attach the placeholder image matching the HTTP status.
    if err_code == BadRequest.code:
        payload["data"] = {"img_src": BAD_REQUEST_IMAGE_DATA}
    elif err_code == InternalServerError.code:
        payload["data"] = {"img_src": INTERNAL_SERVER_ERROR_IMAGE_DATA}
    return _make_respose(payload, err_code)
def _responseLastDataForPhone(
        mesurement_time: str,
        temp_out: float,
        temp_in: float,
        humid: float,
        pressure: float,
        rec_count: int
) -> Response:
    """Return the latest weather record as JSON (for the phone app).

    NOTE: the first parameter keeps its historical misspelling
    ("mesurement_time") to avoid breaking keyword callers.
    """
    data: Dict[str, Union[str, float]] = {
        "measurement_time": mesurement_time,
        "temp_out": temp_out,
        "temp_in": temp_in,
        "humid": humid,
        "pressure": pressure,
        "rec_count": rec_count
    }
    payload: Dict[str, Dict[str, Union[str, float]]] = {
        "status": {"code": 0, "message": "OK"},
        "data": data
    }
    return _make_respose(payload, 200)
def _responseFirstRegisterDayForPhone(
        first_day: Optional[str],
        rec_count: int
) -> Response:
    """Return the device's first registration day as JSON (for the phone app)."""
    payload: Dict[str, Dict[str, Union[str, int]]] = {
        "status": {"code": 0, "message": "OK"},
        "data": {
            "first_register_day": first_day,
            "rec_count": rec_count
        }
    }
    return _make_respose(payload, 200)
def _responseImageForPhone(rec_count: int, img_src: str) -> Response:
    """Return a matplotlib-generated image plus record count (for the phone app).

    [Spec change] 2023-09-09: ``rec_count`` added so the app can detect the
    zero-record case without treating it as an error.
    """
    payload: Dict[str, Dict[str, Union[int, str]]] = {
        "status": {"code": 0, "message": "OK"},
        "data": {
            "img_src": img_src,
            "rec_count": rec_count
        }
    }
    return _make_respose(payload, 200)
def _set_errormessage(message: str) -> Dict:
    """Build the error-description payload passed to ``abort()``.

    Fix: the original mutated the module-global ABORT_DICT_BLANK_MESSAGE in
    place and returned it, which is racy when Flask serves concurrent
    requests (two requests could overwrite each other's message). Return a
    per-call copy instead; the error handler reads the same key either way.
    """
    payload: Dict = dict(ABORT_DICT_BLANK_MESSAGE)
    payload[MSG_DESCRIPTION] = message
    return payload
# Request parameter check error.
@app.errorhandler(BadRequest.code)
# Token error.
@app.errorhandler(Forbidden.code)
# Device not found.
@app.errorhandler(NotFound.code)
@app.errorhandler(InternalServerError.code)
def error_handler(error: HTTPException) -> Response:
    """Translate an HTTPException into the JSON error envelope."""
    app_logger.warning(f"error_type:{type(error)}, {error}")
    # Bugfix 2023-09-06: abort() inside this app passes a dict description,
    # while Flask-raised exceptions carry a plain string.
    if isinstance(error.description, dict):
        err_msg: str = error.description["error_message"]
    else:
        err_msg = error.description
    payload: Dict[str, Dict[str, Union[int, str]]] = {
        "status": {"code": error.code, "message": err_msg}
    }
    return _make_respose(payload, error.code)
def _make_respose(resp_obj: Dict, resp_code) -> Response:
    """Serialize *resp_obj* as a JSON response with the given status code.

    NOTE: the function name keeps its historical misspelling ("respose")
    because every handler in this module calls it by that name.
    """
    resp = make_response(jsonify(resp_obj), resp_code)
    resp.headers["Content-Type"] = "application/json"
    return resp
|
pipito-yukio/plot_weather_flaskapp
|
src/plot_weather/views/app_main.py
|
app_main.py
|
py
| 26,178 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
71888723067
|
import numpy
from sklearn.metrics import cohen_kappa_score, classification_report
import torch
from torch.autograd import Variable
from tqdm import tqdm
import torch.nn as nn
from sklearn.metrics import cohen_kappa_score, classification_report
from models import FitNet_4
from torch import optim
import numpy as np
def evaluation(test_dataloader, model, class_names, epoch, criterion):
    """Run one evaluation pass and print loss, accuracy, report and kappa.

    :param test_dataloader: DataLoader yielding (image, label) batches.
    :param model: network under evaluation (moved to the GPU by the caller).
    :param class_names: class names for the classification report.
    :param epoch: epoch number, used only in progress/log messages.
    :param criterion: loss function applied to (logits, labels).
    """
    model.eval()  # disable dropout / freeze batch-norm statistics for eval
    eval_loss_list = []
    eval_acc = 0
    pred_list = []
    GT_list = []
    pbar_test = tqdm(test_dataloader, total=len(test_dataloader))
    with torch.no_grad():
        for image, label in pbar_test:
            # torch.autograd.Variable is deprecated; tensors work directly.
            image = image.cuda()
            label = label.cuda()
            out = model(image)
            loss = criterion(out, label)
            eval_loss_list.append(loss.item())
            _, pred = torch.max(out, 1)
            num_correct = (pred == label).sum()
            pred_list.extend(pred.cpu().numpy().tolist())
            GT_list.extend(label.cpu().numpy().tolist())
            eval_acc += num_correct.item()
            pbar_test.set_description("Testing:epoch{} loss:{}".format(epoch, loss.item()))
    # BUG FIX: the original divided the number of correct SAMPLES by the
    # number of BATCHES (len(pbar_test)), which is not an accuracy.
    # Divide by the total number of evaluated samples instead.
    epoch_test_acc = eval_acc / len(GT_list)
    print(
        "Testing:epoch{} finished! Total loss:{} accuracy:{}".format(
            epoch, np.mean(eval_loss_list), epoch_test_acc))
    print(classification_report(y_true=GT_list, y_pred=pred_list, target_names=class_names))
    kappa = cohen_kappa_score(y1=pred_list, y2=GT_list)
    print("Kappa:{}".format(kappa))
|
Fivethousand5k/Pytorch-implemented-ECNN
|
eval.py
|
eval.py
|
py
| 1,542 |
python
|
en
|
code
| 3 |
github-code
|
6
|
36096748364
|
from threading import Thread
import time
import classifierAlexa
import classifier_pyqt5
def main():
    """Start the Alexa classifier web app and the PyQt5 classifier UI in parallel."""
    try:
        alexa_thread = Thread(target=classifierAlexa.app.run)
        alexa_thread.start()
        # Give the Flask server a moment to come up before the UI starts.
        time.sleep(1)
        # BUG FIX: the original passed classifier_pyqt5.startApp() — the
        # CALL's return value — as target, which ran the app synchronously
        # in the main thread and handed Thread a non-callable. Pass the
        # function object itself so it actually runs in the new thread.
        ui_thread = Thread(target=classifier_pyqt5.startApp)
        ui_thread.start()
    except Exception:
        print("Unknown exception occurred!")
        raise


if __name__ == '__main__':
    main()
|
KAIST-ITC/fall_detection
|
alexa_posture_classifier/main.py
|
main.py
|
py
| 472 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6871646913
|
import numpy as np
from PIL import Image
def normalize(x):
    """Linearly rescale *x* to integers in [0, 255]; prints the input's min/max."""
    lo = np.min(x)
    hi = np.max(x)
    print(lo, hi)  # debug trace kept from the original implementation
    scaled = (x - lo) / (hi - lo) * 255
    return scaled.astype(int)
# Load the trained weight matrix and bias vector saved by the training script.
W=np.load('./data/W.npy')
b=np.load('./data/b.npy')
zero = np.zeros(W.shape)
# Split the weights into their negative and positive parts and rescale each
# to [0, 255] for visualisation (normalize() also prints the min/max).
nag = normalize(np.minimum(W,0))
pos = normalize(np.maximum(W,0))
# First column reshaped to a 28x28 image — assumes MNIST-sized inputs
# (W has 784 rows); TODO confirm against the training script.
print("shape", nag[:,0].reshape((28,28)).shape)
ns = nag[:,0].reshape((28,28))
print(np.expand_dims(ns, axis=2))
'''w, h = 512, 512
data = np.zeros((h, w, 3), dtype=np.uint8)
data[256, 256] = [255, 0, 0]
img = Image.fromarray(data, 'RGB')
img.save('my.png')
img.show()'''
#print(nag[nag != 255], pos[pos != 1])
#print(nag[nag != 0],pos[pos != 0], pos_m, nag_m, np.sum(nag), np.sum(pos))
#print(W.shape)
#print(W[W != 0])
|
tuntunwin/tf-tutorial
|
mnist1-plotmodel.py
|
mnist1-plotmodel.py
|
py
| 745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30709482723
|
import cv2 as cv
import numpy as np
from process import Resize, NormalizeImage
class PicoDetProcess():
    """Pre/post-processing pipeline for a PicoDet object detector.

    Handles image resize + normalisation before inference, and score
    filtering + NMS + coordinate rescaling after inference.
    """
    def __init__(self,
                 trainsize=[320,320],
                 mean=[0.485,0.456,0.406],
                 std=[0.229,0.224,0.225],
                 score_threshold=0.4,
                 nms_threshold=0.5
                 ):
        # trainsize: target size the network was trained with.
        # mean/std: per-channel normalisation constants (ImageNet defaults).
        # score_threshold: minimum confidence kept before NMS.
        # nms_threshold: IOU above which overlapping boxes are suppressed.
        # NOTE(review): the mutable list defaults are shared across calls;
        # harmless here because they are never mutated.
        self.score_threshold = score_threshold
        self.nms_threshold = nms_threshold
        self.resize =Resize(trainsize)
        self.normalizeImage = NormalizeImage(mean = mean,std =std)
    def preprocess(self, images):
        """Preprocess a batch of BGR images into the model input dict."""
        input_im_lst = []
        input_im_info_lst = []
        for im in images:
            im, im_info = self.processim(im)
            input_im_lst.append(im)
            input_im_info_lst.append(im_info)
        inputs = self.create_inputs(input_im_lst, input_im_info_lst)
        return inputs
    def create_inputs(self, imgs, im_info):
        """generate input for different model type
        Args:
            imgs (list(numpy)): list of images (np.ndarray)
            im_info (list(dict)): list of image info
        Returns:
            inputs (dict): input of model with keys 'image', 'im_shape',
                'scale_factor' (single-image fast path skips padding)
        """
        inputs = {}
        im_shape = []
        scale_factor = []
        # Fast path: a single image needs no cross-image padding.
        if len(imgs) == 1:
            inputs['image'] = np.array((imgs[0], )).astype('float32')
            inputs['im_shape'] = np.array(
                (im_info[0]['im_shape'], )).astype('float32')
            inputs['scale_factor'] = np.array(
                (im_info[0]['scale_factor'], )).astype('float32')
            return inputs
        for e in im_info:
            im_shape.append(np.array((e['im_shape'], )).astype('float32'))
            scale_factor.append(np.array((e['scale_factor'], )).astype('float32'))
        inputs['im_shape'] = np.concatenate(im_shape, axis=0)
        inputs['scale_factor'] = np.concatenate(scale_factor, axis=0)
        # Zero-pad every image to the largest (h, w) in the batch so they
        # can be stacked into one tensor.
        imgs_shape = [[e.shape[1], e.shape[2]] for e in imgs]
        max_shape_h = max([e[0] for e in imgs_shape])
        max_shape_w = max([e[1] for e in imgs_shape])
        padding_imgs = []
        for img in imgs:
            im_c, im_h, im_w = img.shape[:]
            padding_im = np.zeros(
                (im_c, max_shape_h, max_shape_w), dtype=np.float32)
            padding_im[:, :im_h, :im_w] = img
            padding_imgs.append(padding_im)
        inputs['image'] = np.stack(padding_imgs, axis=0)
        return inputs
    def processim(self, im):
        """Run the preprocess operators (BGR->RGB, resize, normalize) on one image.

        Returns the processed image and its im_info dict
        ('im_shape' / 'scale_factor').
        """
        # process image by preprocess_ops
        im_info = {
            'scale_factor': np.array(
                [1., 1.], dtype=np.float32),
            'im_shape': None,
        }
        im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
        im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
        # OpenCV loads BGR; the network expects RGB.
        im = cv.cvtColor(im, cv.COLOR_BGR2RGB)
        im,im_info = self.resize(im,im_info)
        im,im_info = self.normalizeImage(im,im_info)
        # im = im.transpose((2, 0, 1)).copy()
        return im, im_info
    def postprocess(self, inputs, scale_factor):
        """Apply NMS to the raw detector output and map boxes back to the
        original image scale.

        :param inputs: dict with 'bboxes' and 'scores' arrays — presumably
            batch-first with a single image; TODO confirm against the caller.
        :param scale_factor: (scale_y, scale_x) applied during preprocessing.
        """
        bboxs = inputs['bboxes']
        scores = inputs['scores']
        bbox,score = self.nms(bboxs[0],scores[0][0])
        # Undo the preprocessing resize: x coords by scale_factor[1],
        # y coords by scale_factor[0].
        for box in bbox:
            box[0] = box[0] / scale_factor[1]
            box[1] = box[1] / scale_factor[0]
            box[2] = box[2] / scale_factor[1]
            box[3] = box[3] / scale_factor[0]
        outputs = dict(bboxes=np.array(bbox), scores=np.array(score))
        return outputs
    def nms(self, bounding_boxes, confidence_score):
        '''
        :param bounding_boxes: candidate boxes [x_min, y_min, x_max, y_max],
            origin at the image's top-left corner
        :param confidence_score: per-box confidences
        :return: the boxes and confidences that survive suppression
        '''
        # Confidence pre-filter: keep only boxes above score_threshold.
        picked = []
        for i in range(confidence_score.shape[-1]):
            if confidence_score[i] > self.score_threshold:
                picked.append(i)
        bounding_boxes = bounding_boxes[picked,:]
        confidence_score = confidence_score[picked]
        # Nothing survives the filter: return empty lists.
        if len(bounding_boxes) == 0:
            return [], []
        # Convert to numpy for vectorised arithmetic.
        boxes = np.array(bounding_boxes)
        # Split out the corner coordinates.
        start_x = boxes[:, 0]
        start_y = boxes[:, 1]
        end_x = boxes[:, 2]
        end_y = boxes[:, 3]
        # Confidences as a numpy array.
        score = np.array(confidence_score)
        # Accumulators for the boxes/scores kept by NMS.
        picked_boxes = []
        picked_score = []
        # Area of every box (+1: coordinates treated as inclusive pixels).
        areas = (end_x - start_x + 1) * (end_y - start_y + 1)
        # Indices that sort the scores in ascending order.
        order = np.argsort(score)
        # Iterate bounding boxes
        while order.size > 0:
            # Index of the highest remaining confidence.
            index = order[-1]
            # Keep that box and its score.
            picked_boxes.append(bounding_boxes[index])
            picked_score.append(confidence_score[index])
            # Intersection rectangle between the kept box and all others:
            # take the max of the top-left corners and the min of the
            # bottom-right corners (+1 below makes the extent inclusive).
            x1 = np.maximum(start_x[index], start_x[order[:-1]])
            x2 = np.minimum(end_x[index], end_x[order[:-1]])
            y1 = np.maximum(start_y[index], start_y[order[:-1]])
            y2 = np.minimum(end_y[index], end_y[order[:-1]])
            # Intersection area; w or h is 0 when the boxes do not overlap.
            w = np.maximum(0.0, x2 - x1 + 1)
            h = np.maximum(0.0, y2 - y1 + 1)
            intersection = w * h
            # IOU against every remaining box.
            ratio = intersection / (areas[index] + areas[order[:-1]] - intersection)
            # Keep only the boxes whose IOU is below the suppression threshold.
            left = np.where(ratio < self.nms_threshold)
            # Restrict the ascending-score index list to the survivors.
            order = order[left]
        return picked_boxes, picked_score
|
guojin-yan/Automatic_aiming
|
aiming/person_process.py
|
person_process.py
|
py
| 6,767 |
python
|
en
|
code
| 3 |
github-code
|
6
|
70383323069
|
import copy
from typing import List, Optional
def deep_merge_dicts(original: dict, new_dict: dict) -> dict:
    """
    Overview:
        Recursively merge two dicts without modifying either input.
    Arguments:
        - original (:obj:`dict`): Base dict (may be None/empty).
        - new_dict (:obj:`dict`): Dict whose values win on conflict (may be None/empty).
    Returns:
        - merged_dict (:obj:`dict`): A deep copy of ``original`` updated with ``new_dict``.
    """
    base = original if original else {}
    overlay = new_dict if new_dict else {}
    merged = copy.deepcopy(base)
    if overlay:  # skip the recursive walk for an empty/None overlay
        deep_update(merged, overlay, True, [])
    return merged
def deep_update(
    original: dict,
    new_dict: dict,
    new_keys_allowed: bool = False,
    whitelist: Optional[List[str]] = None,
    override_all_if_type_changes: Optional[List[str]] = None
):
    """
    Overview:
        Recursively copy the entries of ``new_dict`` into ``original`` (in place).
    Arguments:
        - original (:obj:`dict`): Dict holding the current/default values; mutated and returned.
        - new_dict (:obj:`dict`): Dict holding the values to apply.
        - new_keys_allowed (:obj:`bool`): Whether keys absent from ``original`` may be added.
        - whitelist (:obj:`Optional[List[str]]`): Top-level keys whose dict values may
            freely introduce new sub-keys.
        - override_all_if_type_changes (:obj:`Optional[List[str]]`): Top-level keys whose
            dict value is replaced wholesale whenever its "type" entry differs.
    .. note::
        A key present in ``new_dict`` but missing from ``original`` raises
        ``RuntimeError`` unless ``new_keys_allowed`` is True (or the parent
        key is whitelisted).
    """
    whitelist = whitelist or []
    override_all_if_type_changes = override_all_if_type_changes or []
    for key, incoming in new_dict.items():
        if key not in original and not new_keys_allowed:
            raise RuntimeError("Unknown config parameter `{}`. Base config have: {}.".format(key, original.keys()))
        current = original.get(key)
        if not (isinstance(current, dict) and isinstance(incoming, dict)):
            # Plain value on either side: overwrite wholesale.
            original[key] = incoming
            continue
        # Both sides are dicts from here on.
        if (key in override_all_if_type_changes
                and "type" in incoming and "type" in current
                and incoming["type"] != current["type"]):
            # Declared "type" changed: replace the entire sub-dict.
            original[key] = incoming
        elif key in whitelist:
            # Whitelisted key: the sub-dict may introduce brand-new keys.
            deep_update(current, incoming, True)
        else:
            deep_update(current, incoming, new_keys_allowed)
    return original
|
opendilab/GoBigger
|
gobigger/utils/config_utils.py
|
config_utils.py
|
py
| 2,978 |
python
|
en
|
code
| 483 |
github-code
|
6
|
34961662452
|
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
class DynamicEvolutionStats:
    """Load and visualise per-seed score logs of a dynamically evolved specialist.

    ``self.data`` maps each seed to a DataFrame with columns
    ``generation``, ``score`` and ``cycle``.
    """
    def __init__(self, seeds, specialist_type):
        # seeds: iterable of RNG seeds whose CSV logs will be loaded.
        # specialist_type: sub-directory name under data/specialist/dynamic_evolution.
        self.seeds = seeds
        self.specialist_type = specialist_type
        self.init_data()
    def init_data(self):
        """Reset ``self.data`` to one empty DataFrame per seed."""
        self.data = {}
        for seed in self.seeds:
            self.data[seed] = pd.DataFrame(columns=['generation', 'score', 'cycle'])
    def get_data(self, suffix='score'):
        """Append each seed's CSV log, keeping only rows with generation >= 1600."""
        for seed in self.seeds:
            df = pd.read_csv(f'../../data/specialist/dynamic_evolution/{self.specialist_type}/{seed}_{suffix}.csv')
            self.data[seed] = pd.concat([self.data[seed], df]).query("generation >= 1600")
    def get_seed(self, seed):
        """Return the DataFrame loaded for *seed* (None if unknown)."""
        return self.data.get(seed)
    def describe_seeds(self):
        """Summarise each seed: mean score and counts of 'score'/'fit' cycles."""
        describe = []
        for seed in self.seeds:
            df = self.get_seed(seed)
            describe.append([
                df.score.mean(),
                len(df.query('cycle == "score"')),
                len(df.query('cycle == "fit"')),
            ])
        return pd.DataFrame(
            describe,
            columns=['score', 'score_time', 'fit_time']
        )
    def plot_seeds_scatter(self):
        """Scatter-plot score vs generation for every seed and show the figure."""
        for seed in self.seeds:
            df = self.get_seed(seed)
            plt.scatter(df.generation, df.score, s=1)
        plt.legend(self.seeds)
        plt.title(f'All Seeds Specialist Score')
        plt.xlabel('generation')
        plt.ylabel('score')
        plt.show()
    def describe_score(self):
        """Box-plot of the per-seed mean scores."""
        df = self.describe_seeds()
        plt.boxplot(df.score, labels=['mean'])
        plt.title(f'All Seeds Specialist Score Mean')
        plt.ylabel('score')
        plt.show()
    def describe_cycles(self):
        """Box-plot of the per-seed 'score' and 'fit' cycle counts."""
        df = self.describe_seeds()
        plt.boxplot(df[['score_time', 'fit_time']], labels=['score_time', 'fit_time'])
        plt.title(f'All Seeds Specialist Cycles')
        plt.xlabel('process')
        plt.ylabel('cycles')
        plt.show()
|
arthur-plautz/curriculum-learning
|
models/specialist/stats/dynamic_evolution_stats.py
|
dynamic_evolution_stats.py
|
py
| 2,023 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75342220666
|
from django.shortcuts import render
from .forms import getData, getTraningInfo
import requests
from bs4 import BeautifulSoup
from datetime import date, datetime
import folium
import geocoder
# Create your views here.
runs = {}
def calculate_difference(key):
    """Days left until the run stored under *key*, counted from next Monday.

    NOTE(review): subtracting the days remaining in the current week assumes
    training plans start on a Monday — confirm against the callers.
    """
    # Normalise the stored date ("YYYY-MM-DD...") to midnight.
    midnight_str = runs[key]["date"][:10] + " 0:0:0"
    now = datetime.now()
    run_day = datetime.strptime(midnight_str, "%Y-%m-%d %H:%M:%S")
    days_left = (run_day - now).days
    # Unless today is Monday, discount the rest of the current week.
    offset = 0 if now.weekday() == 0 else 7 - now.weekday()
    return days_left - offset
def calculate_distance(key):
    """Parse the leading kilometre count out of a run's distance string
    (e.g. "10km ..." -> 10)."""
    text = runs[key]["distance"]
    digits = []
    pos = 0
    # Collect characters up to (not including) the first "k" of "km".
    while text[pos] != "k":
        digits.append(text[pos])
        pos += 1
    return int("".join(digits))
def calculate_speed(hour, minutes, distance):
    """Return the pace in minutes per kilometre for a run of *distance* km."""
    total_minutes = minutes + hour * 60
    return total_minutes / distance
def speed_to_str(speed):
    """Format a pace given in (possibly fractional) minutes as "M:SS".

    BUG FIX: the original rounded the fractional part separately, so a pace
    like 6.999 min/km had its seconds rounded up to 60 and rendered as
    "6:60" instead of "7:00". Rounding the total number of seconds first
    keeps the seconds field in 00-59 and carries into the minutes.
    """
    total_seconds = round(speed * 60)
    minutes, seconds = divmod(total_seconds, 60)
    return "{}:{:02d}".format(minutes, seconds)
def basic_introduction(weeks):
    """Return a beginner running plan sized to the available weeks.

    Fix: removed a leftover debug print ("tu jestem") from the fallback
    branch; plan contents are unchanged.

    :param weeks: number of weeks available for the introduction phase.
    :return: (plan dict keyed by week number, weeks left after the plan,
        index of the first week after the plan).
    NOTE(review): the day keys intentionally mirror the original data,
    including the 'opdoczynek' spelling found in several entries.
    """
    plan4 = {
        '1': {'pon': 'odpoczynek', 'wt': 'bieg 10 min', 'sr': 'opdoczynek', 'czw': 'bieg 10 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 15 min'},
        '2': {'pon': 'odpoczynek', 'wt': 'bieg 15 min', 'sr': 'opdoczynek', 'czw': 'bieg 15 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 20 min'},
        '3': {'pon': 'odpoczynek', 'wt': 'bieg 20 min', 'sr': 'opdoczynek', 'czw': 'bieg 25 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 25 min'},
        '4': {'pon': 'odpoczynek', 'wt': 'bieg 25 min', 'sr': 'opdoczynek', 'czw': 'bieg 30 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 30 min'}}
    plan5 = {
        '1': {'pon': 'odpoczynek', 'wt': 'bieg 10 min', 'sr': 'opdoczynek', 'czw': 'bieg 10 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 10 min'},
        '2': {'pon': 'odpoczynek', 'wt': 'bieg 15 min', 'sr': 'opdoczynek', 'czw': 'bieg 15 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 15 min'},
        '3': {'pon': 'odpoczynek', 'wt': 'bieg 20 min', 'sr': 'opdoczynek', 'czw': 'bieg 20 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 20 min'},
        '4': {'pon': 'odpoczynek', 'wt': 'bieg 25 min', 'sr': 'opdoczynek', 'czw': 'bieg 25 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 25 min'},
        '5': {'pon': 'odpoczynek', 'wt': 'bieg 30 min', 'sr': 'opdoczynek', 'czw': 'bieg 30 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 30 min'}}
    plan2 = {
        '1': {'pon': 'odpoczynek', 'wt': 'bieg 10 min', 'sr': 'opdoczynek', 'czw': 'bieg 15 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 15 min'},
        '2': {'pon': 'odpoczynek', 'wt': 'bieg 15 min', 'sr': 'opdoczynek', 'czw': 'bieg 20 min', 'pt': 'odpoczynek',
              'weekend': 'bieg 30 min'},
    }
    if weeks == 4:
        return plan4, weeks - 4, 5
    elif weeks == 5:
        return plan5, weeks - 5, 6
    else:
        # Fallback: a compressed 2-week plan, leaving 2 weeks unused.
        return plan2, 2, 3
def introduction(weeks, actual_week, distance, week, mode, plan, weeks_for_introduction=0):
    """Fill *plan* with the distance build-up ("introduction") weeks.

    :param weeks: weeks still available; decremented per generated week.
    :param actual_week: 1-based index of the first week to fill.
    :param distance: target race distance in km.
    :param week: ordered day labels (rest days at even indices).
    :param mode: "Basic", "Medium" or "Advance".
    :param plan: dict of weeks, mutated in place.
    :param weeks_for_introduction: ramp-up length override; 0 = pick by mode/distance.
    :return: (plan, remaining weeks, next week index).
    """
    mins = 0
    if mode == "Basic":
        # starting run distance (km) for Basic mode
        mins = 2
        if distance < 11:
            if weeks_for_introduction == 0:
                # number of weeks used to ramp up to the target distance
                weeks_for_introduction = 4
        elif distance < 22:
            if weeks_for_introduction == 0:
                weeks_for_introduction = 10
        else:
            if weeks_for_introduction == 0:
                weeks_for_introduction = 15
            # the distance is very long, so training covers at most 3/4 of it
            distance *= 0.75
    if mode == "Medium":
        mins = 5
        if distance < 22:
            if weeks_for_introduction == 0:
                weeks_for_introduction = 4
        else:
            if weeks_for_introduction == 0:
                weeks_for_introduction = 10
            distance *= 0.75
    if mode == "Advance":
        mins = 10
        if weeks_for_introduction == 0:
            weeks_for_introduction = 10
        distance *= 0.75
    # kilometres added to the run distance each successive week
    jump = (distance - mins) / (weeks_for_introduction - 1)
    # iterate over every week of the introduction phase
    for i in range(actual_week, actual_week + weeks_for_introduction):
        plan[str(i)] = {}
        weeks -= 1
        # iterate over every day of the week (the weekend counts as one
        # day, so the runner can pick Saturday or Sunday)
        for day in range(0, len(week)):
            if day % 2 == 0:
                plan[str(i)][week[day]] = "odpoczynek"
            elif (day == 1 or day == 3) and mins > 5:
                plan[str(i)][week[day]] = "bieg na " + str(round(mins / 2)) + "km"
            else:
                plan[str(i)][week[day]] = "bieg na " + str(round(mins)) + "km"
        mins += jump
    # advance the current-week counter past the introduction phase
    actual_week += weeks_for_introduction
    return plan, weeks, actual_week
def full_training(weeks, actual_week, distance, week, mode, plan, speed):
    """Fill *plan* with the main training weeks, speeding the pace up weekly.

    :param weeks: number of weeks to generate (range: actual_week .. actual_week + weeks).
    :param actual_week: 1-based index of the first week to fill.
    :param distance: target race distance in km.
    :param week: ordered day labels (rest days at even indices).
    :param mode: "Basic", "Medium" or "Advance".
    :param plan: dict of weeks, mutated in place.
    :param speed: target pace (min/km) to reach by the end.
    :return: (plan, remaining weeks, next week index).
    NOTE(review): ``jump = (min_speed - speed) / weeks`` divides by zero
    when ``weeks`` is 0 — confirm callers always pass a positive value.
    """
    if mode == "Basic":
        # minimum pace (min/km) after the introduction phase; tightened
        # week by week below
        min_speed = 10
        if distance >= 22:
            distance *= 0.75
    elif mode == "Medium":
        min_speed = 8
        if distance >= 22:
            distance *= 0.75
    else:
        min_speed = 7
        if distance >= 22:
            distance *= 0.75
    # pace improvement applied each week
    jump = (min_speed - speed) / weeks
    for i in range(actual_week, actual_week + weeks):
        plan[str(i)] = {}
        min_speed -= jump
        weeks -= 1
        actual_week += 1
        for day in range(0, len(week)):
            if day % 2 == 0:
                plan[str(i)][week[day]] = "odpoczynek"
            elif day == 1 and 5 < distance < 11:
                plan[str(i)][week[day]] = "bieg na " + str(round(distance / 2)) + "km w czasie " + \
                                          speed_to_str(min_speed * 0.7) + " min/km"
            elif day == 1 and 5 < distance < 22:
                plan[str(i)][week[day]] = "bieg na " + str(round(distance / 2)) + "km w czasie " + \
                                          speed_to_str(min_speed * 0.8) + " min/km"
            elif day == 1 and 5 < distance:
                plan[str(i)][week[day]] = "bieg na " + str(round(distance / 2)) + "km w czasie " + \
                                          speed_to_str(min_speed * 0.9) + " min/km"
            elif day == 3 and mode != "Advance":
                plan[str(i)][week[day]] = "bieg interwalowy: 5x (bieg 1.5 min na maksimum mozliwosci + " \
                                          "2 min wolnego truchtu) + wybiganie na " + str(round(distance / 2)) + "km"
            elif day == 3:
                plan[str(i)][week[day]] = "bieg interwalowy: 5x (bieg 1.5 min na maksimum mozliwosci pod gorke + " \
                                          "2 min wolnego truchtu z gorki) + wybiganie na " + \
                                          str(round(distance / 2)) + "km"
            else:
                plan[str(i)][week[day]] = "bieg na " + str(distance) + "km w czasie " + speed_to_str(min_speed) + \
                                          " min/km"
    return plan, weeks, actual_week
def home(request):
    """Home view: search run-log.com for running events matching the form.

    On POST, scrapes the search result table, stores up to five events in the
    module-level `runs` dict (shared with `run_plan`) along with a thumbnail
    found on Wikimedia Commons, then renders the results.
    """
    global runs
    runs = {}
    if request.method == "POST":
        runs = {}
        # read the submitted search form
        form = getData(request.POST)
        if form.is_valid():
            city = form.cleaned_data["city"]
            date_from_wrong = form.cleaned_data["date_from"]
            date_to_wrong = form.cleaned_data["date_to"]
            distance_from = form.cleaned_data["distance_from"]
            distance_to = form.cleaned_data["distance_to"]
            # reformat dates as "Y-M-D" strings expected by run-log.com
            if date_from_wrong is not None:
                date_from_correct = str(date_from_wrong.year) + "-" + str(date_from_wrong.month) + "-" + \
                                    str(date_from_wrong.day)
            else:
                date_from_correct = ""
            if date_to_wrong is not None:
                date_to_correct = str(date_to_wrong.year) + "-" + str(date_to_wrong.month) + "-" + \
                                  str(date_to_wrong.day)
            else:
                date_to_correct = ""
            # reset the inputs shown to the user
            form = getData()
            # fetch the search results page
            url = "https://run-log.com/events/?terms=" + city + "&date_from=" + date_from_correct + \
                  "&date_to=" + date_to_correct + "&distance_from=" + str(distance_from) + \
                  "&distance_to=" + str(distance_to) + "&location_radius=&action="
            website = requests.get(url)
            result = website.text
            doc = BeautifulSoup(result, "html.parser")
            table = doc.tbody
            trs = table.contents
            i = 0
            # iterate the result table; every second element is a data row,
            # keep at most 5 events and skip empty rows
            for tr in trs:
                i += 1
                if i % 2 == 0 and i <= 10 and len(tr.contents) >= 10:
                    run = {}
                    date, name, distance, shit, location = tr.contents[1::2]
                    run["date"] = date.text
                    run["distance"] = distance.text.strip()
                    run["location"] = location.text
                    run["number"] = i/2
                    name = name.a.string
                    # look up an illustrative image for the event's location
                    r = requests.get(
                        'https://commons.wikimedia.org/w/index.php?search=' + run["location"]
                        + '&title=Special:MediaSearch&go=Go&type=image')
                    result = r.text
                    doc = BeautifulSoup(result, "html.parser")
                    images = doc.find('a', {'class': 'sdms-image-result'})
                    # (leftover debug print of `images` removed)
                    if not images:
                        run["image"] = "#"
                    else:
                        r = requests.get(images['href'])
                        result = r.text
                        doc = BeautifulSoup(result, "html.parser")
                        doc2 = doc.find('div', {'class': 'mw-body-content'})
                        image = doc2.find('img')
                        run["image"] = image['src']
                    # disambiguate duplicate event names with a numeric suffix
                    if name in runs:
                        runs[name+" ("+str(i/2)[0]+")"] = run
                    else:
                        runs[name] = run
    else:
        form = getData()
    return render(request, "runsite/home.html", {"Data": form, "Runs": runs})
def run_plan(request):
    """Event-detail view: shows the event, a folium map and a training plan.

    Reads the event index from the request URL, builds a map for the event
    location, and on POST generates a week-by-week training plan from the
    submitted target time, the race distance and the chosen training mode.
    `working == 0` flags that no valid plan could be generated.
    """
    # NOTE(review): the event index is read from a fixed character position
    # of the absolute URI — this assumes a single-digit index and a fixed
    # URL prefix length; verify against the project's urlconf.
    url = int(request.build_absolute_uri()[22])
    key = list(runs.keys())[url-1]
    working = 1
    # days and whole weeks remaining until the event
    days = calculate_difference(key)
    weeks = days//7
    week = ['pon', 'wt', 'sr', 'cw', 'pt', 'weekend']
    plan = {}
    # race distance from the scraped data as an int (metres dropped)
    distance = calculate_distance(key)
    # build a map with a marker on the event location; fall back to a map of
    # Poland when geocoding fails
    try:
        location = geocoder.location(runs[key]['location'])
        lat = location.lat
        lng = location.lng
        mapa = folium.Map(location=[lat, lng], zoom_start=12)
        folium.Marker([lat, lng]).add_to(mapa)
    except:
        location = geocoder.osm('PL')
        lat = location.lat
        lng = location.lng
        mapa = folium.Map(location=[lat, lng], zoom_start=12)
    mapa = mapa._repr_html_()
    if request.method == "POST":
        working = 1
        # read the submitted training form
        form = getTraningInfo(request.POST)
        if form.is_valid():
            type_of_training = form.cleaned_data["type"]
            time_hours = form.cleaned_data["time_hours"]
            # hours field: empty -> 0, non-numeric -> plan marked invalid
            if time_hours:
                try:
                    time_hours = int(time_hours)
                except ValueError:
                    working = 0
                    time_hours = 0
            else:
                time_hours = 0
            time_minutes = form.cleaned_data["time_minutes"]
            # minutes field: same handling as hours
            if time_minutes:
                try:
                    time_minutes = int(time_minutes)
                except ValueError:
                    working = 0
                    time_minutes = 0
            else:
                time_minutes = 0
            # target pace (min/km) derived from the requested finish time
            speed = calculate_speed(time_hours, time_minutes, distance)
            if time_minutes < 0 or time_hours < 0 or speed < 2.5:
                working = 0
            form = getTraningInfo()
            if type_of_training == "Basic":
                # basic runners get a relaxed (20% slower) target pace
                speed *= 1.2
                if weeks <= 3:
                    print("nie da sie wygnerowa traningu1")
                    working = 0
                elif weeks <= 20:
                    # first tier (the shortest races)
                    if distance < 11:
                        if weeks < 6:
                            plan, weeks, actual_week = basic_introduction(weeks)
                        elif weeks >= 6:
                            # remember: 6 - 2 (basic_introduction)
                            # remember: full_training() runs on the weeks left after introduction
                            plan, dif, actual_week = basic_introduction(2)
                            print(weeks)
                            # subtract the weeks already consumed
                            weeks -= dif
                            plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
                                                                    type_of_training, plan)
                            print(plan)
                            print(weeks)
                            print(actual_week)
                            plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                     type_of_training, plan, speed)
                            print(plan)
                            print(weeks)
                            print(actual_week)
                    # second tier (medium-length races)
                    elif distance < 22:
                        if weeks < 12:
                            print("nie da sie wygenerowac treningu2")
                            working = 0
                        elif weeks >= 12:
                            plan, dif, actual_week = basic_introduction(2)
                            weeks -= dif
                            print(weeks)
                            plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
                                                                    type_of_training, plan)
                            print(plan)
                            print(weeks)
                            print(actual_week)
                            plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                     type_of_training, plan, speed)
                            print(plan)
                            print(weeks)
                            print(actual_week)
                    # third tier (long races)
                    else:
                        if weeks < 17:
                            print("nie da sie wygenerowac treningu2")
                            working = 0
                        if weeks >= 17:
                            plan, dif, actual_week = basic_introduction(2)
                            weeks -= dif
                            plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
                                                                    type_of_training, plan)
                            print(plan)
                            print(weeks)
                            print(actual_week)
                            plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                     type_of_training, plan, speed)
                            print(plan)
                            print(weeks)
                            print(actual_week)
                # ----------------------------------
                else:
                    if distance < 11:
                        # compute which week the introduction ends on
                        # (+2 accounts for basic_introduction)
                        weeks_for_introduction = round((weeks * 0.2)//1 + 2)
                        plan, dif, actual_week = basic_introduction(2)
                        weeks -= dif
                        plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
                                                                type_of_training, plan, weeks_for_introduction)
                        print(plan)
                        print(weeks)
                        print(actual_week)
                        plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                 type_of_training, plan, speed)
                        print(plan)
                        print(weeks)
                        print(actual_week)
                    elif distance < 22:
                        weeks_for_introduction = round((weeks * 0.5) // 1 + 2)
                        plan, dif, actual_week = basic_introduction(2)
                        weeks -= dif
                        plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
                                                                type_of_training, plan, weeks_for_introduction)
                        print(plan)
                        print(weeks)
                        print(actual_week)
                        plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                 type_of_training, plan, speed)
                        print(plan)
                        print(weeks)
                        print(actual_week)
                    else:
                        weeks_for_introduction = round((weeks * 0.75) // 1 + 2)
                        plan, dif, actual_week = basic_introduction(2)
                        weeks -= dif
                        plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
                                                                type_of_training, plan, weeks_for_introduction)
                        print(plan)
                        print(weeks)
                        print(actual_week)
                        plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                 type_of_training, plan, speed)
                        print(plan)
                        print(weeks)
                        print(actual_week)
            elif type_of_training == "Medium":
                if weeks <= 3:
                    print("nie da sie wygnerowa traningu1")
                    working = 0
                elif distance < 11:
                    # short distances in Medium mode skip the introduction phase
                    plan, weeks, actual_week = full_training(weeks, 1, distance, week,
                                                             type_of_training, {}, speed)
                    print(plan)
                    print(weeks)
                    print(actual_week)
                elif weeks <= 20:
                    if distance < 22:
                        if weeks < 4:
                            print("nie da sie wygnerowa traningu2")
                            working = 0
                        else:
                            plan, weeks, actual_week = introduction(weeks, 1, distance, week,
                                                                    type_of_training, {})
                            print(plan)
                            print(weeks)
                            print(actual_week)
                            plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                     type_of_training, plan, speed)
                            print(plan)
                            print(weeks)
                            print(actual_week)
                    else:
                        if weeks < 10:
                            print("nie da sie wygnerowa traningu2")
                            working = 0
                        else:
                            plan, weeks, actual_week = introduction(weeks, 1, distance, week,
                                                                    type_of_training, {})
                            print(plan)
                            print(weeks)
                            print(actual_week)
                            print(speed)
                            plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                     type_of_training, plan, speed)
                            print(plan)
                            print(weeks)
                            print(actual_week)
                else:
                    if distance < 22:
                        weeks_for_introduction = round((weeks * 0.2) // 1)
                        plan, weeks, actual_week = introduction(weeks, 1, distance, week,
                                                                type_of_training, {}, weeks_for_introduction)
                        print(plan)
                        print(weeks)
                        print(actual_week)
                        plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                 type_of_training, plan, speed)
                        print(plan)
                        print(weeks)
                        print(actual_week)
                    else:
                        weeks_for_introduction = round((weeks * 0.5) // 1)
                        plan, weeks, actual_week = introduction(weeks, 1, distance, week,
                                                                type_of_training, {}, weeks_for_introduction)
                        print(plan)
                        print(weeks)
                        print(actual_week)
                        plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                                 type_of_training, plan, speed)
                        print(plan)
                        print(weeks)
                        print(actual_week)
            else:
                # advanced mode trains at a 10% faster target pace
                speed *= 0.9
                if weeks <= 3:
                    print("nie da sie wygnerowa traningu1")
                    working = 0
                elif distance < 22:
                    # short and medium distances in advanced mode skip the introduction phase
                    plan, weeks, actual_week = full_training(weeks, 1, distance, week,
                                                             type_of_training, {}, speed)
                    print(plan)
                    print(weeks)
                    print(actual_week)
                elif weeks < 10:
                    print("nie da sie wygnerowa traningu3")
                    working = 0
                elif weeks <= 20:
                    plan, weeks, actual_week = introduction(weeks, 1, distance, week,
                                                            type_of_training, {})
                    #print(plan)
                    print(weeks)
                    print(actual_week)
                    print(speed)
                    plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                             type_of_training, plan, speed)
                    print(plan)
                    print(weeks)
                    print(actual_week)
                else:
                    weeks_for_introduction = round((weeks * 0.5) // 1)
                    plan, weeks, actual_week = introduction(weeks, 1, distance, week,
                                                            type_of_training, {}, weeks_for_introduction)
                    #print(plan)
                    print(weeks)
                    print(actual_week)
                    print(speed)
                    plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
                                                             type_of_training, plan, speed)
                    print(plan)
                    print(weeks)
                    print(actual_week)
            if working == 0:
                plan = {}
            for name, values in plan.items():
                print(name)
                print(values)
    else:
        form = getTraningInfo()
    return render(request, "runsite/runPlan.html", {"Forms": form, "Key": key, "Run": runs[key], "Mapa": mapa,
                                                    "Plan": plan, "Working": working})
|
kaczorwarka/Running-Events-Search-Engine-and-Traning-Plan-Generator
|
runsite/views.py
|
views.py
|
py
| 26,067 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19270736433
|
# Parameter names accepted by the BioLogic technique wrappers, mapped to the
# Python type each value must be coerced to before it is handed to the
# instrument library.
named_params = {
    "Rest_time_T": float,
    "Duration_step": float,
    "Record_every_dT": float,
    "Record_every_dE": float,
    "Record_every_dI": float,
    "E_Range": int,
    "I_Range": int,
    "Current_step": float,
    "Voltage_step": float,
    "Scan_Rate": float,
    "vs_initial": bool,
    "Test1_Config": int,
    "Test1_Value": float,
    "Test2_Config": int,
    "Test2_Value": float,
    "Test3_Config": int,
    "Test3_Value": float,
    "Exit_Cond": int,
    "N_Cycles": int,
    "Step_number": int,
    "Scan_number": int,
    "loop_N_times": int,
    "protocol_number": int,
    "Begin_measuring_I": float,
    "End_measuring_I": float,
    "Begin_measuring_E": float,
    "End_measuring_E": float,
}
# Current-range labels -> instrument I_Range codes ("keep" = -1 retains the
# currently configured range).
I_ranges = {
    "keep": -1,
    "100 pA": 0,
    "1 nA": 1,
    "10 nA": 2,
    "100 nA": 3,
    "1 uA": 4,
    "10 uA": 5,
    "100 uA": 6,
    "1 mA": 7,
    "10 mA": 8,
    "100 mA": 9,
    "1 A": 10,
    "booster": 11,
    "auto": 12,
}
# Voltage-range labels -> instrument E_Range codes.
E_ranges = {
    "+-2.5 V": 0,
    "+-5.0 V": 1,
    "+-10 V": 2,
    "auto": 3,
}
# Data columns returned by each technique, per instrument family; the SP-300
# family omits the extra columns (Ece, Ec, Ic) present on VMP3.
datatypes = {
    "VMP3": {
        "OCV": ["t_high", "t_low", "Ewe", "Ece"],
        "CPLIMIT": ["t_high", "t_low", "Ewe", "I", "cycle"],
        "CALIMIT": ["t_high", "t_low", "Ewe", "I", "cycle"],
        "PDYNLIMIT": ["t_high", "t_low", "Ec", "<I>", "<Ewe>", "cycle"],
        "GDYNLIMIT": ["t_high", "t_low", "Ic", "<I>", "<Ewe>", "cycle"],
    },
    "SP-300": {
        "OCV": ["t_high", "t_low", "Ewe"],
        "CPLIMIT": ["t_high", "t_low", "Ewe", "I", "cycle"],
        "CALIMIT": ["t_high", "t_low", "Ewe", "I", "cycle"],
        "PDYNLIMIT": ["t_high", "t_low", "<I>", "<Ewe>", "cycle"],
        "GDYNLIMIT": ["t_high", "t_low", "<I>", "<Ewe>", "cycle"],
    },
}
# Technique ".ecc" file names per instrument family (the SP-300 family uses
# the "4"-suffixed variants).
techfiles = {
    "VMP3": {
        "open_circuit_voltage": "ocv.ecc",
        "constant_current": "cplimit.ecc",
        "constant_voltage": "calimit.ecc",
        "sweep_voltage": "vscanlimit.ecc",
        "sweep_current": "iscanlimit.ecc",
        "loop": "loop.ecc",
    },
    "SP-300": {
        "open_circuit_voltage": "ocv4.ecc",
        "constant_current": "cplimit4.ecc",
        "constant_voltage": "calimit4.ecc",
        "sweep_voltage": "vscanlimit4.ecc",
        "sweep_current": "iscanlimit4.ecc",
        "loop": "loop4.ecc",
    },
}
|
dgbowl/tomato
|
src/tomato/drivers/biologic/tech_params.py
|
tech_params.py
|
py
| 2,286 |
python
|
en
|
code
| 2 |
github-code
|
6
|
41714902184
|
from Crypto.Util.number import getPrime
from Crypto.Util.number import inverse
import hashlib
import socket
from threading import Thread
# Chat client: connect to the local server before starting the send/receive
# threads; abort immediately if the server is unreachable.
host = 'localhost'
port = 6000
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try :
    mysocket.connect((host, port))
except socket.error :
    # connection failed (message kept in French, as emitted at runtime)
    print("connexion echouer avec le serveur ")
    exit()
print("connexion etablie avec le serveur")
def gen_rsa_keypair(bits):
    """Generate an RSA key pair with a modulus of roughly `bits` bits.

    Returns:
        ((e, n), (d, n)): the public key and the private key.
    """
    p = getPrime(bits//2)
    q = getPrime(bits//2)
    n = p*q
    # BUGFIX: standard RSA public exponent F4 = 2**16 + 1 = 65537; the
    # original value 655337 was a typo and could make `inverse` fail
    # whenever gcd(e, (p-1)*(q-1)) != 1.
    e = 65537
    d = inverse(e, (p-1)*(q-1))
    return ((e, n), (d, n))  # public key, private key

# module-level key pair shared by the Send thread
key = gen_rsa_keypair(256)
def rsa(m, key):
    """Raw RSA: return m ** key[0] mod key[1]."""
    exponent, modulus = key
    return pow(m, exponent, modulus)

def rsa_enc(msg, key):
    """Encrypt a text message: UTF-8 encode, pack into an int, exponentiate."""
    as_int = int.from_bytes(msg.encode('utf-8'), 'big')
    return rsa(as_int, key)

def rsa_dec(msg, key):
    """Decrypt an RSA integer back into a UTF-8 string."""
    plain = rsa(msg, key)
    n_bytes = (plain.bit_length() + 7) // 8
    return plain.to_bytes(n_bytes, 'big').decode('utf-8')
class Send(Thread):
    """Thread that reads lines from stdin and sends them over the socket.

    NOTE(review): despite encrypting the message, this sends the *decrypted*
    plaintext over the wire (the encrypt/decrypt round-trip is effectively a
    self-test), and it re-sends the public key before every message.
    Confirm against the server's expected protocol.
    """
    def __init__(self,arg):
        Thread.__init__(self)
        #super(Send, self).__init__()
        # the connected client socket
        self.arg = arg
    def run(self):
        continuer = True
        while(continuer):
            message = input()
            # NOTE(review): sendall() always returns None, so `message1`
            # is an unused variable.
            message1 = self.arg.sendall(repr(key[0]).encode('utf8'))# key
            try:
                enchifrer = rsa_enc(message, key[0])
                #print("enchiffreeer = ",enchifrer)
                #self.arg.send(repr(enchifrer).encode('utf-8'))
                dechiffrer = rsa_dec(enchifrer, key[1])
                #print("dechiffrer ", dechiffrer)
                self.arg.send(dechiffrer.encode('utf-8'))
            except socket.error:
                continuer = False
                break
        self.arg.close()
class receive(Thread):
    """Thread that prints every message arriving on the socket."""

    def __init__(self, arg):
        Thread.__init__(self)
        # the connected client socket
        self.arg = arg

    def run(self):
        # Keep reading until the socket errors out, echoing each message.
        while True:
            try:
                incoming = self.arg.recv(1024).decode('utf-8')
            except socket.error:
                break
            else:
                print(">>>>>> {0}".format(incoming))
        self.arg.close()
if __name__ == "__main__":
sn = Send(mysocket)
sn.start()
rv = receive(mysocket)
rv.start()
|
samyberkane23/chat_s-curis-
|
client.py
|
client.py
|
py
| 2,458 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14852849903
|
from collections import ChainMap
import yaml
# NOTE(review): ChainMap is imported but not used in this chunk.
# EVE static-data inventory flags, keyed by flagID.
with open('eve_static_data/invFlags.yaml') as flags_file:
    INV_FLAGS = {item['flagID']: item for item in yaml.full_load(flags_file)}
# Fitting-slot flag IDs in display order; the trailing 0 catches everything else.
INVENTORY_POSITIONS = [
    *range(92, 99+1), # Rigs
    *range(27, 34+1), # High Slots
    *range(19, 26+1), # Med Slots
    *range(11, 18+1), # Low Slots
    0 # Everything Else
]
|
DeForce/py_killboard
|
helpers/static.py
|
static.py
|
py
| 386 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41860678548
|
import logging
log = logging.getLogger(__name__)
import re
import requests
from bs4 import BeautifulSoup
try:
    # Python 2 has a standard urlparse library
    from urlparse import urlparse, ParseResult
except:
    # Python 3 has the same library hidden in urllib.parse
    from urllib.parse import urlparse, ParseResult
# Limits for fetched documents.
MAX_FILEIZE = 2**19 # bytes; this is .5MB
MAX_CONNECTIONTIME = 20 # in seconds
# Fallback pattern for pulling a <title> out of malformed markup.
RE_bad_title = re.compile(
    """(?:<title>|<title>)(.*)(?:<?/title>|(?:<)?/title>)""", re.I)
# Matches a leading <!DOCTYPE ...> declaration (used by `force_doctype`).
REGEX_doctype = re.compile("^\s*<!DOCTYPE[^>]*>", re.IGNORECASE)
RE_whitespace = re.compile("\s+")
# File extensions considered safe to parse as markup/text.
PARSE_SAFE_FILES = ('html', 'txt', 'json', 'htm', 'xml',
                    'php', 'asp', 'aspx', 'ece', 'xhtml', 'cfm', 'cgi')
# based on DJANGO
# https://github.com/django/django/blob/master/django/core/validators.py
# not testing ipv6 right now, because rules are needed for ensuring they are correct
RE_VALID_HOSTNAME = re.compile(
    r'(?:'
    r'(?P<ipv4>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ipv4
    r'|'
    # r'(?P<ipv6>\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
    # r'|'
    r'(?P<localhost>localhost)' # localhost...
    r'|'
    r'(?P<domain>([A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?))' # domain...
    r'(?P<port>:\d+)?' # optional port
    r')', re.IGNORECASE)
# Splits a "host:port" netloc into its two components.
RE_PORT = re.compile(
    r'^'
    r'(?P<main>.+)'
    r':'
    r'(?P<port>\d+)'
    r'$', re.IGNORECASE
)
# Public DNS domain name (verbose re-statement of the <domain> group above).
RE_DOMAIN_NAME = re.compile(
    r"""(^
        (?:
            [A-Z0-9]
            (?:
                [A-Z0-9-]{0,61}
                [A-Z0-9]
            )?
            \.
        )+
        (?:
            [A-Z]{2,6}\.?
            |
            [A-Z0-9-]{2,}
            (?<!-)\.?)
        $)""",
    re.VERBOSE | re.IGNORECASE)
RE_IPV4_ADDRESS = re.compile(
    r'^(\d{1,3})\.(\d{1,3}).(\d{1,3}).(\d{1,3})$' # grab 4 octets
)
# Netlocs made up purely of digits/dots (likely a malformed ip address).
RE_ALL_NUMERIC = re.compile("^[\d\.]+$")
def is_parsed_valid_url(parsed, require_public_netloc=True, http_only=True):
    """returns bool

    Validates a `ParseResult`.

    `require_public_netloc`
        defaults True
        the netloc must be an ipv4 address, "localhost" or a public
        domain name (optionally with a port)
    `http_only`
        defaults True
        requires http or https for the scheme
    """
    assert isinstance(parsed, ParseResult)
    log.debug("is_parsed_valid_url = %s", parsed)
    if not all((parsed.scheme, parsed.netloc)):
        log.debug(" FALSE - missing `scheme` or `netloc`")
        return False
    if http_only:
        if parsed.scheme not in ('http', 'https'):
            log.debug(" FALSE - invalid `scheme`")
            return False
    if require_public_netloc:
        log.debug(" validating netloc")
        _netloc_match = RE_VALID_HOSTNAME.match(parsed.netloc)
        if not _netloc_match:
            log.debug(" did not match regex")
            return False
        # we may assign these
        _netloc_clean = parsed.netloc
        _port = None
        _netloc_ported = RE_PORT.match(parsed.netloc)
        if _netloc_ported:
            _netloc_ported_groudict = _netloc_ported.groupdict()
            _netloc_clean = _netloc_ported_groudict['main']
            _port = _netloc_ported_groudict['port']
        _netloc_groudict = _netloc_match.groupdict()
        if _netloc_groudict['ipv4'] is not None:
            octets = RE_IPV4_ADDRESS.match(_netloc_clean)
            if octets:
                log.debug(" validating against ipv4")
                # every octet must fit in a byte
                for g in octets.groups():
                    g = int(g)
                    if int(g) > 255:
                        log.debug(" invalid ipv4; encountered an octect > 255")
                        return False
                log.debug(" valid ipv4")
                return True
            log.debug(" invalid ipv4")
            return False
        else:
            if _netloc_clean == 'localhost':
                log.debug(" localhost!")
                return True
            if RE_ALL_NUMERIC.match(_netloc_clean):
                log.debug(" This only has numeric characters. "
                          "this is probably a fake or typo ip address.")
                return False
            if _port:
                # BUGFIX: the original body had a bare `except: raise`, which
                # made the following `log.debug`/`return False` unreachable
                # dead code. A port that can't be interpreted as an int
                # (e.g. `parsed.port` raising ValueError for an out-of-range
                # port) now marks the url invalid, as that dead code intended.
                try:
                    _port = int(_port)
                    if parsed.port != _port:
                        log.debug(" netloc.port does not match our regex _port")
                        return False
                except ValueError:
                    log.debug(" _port is not an int")
                    return False
            if RE_DOMAIN_NAME.match(_netloc_clean):
                log.debug(" valid public domain name format")
                return True
            log.debug(" this appears to be invalid")
            return False
    return True
def is_parsed_valid_relative(parsed):
    """Return True when `parsed` is a purely relative URL (a path with
    no scheme and no hostname)."""
    assert isinstance(parsed, ParseResult)
    has_location = parsed.scheme or parsed.hostname
    return bool(parsed.path) and not has_location
def parsed_to_relative(parsed):
    """Rebuild the relative portion (path + query + fragment) of a parsed url."""
    assert isinstance(parsed, ParseResult)
    rel = parsed.path
    # normalize a missing leading slash; possibly unnecessary by now
    if rel and not rel.startswith("/"):
        rel = "/%s" % rel
    if parsed.query:
        rel = "%s?%s" % (rel, parsed.query)
    if parsed.fragment:
        rel = "%s#%s" % (rel, parsed.fragment)
    return rel
def is_url_valid(url, require_public_netloc=None):
    """
    tries to parse a url. if valid returns `ParseResult`
    (boolean eval is True); if invalid returns `False`
    """
    if url is None:
        return False
    parsed = urlparse(url)
    if not is_parsed_valid_url(parsed, require_public_netloc=require_public_netloc):
        return False
    return parsed
def url_to_absolute_url(url_test, url_fallback=None, require_public_netloc=None):
    """
    returns an "absolute url" if we have one.
    if we don't, it tries to fix the current url based on the fallback
    this shouldn't be needed, but it is.
    called by:
        MetadataParser.absolute_url()
        MetadataParser.get_discrete_url()
    args:
        `url_test` - the url to return/fix
        `url_fallback` - a fallback url. this is returned in VERY bad
            errors. in "not so bad" errors, this is parsed and used as the
            base to construct a new url.
        `require_public_netloc` - requires the hostname/netloc to be a
            valid IPV4 or public dns domain name
    returns:
        an absolute url string, `url_fallback`, or None when no parsed
        source with a valid domain could be found.
    """
    if url_test is None and url_fallback is not None:
        return url_fallback
    parsed = urlparse(url_test)
    _path = parsed.path
    if _path:
        # sanity check
        # some stock plugins create invalid urls/files like '/...' in meta-data
        if _path[0] != "/":
            # prepend a slash
            _path = "/%s" % _path
        known_invalid_plugins = ['/...', ]
        if _path in known_invalid_plugins:
            return url_fallback
    # finally, fix the path
    # this isn't nested, because we could have kwargs
    _path = parsed_to_relative(parsed)
    if not _path:
        # so if our _path is BLANK, fuck it.
        # this can happen if someone puts in "" for the canonical
        return url_fallback
    rval = None
    # we'll use a placeholder for a source 'parsed' object that has a domain...
    parsed_domain_source = None
    # if we have a valid URL (OMFG, PLEASE)...
    if is_parsed_valid_url(parsed, require_public_netloc=require_public_netloc):
        parsed_domain_source = parsed
    else:
        # ok, the URL isn't valid
        # can we re-assemble it
        if url_fallback:
            parsed_fallback = urlparse(url_fallback)
            if is_parsed_valid_url(
                parsed_fallback,
                require_public_netloc=require_public_netloc
            ):
                parsed_domain_source = parsed_fallback
    if parsed_domain_source:
        rval = "%s://%s%s" % (
            parsed_domain_source.scheme,
            parsed_domain_source.netloc, _path)
    return rval
class NotParsable(Exception):
    """Raised when a document can not be parsed.

    Attributes:
        message: human-readable explanation.
        raised: the underlying exception, if any.
        code: an http status code, if relevant.
    """
    def __init__(self, message='', raised=None, code=None):
        self.message = message
        self.raised = raised
        self.code = code

    def __str__(self):
        # BUGFIX: the original hardcoded "ApiError", misidentifying the
        # exception; use the actual class name so subclasses also print
        # correctly.
        return "%s: %s | %s | %s" % (
            type(self).__name__, self.message, self.code, self.raised)
class NotParsableFetchError(NotParsable):
    """Raised when the fetch itself fails (non-200 status or `requests` error)."""
    pass
class MetadataParser(object):
"""
turns text or a URL into a dict of dicts, extracting as much relevant
metadata as possible.
the 'keys' will be either the 'name' or 'property' attribute of the node.
we EXPECT/REQUIRE a `head` in the document.
the attribute's prefix are removed when storing into it's bucket
eg:
og:title -> 'og':{'title':''}
metadata is stored into subgroups:
page
extracted from page elements
saved into MetadataParser.metadata['page']
example:
<head><title>Awesome</title></head>
MetadataParser.metadata = {'page': {'title':'Awesome'}}
opengraph
has 'og:' prefix
saved into MetadataParser.metadata['og']
example:
<meta property="og:title" content="Awesome"/>
MetadataParser.metadata = {'og': {'og:title':'Awesome'}}
dublin core
has 'dc:' prefix
saved into MetadataParser.metadata['dc']
example:
<meta property="dc:title" content="Awesome"/>
MetadataParser.metadata = {'dc': {'dc:title':'Awesome'}}
meta
has no prefix
saved into MetadataParser.metadata['meta']
example:
<meta property="title" content="Awesome"/>
MetadataParser.metadata = {'meta': {'dc:title':'Awesome'}}
NOTE:
passing in ssl_verify=False will turn off ssl verification checking
in the requests library.
this can be necessary on development machines
"""
url = None
url_actual = None
strategy = None
metadata = None
LEN_MAX_TITLE = 255
only_parse_file_extensions = None
require_public_netloc = None
force_doctype = None
requests_timeout = None
# allow for the beautiful_soup to be saved
soup = None
og_minimum_requirements = ['title', 'type', 'image', 'url']
twitter_sections = ['card', 'title', 'site', 'description']
strategy = ['og', 'dc', 'meta', 'page']
def __init__(
self,
url=None, html=None, strategy=None, url_data=None, url_headers=None,
force_parse=False, ssl_verify=True, only_parse_file_extensions=None,
force_parse_invalid_content_type=False, require_public_netloc=True,
force_doctype=False, requests_timeout=None,
):
"""
creates a new `MetadataParser` instance.
kwargs:
`url`
url to parse
`html`
instead of a url, parse raw html
`strategy`
default: None
sets default metadata strategy (['og', 'dc', 'meta', 'page'])
see also `MetadataParser.get_metadata()`
`url_data`
data passed to `requests` library as `params`
`url_headers`
data passed to `requests` library as `headers`
`force_parse`
default: False
force parsing invalid content
`ssl_verify`
default: True
disable ssl verification, sometimes needed in development
`only_parse_file_extensions`
default: None
set a list of valid file extensions.
see `metadata_parser.PARSE_SAFE_FILES` for an example list
`force_parse_invalid_content_type`
default: False
force parsing invalid content types
by default this will only parse text/html content
`require_public_netloc`
default: True
require a valid `netloc` for the host. if `True`, valid hosts
must be a properly formatted public domain name, IPV4 address
or "localhost"
`force_doctype`
default: False
if set to true, will replace a doctype with 'html'
why? some cms give a bad doctype (like nasa.gov)
which can break lxml/bsd
`requests_timeout`
default: None
if set, proxies the value into `requests.get` as `timeout`
"""
self.metadata = {
'og': {},
'meta': {},
'dc': {},
'page': {},
'twitter': {}
}
if strategy:
self.strategy = strategy
if url is not None:
url = url.strip()
self.url = url
self.url_actual = url
self.ssl_verify = ssl_verify
self.soup = None
self.force_doctype = force_doctype
self.response = None
self.response_headers = {}
self.require_public_netloc = require_public_netloc
self.requests_timeout = requests_timeout
if only_parse_file_extensions is not None:
self.only_parse_file_extensions = only_parse_file_extensions
if html is None:
html = self.fetch_url(
url_data=url_data, url_headers=url_headers,
force_parse=force_parse,
force_parse_invalid_content_type=force_parse_invalid_content_type
)
self.parser(html, force_parse=force_parse)
def is_opengraph_minimum(self):
"""
returns true/false if the page has the minimum amount of opengraph tags
"""
return all([hasattr(self, attr)
for attr in self.og_minimum_requirements])
def fetch_url(
self,
url_data=None, url_headers=None, force_parse=False,
force_parse_invalid_content_type=False
):
"""
fetches the url and returns it.
this was busted out so you could subclass.
"""
# should we even download/parse this?
if not force_parse and self.only_parse_file_extensions is not None:
parsed = urlparse(self.url)
path = parsed.path
if path:
url_fpath = path.split('.')
if len(url_fpath) == 0:
# i have no idea what this file is, it's likely using a
# directory index
pass
elif len(url_fpath) > 1:
url_fext = url_fpath[-1]
if url_fext in self.only_parse_file_extensions:
pass
else:
raise NotParsable("I don't know what this file is")
# borrowing some ideas from
# http://code.google.com/p/feedparser/source/browse/trunk/feedparser/feedparser.py#3701
if not url_headers:
url_headers = {}
# if someone does usertracking with sharethis.com, they get a hashbang
# like this: http://example.com/page#.UHeGb2nuVo8
# that fucks things up.
url = self.url.split('#')[0]
r = None
try:
# requests gives us unicode and the correct encoding, yay
r = requests.get(
url, params=url_data, headers=url_headers,
allow_redirects=True, verify=self.ssl_verify,
timeout=self.requests_timeout, stream=True,
)
content_type = None
if 'content-type' in r.headers:
content_type = r.headers['content-type']
# content type can have a character encoding in it...
content_type = [i.strip() for i in content_type.split(';')]
content_type = content_type[0].lower()
if (
(
(content_type is None)
or
(content_type != 'text/html')
)
and
(not force_parse_invalid_content_type)
):
raise NotParsable("I don't know what type of file this is! "
"content-type:'[%s]" % content_type)
# okay, now we need to read
## TODO
## TODO
## TODO
## TODO
html = r.text
self.response = r
# lowercase all of the HTTP headers for comparisons per RFC 2616
self.response_headers = dict((k.lower(), v)
for k, v in r.headers.items())
self.url_actual = r.url
if r.status_code != 200:
raise NotParsableFetchError(
message="Status Code is not 200",
code=r.status_code
)
except requests.exceptions.RequestException as error:
raise NotParsableFetchError(
message="Error with `requests` library. Inspect the `raised`"
" attribute of this error.",
raised=error
)
return html
def absolute_url(self, link=None):
"""
makes the url absolute, as sometimes people use a relative url. sigh.
"""
url_fallback = self.url_actual or self.url or None
return url_to_absolute_url(
link,
url_fallback=url_fallback,
require_public_netloc=self.require_public_netloc
)
def parser(self, html, force_parse=False):
"""parses the html
"""
if not isinstance(html, BeautifulSoup):
# clean the html?
if self.force_doctype:
html = REGEX_doctype.sub("<!DOCTYPE html>", html)
try:
doc = BeautifulSoup(html, "lxml")
except:
doc = BeautifulSoup(html, "html.parser")
else:
doc = html
# let's ensure that we have a real document...
if not doc or not doc.html or not doc.html.head:
return
# stash the bs4 doc for further operations
self.soup = doc
ogs = doc.html.head.findAll(
'meta',
attrs={'property': re.compile(r'^og')}
)
for og in ogs:
try:
self.metadata['og'][og['property'][3:]] = og['content'].strip()
except (AttributeError, KeyError):
pass
except:
log.debug("Ran into a serious error parsing `og`")
pass
twitters = doc.html.head.findAll(
'meta',
attrs={'name': re.compile(r'^twitter')}
)
for twitter in twitters:
try:
self.metadata['twitter'][
twitter['name'][8:]] = twitter['value'].strip()
except (AttributeError, KeyError):
pass
# pull the text off the title
try:
_title_text = doc.html.head.title.text
if len(_title_text) > self.LEN_MAX_TITLE:
_title_text = _title_text[:self.LEN_MAX_TITLE]
self.metadata['page']['title'] = _title_text
except AttributeError:
pass
# is there an image_src?
images = doc.findAll(
'link',
attrs={'rel': re.compile("^image_src$", re.I)}
)
if images:
image = images[0]
if image.has_attr("href"):
img_url = image['href'].strip()
self.metadata['page']['image'] = img_url
elif image.has_attr("content"):
img_url = image['content'].strip()
self.metadata['page']['image'] = img_url
else:
pass
# figure out the canonical url
canonicals = doc.findAll(
'link',
attrs={'rel': re.compile("^canonical$", re.I)}
)
if canonicals:
canonical = canonicals[0]
if canonical.has_attr("href"):
link = canonical['href'].strip()
self.metadata['page']['canonical'] = link
elif canonical.has_attr("content"):
link = canonical['content'].strip()
self.metadata['page']['canonical'] = link
else:
pass
# pull out all the metadata
meta = doc.html.head.findAll(name='meta')
for m in meta:
try:
k = None
v = None
attrs = m.attrs
k = None
if 'name' in attrs:
k = 'name'
elif 'property' in attrs:
k = 'property'
elif 'http-equiv' in attrs:
k = 'http-equiv'
if k:
k = attrs[k].strip()
if 'content' in attrs:
v = attrs['content'].strip()
if (len(k) > 3) and (k[:3] == 'dc:'):
self.metadata['dc'][k[3:]] = v
else:
self.metadata['meta'][k] = v
except AttributeError:
pass
def get_metadata(self, field, strategy=None):
"""
looks for the field in various stores. defaults to the core
strategy, though you may specify a certain item. if you search for
'all' it will return a dict of all values.
"""
if strategy:
_strategy = strategy
else:
_strategy = self.strategy
if _strategy == 'all':
rval = {}
for store in self.metadata:
if field in self.metadata[store]:
rval[store] = self.metadata[store][field]
return rval
for store in _strategy:
if store in self.metadata:
if field in self.metadata[store]:
return self.metadata[store][field]
return None
def get_discrete_url(
self,
og_first=True, canonical_first=False, allow_invalid=False
):
"""convenience method.
if `allow_invalid` is True, it will return the raw data.
if `allow_invalid` is False (default), it will try to correct
the data (relative to absolute) or reset to None.
"""
og = self.get_metadata('url', strategy=['og'])
canonical = self.get_metadata('canonical', strategy=['page'])
if not allow_invalid:
# fallback url is used to drop a domain
url_fallback = self.url_actual or self.url or None
if og and not is_url_valid(
og,
require_public_netloc=self.require_public_netloc
):
# try making it absolute
og = url_to_absolute_url(
og,
url_fallback=url_fallback,
require_public_netloc=self.require_public_netloc
)
if not is_url_valid(
og,
require_public_netloc=self.require_public_netloc
):
# set to NONE if invalid
og = None
if canonical and not is_url_valid(
canonical,
require_public_netloc=self.require_public_netloc
):
# try making it absolute
canonical = url_to_absolute_url(
canonical,
url_fallback=url_fallback,
require_public_netloc=self.require_public_netloc
)
if not is_url_valid(
canonical,
require_public_netloc=self.require_public_netloc
):
# set to NONE if invalid
canonical = None
rval = []
if og_first:
rval = (og, canonical)
elif canonical_first:
rval = (canonical, og)
for i in rval:
if i:
return i
return self.absolute_url()
def get_metadata_link(self, field, strategy=None):
    """sometimes links are bad; this tries to fix them. most useful for meta images"""
    raw = self.get_metadata(field, strategy=strategy)
    if not raw:
        return None
    # remove whitespace: some bad blogging platforms print elements on
    # multiple lines, injecting whitespace into the value. d'oh!
    candidate = RE_whitespace.sub('', raw)
    # already a valid url? use it as-is
    if is_url_valid(candidate, require_public_netloc=self.require_public_netloc):
        return candidate
    # fallback url is used to drop a domain
    base = self.url_actual or self.url or None
    # try making the (presumably relative) value absolute
    absolutized = url_to_absolute_url(
        candidate,
        url_fallback=base,
        require_public_netloc=self.require_public_netloc
    )
    if is_url_valid(absolutized, require_public_netloc=self.require_public_netloc):
        return absolutized
    return None
|
xethorn/metadata_parser
|
metadata_parser/__init__.py
|
__init__.py
|
py
| 25,325 |
python
|
en
|
code
| null |
github-code
|
6
|
43967691056
|
#!/usr/bin/env python
import argparse


def expand_rows(rows):
    """Yield one tab-joined output line per organism.

    Rows whose column 24 contains multiple '<>'-separated organisms are
    duplicated, one line per organism; other rows pass through unchanged.

    Fix: the original aliased the row (``row_copy = row``) instead of
    copying it, silently mutating the caller's data; a real copy is made
    so input rows are left intact.
    """
    for row in rows:
        if "<>" in row[24]:
            for organism in row[24].strip().split("<>"):
                expanded = list(row)  # real copy: don't mutate the source row
                expanded[24] = organism
                yield "\t".join(expanded).strip()
        else:
            yield "\t".join(row).strip()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="split blast results by organism")
    parser.add_argument("blast", type=argparse.FileType("r"))
    args = parser.parse_args()
    blast = [x.split("\t") for x in args.blast.readlines()]
    for line in expand_rows(blast):
        print(line)
|
TAMU-CPT/galaxy-tools
|
tools/blast/split_blast.py
|
split_blast.py
|
py
| 576 |
python
|
en
|
code
| 5 |
github-code
|
6
|
18075698551
|
def plunder(city, people_to_kill, gold_to_steal):
    """Raid *city*: remove citizens and gold and announce it; a town left
    without people or without gold is erased from the global ``towns``."""
    matches = [t for t in towns if t.name == city]
    target = matches[0]
    target.population -= people_to_kill
    target.gold -= gold_to_steal
    print(f"{city} plundered! {gold_to_steal} gold stolen, {people_to_kill} citizens killed.")
    ruined = target.population <= 0 or target.gold <= 0
    if ruined:
        towns.remove(target)
        print(f"{city} has been wiped off the map!")
def prosper(city, gold_to_add):
    """Add *gold_to_add* to *city*'s treasury; negative amounts are rejected
    with an error message and leave the town untouched."""
    matches = [t for t in towns if t.name == city]
    target = matches[0]
    if gold_to_add < 0:
        print("Gold added cannot be a negative number!")
        return
    target.gold += gold_to_add
    print(f"{gold_to_add} gold added to the city treasury. {city} now has {target.gold} gold.")
class Town:
    """A settlement: a name, a population count and a gold treasury."""

    def __init__(self, name, population, gold):
        # store the three attributes read by plunder/prosper and __repr__
        self.name, self.population, self.gold = name, population, gold

    def __repr__(self):
        return f"{self.name} -> Population: {self.population} citizens, Gold: {self.gold} kg"
towns = []
# Phase 1: read settlements as "name||population||gold" until "Sail".
# A repeated town name merges population/gold into the existing entry.
while True:
    command = input()
    if command == "Sail":
        break
    tokens = command.split("||")
    current_town = tokens[0]
    current_population = int(tokens[1])
    current_gold = int(tokens[2])
    if towns:
        existing_town = [t for t in towns if t.name == current_town]
        if existing_town:
            # duplicate entry: merge into the already-known town
            existing_town[0].population += current_population
            existing_town[0].gold += current_gold
            continue
    town = Town(current_town, current_population, current_gold)
    towns.append(town)
# Phase 2: process "Plunder=>..." / "Prosper=>..." events until "End".
while True:
    command = input()
    if command == "End":
        break
    tokens = command.split("=>")
    if tokens[0] == "Plunder":
        plunder(tokens[1], int(tokens[2]), int(tokens[3]))
    elif tokens[0] == "Prosper":
        prosper(tokens[1], int(tokens[2]))
# Report: richest towns first, ties broken alphabetically by name.
sorted_towns = sorted(towns, key=lambda t: (-t.gold, t.name))
if towns:
    print(f"Ahoy, Captain! There are {len(towns)} wealthy settlements to go to:")
    for town in sorted_towns:
        print(town)
else:
    print("Ahoy, Captain! All targets have been plundered and destroyed!")
|
liusska/Python-Fundamentals-Jan-2021
|
Final Exam Solutions/04.04.2020_2/p!rates_CLASS_solution_03.py
|
p!rates_CLASS_solution_03.py
|
py
| 2,146 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11214657296
|
import pika
import sys

# Publish a single log line to the "direct_logs" direct exchange.
# The routing key is the severity (first CLI argument, default "info");
# the message body is the remaining arguments joined with spaces.
connection = pika.BlockingConnection(
    pika.URLParameters('amqp://guest:guest@localhost:25672/%2F')
)
channel = connection.channel()
channel.exchange_declare(exchange='direct_logs', exchange_type='direct')

severity = sys.argv[1] if len(sys.argv) > 1 else 'info'
message = ' '.join(sys.argv[2:]) or "Hello World!"

channel.basic_publish(exchange='direct_logs', routing_key=severity, body=message)
print(f" [*] Sent {severity}:{message}")
connection.close()
|
lamida/rabbit-hole
|
04-routing/emit_log_direct.py
|
emit_log_direct.py
|
py
| 465 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39255343614
|
from django.conf.urls import url
from network.views import views_auth
from network.views import views_app

# URL routes for the social-network app. Patterns are matched top to
# bottom: the main/auth pages first, then the user-facing app views.
urlpatterns = [
    # Main page
    url(r'^home/(?P<msg>.*)$', views_auth.main_page, name="Home"),
    # url(r'$', views_auth.main_page, name="Home"),
    # Auth urls
    url(r'^login/(?P<info>.*)$', views_auth.login_page, name="Login"),
    url(r'^logout', views_auth.logout_page, name="Logout"),
    url(r'^registration', views_auth.registration_page),
    # App urls
    url(r'^userpage/(?P<usr_id>.[0-9])', views_app.user_page, name="UserPage"),
    url(r'^userpage/wall/new_record', views_app.new_wall_record, name="NewWallRecord"),
    url(r'^userpage/wall/new_like', views_app.new_like, name="AddLike"),
    url(r'^userpage/wall/new_comment', views_app.new_comment, name="AddComment"),
    url(r'^userpage/wall/delete_post', views_app.delete_post, name="DeletePost"),
    url(r'^error/', views_app.error_page, name="UserPage"),
    url(r'^im/', views_app.mail_page, name="UserMail"),
    url(r'^send_msg/(?P<user_id>.[0-9])', views_app.send_msg, name="SendMessage"),
    url(r'^friend_request/', views_app.send_friend_request, name="FriendRequest"),
    url(r'^friends/', views_app.user_friends, name="Friends"),
    url(r'^delete_friend/', views_app.delete_friend, name="Delete Friend"),
    url(r'^sent/', views_app.user_sent_msgs, name="Sent msgs"),
    url(r'^requests/', views_app.user_requests, name="Friend requests"),
    url(r'^accept_request/', views_app.accept_request, name="Accept_request"),
    url(r'^decline_request/', views_app.decline_request, name="Decline_request")
]
|
Sipleman/Course-work_SocialNetwork
|
network/urls.py
|
urls.py
|
py
| 1,612 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4818229922
|
import numpy as np
def linan(s1, s2):
    """Solve the 2x2 linear system given by two 'a b c' strings, each
    encoding the equation a*x + b*y = c.

    Returns "x y" on success, or the no-solution message when the
    coefficient matrix is singular.
    """
    try:
        a1, b1, c1 = (float(v) for v in s1.split(" "))
        a2, b2, c2 = (float(v) for v in s2.split(" "))
        coefficients = np.array([[a1, b1], [a2, b2]])
        constants = np.array([c1, c2])
        x, y = np.linalg.solve(coefficients, constants)
        return f"{x} {y}"
    except np.linalg.LinAlgError:
        return "Нет решений"
# Read the two equation rows from stdin and print the solution.
str1 = input()
str2 = input()
print(linan(str1, str2))
|
SmartOven/itmo-ml
|
lab1/task1.py
|
task1.py
|
py
| 439 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11995663710
|
#!/usr/bin/env python3
import rospy
import numpy as np
from geometry_msgs.msg import Point, PointStamped
from queenie.msg import ExtremePoints
import tf2_ros
import tf2_geometry_msgs
import time
class PointTransform:
    """ROS node helper that re-publishes geometry received in the camera
    optical frame after transforming it into the "odom" frame.

    Subscribes to extreme points and the handle centroid; publishes the
    transformed equivalents on "extreme_points" and
    "handle_centroid_transformed".
    """

    def __init__(self):
        self.tf_buffer = tf2_ros.Buffer() # tf buffer length
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        # self.pub = rospy.Publisher('/reward_signal', Float32, queue_size=10)
        self.extreme_points_seb = rospy.Subscriber("extreme_points_camera_frame", ExtremePoints, self.extreme_points_cb)
        self.handle_centroid_sub = rospy.Subscriber("/handle_centroid", PointStamped, self.handle_centroid_cb)
        self.extreme_points_transformed_pub = rospy.Publisher("extreme_points", ExtremePoints, queue_size=1)
        self.handle_centroid_pub = rospy.Publisher("handle_centroid_transformed", PointStamped, queue_size=1)
        self.count = 0
        self.rightmost_point = Point()
        self.leftmost_point = Point()
        # retry pacing for the tf lookups in the callbacks (25 Hz)
        self.rate = rospy.Rate(25)

    def handle_centroid_cb(self, data):
        # Look up the odom <- camera_optical transform, sleeping and
        # retrying on tf errors until 5 successful lookups; only the last
        # transform is used for the conversion.
        # NOTE(review): this loops forever if the transform never becomes
        # available, and the 5 lookups appear redundant -- confirm intent.
        x = 0
        while x < 5:
            try:
                trans = self.tf_buffer.lookup_transform("odom", "camera_optical", rospy.Time())
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                self.rate.sleep()
                continue
            x += 1
        handle_centroid_transformed = tf2_geometry_msgs.do_transform_point(data, trans)
        self.handle_centroid_pub.publish(handle_centroid_transformed)

    def extreme_points_cb(self, data:ExtremePoints):
        # Same lookup/retry pattern as handle_centroid_cb (see note there),
        # then transform all three points and publish them as one message.
        x = 0
        while x < 5:
            try:
                trans = self.tf_buffer.lookup_transform("odom", "camera_optical", rospy.Time())
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                self.rate.sleep()
                continue
            x += 1
        rightmost_transformed = tf2_geometry_msgs.do_transform_point(data.rightmost, trans)
        leftmost_transformed = tf2_geometry_msgs.do_transform_point(data.leftmost, trans)
        object_centroid_transformed = tf2_geometry_msgs.do_transform_point(data.point_centroid, trans)
        extreme_points_transformed = ExtremePoints()
        extreme_points_transformed.leftmost = leftmost_transformed
        extreme_points_transformed.rightmost = rightmost_transformed
        extreme_points_transformed.point_centroid = object_centroid_transformed
        self.extreme_points_transformed_pub.publish(extreme_points_transformed)
def main():
    """Start the point_transform node and hand control to the ROS loop."""
    rospy.init_node('point_transform', anonymous=True)
    point_transform = PointTransform()
    rospy.spin()


if __name__ == '__main__':
    main()
|
arehman1806/queenie
|
src/nodes/point_transform.py
|
point_transform.py
|
py
| 2,827 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30543883588
|
from . import pblm
import sys
import torch
import torch.nn as nn
class CNN_A(pblm.PrebuiltLightningModule):
    """1-D CNN classifier: three strided Conv1d layers followed by three
    dense layers mapping to *classes* logits.

    Input length is fixed by the flattened dense1 size (64 * 309 features
    after the three stride-2 convolutions).
    """

    def __init__(self, classes):
        super().__init__(self.__class__.__name__)
        # Model Layer Declaration
        self.conv1 = nn.Conv1d(1, 16, kernel_size=5, stride=2)
        self.conv2 = nn.Conv1d(16, 32, kernel_size=5, stride=2)
        self.conv3 = nn.Conv1d(32, 64, kernel_size=5, stride=2)
        self.dense1 = nn.Linear(64 * 309, 512)
        self.dense2 = nn.Linear(512, 256)
        self.dense3 = nn.Linear(256, classes)

    def forward(self, x):
        relu = nn.functional.relu
        # single-channel view: (batch, 1, length)
        x = x.reshape(x.shape[0], 1, -1)
        # convolutional stack
        x = relu(self.conv1(x))
        x = relu(self.conv2(x))
        x = relu(self.conv3(x))
        # flatten to (batch, features)
        x = x.reshape(x.shape[0], -1)
        # dense stack; final layer returns raw logits
        x = relu(self.dense1(x))
        x = relu(self.dense2(x))
        return self.dense3(x)
if __name__ == "__main__":
    # Smoke-test: build the model with 4 output classes.
    model = CNN_A(4)
|
kendreaditya/heart-auscultation
|
src/models/modules/CNN/CNN.py
|
CNN.py
|
py
| 1,135 |
python
|
en
|
code
| 2 |
github-code
|
6
|
7584763107
|
import random
import os
from modnn import Neuron
from modnn import Connection
from modnn import utils
class Genome:
    """Genome of a modulatory neural network.

    Holds randomly initialised neuron populations (input, output, normal,
    lv1/lv2 modulatory), random connections between them, and the a/b/c/d
    plasticity coefficients. Neuron ids are assigned contiguously in the
    order: input, output, normal, lv1, lv2 — get_neuron_type relies on
    that layout.
    """

    def __init__(self, config):
        # `config` is the parsed configuration dict (utils.read_config_file).
        self.config = config
        self.input_num = self.config['INPUT_NUM']
        self.output_num = self.config['OUTPUT_NUM']
        self.normal_num = self.config['NORMAL_NUM']
        self.lv1_num = self.config['LV1_MODULATORY_NUM']
        self.lv2_num = self.config['LV2_MODULATORY_NUM']
        self.connection_num = self.config['CONNECTION_NUM']
        self.max_bias = self.config['MAX_BIAS']
        self.min_bias = self.config['MIN_BIAS']
        self.min_weight = self.config['MIN_WEIGHT']
        self.max_weight = self.config['MAX_WEIGHT']
        self.weight_upper_limit = self.config['WEIGHT_UPPER_LIMIT']
        self.weight_lower_limit = self.config['WEIGHT_LOWER_LIMIT']
        self.min_abcd = self.config['MIN_ABCD']
        self.max_abcd = self.config['MAX_ABCD']
        self.input_neurons = [Neuron(id = i, bias = random.uniform(self.min_bias, self.max_bias)) for i in range(self.input_num)]
        self.output_neurons = [Neuron(id = i + self.input_num , bias = random.uniform(self.min_bias, self.max_bias)) for i in range(self.output_num)]
        # Bug fix: normal-neuron biases were drawn as uniform(min_bias,
        # min_bias), i.e. a constant, unlike every other population.
        self.normal_neurons = [Neuron(id = i + self.input_num + self.output_num, bias = random.uniform(self.min_bias, self.max_bias)) for i in range(self.normal_num)]
        self.lv1_neurons = [Neuron(id = i + self.input_num + self.output_num + self.normal_num, bias = random.uniform(self.min_bias, self.max_bias)) for i in range(self.lv1_num)]
        self.lv2_neurons = [Neuron(id = i + self.input_num + self.output_num + self.normal_num + self.lv1_num, bias = random.uniform(self.min_bias, self.max_bias)) for i in range(self.lv2_num)]
        total_neuron_num = len(self.input_neurons) + len(self.output_neurons) + len(self.normal_neurons) + len(self.lv1_neurons) + len(self.lv2_neurons)
        # Random endpoints; validity is checked separately by is_valid_connection.
        self.connections = [ Connection(random.randint(0, total_neuron_num -1), random.randint(0, total_neuron_num -1), random.uniform(self.min_weight, self.max_weight)) for i in range(self.connection_num)]
        # ABCD coefficients of the plasticity rule.
        self.a = random.uniform(self.min_abcd, self.max_abcd)
        self.b = random.uniform(self.min_abcd, self.max_abcd)
        self.c = random.uniform(self.min_abcd, self.max_abcd)
        self.d = random.uniform(self.min_abcd, self.max_abcd)

    # Map a neuron id to its population name.
    def get_neuron_type(self, neuron_id):
        """Return 'input'/'output'/'normal'/'lv1'/'lv2' for *neuron_id*,
        based on the contiguous id layout built in __init__."""
        if neuron_id < self.input_num:
            return 'input'
        elif neuron_id < self.input_num + self.output_num:
            return 'output'
        # Bug fix: these two branches read self.neuron_num, an attribute
        # that is never defined (AttributeError at runtime); the intended
        # attribute is self.normal_num.
        elif neuron_id < self.input_num + self.output_num + self.normal_num:
            return 'normal'
        elif neuron_id < self.input_num + self.output_num + self.normal_num + self.lv1_num:
            return 'lv1'
        else:
            return 'lv2'

    # Check whether a connection obeys the wiring rules.
    def is_valid_connection(self, connection):
        """Return True when *connection* obeys the wiring rules below."""
        in_type = self.get_neuron_type(connection.from_neuron_id)
        out_type = self.get_neuron_type(connection.to_neuron_id)
        # Output neurons cannot be a connection source.
        if in_type == 'output':
            return False
        # Input neurons cannot be a connection target.
        elif out_type == 'input':
            return False
        # Self-loops are not allowed.
        elif connection.from_neuron_id == connection.to_neuron_id:
            return False
        # Lv.1 modulatory neurons may only target normal or output neurons.
        elif in_type == 'lv1' and out_type != 'normal' and out_type != 'output':
            return False
        # Lv.2 modulatory neurons may only target Lv.1 modulatory neurons.
        elif in_type == 'lv2' and out_type != 'lv1':
            return False
        else:
            return True
if __name__ == '__main__':
    # Directory of this file (informational only).
    pwd = os.path.dirname(os.path.abspath(__file__))
    print(pwd)
    config_file_path = './tests/config.txt'
    # Load the configuration dict from the settings file.
    config = utils.read_config_file(config_file_path)
    # Pull out a few values as a usage example.
    normal_num = config['NORMAL_NUM']
    input_num = config['INPUT_NUM']
    output_num = config['OUTPUT_NUM']
    connection_num = config['CONNECTION_NUM']
    # Echo the loaded settings.
    print("Normal neurons:", normal_num)
    print("Input neurons:", input_num)
    print("Output neurons:", output_num)
    print("Number of connections:", connection_num)
    # Bug fix: Genome was constructed with the config *path* string, but
    # Genome.__init__ subscripts config['...'] and so needs the parsed dict.
    genome = Genome(config)
    print(genome.input_neurons)
    print(genome.output_neurons)
    print(genome.normal_neurons)
    print(genome.lv1_neurons)
    print(genome.lv2_neurons)
|
kato-mahiro/modnn
|
modnn/genome.py
|
genome.py
|
py
| 4,986 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30754324975
|
from heapq import heappop, heappush

# Greedy with a min-heap: drink every potion while hp stays non-negative;
# the heap `pt` holds the negative potions taken so far. When a[i] would
# make hp negative, optionally swap out the worst taken potion (heap
# minimum) for a[i] if a[i] is larger -- the count `ans` is unchanged by
# a swap, and hp strictly increases.
n = int(input())
a = list(map(int, input().split()))
hp = 0
ans = 0
pt = list()
for i in range(n):
    if a[i] > 0:
        # positive potions are always safe to take
        hp += a[i]
        ans += 1
    elif hp + a[i] >= 0:
        # negative but affordable: take it and remember it for later swaps
        hp += a[i]
        ans += 1
        heappush(pt, a[i])
    elif pt:
        # unaffordable: try trading the worst previously-taken potion
        a1 = heappop(pt)
        if a1 < a[i]:
            hp = hp - a1 + a[i]
            heappush(pt, a[i])
        else:
            heappush(pt, a1)
print(ans)
|
Tanguyvans/Codeforces
|
723/C2.py
|
C2.py
|
py
| 459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36317150759
|
import numpy as np
def P_generator(MatingPool,Boundary,Coding,MaxOffspring):
    # Crossover + mutation producing the offspring population.
    # Inputs:  MatingPool   -- mating pool; individuals i and i+1 (even i here)
    #                          are crossed pairwise to produce two children
    #          Boundary     -- decision space; row 0 = per-dimension upper
    #                          bounds, row 1 = lower bounds
    #          Coding       -- encoding type; each encoding uses its own
    #                          crossover/mutation operators
    #          MaxOffspring -- number of offspring to return; defaults to the
    #                          mating-pool size when out of range
    # Output:  Offspring    -- the newly generated offspring population
    N, D = MatingPool.shape
    if MaxOffspring < 1 or MaxOffspring > N:
        MaxOffspring = N
    if Coding == "Real":
        # SBX crossover / polynomial-mutation parameters:
        # ProC/ProM = per-gene probabilities, DisC/DisM = distribution indices.
        ProC = 1
        ProM = 1/D
        DisC = 20
        DisM = 20
        Offspring = np.zeros((N, D))
        for i in range(0,N,2):
            # Simulated Binary Crossover: draw spread factor beta per gene.
            beta = np.zeros((D,))
            miu = np.random.random((D,)) #np.random.rand(D,)
            beta[miu <= 0.5] = (2 * miu[miu <= 0.5])**(1/(DisC+1))
            beta[miu > 0.5] = (2-2 * miu[miu > 0.5]) ** (-1 / (DisC + 1))
            beta = beta * ((-1) ** (np.random.randint(0, 2, (D,))))
            # genes that skip crossover keep beta = 1 (children = parents)
            beta[np.random.random((D,)) > ProC] = 1
            Offspring[i, :] = ((MatingPool[i, :] + MatingPool[i+1, :] )/2) + (np.multiply(beta, (MatingPool[i, :] - MatingPool[i+1, :])/2 ))
            Offspring[i+1, :] = ((MatingPool[i, :] + MatingPool[i+1, :] )/2) - (np.multiply(beta, (MatingPool[i, :] - MatingPool[i+1, :])/2 ))
        Offspring_temp = Offspring[:MaxOffspring,:]
        # print(range(MaxOffspring,Offspring.shape[0]))
        # np.delete(Offspring, ...) returned a copy without modifying
        # Offspring in place, hence the explicit slice + rebind above
        Offspring = Offspring_temp
        if MaxOffspring == 1:
            MaxValue = Boundary[0,:]
            MinValue = Boundary[1,:]
        else:
            MaxValue = np.tile(Boundary[0,:],(MaxOffspring,1))
            MinValue = np.tile(Boundary[1,:],(MaxOffspring,1))
        # Polynomial mutation. np.bitwise_and acts as element-wise logical
        # AND on the boolean masks; miu < 0.5 perturbs toward the lower
        # bound, miu >= 0.5 toward the upper bound.
        k = np.random.random((MaxOffspring, D))
        miu = np.random.random((MaxOffspring, D))
        Temp = np.bitwise_and(k <= ProM, miu <0.5)
        Offspring[Temp] = Offspring[Temp] + np.multiply((MaxValue[Temp] - MinValue[Temp]), ((2 * miu[Temp] + np.multiply(
            1 - 2 * miu[Temp],
            (1 - (Offspring[Temp] - MinValue[Temp]) / (MaxValue[Temp] - MinValue[Temp])) ** (DisM + 1))) ** (1 / (
            DisM + 1)) - 1))
        Temp = np.bitwise_and(k <= ProM, miu >= 0.5)
        Offspring[Temp] = Offspring[Temp] + np.multiply((MaxValue[Temp] - MinValue[Temp]), (1-((2 *(1-miu[Temp])) + np.multiply(
            2 * (miu[Temp]-0.5),
            (1 - (MaxValue[Temp] - Offspring[Temp]) / (MaxValue[Temp] - MinValue[Temp])) ** (DisM + 1))) ** (1 / (
            DisM + 1)) ))
        # clamp mutated genes back into the decision space
        Offspring[Offspring > MaxValue] = MaxValue[Offspring>MaxValue]
        Offspring[Offspring < MinValue] = MinValue[Offspring < MinValue]
    elif Coding == "Binary":
        # NOTE(review): binary encoding is unimplemented (empty list), and
        # any other Coding value leaves Offspring unbound -> NameError on
        # the return below; confirm callers only pass "Real"/"Binary".
        Offspring = []
    return Offspring
|
DevilYangS/NSGA-II-python
|
NSGA_II/public/P_generator.py
|
P_generator.py
|
py
| 3,156 |
python
|
en
|
code
| 5 |
github-code
|
6
|
35782338526
|
#%%
import numpy as np
import matplotlib.pyplot as plt
#%%
# Toy regression data: x over [0, 6*pi), targets = sin(x) + N(0, 1) noise.
x = np.arange(0, 6 * np.pi, 0.025)
y_true = np.sin(x)
y = y_true + np.random.normal(scale=1, size=len(x))
plt.scatter(x, y, color="k")
plt.plot(x, y_true, color="red")
#%%
np.random.seed(42)
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
# Fit a small gradient-boosting regressor and overlay its predictions.
model = HistGradientBoostingRegressor(random_state=42, max_iter=20, max_leaf_nodes=64, min_samples_leaf=30)
model.fit(x.reshape(-1, 1), y)
preds = model.predict(x.reshape(-1, 1))
plt.scatter(x, y)
plt.plot(x, preds, color="red")
#%%
def gen_one_frame(use_fraction: float, left_to_right: bool):
    """Save one animation frame to ML-Basics/frames/.

    Shows the noisy data plus the slice of model predictions currently
    "revealed": a prefix of length ``use_fraction`` when sweeping left to
    right, the complementary suffix when sweeping back.
    """
    use_fraction = round(use_fraction, 3)
    print(use_fraction)
    split = len(preds) * use_fraction
    if left_to_right:
        shown = np.arange(0, split).astype("int")
    else:
        shown = np.arange(split, len(preds)).astype("int")
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(x, y, color="k", alpha=0.1)
    ax.plot(x[shown], preds[shown], color="blue")
    ax.set_title(f"frac = {use_fraction}")
    fig.savefig(
        f"ML-Basics/frames/{'ltr' if left_to_right else 'rtl'}_frame_{use_fraction}.png"
    )
    plt.close()
# Render the frame sequence in both sweep directions.
for f in np.arange(0.01, 1, 0.005):
    gen_one_frame(use_fraction=f, left_to_right=True)
for f in np.arange(0.01, 1, 0.005):
    gen_one_frame(use_fraction=f, left_to_right=False)
#%%
import glob
from PIL import Image
# filepaths
fp_in = "ML-Basics/frames/*.png"
fp_out = "ML-Basics/out_gif.gif"
# Stitch the saved frames (sorted by filename) into one looping GIF.
imgs = (Image.open(f) for f in sorted(glob.glob(fp_in)))
img = next(imgs)  # extract first image from iterator
img.save(fp=fp_out, format="GIF", append_images=imgs, save_all=True, duration=100, loop=0)
|
moritzwilksch/DataScienceEducation
|
ML-Basics/fancy_gif.py
|
fancy_gif.py
|
py
| 1,801 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70337573629
|
import math
import copy
import numpy as np
import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import slowfast.models.losses as losses
import slowfast.models.optimizer as optim
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
import slowfast.visualization.tensorboard_vis as tb
from slowfast.datasets import loader
from slowfast.datasets.mixup import MixUp
from slowfast.models import build_model
from slowfast.utils.meters import EpochTimer, TrainMeter, ValMeter, AdaMeter
logger = logging.get_logger(__name__)
def train_epoch(
    train_loaders,
    model,
    optimizers,
    scaler,
    train_meter,
    cur_epoch,
    cfg,
    writer=None,
):
    """
    Perform the video training for one epoch.
    Args:
        train_loaders (list of loader): source and target video training loader.
        model (model): the video model to train.
        optimizer (optim): the optimizer to perform optimization on the model's
            parameters.
        train_meter (TrainMeter): training meters to log the training performance.
        cur_epoch (int): current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to writer Tensorboard log.
    """
    # NOTE(review): `scaler` is accepted but never used below — backward()
    # and step() run unscaled even when cfg.TRAIN.MIXED_PRECISION is on;
    # confirm whether GradScaler was meant to wrap the two update steps.
    source_loader = train_loaders[0]
    target_unl_loader = train_loaders[1]
    if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
        target_lab_loader = train_loaders[2]
    # separate optimizers for the feature backbone (f) and classifier (c)
    optimizer_f, optimizer_c = optimizers[0], optimizers[1]
    # Enable train mode.
    model.train()
    train_meter.iter_tic()
    data_size = len(source_loader)
    # The source loader drives the epoch; target loaders are re-wound
    # whenever they run out (cur_iter wraps around their length).
    target_unl_iter = iter(target_unl_loader)
    target_unl_size = len(target_unl_loader)
    if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
        target_lab_iter = iter(target_lab_loader)
        target_lab_size = len(target_lab_loader)
    for cur_iter, (inputs_source, labels_source, _, _) in enumerate(source_loader):
        # Load the data.
        if cur_iter%target_unl_size==0:
            target_unl_iter = iter(target_unl_loader)
        inputs_target_unl, labels_target_unl, _, _ = next(target_unl_iter)
        if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            if cur_iter%target_lab_size==0:
                target_lab_iter = iter(target_lab_loader)
            inputs_target_lab, labels_target_lab, _, _ = next(target_lab_iter)
        # Transfer the data to the current GPU device.
        for i in range(len(inputs_source)):
            inputs_source[i] = inputs_source[i].cuda(non_blocking=True)
            inputs_target_unl[i] = inputs_target_unl[i].cuda(non_blocking=True)
        labels_source = labels_source.cuda()
        labels_target_unl = labels_target_unl.cuda()
        if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            for i in range(len(inputs_source)):
                inputs_target_lab[i] = inputs_target_lab[i].cuda(non_blocking=True)
            labels_target_lab = labels_target_lab.cuda()
        # Update the learning rate (same schedule for both optimizers).
        lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
        optim.set_lr(optimizer_f, lr)
        optim.set_lr(optimizer_c, lr)
        train_meter.data_toc()
        # Each loader yields [strong-augmented, weak-augmented] views.
        source_weak = inputs_source[1]
        source_strong = inputs_source[0]
        target_unl_weak = inputs_target_unl[1]
        target_unl_strong = inputs_target_unl[0]
        if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            target_lab_weak = inputs_target_lab[1]
            target_lab_strong = inputs_target_lab[0]
        # Assemble the labeled batch (source, plus labeled target in the
        # semi-supervised setting) and the unlabeled target batch.
        if not cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            lab_inputs = [source_strong]
            lab_labels = labels_source
            unl_inputs = [target_unl_weak]
            unl_labels = labels_target_unl
        else:
            lab_inputs = [torch.cat((source_strong, target_lab_strong), dim=0)]
            lab_labels = torch.cat((labels_source, labels_target_lab), dim=0)
            unl_inputs = [target_unl_weak]
            unl_labels = labels_target_unl
        with torch.cuda.amp.autocast(enabled=cfg.TRAIN.MIXED_PRECISION):
            # Step A train all networks to minimize loss on source domain
            optimizer_f.zero_grad()
            optimizer_c.zero_grad()
            lab_preds, lab_feats = model(lab_inputs)
            criterion = nn.CrossEntropyLoss()
            loss_s = criterion(lab_preds, lab_labels)
            loss_s.backward()
            optimizer_f.step()
            optimizer_c.step()
            # Step B train classifier to maximize discrepancy
            # loss_h = LAMBDA * mean(sum(p*log p)) is the negative entropy of
            # the unlabeled predictions; the reverse=True forward presumably
            # flips gradients for the MME minimax — confirm in the model.
            optimizer_f.zero_grad()
            optimizer_c.zero_grad()
            unl_preds, unl_feats = model(unl_inputs, reverse=True)
            new_preds = F.softmax(unl_preds, dim=1)
            loss_h = cfg.MME.LAMBDA * torch.mean(
                torch.sum(new_preds * (torch.log(new_preds + 1e-5)), 1))
            loss_h.backward()
            optimizer_f.step()
            optimizer_c.step()
            # classifier weight rows double as class prototypes for the meter
            prototypes = model.module.head.weight.clone().detach()
        # Compute the errors.
        num_topks_correct = metrics.topks_correct(lab_preds, lab_labels, (1, 5))
        top1_err, top5_err = [
            (1.0 - x / lab_preds.size(0)) * 100.0 for x in num_topks_correct
        ]
        # Gather all the predictions across all the devices.
        if cfg.NUM_GPUS > 1:
            loss_s, loss_h, top1_err, top5_err = du.all_reduce(
                [loss_s, loss_h, top1_err, top5_err]
            )
        # Copy the stats from GPU to CPU (sync point).
        loss_s, loss_h, top1_err, top5_err = (
            loss_s.item(),
            loss_h.item(),
            top1_err.item(),
            top5_err.item()
        )
        batch_size = inputs_source[0].size(0)*max(cfg.NUM_GPUS, 1)
        # Update and log stats.
        train_meter.update_stats(
            top1_err,
            top5_err,
            loss_s,
            lr,
            batch_size,
        )
        # write to tensorboard format if available.
        if writer is not None:
            dict2write = {
                "Train/loss_s": loss_s,
                "Train/loss_h": -loss_h,
                "Train/lr": lr,
                "Train/Top1_err": top1_err,
                "Train/Top5_err": top5_err,
            }
            writer.add_scalars(dict2write, global_step=data_size * cur_epoch + cur_iter)
            # periodic confusion matrices over the meter's running buffers
            if cfg.TENSORBOARD.DIST_VIS.ENABLE and (data_size * cur_epoch + cur_iter)%cfg.TENSORBOARD.DIST_VIS.LOG_PERIOD==1:
                writer.add_confusion_matrix(
                    torch.argmax(torch.cat(train_meter.all_source_weak, dim=0), dim=1),
                    torch.cat(train_meter.all_source_labels, dim=0),
                    tag="Confusion/Labeled",
                    global_step=data_size * cur_epoch + cur_iter
                )
                writer.add_confusion_matrix(
                    torch.argmax(torch.cat(train_meter.all_target_weak, dim=0), dim=1),
                    torch.cat(train_meter.all_target_labels, dim=0),
                    tag="Confusion/Unlabeled",
                    global_step=data_size * cur_epoch + cur_iter
                )
            # periodic qualitative sample visualizations
            if cfg.TENSORBOARD.SAMPLE_VIS.ENABLE and (data_size * cur_epoch + cur_iter)%cfg.TENSORBOARD.SAMPLE_VIS.LOG_PERIOD==0:
                writer.add_video_pred(
                    lab_inputs[0],
                    torch.argmax(lab_preds, dim=1),
                    lab_labels,
                    tag="Sample/Source",
                    global_step = data_size * cur_epoch + cur_iter,
                )
                writer.add_video_pred(
                    unl_inputs[0],
                    torch.argmax(unl_preds, dim=1),
                    unl_labels,
                    tag="Sample/Target",
                    global_step = data_size * cur_epoch + cur_iter,
                )
        train_meter.iter_toc()  # measure allreduce for this meter
        train_meter.update_predictions(
            lab_preds, lab_feats, lab_labels,
            unl_preds, unl_feats, unl_labels, prototypes,
        )
        train_meter.log_iter_stats(cur_epoch, cur_iter)
        torch.cuda.synchronize()
        train_meter.iter_tic()
        del inputs_source, inputs_target_unl, labels_source, labels_target_unl
        if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            del inputs_target_lab, labels_target_lab
        # in case of fragmented memory
        torch.cuda.empty_cache()
    # Log epoch stats.
    train_meter.log_epoch_stats(cur_epoch)
    # write to tensorboard format if available.
    if writer is not None:
        if cfg.TENSORBOARD.EPOCH_LOG.ENABLE:
            writer.writer.add_scalars(
                "Error/Top1_err",
                {"Train": train_meter.num_top1_mis / train_meter.num_samples}, global_step=cur_epoch
            )
            writer.writer.add_scalars(
                "Error/Top5_err",
                {"Train": train_meter.num_top5_mis / train_meter.num_samples}, global_step=cur_epoch
            )
        if cfg.TENSORBOARD.CONFUSION_MATRIX.ENABLE:
            # detach + move to CPU before plotting
            all_preds = [pred.clone().detach() for pred in train_meter.all_source_strong]
            all_labels = [label.clone().detach() for label in train_meter.all_source_labels]
            all_preds = [pred.cpu() for pred in all_preds]
            all_labels = [label.cpu() for label in all_labels]
            writer.plot_eval(
                preds=all_preds,
                labels=all_labels,
                global_step=cur_epoch,
                tag="Confusion/Train"
            )
    train_meter.reset()
@torch.no_grad()
def eval_epoch(
    val_loader, model, val_meter, cur_epoch, cfg, writer=None
):
    """
    Evaluate the model on the val set.
    Args:
        val_loader (loader): data loader to provide validation data.
        model (model): model to evaluate the performance.
        val_meter (ValMeter): meter instance to record and calculate the metrics.
        cur_epoch (int): number of the current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to writer Tensorboard log.
    """
    # Evaluation mode enabled. The running stats would not be updated.
    model.eval()
    val_meter.iter_tic()
    for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
        if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
            if isinstance(inputs, (list,)):
                for i in range(len(inputs)):
                    inputs[i] = inputs[i].cuda(non_blocking=True)
            else:
                inputs = inputs.cuda(non_blocking=True)
            labels = labels.cuda()
            for key, val in meta.items():
                if isinstance(val, (list,)):
                    for i in range(len(val)):
                        val[i] = val[i].cuda(non_blocking=True)
                else:
                    meta[key] = val.cuda(non_blocking=True)
        val_meter.data_toc()
        # forward pass; the second output (features) is unused here
        preds, _ = model(inputs)
        if cfg.DATA.MULTI_LABEL:
            # multi-label: just gather predictions/labels across devices
            if cfg.NUM_GPUS > 1:
                preds, labels = du.all_gather([preds, labels])
        else:
            # Compute the errors.
            num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
            # Combine the errors across the GPUs.
            top1_err, top5_err = [
                (1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
            ]
            if cfg.NUM_GPUS > 1:
                top1_err, top5_err = du.all_reduce([top1_err, top5_err])
            # Copy the errors from GPU to CPU (sync point).
            top1_err, top5_err = top1_err.item(), top5_err.item()
            val_meter.iter_toc()
            # Update and log stats.
            val_meter.update_stats(
                top1_err,
                top5_err,
                inputs[0].size(0)
                * max(
                    cfg.NUM_GPUS, 1
                ),  # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
            )
            # write to tensorboard format if available.
            if writer is not None:
                writer.add_scalars(
                    {"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
                    global_step=len(val_loader) * cur_epoch + cur_iter,
                )
                # periodic qualitative sample visualizations
                if cfg.TENSORBOARD.SAMPLE_VIS.ENABLE and (len(val_loader) * cur_epoch + cur_iter)%cfg.TENSORBOARD.SAMPLE_VIS.LOG_PERIOD==0:
                    writer.add_video_pred(
                        inputs[0],
                        torch.argmax(preds, dim=1),
                        labels,
                        tag="Sample/Val",
                        global_step = len(val_loader) * cur_epoch + cur_iter,
                    )
        val_meter.update_predictions(preds, labels)
        val_meter.log_iter_stats(cur_epoch, cur_iter)
        val_meter.iter_tic()
    # Log epoch stats.
    val_meter.log_epoch_stats(cur_epoch)
    # write to tensorboard format if available.
    if writer is not None:
        if cfg.TENSORBOARD.EPOCH_LOG.ENABLE:
            writer.writer.add_scalars(
                "Error/Top1_err",
                {"Val": val_meter.num_top1_mis / val_meter.num_samples}, global_step=cur_epoch
            )
            writer.writer.add_scalars(
                "Error/Top5_err",
                {"Val": val_meter.num_top5_mis / val_meter.num_samples}, global_step=cur_epoch
            )
        if cfg.TENSORBOARD.CONFUSION_MATRIX.ENABLE:
            # detach + move to CPU before plotting
            all_preds = [pred.clone().detach() for pred in val_meter.all_preds]
            all_labels = [
                label.clone().detach() for label in val_meter.all_labels
            ]
            if cfg.NUM_GPUS:
                all_preds = [pred.cpu() for pred in all_preds]
                all_labels = [label.cpu() for label in all_labels]
            writer.plot_eval(
                preds=all_preds,
                labels=all_labels,
                global_step=cur_epoch,
                tag="Confusion/Val"
            )
    val_meter.reset()
def calculate_and_update_precise_bn(loader, model, num_iters=200, use_gpu=True):
    """
    Update the stats in bn layers by calculate the precise stats.
    Args:
        loader (loader): data loader to provide training data.
        model (model): model to update the bn stats.
        num_iters (int): number of iterations to compute and update the bn stats.
        use_gpu (bool): whether to use GPU or not.
    """

    def _inputs_only():
        # Strip labels/metadata from each batch and yield just the inputs,
        # moved to the current GPU when requested.
        for inputs, *_ in loader:
            if use_gpu:
                if isinstance(inputs, (list,)):
                    for i in range(len(inputs)):
                        inputs[i] = inputs[i].cuda(non_blocking=True)
                else:
                    inputs = inputs.cuda(non_blocking=True)
            yield inputs

    # Update the bn stats.
    update_bn_stats(model, _inputs_only(), num_iters)
def train(cfg):
    """
    Train a video model for many epochs on train set and evaluate it on val set.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set up environment.
    du.init_distributed_training(cfg)
    # Set random seed from configs.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)

    # Setup logging format.
    logging.setup_logging(cfg.OUTPUT_DIR)

    # Print config.
    logger.info("Train with config:")
    logger.info(pprint.pformat(cfg))

    # Build the video model and print model statistics. Feature extraction is
    # forced on for MME, and the MME temperature/eta are copied into the
    # backbone config before construction.
    cfg.EXTRACT.ENABLE = True
    cfg.SWIN.TEMP = cfg.MME.TEMP
    cfg.SWIN.ETA = cfg.MME.ETA
    model = build_model(cfg)
    if du.is_master_proc() and cfg.LOG_MODEL_INFO:
        misc.log_model_info(model, cfg, use_train_input=True)

    # Split the model into a backbone (everything except the "head") and a
    # classifier, each driven by its own optimizer.
    # NOTE(review): named_modules() also yields the root module (name ""),
    # so sub_modules contains the whole model as its first entry; confirm
    # construct_optimizer deduplicates parameters as intended.
    sub_modules = []
    if cfg.NUM_GPUS > 1:
        for name, sub_module in model.module.named_modules():
            if name != "head":
                sub_modules.append(sub_module)
    else:
        for name, sub_module in model.named_modules():
            if name != "head":
                sub_modules.append(sub_module)
    backbone = nn.Sequential(*sub_modules)
    # BUG FIX: the original unconditionally used model.module, which only
    # exists when the model is wrapped for multi-GPU (cfg.NUM_GPUS > 1); on a
    # single GPU it raised AttributeError. Mirror the branching used above.
    if cfg.NUM_GPUS > 1:
        classifier = model.module.get_submodule("head")
    else:
        classifier = model.get_submodule("head")
    optimizer_f = optim.construct_optimizer(backbone, cfg)
    optimizer_c = optim.construct_optimizer(classifier, cfg)
    optimizers = [optimizer_f, optimizer_c]
    # Create a GradScaler for mixed precision training.
    scaler = torch.cuda.amp.GradScaler(enabled=cfg.TRAIN.MIXED_PRECISION)

    # Load a checkpoint to resume training if applicable.
    # NOTE(review): only optimizer_f is restored here (and saved below), so
    # optimizer_c state does not survive a restart -- confirm intended.
    start_epoch = cu.load_train_checkpoint(cfg, model, optimizer_f,
                                           scaler if cfg.TRAIN.MIXED_PRECISION else None)

    # Create the video train and val loaders. Source/target configs are deep
    # copies so each loader sees its own IMDB file list and batch size.
    if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
        source_cfg = copy.deepcopy(cfg)
        source_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.SOURCE
        source_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.TARGET
        source_loader = loader.construct_loader(source_cfg, "train")
        val_loader = loader.construct_loader(source_cfg, "val")
        # Labeled target data, with a batch size scaled by ALPHA.
        target_lab_cfg = copy.deepcopy(cfg)
        target_lab_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.TARGET
        target_lab_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.SOURCE
        target_lab_cfg.TRAIN.BATCH_SIZE = int(cfg.ADAPTATION.ALPHA * source_cfg.TRAIN.BATCH_SIZE)
        target_lab_loader = loader.construct_loader(target_lab_cfg, "lab")
        # Unlabeled target data, with a batch size scaled by BETA.
        target_unl_cfg = copy.deepcopy(cfg)
        target_unl_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.TARGET
        target_unl_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.SOURCE
        target_unl_cfg.TRAIN.BATCH_SIZE = int(cfg.ADAPTATION.BETA * source_cfg.TRAIN.BATCH_SIZE)
        target_unl_loader = loader.construct_loader(target_unl_cfg, "unl")
        # Precise-BN loader sees source + target data combined.
        bn_cfg = copy.deepcopy(cfg)
        bn_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.SOURCE + cfg.ADAPTATION.TARGET
        bn_cfg.ADAMATCH.ENABLE = False
        precise_bn_loader = (
            loader.construct_loader(bn_cfg, "train", is_precise_bn=True)
            if cfg.BN.USE_PRECISE_STATS
            else None
        )
        train_loaders = [source_loader, target_unl_loader, target_lab_loader]
    else:
        source_cfg = copy.deepcopy(cfg)
        source_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.SOURCE
        source_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.TARGET
        source_loader = loader.construct_loader(source_cfg, "train")
        val_loader = loader.construct_loader(source_cfg, "val")
        target_unl_cfg = copy.deepcopy(cfg)
        target_unl_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.TARGET
        target_unl_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.SOURCE
        target_unl_cfg.TRAIN.BATCH_SIZE = int(cfg.ADAPTATION.BETA * source_cfg.TRAIN.BATCH_SIZE)
        target_unl_loader = loader.construct_loader(target_unl_cfg, "train")
        bn_cfg = copy.deepcopy(cfg)
        bn_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.SOURCE + cfg.ADAPTATION.TARGET
        bn_cfg.ADAMATCH.ENABLE = False
        precise_bn_loader = (
            loader.construct_loader(bn_cfg, "train", is_precise_bn=True)
            if cfg.BN.USE_PRECISE_STATS
            else None
        )
        train_loaders = [source_loader, target_unl_loader]

    # Create meters. Iteration counts are driven by the source loader.
    train_meter = AdaMeter(len(train_loaders[0]), cfg)
    val_meter = ValMeter(len(val_loader), cfg)

    # set up writer for logging to Tensorboard format.
    if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
        cfg.NUM_GPUS * cfg.NUM_SHARDS
    ):
        writer = tb.TensorboardWriter(cfg)
    else:
        writer = None

    # Perform the training loop.
    logger.info("Start epoch: {}".format(start_epoch + 1))
    epoch_timer = EpochTimer()
    for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
        # Shuffle the dataset.
        for train_loader in train_loaders:
            loader.shuffle_dataset(train_loader, cur_epoch)
        # Train for one epoch.
        epoch_timer.epoch_tic()
        train_epoch(
            train_loaders,
            model,
            optimizers,
            scaler,
            train_meter,
            cur_epoch,
            cfg,
            writer,
        )
        epoch_timer.epoch_toc()
        logger.info(
            f"Epoch {cur_epoch} takes {epoch_timer.last_epoch_time():.2f}s. Epochs "
            f"from {start_epoch} to {cur_epoch} take "
            f"{epoch_timer.avg_epoch_time():.2f}s in average and "
            f"{epoch_timer.median_epoch_time():.2f}s in median."
        )
        logger.info(
            f"For epoch {cur_epoch}, each iteraction takes "
            f"{epoch_timer.last_epoch_time()/len(train_loaders[0]):.2f}s in average. "
            f"From epoch {start_epoch} to {cur_epoch}, each iteraction takes "
            f"{epoch_timer.avg_epoch_time()/len(train_loaders[0]):.2f}s in average."
        )

        is_checkp_epoch = cu.is_checkpoint_epoch(
            cfg,
            cur_epoch,
            None
        )
        is_eval_epoch = misc.is_eval_epoch(
            cfg,
            cur_epoch,
            None
        )

        # Compute precise BN stats.
        if (
            (is_checkp_epoch or is_eval_epoch)
            and cfg.BN.USE_PRECISE_STATS
            and len(get_bn_modules(model)) > 0
        ):
            calculate_and_update_precise_bn(
                precise_bn_loader,
                model,
                min(cfg.BN.NUM_BATCHES_PRECISE, len(precise_bn_loader)),
                cfg.NUM_GPUS > 0,
            )
        _ = misc.aggregate_sub_bn_stats(model)

        # Save a checkpoint.
        if is_checkp_epoch:
            cu.save_checkpoint(
                cfg.OUTPUT_DIR,
                model,
                optimizer_f,
                cur_epoch,
                cfg,
                scaler if cfg.TRAIN.MIXED_PRECISION else None,
            )
        # Evaluate the model on validation set.
        if is_eval_epoch:
            eval_epoch(
                val_loader,
                model,
                val_meter,
                cur_epoch,
                cfg,
                writer,
            )

    if writer is not None:
        writer.close()
    raise SystemExit('Training Ends')
|
alimottaghi/slowfast
|
tools/train_mme.py
|
train_mme.py
|
py
| 22,644 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26225206883
|
# Unedited
def reallocate(banks):
    """Redistribute the largest bank's blocks round-robin, in place."""
    size = len(banks)
    richest = banks.argmax()          # ties resolve to the lowest index
    blocks = banks[richest]
    banks[richest] = 0
    # Deal one block at a time to the following banks, wrapping around.
    for offset in range(1, blocks + 1):
        banks[(richest + offset) % size] += 1
# Run redistribution cycles until a configuration repeats, then print the
# total cycle count, the step at which it was first seen, and the loop length.
# NOTE(review): `banks` and `tracker` are not defined in this snippet (the
# file is marked "# Unedited"); presumably `banks` is the puzzle input array
# and `tracker` a dict mapping configuration -> step -- confirm upstream.
counter = 0
while True:
    reallocate(banks)
    counter += 1
    # Configurations are hashed as tuples so they can be dict keys.
    tup = tuple(banks)
    if tup in tracker:
        # Part 1 answer, first occurrence, and Part 2 answer (cycle length).
        print(counter, tracker[tup], counter - tracker[tup])
        break
    else:
        tracker[tup] = counter
|
pirsquared/Advent-of-Code
|
2017/Day06.py
|
Day06.py
|
py
| 415 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23061764300
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request
import re
import os
import random
import time
from time import sleep
import json
from collections import OrderedDict
def dropSameListEle(inList):
    """Return inList without duplicates or empty strings, preserving order."""
    outList = []
    for item in inList:
        # Guard clauses: skip empties and anything already collected.
        if item == '' or item in outList:
            continue
        outList.append(item)
    return outList
class FWIKI:
#初始化,传入起始页码,截止页码
def __init__(self):
self.baseUrl="http://fgowiki.com/guide/petdetail/"
#抓取页面
def getPage(self,url):
try:
request=urllib.request.Request(url)
response=urllib.request.urlopen(request)
page=response.read().decode('utf-8')
return page
except (urllib.request.URLError,e):
print('erro')
if hasattr(e,'reason'):
print('reason',e.reason)
return None
#提取信息
def getInf(self,regExpress,page,pos):
pattern=re.compile(regExpress,re.S)
result=re.search(pattern,page)
if result:
result = result.group(pos).strip()
result = re.sub(r'・',r'·',result)
result = re.sub(r'〔(.*?)〕',r'(\1)',result)
result = re.sub(r'((.*?))',r'(\1)',result)
return result
else:
return None
# Crawl servant detail pages 1..182 (skipping the ids in whiteList),
# collecting names, skills, passive skills, and noble phantasm names,
# then dump them as Javascript-style dictionaries to servants_new.json.
f=FWIKI()
whiteList=[83,149,151,152,168]   # servant ids to skip
startPage=1
endPage=182
skillList=[]
pSkillList=[]
NPList=[]
nameDict=OrderedDict()           # id -> servant name, insertion-ordered
while startPage<=endPage:
    try:
        if startPage in whiteList:
            startPage = startPage + 1
            continue
        url=f.baseUrl+str(startPage)
        page=f.getPage(url)
        # The page embeds JSON with \uXXXX escapes; decode them to text.
        page=page.encode().decode('unicode_escape')
        name=f.getInf(r'"NAME":"(.*?)"',page,1)
        nameDict[startPage]=name
        # Active skills (three ranks).
        skill=f.getInf(r'"SKILL_R1":"(.*?)"',page,1)
        skillList.append(skill)
        skill=f.getInf(r'"SKILL_R2":"(.*?)"',page,1)
        skillList.append(skill)
        skill=f.getInf(r'"SKILL_R3":"(.*?)"',page,1)
        skillList.append(skill)
        # Noble phantasm name, with any parenthesized suffix removed.
        np= f.getInf(r'"T_NAME":"(.*?)"',page,1)
        np = re.sub(r'\(.*?\)','',np)
        NPList.append(np)
        # Passive ("class") skills, up to four.
        pSkill=f.getInf(r'"CSKILL_R1":"(.*?)"',page,1)
        pSkillList.append(pSkill)
        pSkill=f.getInf(r'"CSKILL_R2":"(.*?)"',page,1)
        pSkillList.append(pSkill)
        pSkill=f.getInf(r'"CSKILL_R3":"(.*?)"',page,1)
        pSkillList.append(pSkill)
        pSkill=f.getInf(r'"CSKILL_R4":"(.*?)"',page,1)
        pSkillList.append(pSkill)
        print(str(startPage))
        # Be polite to the server between requests.
        if startPage <= endPage:
            sleep(random.uniform(3,5))
        startPage = startPage + 1
    except Exception as e:
        # NOTE(review): startPage is not advanced on failure, so a page that
        # keeps erroring is retried forever -- confirm this is intended.
        print('Error:',e)
        if startPage<=endPage:
            sleep(random.uniform(2,3))
# De-duplicate the collected lists (also drops empty strings).
NPList=dropSameListEle(NPList)
skillList=dropSameListEle(skillList)
pSkillList=dropSameListEle(pSkillList)
# Build the output as Javascript object literals; skill/NP entries are
# emitted with empty keys to be filled in manually later.
lines='var servantsDict = {\n'
for x in nameDict:
    lines+='\t"'+str(x)+'" : "'+nameDict[x]+'",\n'
lines+='};\n\n\n\n'
lines+='var noblePhantasmsDict = {\n'
for x in NPList:
    lines+='\t"" : "'+str(x)+'",\n'
lines+='};\n\n\n\n'
lines+='var skillsDict = {\n'
for x in skillList:
    lines+='\t"" : "'+str(x)+'",\n'
lines+='};\n\n\n\n'
lines+='var passiveSkillsDict = {\n'
for x in pSkillList:
    lines+='\t"" : "'+str(x)+'",\n'
lines+='};\n\n\n\n'
with open('servants_new.json','w+',encoding='utf-8') as wpoint:
    wpoint.write(lines)
print('Task is finished!')
|
pplost/for-test
|
tools/新建文件夹/fetch - 副本.py
|
fetch - 副本.py
|
py
| 3,180 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34508776850
|
# https://practice.geeksforgeeks.org/problems/maximum-index-1587115620/1/?track=md-arrays&batchId=144
def max_index(a, n):
    """Brute force O(n^2): largest j - i with i < j and a[i] <= a[j], else -1."""
    best = -1
    for i in range(n):
        # Scan j from the right end down to just above i.
        for j in range(n - 1, i, -1):
            if a[i] <= a[j] and j - i > best:
                best = j - i
    return best
# O(n) solution: for a given array arr[], return the maximum j - i such
# that arr[j] >= arr[i], using prefix-min / suffix-max arrays and a
# merge-style two-pointer sweep.
def maxIndexDiff(arr, n):
    # prefix_min[i] = min(arr[0..i]); suffix_max[j] = max(arr[j..n-1]).
    prefix_min = [0] * n
    suffix_max = [0] * n
    prefix_min[0] = arr[0]
    for idx in range(1, n):
        prefix_min[idx] = min(arr[idx], prefix_min[idx - 1])
    suffix_max[n - 1] = arr[n - 1]
    for idx in range(n - 2, -1, -1):
        suffix_max[idx] = max(arr[idx], suffix_max[idx + 1])
    # Sweep both arrays left to right, like the merge step of merge sort:
    # while a valid pair exists, widen j; otherwise advance i.
    best = -1
    i = j = 0
    while j < n and i < n:
        if prefix_min[i] <= suffix_max[j]:
            best = max(best, j - i)
            j += 1
        else:
            i += 1
    return best
if __name__ == "__main__":
    # Demo input; expected answer is 8 (index 0 value 9 <= index 8 value 18).
    a = [9, 2, 3, 4, 5, 6, 7, 8, 18, 0]
    print(maxIndexDiff(a, len(a)))
|
ved93/deliberate-practice-challenges
|
code-everyday-challenge/n195_max_index.py
|
n195_max_index.py
|
py
| 1,399 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19115978156
|
# AtCoder ABC175 A: count the longest run of consecutive 'R' in a 3-char
# weather string. Counting only the *first* run of 'R' suffices here: in a
# 3-character string any second run can be at most length 1, never longer
# than the first.
s = input()
count = 0
for i in s:
    if i == 'R':
        count += 1
    elif i != 'R' and count == 0:
        # Still before the first 'R'; keep scanning.
        continue
    else:
        # First run of 'R' has ended.
        break
print(count)
'''
#alternative solution
S = input()
print(S.count("R") if S.count("R") != 2 else 2 if S[1] == "R" else 1)#See separate sheet
'''
|
NPE-NPE/code
|
python/abc/175/a.py
|
a.py
|
py
| 290 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21844969705
|
# Read four lines of text, then a length, and report availability.
texts = []
for i in range(4):
    text = input()
    texts.append(text)
length = int(input("Enter length you want to check: "))
is_found = False
# NOTE(review): is_found is overwritten on every iteration, so only the
# LAST text decides the result. If the intent was "any text shorter than
# length", the else-branch reset is a bug -- confirm intended behavior.
for i in texts:
    if length > len(i):
        is_found = True
    else:
        is_found = False
if is_found:
    print("Available")
else:
    print("Unavailable")
|
Areg14/DroneEduLab
|
Lesson12/Problem6.py
|
Problem6.py
|
py
| 313 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2518952492
|
# Remove every 'a' from a string and count the 'b' characters.
# NOTE(review): the original (Chinese) comment said "duplicate every b",
# but the code only counted them; the counting behavior is kept.
def str_remove(str):
    """Return a tuple: (str with every 'a' removed, number of 'b' chars)."""
    # str.replace/str.count replace the original quadratic slice-and-rebuild
    # loops with two linear C-level passes; removing 'a' cannot change the
    # number of 'b' characters, so the count is identical either way.
    stripped = str.replace("a", "")
    return stripped, stripped.count("b")
# Space-saving variant: removes 'a' by swapping with the last element
# (O(1) per removal), which saves shifting but changes the original order.
def str_remove_array(str):
    """Return (str with every 'a' removed, order NOT preserved, count of 'b')."""
    str = list(str)
    i = 0
    j = 0
    count = 0
    # Replace each 'a' with the current last element and shrink the list;
    # re-examine index i since the swapped-in element may also be 'a'.
    while i <len(str):
        if str[i] == "a":
            str[i]=str[-1]
            str.pop()
            i -=1
        i +=1
    # Second pass: count the 'b' characters that remain.
    while j <len(str):
        if str[j] =="b":
            count +=1
        j +=1
    return "".join(str),count
def space(str):
    """Return str with every space replaced by "%20" (URL-style encoding)."""
    # Single C-level pass via str.replace instead of the original quadratic
    # slice-and-rebuild loop; the output is identical.
    return str.replace(" ", "%20")
if __name__ == '__main__':
    # Demo: URL-encode the spaces of a sample string.
    a = " b cdefagabdb "
    # str,count = str_remove_array(a)
    # print(str)
    # print(count)
    b = space(a)
    print(b)
|
youyuebingchen/Algorithms
|
qiyue_alg/str_02.py
|
str_02.py
|
py
| 1,054 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9179526990
|
import os
import stat
import string
from absl.testing import absltest
from src.test.py.bazel import test_base
# pylint: disable=g-import-not-at-top
if os.name == 'nt':
import win32api
class LauncherTest(test_base.TestBase):
def _buildJavaTargets(self, bazel_bin, binary_suffix):
self.RunBazel(['build', '//foo'])
main_binary = os.path.join(bazel_bin, 'foo/foo%s' % binary_suffix)
self.assertTrue(os.path.isfile(main_binary))
self.assertTrue(
os.path.isdir(
os.path.join(bazel_bin, 'foo/foo%s.runfiles' % binary_suffix)))
if self.IsWindows():
self.assertTrue(os.path.isfile(main_binary))
self.AssertRunfilesManifestContains(
os.path.join(
bazel_bin, 'foo/foo%s.runfiles/MANIFEST' % binary_suffix
),
'_main/bar/bar.txt',
)
else:
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/foo.runfiles/_main/bar/bar.txt')
)
)
_, stdout, _ = self.RunProgram([main_binary])
self.assertEqual(len(stdout), 4)
self.assertEqual(stdout[0], 'hello java')
if self.IsWindows():
self.assertRegexpMatches(
stdout[1], r'java_runfiles=.*foo\\foo%s.runfiles' % binary_suffix)
self.assertEqual(stdout[2], 'runfiles_manifest_only=1')
self.assertRegexpMatches(
stdout[3], r'^runfiles_manifest_file=[a-zA-Z]:[/\\].*MANIFEST$')
else:
self.assertRegexpMatches(stdout[1], r'java_runfiles=.*/foo/foo.runfiles')
self.assertEqual(stdout[2], 'runfiles_manifest_only=')
self.assertRegexpMatches(stdout[3], r'^runfiles_manifest_file.*MANIFEST$')
def _buildShBinaryTargets(self, bazel_bin, bin1_suffix):
self.RunBazel(['build', '//foo:bin1.sh'])
bin1 = os.path.join(bazel_bin, 'foo', 'bin1.sh%s' % bin1_suffix)
self.assertTrue(os.path.exists(bin1))
self.assertTrue(
os.path.isdir(
os.path.join(bazel_bin, 'foo/bin1.sh%s.runfiles' % bin1_suffix)))
self.RunBazel(['build', '//foo:bin2.cmd'])
bin2 = os.path.join(bazel_bin, 'foo/bin2.cmd')
self.assertTrue(os.path.exists(bin2))
self.assertTrue(
os.path.isdir(os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles')))
exit_code, _, stderr = self.RunBazel(
['build', '//foo:bin3.bat'], allow_failure=True
)
if self.IsWindows():
self.AssertExitCode(exit_code, 1, stderr)
self.assertIn('target name extension should match source file extension',
os.linesep.join(stderr))
else:
bin3 = os.path.join(bazel_bin, 'foo', 'bin3.bat')
self.assertTrue(os.path.exists(bin3))
self.assertTrue(
os.path.isdir(os.path.join(bazel_bin, 'foo/bin3.bat.runfiles')))
if self.IsWindows():
self.assertTrue(os.path.isfile(bin1))
self.assertTrue(os.path.isfile(bin2))
else:
self.assertTrue(os.path.islink(bin1))
self.assertTrue(os.path.islink(bin2))
self.assertTrue(os.path.islink(bin3))
if self.IsWindows():
self.AssertRunfilesManifestContains(
os.path.join(
bazel_bin, 'foo/bin1.sh%s.runfiles/MANIFEST' % bin1_suffix
),
'_main/bar/bar.txt',
)
self.AssertRunfilesManifestContains(
os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles/MANIFEST'),
'_main/bar/bar.txt',
)
else:
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/bin1.sh.runfiles/_main/bar/bar.txt')
)
)
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles/_main/bar/bar.txt')
)
)
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/bin3.bat.runfiles/_main/bar/bar.txt')
)
)
_, stdout, _ = self.RunProgram([bin1])
self.assertEqual(len(stdout), 3)
self.assertEqual(stdout[0], 'hello shell')
if self.IsWindows():
self.assertEqual(stdout[1], 'runfiles_manifest_only=1')
self.assertRegexpMatches(
stdout[2],
(r'^runfiles_manifest_file='
r'[a-zA-Z]:/.*/foo/bin1.sh%s.runfiles/MANIFEST$' % bin1_suffix))
else:
# TODO(laszlocsomor): Find out whether the runfiles-related envvars should
# be set on Linux (e.g. $RUNFILES, $RUNFILES_MANIFEST_FILE). Currently
# they aren't, and that may be a bug. If it's indeed a bug, fix that bug
# and update this test.
self.assertEqual(stdout[1], 'runfiles_manifest_only=')
self.assertEqual(stdout[2], 'runfiles_manifest_file=')
if self.IsWindows():
exit_code, stdout, stderr = self.RunProgram([bin2])
self.AssertExitCode(exit_code, 0, stderr)
self.assertEqual(stdout[0], 'hello batch')
def _buildPyTargets(self, bazel_bin, binary_suffix):
# Verify that the build of our py_binary succeeds.
self.RunBazel(['build', '//foo:foo'])
# Verify that generated files exist.
foo_bin = os.path.join(bazel_bin, 'foo', 'foo%s' % binary_suffix)
self.assertTrue(os.path.isfile(foo_bin))
self.assertTrue(
os.path.isdir(
os.path.join(bazel_bin, 'foo/foo%s.runfiles' % binary_suffix)))
# Verify contents of runfiles (manifest).
if self.IsWindows():
self.AssertRunfilesManifestContains(
os.path.join(
bazel_bin, 'foo/foo%s.runfiles/MANIFEST' % binary_suffix
),
'_main/bar/bar.txt',
)
else:
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/foo.runfiles/_main/bar/bar.txt')
)
)
# Try to run the built py_binary.
_, stdout, _ = self.RunProgram([foo_bin])
self.assertEqual(stdout[0], 'Hello World!')
# Try to use the py_binary as an executable in a Starlark rule.
self.RunBazel(['build', '//foo:hello'])
# Verify that the Starlark action generated the right output.
hello_path = os.path.join(bazel_bin, 'foo', 'hello.txt')
self.assertTrue(os.path.isfile(hello_path))
with open(hello_path, 'r') as f:
self.assertEqual(f.read(), 'Hello World!')
# Verify that running py_test succeeds.
self.RunBazel(['test', '//foo:test'])
def _buildAndCheckArgumentPassing(self, package, target_name):
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self.RunBazel(['build', '//%s:%s' % (package, target_name)])
bin_suffix = '.exe' if self.IsWindows() else ''
bin1 = os.path.join(bazel_bin, package, '%s%s' % (target_name, bin_suffix))
self.assertTrue(os.path.exists(bin1))
self.assertTrue(
os.path.isdir(
os.path.join(bazel_bin, '%s/%s%s.runfiles' % (package, target_name,
bin_suffix))))
arguments = ['a', 'a b', '"b"', 'C:\\a\\b\\', '"C:\\a b\\c\\"']
_, stdout, _ = self.RunProgram([bin1] + arguments)
self.assertEqual(stdout, arguments)
def testJavaBinaryLauncher(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'java_binary(',
' name = "foo",',
' srcs = ["Main.java"],',
' main_class = "Main",',
' data = ["//bar:bar.txt"],',
')',
])
self.ScratchFile('foo/Main.java', [
'public class Main {',
' public static void main(String[] args) {'
' System.out.println("hello java");',
' System.out.println("java_runfiles=" + ',
' System.getenv("JAVA_RUNFILES"));',
' System.out.println("runfiles_manifest_only=" + ',
' System.getenv("RUNFILES_MANIFEST_ONLY"));',
' System.out.println("runfiles_manifest_file=" + ',
' System.getenv("RUNFILES_MANIFEST_FILE"));',
' }',
'}',
])
self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])'])
self.ScratchFile('bar/bar.txt', ['hello'])
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self._buildJavaTargets(bazel_bin, '.exe' if self.IsWindows() else '')
def testJavaBinaryArgumentPassing(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'java_binary(',
' name = "bin",',
' srcs = ["Main.java"],',
' main_class = "Main",',
')',
])
self.ScratchFile('foo/Main.java', [
'public class Main {',
' public static void main(String[] args) {'
' for (String arg : args) {',
' System.out.println(arg);',
' }'
' }',
'}',
])
self._buildAndCheckArgumentPassing('foo', 'bin')
def testShBinaryLauncher(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'foo/BUILD',
[
# On Linux/MacOS, all sh_binary rules generate an output file with
# the same name as the rule, and this is a symlink to the file in
# `srcs`. (Bazel allows only one file in `sh_binary.srcs`.)
# On Windows, if the srcs's extension is one of ".exe", ".cmd", or
# ".bat", then Bazel requires the rule's name has the same
# extension, and the output file will be a copy of the source file.
'sh_binary(',
' name = "bin1.sh",',
' srcs = ["foo.sh"],',
' data = ["//bar:bar.txt"],',
')',
'sh_binary(',
' name = "bin2.cmd",', # name's extension matches that of srcs[0]
' srcs = ["foo.cmd"],',
' data = ["//bar:bar.txt"],',
')',
'sh_binary(',
' name = "bin3.bat",', # name's extension doesn't match srcs[0]'s
' srcs = ["foo.cmd"],',
' data = ["//bar:bar.txt"],',
')',
])
foo_sh = self.ScratchFile('foo/foo.sh', [
'#!/bin/bash',
'echo hello shell',
'echo runfiles_manifest_only=${RUNFILES_MANIFEST_ONLY:-}',
'echo runfiles_manifest_file=${RUNFILES_MANIFEST_FILE:-}',
])
foo_cmd = self.ScratchFile('foo/foo.cmd', ['@echo hello batch'])
self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])'])
self.ScratchFile('bar/bar.txt', ['hello'])
os.chmod(foo_sh, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
os.chmod(foo_cmd, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self._buildShBinaryTargets(bazel_bin, '.exe' if self.IsWindows() else '')
def testShBinaryArgumentPassing(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'sh_binary(',
' name = "bin",',
' srcs = ["bin.sh"],',
')',
])
foo_sh = self.ScratchFile('foo/bin.sh', [
'#!/bin/bash',
'# Store arguments in a array',
'args=("$@")',
'# Get the number of arguments',
'N=${#args[@]}',
'# Echo each argument',
'for (( i=0;i<$N;i++)); do',
' echo ${args[${i}]}',
'done',
])
os.chmod(foo_sh, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
self._buildAndCheckArgumentPassing('foo', 'bin')
def testPyBinaryLauncher(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'foo/foo.bzl',
[
'def _impl(ctx):',
' ctx.actions.run(',
' arguments=[ctx.outputs.out.path],',
' outputs=[ctx.outputs.out],',
' executable=ctx.executable._hello_world,',
' use_default_shell_env=True)',
'',
'helloworld = rule(',
' implementation=_impl,',
' attrs={',
' "srcs": attr.label_list(allow_files=True),',
' "out": attr.output(mandatory=True),',
' "_hello_world": attr.label(executable=True, cfg="exec",',
' allow_files=True,',
' default=Label("//foo:foo"))',
' }',
')',
],
)
self.ScratchFile('foo/BUILD', [
'load(":foo.bzl", "helloworld")', '', 'py_binary(', ' name = "foo",',
' srcs = ["foo.py"],', ' data = ["//bar:bar.txt"],', ')', '',
'py_test(', ' name = "test",', ' srcs = ["test.py"],', ')', '',
'helloworld(', ' name = "hello",', ' out = "hello.txt",', ')'
])
foo_py = self.ScratchFile('foo/foo.py', [
'#!/usr/bin/env python3',
'import sys',
'if len(sys.argv) == 2:',
' with open(sys.argv[1], "w") as f:',
' f.write("Hello World!")',
'else:',
' print("Hello World!")',
])
test_py = self.ScratchFile('foo/test.py', [
'#!/usr/bin/env python3',
'import unittest',
'class MyTest(unittest.TestCase):',
' def test_dummy(self):',
' pass',
'if __name__ == \'__main__\':',
' unittest.main()',
])
self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])'])
self.ScratchFile('bar/bar.txt', ['hello'])
os.chmod(foo_py, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
os.chmod(test_py, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self._buildPyTargets(bazel_bin, '.exe' if self.IsWindows() else '')
def testPyBinaryArgumentPassing(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'py_binary(',
' name = "bin",',
' srcs = ["bin.py"],',
')',
])
self.ScratchFile('foo/bin.py', [
'import sys',
'for arg in sys.argv[1:]:',
' print(arg)',
])
self._buildAndCheckArgumentPassing('foo', 'bin')
def testPyBinaryLauncherWithDifferentArgv0(self):
"""Test for https://github.com/bazelbuild/bazel/issues/14343."""
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'py_binary(',
' name = "bin",',
' srcs = ["bin.py"],',
')',
])
self.ScratchFile('foo/bin.py', ['print("Hello world")'])
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
# Verify that the build of our py_binary succeeds.
self.RunBazel(['build', '//foo:bin'])
# Try to run the built py_binary.
binary_suffix = '.exe' if self.IsWindows() else ''
foo_bin = os.path.join(bazel_bin, 'foo', 'bin%s' % binary_suffix)
args = [r'C:\Invalid.exe' if self.IsWindows() else '/invalid']
_, stdout, _ = self.RunProgram(args, executable=foo_bin)
self.assertEqual(stdout[0], 'Hello world')
def testWindowsJavaExeLauncher(self):
# Skip this test on non-Windows platforms
if not self.IsWindows():
return
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'java_binary(',
' name = "foo",',
' srcs = ["Main.java"],',
' main_class = "Main",',
' jvm_flags = ["--flag1", "--flag2"],',
' data = ["advice-1.jar", "advice-2.jar"],',
')',
])
self.ScratchFile('foo/advice-1.jar')
self.ScratchFile('foo/advice-2.jar')
self.ScratchFile('foo/Main.java', [
'public class Main {',
' public static void main(String[] args) {',
' System.out.println("helloworld");',
' }',
'}',
])
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self.RunBazel(['build', '//foo:foo'])
binary = os.path.join(bazel_bin, 'foo', 'foo.exe')
self.assertTrue(os.path.exists(binary))
# Add this flag to make launcher print the command it generated instead of
# launching the real program.
print_cmd = '--print_launcher_command'
_, stdout, _ = self.RunProgram([binary, '--debug', print_cmd])
self.assertIn(
'-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005',
stdout)
_, stdout, _ = self.RunProgram(
[binary, '--debug', print_cmd],
env_add={'DEFAULT_JVM_DEBUG_PORT': '12345'},
)
self.assertIn(
'-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=12345',
stdout)
_, stdout, _ = self.RunProgram(
[binary, '--debug=12345', print_cmd],
env_add={
'DEFAULT_JVM_DEBUG_SUSPEND': 'n',
'PERSISTENT_TEST_RUNNER': 'true',
},
)
self.assertIn(
'-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=12345'
',quiet=y', stdout)
_, stdout, _ = self.RunProgram([binary, '--main_advice=MyMain', print_cmd])
self.assertIn('MyMain', stdout)
_, stdout, _ = self.RunProgram([
binary,
'--main_advice_classpath=foo/advice-1.jar;foo/advice-2.jar',
print_cmd,
])
self.assertIn('-classpath', stdout)
classpath = stdout[stdout.index('-classpath') + 1]
self.assertIn('foo/advice-1.jar', classpath)
self.assertIn('foo/advice-2.jar', classpath)
_, stdout, _ = self.RunProgram(
[binary, '--main_advice_classpath=C:\\foo\\bar', print_cmd]
)
self.assertIn('-classpath', stdout)
classpath = stdout[stdout.index('-classpath') + 1]
self.assertIn('C:\\foo\\bar', classpath)
_, stdout, _ = self.RunProgram(
[binary, '--jvm_flag="--some_path="./a b/c""', print_cmd]
)
self.assertIn('"--some_path=\\"./a b/c\\""', stdout)
_, stdout, _ = self.RunProgram(
[binary, '--jvm_flags="--path1=a --path2=b"', print_cmd]
)
self.assertIn('--path1=a', stdout)
self.assertIn('--path2=b', stdout)
_, stdout, _ = self.RunProgram(
[binary, print_cmd], env_add={'JVM_FLAGS': '--foo --bar'}
)
self.assertIn('--flag1', stdout)
self.assertIn('--flag2', stdout)
self.assertIn('--foo', stdout)
self.assertIn('--bar', stdout)
exit_code, stdout, stderr = self.RunProgram(
[binary, '--singlejar', print_cmd], allow_failure=True
)
self.AssertExitCode(exit_code, 1, stderr)
self.assertIn('foo_deploy.jar does not exist', ''.join(stderr))
self.RunBazel(['build', '//foo:foo_deploy.jar'])
_, stdout, _ = self.RunProgram([binary, '--singlejar', print_cmd])
self.assertIn('-classpath', stdout)
classpath = stdout[stdout.index('-classpath') + 1]
self.assertIn('foo_deploy.jar', classpath)
_, stdout, _ = self.RunProgram([binary, '--print_javabin'])
self.assertIn('local_jdk/bin/java.exe', ''.join(stdout))
my_tmp_dir = self.ScratchDir('my/temp/dir')
_, stdout, _ = self.RunProgram(
[binary, print_cmd], env_add={'TEST_TMPDIR': my_tmp_dir}
)
self.assertIn('-Djava.io.tmpdir=%s' % my_tmp_dir, stdout)
_, stdout, _ = self.RunProgram([binary, '--classpath_limit=0', print_cmd])
self.assertIn('-classpath', stdout)
classpath = stdout[stdout.index('-classpath') + 1]
self.assertRegexpMatches(classpath, r'foo-[A-Za-z0-9]+-classpath.jar$')
def testWindowsNativeLauncherInNonEnglishPath(self):
if not self.IsWindows():
return
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('bin/BUILD', [
'java_binary(',
' name = "bin_java",',
' srcs = ["Main.java"],',
' main_class = "Main",',
')',
'sh_binary(',
' name = "bin_sh",',
' srcs = ["main.sh"],',
')',
])
self.ScratchFile('bin/Main.java', [
'public class Main {',
' public static void main(String[] args) {'
' System.out.println("helloworld");',
' }',
'}',
])
self.ScratchFile('bin/main.sh', [
'echo "helloworld"',
])
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self.RunBazel(['build', '//bin/...'])
for f in [
'bin_java.exe',
'bin_java.exe.runfiles_manifest',
'bin_sh.exe',
'bin_sh',
'bin_sh.exe.runfiles_manifest',
]:
self.CopyFile(os.path.join(bazel_bin, 'bin', f),
os.path.join(u'./\u6d4b\u8bd5', f))
unicode_binary_path = u'./\u6d4b\u8bd5/bin_java.exe'
_, stdout, _ = self.RunProgram([unicode_binary_path])
self.assertEqual('helloworld', ''.join(stdout))
unicode_binary_path = u'./\u6d4b\u8bd5/bin_sh.exe'
_, stdout, _ = self.RunProgram([unicode_binary_path])
self.assertEqual('helloworld', ''.join(stdout))
def testWindowsNativeLauncherInLongPath(self):
if not self.IsWindows():
return
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'bin/BUILD',
[
'java_binary(',
' name = "not_short_bin_java",',
' srcs = ["Main.java"],',
' main_class = "Main",',
')',
'sh_binary(',
' name = "not_short_bin_sh",',
' srcs = ["main.sh"],',
')',
'py_binary(',
' name = "not_short_bin_py",',
' srcs = ["not_short_bin_py.py"],',
')',
],
)
self.ScratchFile('bin/Main.java', [
'public class Main {',
' public static void main(String[] args) {'
' System.out.println("helloworld");',
' }',
'}',
])
self.ScratchFile('bin/main.sh', [
'echo "helloworld"',
])
self.ScratchFile(
'bin/not_short_bin_py.py',
[
'print("helloworld")',
],
)
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
exit_code, _, stderr = self.RunBazel(['build', '//bin/...'])
self.AssertExitCode(exit_code, 0, stderr)
# Create a directory with a path longer than 260
long_dir_path = './' + '/'.join(
[(c * 8 + '.' + c * 3) for c in string.ascii_lowercase])
# The 'not_short_' prefix ensures that the basenames are not already 8.3
# short paths. Due to the long directory path, the basename will thus be
# replaced with a short path such as "not_sh~1.exe" below.
for f in [
'not_short_bin_java.exe',
'not_short_bin_java.exe.runfiles_manifest',
'not_short_bin_sh.exe',
'not_short_bin_sh',
'not_short_bin_sh.exe.runfiles_manifest',
'not_short_bin_py.exe',
'not_short_bin_py.zip',
'not_short_bin_py.exe.runfiles_manifest',
]:
self.CopyFile(
os.path.join(bazel_bin, 'bin', f), os.path.join(long_dir_path, f))
long_binary_path = os.path.abspath(
long_dir_path + '/not_short_bin_java.exe'
)
# subprocess doesn't support long path without shell=True
_, stdout, _ = self.RunProgram([long_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
# Make sure we can launch the binary with a shortened Windows 8dot3 path
short_binary_path = win32api.GetShortPathName(long_binary_path)
self.assertIn('~', os.path.basename(short_binary_path))
_, stdout, _ = self.RunProgram([short_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
long_binary_path = os.path.abspath(long_dir_path + '/not_short_bin_sh.exe')
# subprocess doesn't support long path without shell=True
_, stdout, _ = self.RunProgram([long_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
# Make sure we can launch the binary with a shortened Windows 8dot3 path
short_binary_path = win32api.GetShortPathName(long_binary_path)
self.assertIn('~', os.path.basename(short_binary_path))
_, stdout, _ = self.RunProgram([short_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
long_binary_path = os.path.abspath(long_dir_path + '/not_short_bin_py.exe')
# subprocess doesn't support long path without shell=True
_, stdout, _ = self.RunProgram([long_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
# Make sure we can launch the binary with a shortened Windows 8dot3 path
short_binary_path = win32api.GetShortPathName(long_binary_path)
self.assertIn('~', os.path.basename(short_binary_path))
_, stdout, _ = self.RunProgram([short_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
  def testWindowsNativeLauncherInvalidArgv0(self):
    """Verifies the native Windows launchers work when argv[0] is bogus.

    Each launcher flavour (Java, shell, Python) is executed with
    ``C:\\Invalid`` as argv[0] but a valid ``executable=`` path; the
    launchers must locate the real binary themselves and still emit the
    expected output.
    """
    if not self.IsWindows():
      # The native launcher only exists on Windows; nothing to test elsewhere.
      return
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    # One binary target per launcher flavour.
    self.ScratchFile(
        'bin/BUILD',
        [
            'java_binary(',
            '  name = "bin_java",',
            '  srcs = ["Main.java"],',
            '  main_class = "Main",',
            ')',
            'sh_binary(',
            '  name = "bin_sh",',
            '  srcs = ["main.sh"],',
            ')',
            'py_binary(',
            '  name = "bin_py",',
            '  srcs = ["bin_py.py"],',
            ')',
        ],
    )
    self.ScratchFile(
        'bin/Main.java',
        [
            'public class Main {',
            (
                '  public static void main(String[] args) {'
                '  System.out.println("helloworld");'
            ),
            '  }',
            '}',
        ],
    )
    self.ScratchFile(
        'bin/main.sh',
        [
            'echo "helloworld"',
        ],
    )
    self.ScratchFile(
        'bin/bin_py.py',
        [
            'print("helloworld")',
        ],
    )
    _, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
    bazel_bin = stdout[0]
    self.RunBazel(['build', '//bin/...'])
    # Launch each binary with an invalid argv[0]; the launcher must not
    # rely on argv[0] to locate itself.
    _, stdout, _ = self.RunProgram(
        ['C:\\Invalid'],
        executable=os.path.join(bazel_bin, 'bin', 'bin_java.exe'),
    )
    self.assertEqual('helloworld', ''.join(stdout))
    _, stdout, _ = self.RunProgram(
        ['C:\\Invalid'], executable=os.path.join(bazel_bin, 'bin', 'bin_sh.exe')
    )
    self.assertEqual('helloworld', ''.join(stdout))
    _, stdout, _ = self.RunProgram(
        ['C:\\Invalid'], executable=os.path.join(bazel_bin, 'bin', 'bin_py.exe')
    )
    self.assertEqual('helloworld', ''.join(stdout))
def AssertRunfilesManifestContains(self, manifest, entry):
with open(manifest, 'r') as f:
for l in f:
tokens = l.strip().split(' ', 1)
if len(tokens) == 2 and tokens[0] == entry:
return
self.fail('Runfiles manifest "%s" did not contain "%s"' % (manifest, entry))
if __name__ == '__main__':
  # Run every test in this module via absl's test runner.
  absltest.main()
|
bazelbuild/bazel
|
src/test/py/bazel/launcher_test.py
|
launcher_test.py
|
py
| 26,523 |
python
|
en
|
code
| 21,632 |
github-code
|
6
|
24293929563
|
def sqrt(x):
    """Approximate the square root of ``x`` via bisection.

    Returns the midpoint early when its square is within 1e-4 of ``x``,
    otherwise the bracket midpoint rounded to 3 decimal places.

    Args:
        x: a non-negative number.
    Raises:
        ValueError: if ``x`` is negative.
    """
    if x < 0:
        raise ValueError("square root of a negative number is undefined")
    if x == 0:
        return 0.0
    low = 0
    # FIX: for 0 <= x < 1 the root exceeds x itself, so the upper bound of
    # the bracket must reach at least 1 (the original used high = x and
    # could never converge on the true root for fractional inputs).
    high = max(x, 1)
    while high - low > 0.001:
        mid = (high + low) / 2
        if abs(mid ** 2 - x) < 0.0001:
            return mid
        if mid ** 2 > x:
            high = mid
        elif mid ** 2 < x:
            low = mid
    return round((high + low) / 2, 3)
def main():
    """Smoke test: the bisection result matches sqrt(5) to three decimals."""
    expected = 2.236
    assert sqrt(5) == expected


if __name__ == '__main__':
    main()
|
ckallum/Daily-Interview-Pro
|
solutions/square_root.py
|
square_root.py
|
py
| 374 |
python
|
en
|
code
| 16 |
github-code
|
6
|
32505732795
|
# -*- coding: utf-8 *-
import pprint
import re
import sys
import importlib
from Symfopy.Component.HttpFoundation import Request, Response
class Router(object):
    """Maps URL templates such as ``/users/{id}`` or ``/n/{n:[0-9]+}``
    onto controllers, compiling each template into an anchored regex."""

    var_regex = re.compile(r'\{(\w+)(?::([^}]+))?\}')

    def __init__(self, routes=None):
        """Register every route in ``routes``.

        Args:
            routes: mapping of route name -> dict with 'route', 'controller'
                and optional 'defaults' keys.

        FIX: the original signature used a mutable default (``routes={}``);
        ``None`` avoids sharing one dict across all Router instances.
        """
        self.routes = dict()
        for name in (routes or {}):
            vars = routes[name].get('defaults', {})
            self.add_route(name, routes[name]['route'],
                           routes[name]['controller'], **vars)

    def load_controller(self, string):
        """Resolve a ``'module:function'`` string into the function object."""
        module_name, func_name = string.split(':', 1)
        module = importlib.import_module(module_name)
        #__import__(module_name)
        #module = sys.modules[module_name]
        func = getattr(module, func_name)
        return func

    def add_route(self, name, route, controller, **vars):
        """Register ``route`` under ``name``; extra kwargs become defaults."""
        #if isinstance(controller, basestring):
        #    controller = self.load_controller(controller)
        self.routes[name] = (re.compile(self.template_to_regex(route)),
                             controller, vars)

    @staticmethod
    def template_to_regex(template):
        """Translate a URL template into an anchored regex string.

        ``{name}`` becomes ``(?P<name>[^/]+)``; ``{name:expr}`` uses the
        given sub-expression instead of the default.
        """
        regex = ''
        last_pos = 0
        for match in Router.var_regex.finditer(template):
            # Escape the literal text between placeholders.
            regex += re.escape(template[last_pos:match.start()])
            var_name = match.group(1)
            expr = match.group(2) or '[^/]+'
            expr = '(?P<%s>%s)' % (var_name, expr)
            regex += expr
            last_pos = match.end()
        regex += re.escape(template[last_pos:])
        regex = '^%s$' % regex
        return regex

    def __str__(self):
        return pprint.pformat(self.__dict__)

    @staticmethod
    def notfound(message = None, **kwargs):
        """Build a 404 Response; ``message`` may be a string or a list of strings."""
        content = ['<h1>Not Found</h1>']
        # FIX: ``basestring`` exists only on Python 2 and raised NameError
        # here on Python 3; ``str`` is the correct check for this codebase.
        if isinstance(message, str):
            content.append('<p>'+ message + '</p>')
        elif isinstance(message, list):
            for x in message:
                if isinstance(x, str):
                    content.append('<p>'+ x + '</p>')
        return Response(content, 404)
def rest_controller(cls):
    """Wrap a controller class so HTTP verbs dispatch to instance methods.

    The returned callable instantiates ``cls`` with the URL variables and
    invokes ``<action>_<verb>`` (e.g. ``show_get``) when a non-empty
    ``action`` URL variable is present, otherwise the bare lower-cased
    HTTP verb name (``get``, ``post``, ...).
    """
    def replacement(request, **urlvars):
        action = urlvars.get('action', None)
        if action:
            action += '_' + request.get_method().lower()
            urlvars.pop('action')
        else:
            # FIX: ``basestring`` does not exist on Python 3 and raised a
            # NameError whenever action was an empty string; ``str`` keeps
            # the original intent (drop the empty action from urlvars).
            if isinstance(action, str):
                urlvars.pop('action')
            action = request.get_method().lower()
        instance = cls(**urlvars)
        try:
            method = getattr(instance, action)
        except Exception:
            return Router.notfound('No action ' + action)
        return method(request)
    return replacement
def rest_controller_template(cls):
    """Like ``rest_controller`` but forwards an optional ``template`` argument.

    The wrapped callable instantiates ``cls`` with the URL variables, picks
    the target method the same way as ``rest_controller``, and passes
    ``template`` through to it when one is supplied.  The controller class
    is exposed on the wrapper as ``member_func``.
    """
    def replacement(request, template = None, **urlvars):
        action = urlvars.get('action', None)
        if action:
            action += '_' + request.get_method().lower()
            urlvars.pop('action')
        else:
            # FIX: ``basestring`` is Python 2 only; use ``str`` so an
            # empty-string action is discarded instead of raising NameError.
            if isinstance(action, str):
                urlvars.pop('action')
            action = request.get_method().lower()
        instance = cls(**urlvars)
        try:
            method = getattr(instance, action)
        except Exception:
            return Router.notfound('No action ' + action)
        if template:
            return method(request, template)
        else:
            return method(request)
    replacement.member_func = cls
    return replacement
|
alculquicondor/Symfopy
|
vendor/Symfopy/Component/Routing.py
|
Routing.py
|
py
| 3,380 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2398607314
|
"""
Makes a movie of the previously downloaded GEOS data
"""
import os
import pathlib
from typing import List, Tuple, Union
import numpy as np
import matplotlib.pyplot as plt
import DownloadData
import ReadNetCDF4
import VideoWriter
plt.style.use('myDarkStyle.mplstyle')
# ======================================================================================================================
# Constants
FILL_VALUE = 0x3fff
FILL_VALUE2 = 1023
CMAP = 'hot'
FPS = 12
FIG_SIZE = [16, 9]
# ======================================================================================================================
class MovieFigure:
    """
    A simple class holding the matplotlib figure to be made into a movie.

    Lays out ``numImages`` image axes side by side, all decorations stripped.
    """

    def __init__(self,
                 numImages: int = 1,
                 figsize: Tuple[float, float] = (19.2, 10.8)):
        """
        Constructor
        Args:
            numImages: the number of images wide
            figsize: the overall figure size
        """
        # FIX: squeeze=False guarantees an array of axes even for the
        # default numImages == 1; the original call returned a bare Axes
        # object in that case, breaking len()/indexing in this class.
        self._fig, axes = plt.subplots(nrows=1,
                                       ncols=numImages,
                                       figsize=figsize,
                                       squeeze=False)
        self._axes = axes.ravel()
        self._setup()

    # ==================================================================================================================
    @property
    def fig(self) -> plt.Figure:
        """
        Returns the figure handle
        """
        return self._fig

    # ==================================================================================================================
    def updateFigure(self,
                     axisNumber: int,
                     image: np.ndarray,
                     dateAndTime: str,
                     band: DownloadData.Band,
                     **plotKwargs) -> None:
        """
        Updates the figure
        Args:
            axisNumber: the axis number to update
            image: the numpy array of the image
            dateAndTime: the date and time of the image
            band: the GEOS band
            plotKwargs: the kwargs to pass to matplotlib imshow()
        Raises:
            IndexError: if axisNumber is outside the configured axes.
        """
        if axisNumber >= len(self._axes):
            raise IndexError(f'axisNumber={axisNumber} is out of the range [0, {len(self._axes)})')
        self._axes[axisNumber].imshow(X=image, **plotKwargs)
        title = f'Band {band.name.replace("_", "-")} {dateAndTime}'
        self._axes[axisNumber].set_title(title)

    # ==================================================================================================================
    def update(self) -> None:
        """
        Redraws the figure canvas so pending artist changes become visible.
        """
        self._fig.canvas.draw()

    # ==================================================================================================================
    def _setup(self) -> None:
        """
        Strips ticks, labels and grids from every axis.
        """
        for axis in self._axes:
            axis.set_xticks([])
            axis.set_yticks([])
            axis.set_yticklabels([])
            axis.set_xticklabels([])
            # FIX: grid(b=False) uses the deprecated/removed ``b`` keyword
            # (gone in matplotlib >= 3.6); the positional form is portable.
            axis.grid(False)
            axis.set_title('')
        plt.tight_layout()
# ======================================================================================================================
def makeMovie(dataDirs: List[str],
              outputDir: str,
              outputName: str,
              cMax: Union[float, List[float]] = None) -> None:
    """
    Makes a movie of the data found in the input directories. Expects the data
    to be organized into day/hour directories under each dataDir; every
    directory must contain the same number of frames (one axis per directory).
    Args:
        dataDirs: the data directories, one per image column
        outputDir: the output directory to save the movie to
        outputName: the name of the output movie file
        cMax: maximum of the clim; a scalar shared by all directories or a
            list with one entry per directory. When None, each frame is
            auto-scaled to its own nanmax.
    """
    if not os.path.isdir(outputDir):
        # attempt to make the output directory if it doesn't already exist
        os.mkdir(outputDir)

    vw = VideoWriter.VideoWriter(filename=os.path.join(outputDir, outputName),
                                 fps=FPS,
                                 isColor=True)

    # One file list per input directory; lists must be the same length so
    # frame i of every directory can be drawn side by side.
    allFiles = list()
    for dataDir in dataDirs:
        allFiles.append(getAllImageFiles(dataDir=dataDir))

    numFiles = [len(files) for files in allFiles]
    if numFiles.count(numFiles[0]) != len(numFiles):
        raise RuntimeError(f'Different number of image files in the data directories')

    for fileIdx in range(len(allFiles[0])):
        # matplotlib appears to be a memory hog for some reason, so instantiate a new fig for each set of files
        # instead of simply updating...
        movieFig = MovieFigure(numImages=len(dataDirs),
                               figsize=FIG_SIZE)
        for dirIdx in range(len(allFiles)):
            file = allFiles[dirIdx][fileIdx]
            print(f'Processing File {file}')
            image, dateAndTime, band = ReadNetCDF4.readImage(filename=str(file),
                                                             doPlot=False)
            # a bit of cleanup: mask out the sensor fill sentinels.
            # NOTE(review): assigning np.nan assumes readImage returns a
            # float array -- confirm; on an integer array this would raise.
            image[image == FILL_VALUE] = np.nan
            image[image == FILL_VALUE2] = np.nan

            cLimMax = None  # get rid of IDE warning
            if cMax is not None:
                # NOTE(review): exact type checks -- an int cMax (or int list
                # entry) falls through leaving clim=[0, None]; the module's
                # own CMAX list uses ints, so verify this is intended.
                if type(cMax) is list:
                    cLimMax = cMax[dirIdx]
                elif type(cMax) is float:
                    cLimMax = cMax
            else:
                cLimMax = np.nanmax(image)

            movieFig.updateFigure(axisNumber=dirIdx,
                                  image=image,
                                  dateAndTime=dateAndTime,
                                  band=band,
                                  clim=[0, cLimMax],
                                  cmap=CMAP)
        movieFig.update()
        vw.addMatplotlibFigureHandle(fig=movieFig.fig,
                                     doPlot=False)
        # Close each per-frame figure promptly to bound memory use.
        plt.close(movieFig.fig)
# ======================================================================================================================
def getAllImageFiles(dataDir: str) -> List[pathlib.Path]:
    """
    Return all of the image files in dataDir. Assumes a folder structure of days and hours beneath
    Args:
        dataDir: the data directory
    Returns:
        list of .nc files found under dataDir/<day>/<hour>/
    Raises:
        RuntimeError: if dataDir does not exist
    """
    if not os.path.isdir(dataDir):
        raise RuntimeError(f'Input directory can not be found\n\t{dataDir}')

    files = list()
    for dayDir in os.listdir(dataDir):
        fullDayDir = os.path.join(dataDir, dayDir)
        if not os.path.isdir(fullDayDir):
            # Skip stray files living directly under the data directory.
            continue
        for hourDir in os.listdir(fullDayDir):
            files.extend(pathlib.Path(fullDayDir, hourDir).glob('*.nc'))
    return files
# ======================================================================================================================
if __name__ == '__main__':
    # Build a side-by-side movie of the visible (BLUE) band and the
    # shortwave-IR band from the previously downloaded GOES data.
    MOVIE_NAME = 'GOES_16'
    OUTPUT_DIR = os.path.join(pathlib.Path(os.path.abspath(__file__)).parent, '..', 'movie')
    DATA_TOP_DIR = os.path.join(pathlib.Path(os.path.abspath(__file__)).parent, '..', 'data')

    DATA_DIRS = list()
    DATA_DIRS.append(os.path.join(DATA_TOP_DIR, 'BLUE_1'))
    DATA_DIRS.append(os.path.join(DATA_TOP_DIR, 'SWIR_7'))

    # Per-directory colour-scale maxima, one entry per DATA_DIRS entry.
    # NOTE(review): these are ints while makeMovie type-checks for float
    # list entries -- confirm the intended clim behaviour.
    CMAX = [600, 4]

    makeMovie(dataDirs=DATA_DIRS,
              outputDir=OUTPUT_DIR,
              outputName=MOVIE_NAME,
              cMax=CMAX)
|
dpilger26/GOES
|
scripts/MakeMovie.py
|
MakeMovie.py
|
py
| 7,699 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34291432876
|
import os, csv
class CarBase:
    """Common base for all vehicle records parsed from the CSV export."""

    def __init__(self, brand, photo_file_name, carrying):
        self.brand = brand
        self.photo_file_name = photo_file_name
        self.carrying = carrying

    def get_photo_file_ext(self):
        # Extension including the leading dot, e.g. '.jpg'.
        _, extension = os.path.splitext(self.photo_file_name)
        return extension
class Car(CarBase):
    """Passenger car; adds the seat count on top of the base fields."""

    def __init__(self, brand, photo_file_name, carrying, passenger_seats_count):
        super().__init__(brand, photo_file_name, carrying)
        self.car_type = "car"
        # Seat count arrives as a CSV string; store it as an int.
        self.passenger_seats_count = int(passenger_seats_count)
class Truck(CarBase):
    """Truck; body dimensions come from a 'WxHxL' string in metres."""

    def __init__(self, brand, photo_file_name, carrying, body_whl):
        super().__init__(brand, photo_file_name, carrying)
        self.car_type = "truck"
        # An empty dimensions field means "unknown" -> zero-sized body.
        whl = body_whl if body_whl != '' else '0x0x0'
        parts = whl.split('x')
        self.body_width = float(parts[0])
        self.body_height = float(parts[1])
        self.body_length = float(parts[2])

    def get_body_volume(self):
        """Body volume in cubic metres (width * height * length)."""
        return self.body_width * self.body_height * self.body_length
class SpecMachine(CarBase):
    """Special machine; carries a free-form ``extra`` description."""

    def __init__(self, brand, photo_file_name, carrying, extra):
        super().__init__(brand, photo_file_name, carrying)
        self.car_type = "spec_machine"
        self.extra = extra
def get_car_list(csv_filename):
    """Parse the semicolon-separated CSV export into vehicle objects.

    Rows missing any of the mandatory columns (car_type, brand, photo,
    carrying) are skipped, as are 'car' rows without a seat count.  Unknown
    row types -- including the header line -- are silently ignored.

    Args:
        csv_filename: path to the ';'-delimited CSV file.
    Returns:
        list of Car/Truck/SpecMachine instances, in file order.
    """
    def cell(row, idx):
        # The export may contain short rows; treat a missing column as ''.
        return row[idx] if idx < len(row) else ''

    car_list = []
    with open(csv_filename, 'r') as csv_f:
        for row in csv.reader(csv_f, delimiter=';'):
            car_type = cell(row, 0)
            brand = cell(row, 1)
            photo = cell(row, 3)
            carrying = cell(row, 5)
            # Every vehicle kind requires these four columns.
            if not (car_type and brand and photo and carrying):
                continue
            if car_type == 'car':
                seats = cell(row, 2)
                # FIX: the original built Car from slice-joins and crashed
                # with int('') when the seats column was merely missing
                # (rather than present-but-empty).
                if seats:
                    car_list.append(Car(brand, photo, carrying, seats))
            elif car_type == 'truck':
                car_list.append(Truck(brand, photo, carrying, cell(row, 4)))
            elif car_type == 'spec_machine':
                car_list.append(SpecMachine(brand, photo, carrying, cell(row, 6)))
    return car_list
|
evgp/learning_python
|
w3_cars/w3_2_autodrom.py
|
w3_2_autodrom.py
|
py
| 2,251 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22165701043
|
# Demonstrates list comprehensions: filtering, mapping and transforming.
inteiros = [1, 3, 4, 5, 7, 8, 9]

# Keep only the even numbers.
pares = [valor for valor in inteiros if valor % 2 == 0]
print(pares)

# Square every element.
quadrados = [valor ** 2 for valor in inteiros]
print(quadrados)

# Upper-case every fruit name, rebinding the list.
frutas = ["maçã", "banana", "laranja", "melancia"]
frutas = [fruta.upper() for fruta in frutas]
print(frutas)
|
sergiaoprogramador/introducaozinha-rapida-python
|
list_comprehensions.py
|
list_comprehensions.py
|
py
| 250 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
43926964501
|
# Coda con priorita' per creare la frontiera
from queue import PriorityQueue
# put per inserire
# get per prendere
class StrutturaMappa():
    """Adjacency-list street map: each street name maps to a list of
    single-entry dicts {neighbour_street: distance_in_metres}."""

    def __init__(self):
        # street name -> list of {neighbour: weight} dicts
        self.mappa = dict()

    def aggiungiVia(self, viaInput):
        # Insert a node with no edges and no weight.
        self.mappa.update({viaInput : list()})

    # Appends an edge to the node keyed by viaPartenza.
    # NOTE(review): unlike the original Italian comment suggests, a missing
    # viaPartenza key raises KeyError here rather than being created.
    def aggiungiCollegamento(self, viaPartenza, viaArrivo, pesoArco):
        # Weight expressed in metres.
        self.mappa[viaPartenza].append({viaArrivo : pesoArco})

    def visualizzaStrade(self):
        # Print every street (node) name.
        for strade in self.mappa:
            print(strade)

    def visualizzaCollegamenti(self):
        # Print each street together with its neighbours and distances.
        for strade in self.mappa.keys():
            print('Strada: ', strade)
            print('Collegata con: ')
            for collegamento in self.mappa.get(strade):
                chiavi = list(collegamento.keys())
                for chiave in chiavi:
                    print('\t',chiave,' distanza:',collegamento.get(chiave),'metri')
            print('\n')

    def getVicini(self, viaPartenza):
        # Return the neighbour list for a street (None if unknown).
        return self.mappa.get(viaPartenza)

    # Return the cost of one element taken from the neighbour set.
    def getCosto(self, elementoInsiemeVicini):
        # Turn the single-entry dict into a list and take its only key.
        chiave = list(elementoInsiemeVicini.keys())[0]
        # Return the key's value, i.e. the edge cost.
        return elementoInsiemeVicini.get(chiave)
# Map object under construction.
mappa = StrutturaMappa()

# Insert the streets (nodes), initially without edges.
mappa.aggiungiVia("Via Capruzzi")
mappa.aggiungiVia("Via Policlinico")
mappa.aggiungiVia("Viale Aviatori")
mappa.aggiungiVia("Via Marcuzzi")
mappa.aggiungiVia("Via Napoli")
mappa.aggiungiVia("Corso Roma")
mappa.aggiungiVia("Via Lattea")
mappa.aggiungiVia("Via degli Dei")
mappa.aggiungiVia("Via delle querce")
mappa.aggiungiVia("Viale del Todis")
mappa.aggiungiVia("Corso Umberto Primo")

# Insert the weighted edges (weights in metres).
# NOTE(review): 'Corso Giannone' below is referenced as a destination but
# never added as a node -- lookups of its neighbours will return None.
mappa.aggiungiCollegamento('Via Capruzzi','Via Marcuzzi', 200)
mappa.aggiungiCollegamento('Via Policlinico','Viale Aviatori', 100)
mappa.aggiungiCollegamento('Viale Aviatori','Via Policlinico', 100)
mappa.aggiungiCollegamento('Viale Aviatori','Via Marcuzzi', 100)
mappa.aggiungiCollegamento('Viale Aviatori','Via Napoli', 100)
mappa.aggiungiCollegamento('Via Marcuzzi','Via Capruzzi', 200)
mappa.aggiungiCollegamento('Via Marcuzzi','Viale Aviatori', 200)
mappa.aggiungiCollegamento('Via Napoli','Viale Aviatori', 100)
mappa.aggiungiCollegamento('Corso Roma','Corso Giannone', 100)
mappa.aggiungiCollegamento('Via Lattea','Via degli Dei', 200)
mappa.aggiungiCollegamento('Via Lattea','Via delle querce', 400)
mappa.aggiungiCollegamento('Via degli Dei','Via Lattea', 200)
mappa.aggiungiCollegamento('Via degli Dei','Viale del Todis', 200)
mappa.aggiungiCollegamento('Via delle querce','Via Lattea', 400)
mappa.aggiungiCollegamento('Via delle querce','Corso Umberto Primo', 200)
mappa.aggiungiCollegamento('Viale del Todis','Via degli Dei', 200)
mappa.aggiungiCollegamento('Corso Umberto Primo','Via delle querce', 200)
#----------------------------------------------------------------------
#vicino = mappa.getVicini('Viale Aviatori')[0]
#print(mappa.getCosto(vicino))
|
GianmarcoMo/ProgettoICon
|
grafo.py
|
grafo.py
|
py
| 3,474 |
python
|
it
|
code
| 0 |
github-code
|
6
|
75079239548
|
from dal import autocomplete
from django import forms
from .models import Tag
class TForm(forms.ModelForm):
    """Tag form whose name field uses a select2 autocomplete widget."""

    class Meta:
        model = Tag
        # FIX: ('Tag_name') is just a parenthesised string, not a tuple --
        # Django requires Meta.fields to be a list/tuple of field names,
        # so the trailing comma is mandatory.
        fields = ('Tag_name',)
        widgets = {
            'Tag_name': autocomplete.ModelSelect2(url='test')
        }
|
codebottlehun/WithMe
|
tag/forms.py
|
forms.py
|
py
| 270 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36090200838
|
"""Some useful functions to deal with GitHub."""
import datetime
from github import Github
from github import UnknownObjectException
import click
class GitHubMux:
    """Class that lets you operate in multiple repos of the same org at the same time."""

    def __init__(self, organization, token, exclude):
        """
        Instantiate class.

        Args:
            organization(string): Organization name.
            token(string): Token to interact with GitHub API.
            exclude(tuple): Tuple with all the repo names that have to be excluded from processing.
        """
        self.token = token
        self.gh = Github(self.token)
        self.exclude = exclude
        try:
            self.org = self.gh.get_organization(organization)
        except UnknownObjectException as err:
            # Chain the original exception so the underlying API failure
            # is not lost from the traceback.
            raise Exception(
                "Looks like organization `{}` doesn't exist.".format(organization)) from err

    def exclude_repo(self, repo):
        """
        Exclude a repo from further processing.

        Args:
            repo(string): Name of the repo to exclude
        """
        self.exclude = self.exclude + (repo, )

    def repos(self):
        """Yield the organization's repos, skipping the excluded ones."""
        for repo in self.org.get_repos():
            if repo.name in self.exclude:
                # FIX: removed the original bare ``self.exclude_repo``
                # statement here -- an attribute access without a call is a
                # no-op (and calling it would only re-append a name that is
                # already excluded).
                click.secho("Skipping repo `{}`.".format(repo.name), fg="blue")
            else:
                yield repo

    def _set_label_repo(self, repo, name, color):
        """
        Create a label if it doesn't exist already.

        If the label exists with a different color, the color is fixed.

        Args:
            repo(Repository): Repo where you want to create the label
            name(string): Name of the label
            color(string): Color of the label

        Return:
            (Label) Either the label that was created or the existing one.
        """
        try:
            label = repo.get_label(name)
            if label.color == color:
                click.secho("Label `{}` already exists in repo `{}`. ".format(name,
                                                                              repo.name),
                            fg='green')
            else:
                click.secho("Label `{}` already exists in repo `{}` "
                            "but has a different color. Fixing.".format(name,
                                                                        repo.name),
                            fg='yellow')
                label.edit(name, color)
        except UnknownObjectException:
            # get_label raised -> the label is missing; create it.
            click.secho("Label `{}` doesn't exist in repo `{}`. Creating.".format(name,
                                                                                  repo.name),
                        fg='yellow')
            label = repo.create_label(name, color)
        return label

    def set_label(self, name, color):
        """
        Create a label in all repos if it doesn't exist.

        Args:
            name(string): Name of the label
            color(string): Color of the label
        """
        for repo in self.repos():
            self._set_label_repo(repo, name, color)

    def _unset_label_repo(self, repo, name):
        """
        Delete a label if it exists.

        Args:
            repo(Repository): Repo where you want to delete the label
            name(string): Name of the label
        """
        try:
            label = repo.get_label(name)
            click.secho("Label `{}` exists in repo `{}`. Deleting.".format(name,
                                                                           repo.name),
                        fg='yellow')
            label.delete()
        except UnknownObjectException:
            click.secho("Label `{}` is already missing in repo `{}`.".format(name,
                                                                             repo.name),
                        fg='green')

    def unset_label(self, name):
        """
        Delete a label in all the repos that it exists.

        Args:
            name(string): Name of the label
        """
        for repo in self.repos():
            self._unset_label_repo(repo, name)

    def rename_label(self, name, new_name):
        """
        Rename an existing label in all the repos that it exists.

        The label's color is preserved.

        Args:
            name(str): Current name of the label
            new_name(str): New name for the label
        """
        for repo in self.repos():
            try:
                label = repo.get_label(name)
                click.secho("Label `{}` exists in repo `{}`. Renaming.".format(name,
                                                                               repo.name),
                            fg='yellow')
                label.edit(new_name, label.color)
            except UnknownObjectException:
                click.secho("Couldn't find label `{}` in repo `{}`.".format(name,
                                                                            repo.name),
                            fg='green')

    def _get_labels_from_repo(self, repo):
        """
        Get labels from a repo.

        Args:
            repo(Repository): Repository to process.

        Return:
            set of (name, color) tuples, one per label.
        """
        labels = set()
        for label in repo.get_labels():
            labels.add((label.name, label.color))
        return labels

    def synch_from_repo(self, repo):
        """
        Synch labels across repos.

        Ensure that all repos have exactly the same labels as another repo that holds
        the source of truth. If labels exist the same color is enforced, if labels
        don't exist they are created, and surplus labels are deleted.

        Args:
            repo(str): Name of the repo that holds the truth.
        """
        repo = self.org.get_repo(repo)
        orig_labels = self._get_labels_from_repo(repo)

        for r in self.repos():
            if r.name == repo.name:
                continue
            click.secho("Processing {}".format(r.name), fg="cyan")
            r_labels = self._get_labels_from_repo(r)
            to_update = orig_labels - r_labels
            for l_tuple in to_update:
                self._set_label_repo(r, l_tuple[0], l_tuple[1])
            # We refresh labels as some might have changed color in the previous step
            r_labels = self._get_labels_from_repo(r)
            to_delete = r_labels - orig_labels
            for l_tuple in to_delete:
                self._unset_label_repo(r, l_tuple[0])

    def search_issue_by_title(self, title, org, repo):
        """
        Search for an issue with `title` in org/repo.

        Args:
            title(string): Title of the issue
            org(string): Organization name the issue has to belong to
            repo(string): Repository name the issue has to belong to

        Return:
            (Issue): first issue whose title matches exactly, or None.
        """
        query = "{} in:Title repo:{}/{}".format(title, org, repo)
        issues = self.gh.search_issues(query)
        for i in issues:
            # The search is fuzzy; require an exact title match.
            if i.title == title:
                return i
        return None

    def move_issue(self, issue_id, src_repo, dst_repo):
        """
        Move an issue between different repos.

        The original issue is closed while the new one references the original
        issue and mentions the original reporter.

        Args:
            issue_id(int): Issue number
            src_repo(string): Name of the source repo where the issue lives
            dst_repo(string): Name of the repo where you want to move the issue to
        """
        src_repo = self.org.get_repo(src_repo)
        dst_repo = self.org.get_repo(dst_repo)
        issue = src_repo.get_issue(issue_id)
        new_body = "Original issue {}/{}#{} created by @{}\n\n{}".format(
            src_repo.organization.name,
            src_repo.name,
            issue.number,
            issue.user.login,
            issue.body)
        issue.edit(state="closed")
        new_issue = dst_repo.create_issue(title=issue.title, body=new_body, labels=issue.labels)
        click.secho("Issue moved, new ID is #{} - {}".format(new_issue.id, new_issue.url),
                    fg="yellow")
        issue.create_comment("This issue has been 'moved' to {}/{}#{}".format(
            dst_repo.organization.name,
            dst_repo.name,
            new_issue.number))

    def spread_issue(self, issue_id, src_repo):
        """
        Spread an issue to multiple repos.

        Given an issue_id from a source repo it creates issues in the rest of
        the repos linking back to the original one. Repos that already carry an
        issue with the same title are left untouched.

        Args:
            issue_id(int): Issue number of the issue you want to spread.
            src_repo(string): Repository name where the issue lives.
        """
        issue = self.org.get_repo(src_repo).get_issue(issue_id)
        # Never create a copy in the issue's own repo.
        self.exclude_repo(issue.repository.name)
        body = "See details in the parent issue {}/{}#{}\n\n".format(
            issue.repository.organization.name,
            issue.repository.name,
            issue.number)
        for repo in self.repos():
            new_issue = self.search_issue_by_title(issue.title, repo.organization.name, repo.name)
            if new_issue:
                click.secho("Issue already exists, ID is {}/{}#{} - {}".format(
                    new_issue.repository.organization.name,
                    new_issue.repository.name,
                    new_issue.number,
                    new_issue.url),
                    fg="green")
            else:
                new_issue = repo.create_issue(title=issue.title, body=body, labels=issue.labels)
                click.secho("Issue created, ID is {}/{}#{} - {}".format(
                    new_issue.repository.organization.name,
                    new_issue.repository.name,
                    new_issue.number,
                    new_issue.url),
                    fg="yellow")

    @staticmethod
    def _new_pr_bucket():
        """Return a fresh zeroed stats bucket for PR accounting."""
        return {"count": 0, "commits": 0, "additions": 0, "deletions": 0}

    @staticmethod
    def _accumulate_pr(bucket, pr):
        """Fold one pull request's counters into ``bucket`` in place."""
        bucket["count"] += 1
        bucket["commits"] += pr.commits
        bucket["additions"] += pr.additions
        bucket["deletions"] += pr.deletions

    def pr_stats(self, days):
        """Gather PR stats (count/commits/additions/deletions) for the past few days.

        Returns:
            dict with per-repo-per-user ``stats``, per-user ``summary_user``
            and per-repo ``summary_repo`` buckets.
        """
        stats = {}
        summary_user = {}
        summary_repo = {}
        for repo in self.repos():
            stats[repo.name] = {}
            summary_repo[repo.name] = self._new_pr_bucket()
            for pr in repo.get_pulls(state="all", sort="created", direction="desc"):
                # PRs arrive newest-first, so stop at the first stale one.
                # NOTE(review): ``datetime.now()`` is naive local time while
                # PyGithub timestamps are naive UTC -- confirm the window.
                if pr.created_at < (datetime.datetime.now() - datetime.timedelta(days=days)):
                    break
                self._accumulate_pr(summary_repo[repo.name], pr)
                user_bucket = stats[repo.name].setdefault(pr.user.login, self._new_pr_bucket())
                self._accumulate_pr(user_bucket, pr)
                overall_bucket = summary_user.setdefault(pr.user.login, self._new_pr_bucket())
                self._accumulate_pr(overall_bucket, pr)
        return {
            "stats": stats,
            "summary_user": summary_user,
            "summary_repo": summary_repo
        }

    def issue_stats(self, days):
        """Count issues closed (updated) in the past few days, per repo."""
        stats = {}
        for repo in self.repos():
            stats[repo.name] = {"count": 0}
            for issue in repo.get_issues(state="closed", sort="updated", direction="desc"):
                # Issues arrive newest-first, so stop at the first stale one.
                if issue.updated_at < (datetime.datetime.now() - datetime.timedelta(days=days)):
                    break
                stats[repo.name]["count"] += 1
        return {
            "stats": stats,
        }
|
napalm-automation/tooling
|
gh_tools/github_helpers.py
|
github_helpers.py
|
py
| 13,728 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8417498337
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def deleteDuplicates(self, head):
        """
        Remove consecutive duplicate values from a sorted singly-linked list.

        :type head: ListNode
        :rtype: ListNode (the same head, with duplicate nodes unlinked)
        """
        cur = head
        # Idiomatic identity check (was ``== None``); handles the empty list.
        if cur is None:
            return None
        while cur.next is not None:
            if cur.next.val == cur.val:
                # Unlink the duplicate.  This also covers the tail case the
                # original special-cased: cur.next.next is simply None there.
                cur.next = cur.next.next
            else:
                cur = cur.next
        return head
|
SarthakPradhan/LeetCode
|
remove-duplicates-from-sorted-list/remove-duplicates-from-sorted-list.py
|
remove-duplicates-from-sorted-list.py
|
py
| 679 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2736199577
|
import keras
from keras import backend as K
from keras.callbacks import Callback
import numpy as np
class BitsLogger(Callback):
    """Keras callback logging an entropy-based 'activation bits' metric.

    After each epoch it sums, over all filter-mask layers, the Shannon
    entropy of each layer's ``filterProbs`` weights, normalised by
    ``log(nConvs)`` so a uniform choice among ``nConvs`` convolutions
    contributes 1 per weight.  The value is printed, written into the
    Keras ``logs`` dict under 'activation_bits' and kept in
    ``bits_history``.
    """

    def __init__(self, nConvs=9, **kwargs):
        # 1/log(nConvs): normalisation constant for the entropy sum.
        self.norm = 1./np.log(float(nConvs))
        self.bits_history=[]
        self.filterLayers=[]
        super(BitsLogger, self).__init__(**kwargs)

    def on_train_begin(self, logs):
        # Collect every layer whose name contains "filter_mask".
        layers = self.model.layers
        for l in layers:
            if l.name == 'model_1':
                # Unwrap one level of nesting: if the model contains an inner
                # model named 'model_1', search that model's layers instead.
                # (Rebinding ``layers`` does not disturb the running loop.)
                layers=l.layers
        for l in layers:
            if "filter_mask" in l.name:
                self.filterLayers.append(l)

    def on_epoch_end(self, epoch, logs={}):
        # NOTE(review): mutable default ``logs={}`` is shared across calls;
        # harmless here since it is only written into, but ``logs=None``
        # would be safer.
        bitsum=0.
        for l in self.filterLayers:
            weights=K.flatten(l.filterProbs)
            # -norm * sum(p*log p): normalised Shannon entropy.
            # Assumes filterProbs are strictly positive probabilities --
            # TODO confirm; a zero weight would produce log(0).
            b=-self.norm*K.sum(weights*K.log(weights))
            bitsum += b
        print(' Activation bits: ' + str(K.eval(bitsum)))
        logs['activation_bits'] = K.eval(bitsum)
        self.bits_history.append(K.eval(bitsum))
class EntropyLogger(Callback):
    """Keras callback logging a normalised log-std-dev 'entropy' metric.

    The normalisation bounds (hmin/hmax) are estimated empirically at
    train start by sampling uniform random vectors with the same number
    of filters, so the reported value is roughly scaled to [0, 1] per
    layer.  The value is printed, stored under 'entropy' in ``logs`` and
    appended to ``entropy_history``.
    """

    def __init__(self, **kwargs):
        self.entropy_history=[]
        self.filterLayers=[]
        # Gaussian differential-entropy constant; currently unused below --
        # NOTE(review): confirm whether it was meant to enter the formula.
        self.constant = 0.5*np.log(2*np.pi) + 0.5
        self.hmin=0.
        self.hmax=0.
        self.norm=1.
        super(EntropyLogger, self).__init__(**kwargs)

    def on_train_begin(self, logs):
        # Collect every layer whose name contains "filter_mask", unwrapping
        # one level of inner model ('model_1') if present.
        layers = self.model.layers
        for l in layers:
            if l.name == 'model_1':
                layers=l.layers
        for l in layers:
            if "filter_mask" in l.name:
                self.filterLayers.append(l)
        # Empirically estimate bounds of log(std) for uniform random weight
        # vectors of the same width, then use them to normalise to ~[0, 1].
        nFilters = K.eval(K.shape(self.filterLayers[-1].filterProbs)[-1])
        r=np.random.uniform(size=(1000000, nFilters))
        sigma = np.std(r, axis=1)
        # 5% margins so observed values rarely fall outside the range.
        self.hmin = 1.05 * np.log(np.amin(sigma, axis=0))
        self.hmax = 0.95 * np.log(np.amax(sigma, axis=0))
        self.norm = 1. / (self.hmax - self.hmin)

    def on_epoch_end(self, epoch, logs={}):
        # NOTE(review): mutable default ``logs={}`` -- only written into,
        # but ``logs=None`` would be the safer idiom.
        s=0.
        for l in self.filterLayers:
            weights = K.flatten(l.filterProbs)
            # Normalised log standard deviation of the layer's weights.
            s += self.norm*(K.log(K.std(weights)) - self.hmin)
        print(' entropy: ' + str(K.eval(s)) )
        logs['entropy'] = K.eval(s)
        self.entropy_history.append(K.eval(s))
|
twoev/APEMEN
|
utils/callbacks.py
|
callbacks.py
|
py
| 2,012 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25226116736
|
from sklearn import datasets
import pandas as pd
iris = datasets.load_iris()
iris_df = pd.DataFrame(iris.data)
iris_df.columns = iris.feature_names
iris_df['target'] = iris.target
# original target = 0,1,2 int32
print(iris_df.target)
# changing them by using DF.astype(type)
print(iris_df.target.astype(float))
|
HawkingLaugh/Data-Processing-Using-Python
|
Week4/28. inconsistent_data_handling.py
|
28. inconsistent_data_handling.py
|
py
| 313 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35425394354
|
from yaml_parser import parse_yaml
from voluptuous import Schema,Object, Range, Coerce, All, Any, Optional, Lower, Invalid
import re
import sys
import argparse
"""
Python YAML validator
"""
list_of_ints = All([Coerce(int)], msg='invalid list of ints')
from datetime import datetime
def check_date(datestring):
    """Voluptuous validator: accept only dates formatted as YYYY-MM-DD.

    Returns None on success (voluptuous treats that as 'valid').

    Raises:
        Invalid: when the string does not parse as an ISO date.
    """
    try:
        # The parsed value is discarded; we only care that parsing succeeds.
        datetime.strptime(datestring, '%Y-%m-%d')
    except (ValueError, TypeError):
        # FIX: the original bare ``except:`` swallowed *every* exception
        # (even KeyboardInterrupt) and contained a dead ``Coerce(datetime)``
        # call whose result was thrown away; only parse failures should map
        # to Invalid.
        raise Invalid('expected in Y-m-d')
# Schema for the 'simulation' section: adds a validated date range on top
# of the prediction/replenishment settings.
simulation_schema=Schema({
    'quantiles': [All(Coerce(int), Range(1, 100), msg='not a valid quantile')],
    'prediction': {
        'model': str,
        'window': int
    },
    # Dates must be YYYY-MM-DD (see check_date above).
    'startdate': check_date,
    'enddate': check_date,
    'replenishment': {
        'model': str
    },
    'input_file' : str
})

# Schema for the 'replenishment' section: same as simulation but without
# the start/end dates.
replenishment_schema=Schema({
    'quantiles': [All(Coerce(int), Range(1, 100), msg='not a valid quantile')],
    'prediction': {
        'model': str,
        'window': int
    },
    'replenishment': {
        'model': str
    },
    'input_file' : str
})
def test_file(yamlconfig, types):
    """Validate the relevant section of a parsed YAML config.

    Args:
        yamlconfig: dict parsed from the YAML input.
        types: which schema to apply: 'simulation' or 'replenishment'.
            Any other value is silently ignored (legacy behaviour).

    Raises:
        voluptuous.Invalid / MultipleInvalid: when validation fails.
        KeyError: when the expected top-level section is missing.
    """
    if types == 'simulation':
        simulation_schema(yamlconfig['simulation'])
    # FIX: the original compared against the misspelled 'replenishemnt',
    # so replenishment configs were never actually validated.
    if types == 'replenishment':
        replenishment_schema(yamlconfig['replenishment'])
if __name__ == "__main__":
    # CLI: validate a YAML config file against the schema selected by -t.
    parser = argparse.ArgumentParser()
    parser.add_argument("-y","--yaml", help="yaml inputfile to test", type=str)
    parser.add_argument("-t","--types", help="type of yaml", type=str)
    args = parser.parse_args()
    ### Parse YAML to test
    to_test = parse_yaml(args.yaml)
    test_file(to_test,args.types)
|
philippmack/europython2015-pmack
|
config/validator.py
|
validator.py
|
py
| 1,633 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3008185523
|
# Day 8
import numpy as np
from copy import copy
def run_part_1(data):
    """Count trees visible from outside the grid (edge trees always count)."""
    hidden, max_x, max_y = prepare_data(data)
    for x in range(1, max_x - 1):
        for y in range(1, max_y - 1):
            height = data[x, y]
            # The four lines of sight from this interior tree.
            lines = (data[x, :y], data[x, y + 1:],
                     data[:x, y], data[x + 1:, y])
            visible = any(np.max(line) < height for line in lines)
            if not visible:
                hidden[x, y] = 1  # blocked in every direction
    # Visible trees = total cells minus the hidden interior ones.
    return np.size(data) - np.sum(hidden)
def run_part_2(data):
    """Return the best scenic score (product of the four viewing distances)."""
    scenic_scores, max_x, max_y = prepare_data(data)
    for x in range(1, max_x - 1):
        for y in range(1, max_y - 1):
            height = data[x, y]
            # Each view is ordered outward from the tree.
            views = (np.flip(data[x, :y]), data[x, y + 1:],
                     np.flip(data[:x, y]), data[x + 1:, y])
            distances = []
            for view in views:
                blockers = np.where(view >= height)[0]
                # Distance to the first blocking tree, or to the grid edge.
                distances.append(blockers[0] + 1 if blockers.size else view.size)
            scenic_scores[x, y] = np.prod(np.array(distances))
    return np.max(scenic_scores)
def prepare_data(data):
    """Return a zeroed array shaped like ``data`` plus the grid dimensions.

    Uses ``np.zeros_like`` instead of the original copy()+fill(0), which
    avoids an unnecessary full copy of the input's contents while keeping
    the same shape and dtype.

    Args:
        data: 2-D numpy array of tree heights.
    Returns:
        (zeros array, number of rows, number of columns)
    """
    return np.zeros_like(data), data.shape[0], data.shape[1]
def parse_input(data):
    """Convert lines of digit strings into a 2-D integer numpy array."""
    grid = [list(map(int, line)) for line in data]
    return np.array(grid)
|
swemoney/AdventOfCode
|
2022/08/day.py
|
day.py
|
py
| 1,334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41646398531
|
""" Module containing routines to setup the training of policies.
"""
import argparse
from typing import Optional, Sequence
from aizynthfinder.training.utils import Config
from aizynthfinder.training.keras_models import (
train_expansion_keras_model,
train_filter_keras_model,
train_recommender_keras_model,
)
def main(optional_args: Optional[Sequence[str]] = None) -> None:
    """Entry-point for the aizynth_training tool"""
    parser = argparse.ArgumentParser("Tool to train a network policy")
    parser.add_argument("config", help="the filename to a configuration file")
    parser.add_argument(
        "model",
        choices=["expansion", "filter", "recommender"],
        help="the model to train",
    )
    args = parser.parse_args(optional_args)
    config = Config(args.config)
    # Dispatch table: argparse's `choices` guarantees the key exists.
    trainers = {
        "expansion": train_expansion_keras_model,
        "filter": train_filter_keras_model,
        "recommender": train_recommender_keras_model,
    }
    trainers[args.model](config)


if __name__ == "__main__":
    main()
|
AlanHassen/modelsmatter
|
aizynthfinder/training/training.py
|
training.py
|
py
| 1,085 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3749581806
|
import glob
import platform
import setuptools
import Cython.Build
# By compiling this separately as a C library, we avoid problems
# with passing C++-specific flags when building the extension
lrslib = ('lrslib', {'sources': glob.glob("solvers/lrs/*.c")})

# Single Cython extension bundling the wrapper with the C++ core, the game
# representations and the solver back-ends.
cppgambit = setuptools.Extension(
    "pygambit.lib.libgambit",
    sources=(
        ["pygambit/lib/libgambit.pyx"] +
        glob.glob("core/*.cc") +
        glob.glob("games/*.cc") +
        glob.glob("games/agg/*.cc") +
        glob.glob("solvers/*/*.cc") +
        ["tools/lp/nfglp.cc",
         "tools/lp/efglp.cc",
         "tools/logit/path.cc",
         "tools/logit/nfglogit.cc",
         "tools/logit/efglogit.cc"]
    ),
    language="c++",
    include_dirs=["."],
    # macOS/clang needs the C++11 flag spelled out; other platforms build fine
    # with their defaults.
    extra_compile_args=(
        ["-std=c++11"] if platform.system() == "Darwin" else []
    )
)
def readme():
    """Return the full text of README.rst, used as the long description."""
    with open("README.rst") as handle:
        contents = handle.read()
    return contents
# Package metadata and build wiring: the lrs C library is built first as a
# plain C library (see `lrslib` above), then the Cython extension links it in.
setuptools.setup(
    name="pygambit",
    version="16.0.2",
    description="Software tools for game theory",
    long_description=readme(),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Scientific/Engineering :: Mathematics"
    ],
    keywords="game theory Nash equilibrium",
    license="GPL2+",
    author="Theodore Turocy",
    author_email="[email protected]",
    url="http://www.gambit-project.org",
    project_urls={
        'Documentation': 'https://gambitproject.readthedocs.io/',
        'Source': 'https://github.com/gambitproject/gambit',
        'Tracker': 'https://github.com/gambitproject/gambit/issues',
    },
    python_requires=">=3.7",
    install_requires=[
        'lxml',  # used for reading/writing GTE files
        'numpy',
        'scipy',
    ],
    libraries=[lrslib],
    packages=['pygambit', 'pygambit.games', 'pygambit.lib'],
    ext_modules=Cython.Build.cythonize(cppgambit)
)
|
vignesh7056/gambit
|
src/setup.py
|
setup.py
|
py
| 2,265 |
python
|
en
|
code
| null |
github-code
|
6
|
6066153310
|
import pygame
from _draw import *
from _utils import *
class gui():
    """Shared UI context for the game.

    Holds the pygame surface, fonts, the colour palette, preloaded image
    assets and miscellaneous widget state. Drawing helpers referenced here
    (drawText, drawSelectableImage, impFilesL) come from the star-imports of
    _draw/_utils at the top of the file.
    """
    def __init__(self,
            white,
            screen,
            width,
            height,
            smallNokiaFont,
            hugeNokiaFont,
            font,
            bigFont,
            hugeFont,
            smallFont,
            nanoFont,
            themeColour,
            exitButton,
            nextButton,
            dialogue,
            sDialogue,
            smsDialogue,
            music,
            borderSlide,
            notificationDialogue,
            user_input,
            statusButton,
            inventoryButton,
            noteButton,
            nokiaFont,
            nanoNokiaFont,
            smsFont,
            musicFont,
            jumboFont,
            gameTime,
            smsScrollDialogue,
            squareFont,
            squareFontH,
            debugSwitch=True,
            clicked=False,
            ):
        # Every constructor argument is stored verbatim on the instance so the
        # rest of the game can reach fonts/widgets through this one object.
        self.white = white
        self.screen = screen
        self.width = width
        self.height = height
        self.smallNokiaFont = smallNokiaFont
        self.hugeNokiaFont = hugeNokiaFont
        self.font = font
        self.bigFont = bigFont
        self.hugeFont = hugeFont
        self.smallFont = smallFont
        self.nanoFont = nanoFont
        self.themeColour = themeColour
        self.exitButton = exitButton
        self.nextButton = nextButton
        self.dialogue = dialogue
        self.sDialogue = sDialogue
        self.smsDialogue = smsDialogue
        self.music = music
        self.borderSlide = borderSlide
        self.notificationDialogue = notificationDialogue
        self.user_input = user_input
        self.statusButton = statusButton
        self.inventoryButton = inventoryButton
        self.noteButton = noteButton
        self.nokiaFont = nokiaFont
        self.nanoNokiaFont = nanoNokiaFont
        self.smsFont = smsFont
        self.musicFont = musicFont
        self.jumboFont = jumboFont
        self.gameTime = gameTime
        self.smsScrollDialogue = smsScrollDialogue
        self.squareFont = squareFont
        self.squareFontH = squareFontH
        self.debugSwitch = debugSwitch
        self.clicked = clicked
        # Colour palette (RGB tuples) used throughout the drawing helpers.
        self.greenA = (36,65,45)
        self.greenB = (82,128,58)
        self.greenC = (173,195,63)
        self.greenD = (215,233,149)
        self.darkGreen = (5,37,23)
        self.buttonGreen = (47,75,45)
        self.offwhite = (245,245,245)
        self.screenDefault = (201,221,126)
        self.screenColour = (201,221,126)
        self.greenText = (29,153,29)
        self.greenBorder = (127,187,73)
        self.darkGrey = (44,52,56)
        self.lightBlack = (40,41,35)
        self.lightGrey = (72,77,79)
        # ---------------Images
        # NOTE(review): all asset paths are relative — presumably the game is
        # always launched from the repo root; confirm before repackaging.
        self.signal = pygame.image.load('pics/phoneLogos/signal.png')
        self.bottomNavMock = pygame.image.load('pics/assets/mocks/navBottom.png')
        self.bottomNav = pygame.image.load('pics/assets/nav/navBottom.png')
        self.nextDayBtn = [pygame.image.load('pics/assets/nav/nextDay1.png'),pygame.image.load('pics/assets/nav/nextDay2.png')]
        self.tileBackground = pygame.image.load('pics/assets/backgrounds/tile.png')
        self.gradientBackground = pygame.image.load('pics/assets/backgrounds/gradient.png')
        self.cubeBackground = pygame.image.load('pics/assets/backgrounds/cube.png')
        # -------------widget images
        self.widgetNode = [pygame.image.load('pics/assets/widgetNode/widgetNode1.png'),pygame.image.load('pics/assets/widgetNode/widgetNode2.png'),pygame.image.load('pics/assets/widgetNode/widgetNode3.png')]
        self.smallActiveWidget = pygame.image.load('pics/assets/widgetNode/smallActiveWidget.png')
        self.medActiveWidget = pygame.image.load('pics/assets/widgetNode/medActiveWidget.png')
        self.medActiveWidgetLab = pygame.image.load('pics/assets/widgetNode/widgetMedLabel.png')
        self.bigActiveWidget = pygame.image.load('pics/assets/widgetNode/bigActiveWidget.png')
        # ----- Mech imgs
        # impFilesL loads numbered image sequences from the given directory.
        self.mechBoxMed = impFilesL('mechBoxMed1.png',tDir = 'pics/assets/mechBox/')
        self.mechBoxBig = impFilesL('mechBoxBig1.png',tDir = 'pics/assets/mechBox/')
        self.mechBoxGreen = impFilesL('mechBoxGreen1.png',tDir = 'pics/assets/mechBox/')
        self.mechBoxMedLight = [pygame.image.load('pics/assets/mechBox/mechBoxMedLight1.png'),pygame.image.load('pics/assets/mechBox/mechBoxMedLight2.png'),pygame.image.load('pics/assets/mechBox/mechBoxMedLight3.png'),pygame.image.load('pics/assets/mechBox/mechBoxMedLight4.png')]
        self.mechBtnMed = [pygame.image.load('pics/assets/buttons/mechBtnMed1.png'),pygame.image.load('pics/assets/buttons/mechBtnMed2.png')]
        self.mechPlainBtnMed = [pygame.image.load('pics/assets/buttons/medMechBtn1.png'),pygame.image.load('pics/assets/buttons/medMechBtn2.png')]
        self.extendableBox = [pygame.image.load('pics/assets/textBox/extendableDarkGreen1.png'),pygame.image.load('pics/assets/textBox/extendableDarkGreen2.png')]
        self.notitfyBtnSmall = [pygame.image.load('pics/assets/buttons/buttonSmall1.png'),pygame.image.load('pics/assets/buttons/buttonSmall2.png')]
        self.notitfyBtnMed = [pygame.image.load('pics/assets/buttons/buttonMed1.png'),pygame.image.load('pics/assets/buttons/buttonMed2.png')]
        # NOTE(review): self.signal is loaded a second time here, overwriting
        # the identical load above — harmless but redundant.
        self.signal = pygame.image.load('pics/phoneLogos/signal.png')
        self.minis = [pygame.image.load('pics/assets/minis/minibuttons1.png'),pygame.image.load('pics/assets/minis/minibuttons2.png'),pygame.image.load('pics/assets/minis/minibuttons3.png'),pygame.image.load('pics/assets/minis/minibuttons4.png'),pygame.image.load('pics/assets/minis/minibuttons5.png'),pygame.image.load('pics/assets/minis/minibuttons6.png'),pygame.image.load('pics/assets/minis/minibuttons7.png'),pygame.image.load('pics/assets/minis/minibuttons8.png'),pygame.image.load('pics/assets/minis/minibuttons9.png'),pygame.image.load('pics/assets/minis/minibuttons10.png')]
        # ------mouse
        # Last known mouse position; updated externally each frame.
        self.mx = 0
        self.my = 0
        #buttons
        self.sell = impFilesL('sell1.png',tDir = 'pics/assets/buttons/')
        self.bank = impFilesL('bank1.png',tDir = 'pics/assets/buttons/')
        self.auto = impFilesL('auto1.png',tDir = 'pics/assets/buttons/')
        self.selectMe = impFilesL('selectme1.png',tDir = 'pics/assets/buttons/')
        self.increment = impFilesL('increment1.png',tDir = 'pics/assets/buttons/')
        self.decrement = impFilesL('decrement1.png',tDir = 'pics/assets/buttons/')
        self.menuBG = None
        self.hideExitButton = False
    def border(self,colour=(128,0,0)):
        """Draw a 4px rectangular frame covering the central 80% of the screen."""
        self.bx,self.by = 0.1*self.width,0.1*self.height
        self.bw,self.bh = 0.8*self.width,0.8*self.height
        rect = pygame.draw.rect(self.screen, colour, [self.bx, self.by,self.bw , self.bh],4)
    def mouseCollides(self,mousePos,x,y,w,h):
        """Return True when mousePos (x, y) lies strictly inside the given rect."""
        if mousePos[0] > x and mousePos[0] < x + w:
            if mousePos[1] > y and mousePos[1] < y + h:
                return(True)
        return(False)
    def incrementableWidget(self,x,y,text,value,inc=1,cap=100,userInput=None,incrementKey=None,insta=False,instaMessage='Auto On'):
        """+ button and text to increment and return value

        Returns (new value, x end, y end) so callers can chain widget layout.
        NOTE(review): with the defaults, userInput/incrementKey are None and
        the ``.upper()`` calls below would raise — callers presumably always
        pass both; confirm.
        """
        textx, texty = x+60,y+10
        #---------exit if auto on
        if(insta):
            # Auto mode: draw the button for consistency but do not change value.
            drawSelectableImage(self.increment[0],self.increment[1],(x,y),self,trim=False)
            hov, tw,ty = drawText(self.screen,self.nanoNokiaFont, instaMessage,textx ,texty, self.greenD)
            xEnd,yEnd = textx + tw, y + self.minis[5].get_rect().h
            return(value,xEnd,yEnd)
        # --------- display text
        displayText = text + ' ' + str(value)
        selected = drawSelectableImage(self.increment[0],self.increment[1],(x,y),self,trim=False)
        # Keyboard shortcut acts like a click on the + button.
        if(userInput.upper() == incrementKey.upper()): selected = True
        if(selected):
            # Step by `inc`, but never by more than `cap` in one click.
            if(inc<=cap):
                value = value + inc
            else:
                value = value + cap
        hov, tw,ty = drawText(self.screen,self.nanoNokiaFont, displayText,textx ,texty, self.greenD)
        xEnd,yEnd = textx + tw, y + self.minis[5].get_rect().h
        return(value,xEnd,yEnd)
    def incDecWidgetAbsolute(self,x,y,text,value,inc=1,cap=100,userInput="none",incrementKey="notset"):
        """+ button and text to increment and return value

        Draws a -/+ button pair; the value is clamped to [0, cap].
        Returns (new value, x end, y end).
        """
        displayText = text + ' ' + str(value)
        selected = drawSelectableImage(self.decrement[0],self.decrement[1],(x,y),self,trim=False)
        if(userInput.upper() == incrementKey.upper()): selected = True
        if(selected):
            # Decrement, clamping at zero.
            if((value - inc)>=0):
                value = value - inc
            else:
                value = 0
        x = x + self.decrement[0].get_rect().w
        plusSelected = drawSelectableImage(self.increment[0],self.increment[1],(x,y),self,trim=False)
        if(plusSelected):
            # Increment, clamping at cap.
            if((value + inc)<=cap):
                value = value + inc
            else:
                value = cap
        textx, texty = x+60,y+10
        hov, tw,ty = drawText(self.screen,self.nanoNokiaFont, displayText,textx ,texty, self.greenD)
        xEnd,yEnd = textx + tw, y + self.minis[5].get_rect().h
        return(value,xEnd,yEnd)
    def debug(self,debugMessage):
        """Print *debugMessage* when debugging is enabled (any truthy switch)."""
        if(self.debugSwitch):
            print(debugMessage)
    def debugDetailed(self,debugMessage):
        """Print *debugMessage* only when debugSwitch is exactly 'detailed'."""
        if(self.debugSwitch=='detailed'):
            print(debugMessage)
class notificationDialogue():
    """Paginated word-wrapped text renderer.

    Wraps a paragraph to a maximum pixel width, draws as many lines as fit,
    and advances to the remaining lines when the user clicks inside the text
    area. Caches the wrapped lines until the text or source changes.
    """
    def __init__(self):
        self.initialised = False   # becomes True once textArray is built
        self.origText = ''         # text the cache was built from
        self.origSource = ''       # source tag the cache was built from
        self.textArray = []        # wrapped lines still to be displayed
        self.colour = (0,0,0)
        self.y = 0
        self.senPos = 0
    def drawDialogue(self,gui,myfont, text,pos,maxWidth,maxHeight,clicked, colour=(255, 255, 255),skip=False,verticalSep=1.1,maxVerticleLines=80,displayNextButton=False,source=None):
        """Render *text* word-wrapped at *pos*; page forward on click.

        maxWidth/maxHeight bound the text block in pixels; verticalSep is the
        line-height multiplier. NOTE(review): `skip` and `displayNextButton`
        are currently unused (the next-button call is commented out below).
        """
        sx,sy = pos[0],pos[1]
        x,y = sx,sy
        tRemaining = ""
        hovered = gui.mouseCollides((gui.mx,gui.my),x,y,maxWidth,maxHeight)
        # reset if called by new function
        if(self.origText!= text or self.origSource!= source):
            self.initialised=False
            self.origText = text
        if(self.initialised== False):
            # format paragraph into array of fitted sentences
            self.origText = text
            self.origSource = source
            self.senPos = 0
            dAr,para = [], ""
            for word in text.split(' '):
                # Greedy wrap: grow the line until rendering exceeds maxWidth,
                # then commit the previous (fitting) line and start a new one.
                pre = para
                para += word + " "
                textsurface = myfont.render(para, True, colour)
                w = textsurface.get_rect().width
                if(w>= maxWidth):
                    dAr.append(pre)
                    para = word + " "
            dAr.append(para)
            self.textArray = dAr
            self.initialised = True
        hTotal = 0
        for sentence in range(0,len(self.textArray)):
            textsurface = myfont.render(self.textArray[sentence], True, colour)
            h = textsurface.get_rect().height
            gui.screen.blit(textsurface,(x,y))
            y = y + verticalSep*h
            hTotal = hTotal + verticalSep*h
            # Lines not yet drawn; becomes the next page on click.
            tRemaining = self.textArray[sentence+1:]
            # Condition: If lines exceed specified MAX LINES, break here
            if((sentence>=maxVerticleLines-1)): break
            # Condition: If lines exceed specified HEIGHT
            if(hTotal >= maxHeight): break
        #if(displayNextButton): nextP = gui.nextButton.display(gui,noBorder=False)
        # Condition: If lines remaining and clicked, go next page
        if(clicked and hovered and (len(tRemaining)>0)):
            self.textArray = tRemaining
|
murchie85/bumdee
|
_gui.py
|
_gui.py
|
py
| 12,565 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73787039549
|
"""
Code to explore the PDF and CDF of weight distributions.
We use truncated lognormals to define the distribution of excitatory connections.
We scale that by -8 for inhibitory connections.
We represent the inhibitory connections with a negative number as a convention to be consistent
with the network simulator (NEST), although technically conductances must be positive.
"""
import scipy.interpolate
import scipy.stats as st
import numpy as np
def _approx_pdf_from_cdf(cdf, vmin, vmax, n_samples=10**5):
"""numerically approximate the Probability Density Function from the cumulative"""
x = np.linspace(vmin, vmax, n_samples)
mid = .5 * (x[:-1] + x[1:])
derivative = np.diff(cdf(x)) / np.diff(x)
return scipy.interpolate.interp1d(mid, derivative, fill_value=0., bounds_error=False)
def _approx_inv_cdf_from_cdf(cdf, vmin, vmax, n_samples=10**5):
"""numerically approximate the inverse of a Cumulative Distribution Function"""
x = np.linspace(vmin, vmax, n_samples)
return scipy.interpolate.interp1d(cdf(x), x, fill_value=0., bounds_error=False)
class TruncatedLognormal:
    """
    Represents a truncated, and possibly scaled, lognormal distribution.

    The base lognormal is truncated to [0, vmax] and renormalised; the
    variable is then scaled by ``g``. A negative ``g`` mirrors the support,
    which is the convention used for inhibitory connections.
    """
    def __init__(self, loc, scale, shape, vmax, g=1):
        """
        :param loc: ``loc`` of the underlying scipy lognormal
        :param scale: ``scale`` of the underlying scipy lognormal
        :param shape: shape parameter ``s`` of the underlying scipy lognormal
        :param vmax: truncation point of the (unscaled) distribution
        :param g: scaling factor applied to the variable (negative flips sign)
        """
        self.loc = loc
        self.scale = scale
        self.shape = shape
        self.vmax = vmax
        self.g = g
        self.base_lognorm = st.lognorm(
            loc=self.loc,
            scale=self.scale,
            s=self.shape)
        # Normalisation constant of the truncated distribution.
        self.base_lognorm_cdf_vmax = self.base_lognorm.cdf(self.vmax)
        self._pdf = _approx_pdf_from_cdf(self.cdf, *self.vrange)
        self._icdf = _approx_inv_cdf_from_cdf(self.cdf, *self.vrange)
    @property
    def vrange(self) -> tuple:
        """truncated range of X, always ordered (low, high)"""
        vrange = 0, self.vmax * self.g
        if self.g < 0:
            vrange = vrange[1], vrange[0]
        return vrange
    def linspace(self, num=50):
        """generate samples linearly on the domain of X"""
        return np.linspace(*self.vrange, num=num)
    def cdf(self, weight):
        """Cumulative Distribution Function"""
        weight_norm = weight / self.g
        prob = np.minimum(self.base_lognorm.cdf(weight_norm) / self.base_lognorm_cdf_vmax, 1)
        if self.g < 0:
            # Negative scaling reverses the orientation of the CDF.
            prob = 1 - prob
        return prob
    def pdf(self, weight):
        """Probability Density Function"""
        return self._pdf(weight)
    def inv_cdf(self, prob):
        """
        Inverse of the Cumulative Distribution Function.
        Maps from probability to values.
        """
        return self._icdf(prob)
    def rev_cdf(self, prob):
        """
        Reversed Cumulative Distribution Function.
        Cumulative summation is done right-to-left.
        Note: despite the parameter name (kept for compatibility), *prob* is a
        weight value — the same argument ``cdf`` takes.
        """
        return 1 - self.cdf(prob)
    def _discrete_pmf(self, n_samples=1_000_000):
        """Dense grid over the support and the normalised pmf on that grid.

        Shared by mean()/var(), which previously duplicated this code inline.
        """
        x = self.linspace(n_samples)
        p = self.pdf(x)
        return x, p / np.sum(p)
    def mean(self):
        """Estimated mean from the distribution"""
        x, p = self._discrete_pmf()
        return np.sum(x * p)
    def var(self):
        """Estimated variance from the distribution"""
        # The original stored this in a variable named ``mean``, obscuring
        # that it is the second central moment.
        x, p = self._discrete_pmf()
        return np.sum(np.square(x - self.mean()) * p)
    def std(self):
        """Estimated std from the distribution"""
        return np.sqrt(self.var())
    def quantile(self, q):
        """Estimated quantile from the distribution"""
        assert 0 <= q <= 1
        return self.inv_cdf(q).item()
    def median(self):
        """Estimated median from the distribution"""
        return self.quantile(.5)
    def min(self):
        """Min value of the distribution"""
        return self.quantile(0)
    def max(self):
        """Max value of the distribution"""
        return self.quantile(1)
class ConnDist:
    """Combination of exc and inh weight distributions"""
    def __init__(self, e_weights_loc, e_weights_scale, e_weights_shape, e_weights_vmax, g):
        assert g < 0
        # Both populations share the same truncated lognormal; the inhibitory
        # one is additionally scaled by the (negative) factor g.
        shared = (e_weights_loc, e_weights_scale, e_weights_shape, e_weights_vmax)
        self.exc = TruncatedLognormal(*shared)
        self.inh = TruncatedLognormal(*shared, g=g)
    @classmethod
    def from_batch(cls, batch):
        """Build from a batch whose registry holds exactly one parameter set."""
        param_names = ['e_weights_loc', 'e_weights_scale', 'e_weights_vmax', 'e_weights_shape', 'g']
        weight_dist_params = batch.reg[param_names].drop_duplicates()
        assert len(weight_dist_params) == 1
        return cls(**weight_dist_params.iloc[0])
|
comp-neural-circuits/tctx
|
tctx/analysis/wdist.py
|
wdist.py
|
py
| 4,725 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24222566552
|
from tkinter import*
from PIL import Image ,ImageTk
from tkinter import ttk
from tkinter import messagebox
import mysql.connector
import urllib.request
# Download the UI artwork at import time (requires network access).
# NOTE(review): a failed download raises here and stops the app from starting;
# presumably acceptable for this tool — confirm if offline use matters.
urllib.request.urlretrieve(
    'https://iocl.com/images/indane_1.jpg',
    "indane1.png")  # Indane logo shown in the title bar
urllib.request.urlretrieve(
    'https://cdn5.newsnationtv.com/images/2022/01/01/lpg-gas-price-today-83.jpg',
    "cylinder.jpg")  # cylinder photo shown on the right-hand panel
class LPGbooking:
    """Tkinter window for booking LPG cylinders.

    Left panel collects booking details; a side panel shows the consumer's
    record fetched from MySQL; the bottom table lists and searches bookings.
    Backing store: MySQL database ``lpg_booking`` (tables ``booking`` and
    ``customer``).
    """

    # Base price per cylinder size; tax is 18% of the base.
    PRICES = {"Small": 546.0, "Medium": 870.0, "Large": 1136.0}

    def _connect(self):
        """Open a new connection to the lpg_booking database.

        NOTE(review): credentials are hard-coded; move to config/env before
        deploying anywhere shared.
        """
        return mysql.connector.connect(
            host="localhost", username="root",
            password="Aditya8318@", database="lpg_booking")

    def __init__(self, root):
        self.root = root
        self.root.title("LPG Booking ")
        self.root.geometry("1295x550+30+100")
        # ====== variables ========
        self.var_consid = StringVar()
        self.var_bookdate = StringVar()
        self.var_booking_type = StringVar()
        self.var_deldate = StringVar()
        self.var_paidtax = StringVar()
        self.var_subtotal = StringVar()
        self.var_total = StringVar()
        # ********* Title *****************
        lbl_title = Label(self.root, text="LPG BOOKING ", font=("times new roman", 15, "bold"),
                          bg="black", fg="dark orange", bd=4, relief=RIDGE)
        lbl_title.place(x=0, y=0, width=1290, height=70)
        # *********** LOGO **************
        img1 = Image.open(r"indane1.png")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
        # Image.LANCZOS when upgrading Pillow.
        img1 = img1.resize((200, 70), Image.ANTIALIAS)
        self.photoimg1 = ImageTk.PhotoImage(img1)
        labelimg = Label(self.root, image=self.photoimg1, bd=4, relief=RIDGE)
        labelimg.place(x=0, y=0, width=200, height=70)
        # ************** Label Frame ******************
        labelframeleft = LabelFrame(self.root, bd=2, relief=RIDGE, text="LPG Booking",
                                    padx=2, font=("times new roman", 14, "bold"))
        labelframeleft.place(x=5, y=70, width=425, height=472)
        # ******************** Labels and Entries *****************
        # consumer id
        lbl_cust_contact = Label(labelframeleft, text="Consumer ID :",
                                 font=("arial", 12, "bold"), padx=2, pady=6)
        lbl_cust_contact.grid(row=0, column=0, sticky="w")
        entry_contact = ttk.Entry(labelframeleft, textvariable=self.var_consid,
                                  font=("arial", 12, "bold"), width=20)
        entry_contact.grid(row=0, column=1, sticky="w")
        # fetch data button
        btnFetchData = Button(labelframeleft, command=self.Fetch_cust, text="Fetch Data",
                              font=("arial", 10, "bold"), bg="black", fg="gold", width=10)
        btnFetchData.place(x=320, y=4)
        # booking date
        booking_date = Label(labelframeleft, font=("arial", 12, "bold"),
                             text="Booking Date :", padx=2, pady=6)
        booking_date.grid(row=1, column=0, sticky="w")
        txt_booking_date = ttk.Entry(labelframeleft, textvariable=self.var_bookdate,
                                     font=("arial", 12, "bold"))
        txt_booking_date.grid(row=1, column=1)
        # delivery date
        lbl_deliverydate = Label(labelframeleft, font=("arial", 12, "bold"),
                                 text="Delivery Date :", padx=2, pady=6)
        lbl_deliverydate.grid(row=2, column=0, sticky="w")
        txt_deliverydate = ttk.Entry(labelframeleft, textvariable=self.var_deldate,
                                     font=("arial", 12, "bold"))
        txt_deliverydate.grid(row=2, column=1)
        # booking type
        lblbookingtype = Label(labelframeleft, font=("arial", 12, "bold"),
                               text="Cylinder Type :", padx=2, pady=6)
        lblbookingtype.grid(row=3, column=0, sticky="w")
        combo_search = ttk.Combobox(labelframeleft, textvariable=self.var_booking_type,
                                    font=("arial", 12, "bold"))
        combo_search["value"] = ("Small", "Medium", "Large")
        combo_search.current(0)
        combo_search.grid(row=3, column=1, padx=8)
        # paid tax
        lbltax = Label(labelframeleft, font=("arial", 12, "bold"),
                       text="Paid Tax :", padx=2, pady=6)
        lbltax.grid(row=4, column=0, sticky="w")
        txttax = ttk.Entry(labelframeleft, textvariable=self.var_paidtax,
                           font=("arial", 12, "bold"))
        txttax.grid(row=4, column=1)
        # sub total
        lblsub = Label(labelframeleft, font=("arial", 12, "bold"),
                       text="Sub Total :", padx=2, pady=6)
        lblsub.grid(row=5, column=0, sticky="w")
        txtsub = ttk.Entry(labelframeleft, textvariable=self.var_subtotal,
                           font=("arial", 12, "bold"))
        txtsub.grid(row=5, column=1)
        # total cost
        lbltotal = Label(labelframeleft, font=("arial", 12, "bold"),
                         text="Total Amount :", padx=2, pady=6)
        lbltotal.grid(row=6, column=0, sticky="w")
        txttotal = ttk.Entry(labelframeleft, textvariable=self.var_total,
                             font=("arial", 12, "bold"))
        txttotal.grid(row=6, column=1)
        # ======== bill button ======
        btnbill = Button(labelframeleft, text="BILL", command=self.total,
                         font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnbill.grid(row=10, column=0, padx=1, sticky="w")
        # =========== CRUD buttons ============
        btn_frame = Frame(labelframeleft, bd=2, relief=RIDGE)
        btn_frame.place(x=0, y=400, width=412, height=780)
        btnadd = Button(btn_frame, text="BOOK", command=self.add_data,
                        font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnadd.grid(row=0, column=0, padx=1)
        btnupdate = Button(btn_frame, text="UPDATE", command=self.update,
                           font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnupdate.grid(row=0, column=1, padx=1)
        btndel = Button(btn_frame, text="DELETE", command=self.deletes,
                        font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btndel.grid(row=0, column=2, padx=1)
        btnreset = Button(btn_frame, text="RESET", command=self.reset,
                          font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnreset.grid(row=0, column=3, padx=1)
        # ======= right side image ===========
        img3 = Image.open(r"cylinder.jpg")
        img3 = img3.resize((430, 200), Image.ANTIALIAS)
        self.photoimg3 = ImageTk.PhotoImage(img3)
        labelimg = Label(self.root, image=self.photoimg3, bd=4, relief=RIDGE)
        labelimg.place(x=850, y=80, width=430, height=200)
        # ======== table frame / search system =============
        Table_Frame = LabelFrame(self.root, bd=2, relief=RIDGE,
                                 text="VIEW DETAILS AND SEARCH SYSTEM",
                                 font=("arial", 12, "bold"), bg="white", fg="red", width=9)
        Table_Frame.place(x=435, y=280, width=850, height=260)
        lblsearch = Label(Table_Frame, font=("arial", 12, "bold"), text="Search by :",
                          bg="red", fg="yellow")
        lblsearch.grid(row=0, column=0, sticky="w", padx=8)
        self.search_var = StringVar()
        combo_search = ttk.Combobox(Table_Frame, textvariable=self.search_var,
                                    font=("arial", 12, "bold"), width=24, state="readonly")
        combo_search["value"] = ("ConsumerID")
        combo_search.current(0)
        combo_search.grid(row=0, column=1, padx=8)
        self.txt_search = StringVar()
        entry_search = ttk.Entry(Table_Frame, textvariable=self.txt_search,
                                 width=24, font=("arial", 12, "bold"))
        entry_search.grid(row=0, column=2, padx=8)
        btnsearch = Button(Table_Frame, text="SEARCH", command=self.search,
                           font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnsearch.grid(row=0, column=3, padx=8)
        btnshowall = Button(Table_Frame, text="SHOW ALL", command=self.fetch_data,
                            font=("arial", 12, "bold"), bg="black", fg="orange", width=9)
        btnshowall.grid(row=0, column=4, padx=8)
        # ======= bookings table ========
        details_tbale = Frame(Table_Frame, bd=2, relief=RIDGE)
        details_tbale.place(x=5, y=50, width=835, height=180)
        scroll_x = ttk.Scrollbar(details_tbale, orient=HORIZONTAL)
        scroll_y = ttk.Scrollbar(details_tbale, orient=VERTICAL)
        self.book_table = ttk.Treeview(details_tbale,
                                       column=("Cons", "bDate", "DDate", "Btype"),
                                       xscrollcommand=scroll_x.set,
                                       yscrollcommand=scroll_y.set)
        scroll_x.pack(side=BOTTOM, fill="x")
        scroll_y.pack(side=RIGHT, fill="y")
        scroll_x.config(command=self.book_table.xview)
        scroll_y.config(command=self.book_table.yview)
        self.book_table.heading("Cons", text="ConsumerID")
        self.book_table.heading("bDate", text="Booking Date")
        self.book_table.heading("DDate", text="Delivery Date")
        self.book_table.heading("Btype", text="Booking Type")
        self.book_table["show"] = "headings"
        self.book_table.column("Cons", width=100)
        self.book_table.column("DDate", width=100)
        self.book_table.column("bDate", width=100)
        self.book_table.column("Btype", width=100)
        self.book_table.pack(fill=BOTH, expand=1)
        self.book_table.bind("<ButtonRelease-1>", self.get_cursor)
        self.fetch_data()

    def add_data(self):
        """Validate the form and insert a new booking row."""
        # BUG FIX: the original compared the StringVar objects themselves to ""
        # (always False), so empty dates were never rejected; compare .get().
        if self.var_consid.get() == "" or self.var_bookdate.get() == "" or self.var_deldate.get() == "":
            messagebox.showerror("Error", "Please Enter the Required Fields", parent=self.root)
        else:
            try:
                conn = self._connect()
                my_cursor = conn.cursor()
                my_cursor.execute(
                    "INSERT INTO booking values(%s,%s,%s,%s)",
                    (self.var_consid.get(), self.var_bookdate.get(),
                     self.var_deldate.get(), self.var_booking_type.get()))
                conn.commit()
                self.fetch_data()
                conn.close()
                messagebox.showinfo("Success", "Booking has been Done", parent=self.root)
            except Exception as es:
                messagebox.showwarning("Warning", f"Something went Wrong :{str(es)}", parent=self.root)

    def fetch_data(self):
        """Reload every booking row into the table widget."""
        conn = self._connect()
        my_cursor = conn.cursor()
        my_cursor.execute("Select * from booking")
        rows = my_cursor.fetchall()
        if len(rows) != 0:
            self.book_table.delete(*self.book_table.get_children())
            for i in rows:
                self.book_table.insert("", END, values=i)
            conn.commit()
        conn.close()

    def get_cursor(self, event=""):
        """Copy the clicked table row back into the form fields."""
        cursor_row = self.book_table.focus()
        content = self.book_table.item(cursor_row)
        row = content["values"]
        if not row:
            # Click on an empty area of the table — nothing to load.
            return
        self.var_consid.set(row[0])
        self.var_bookdate.set(row[1])
        self.var_deldate.set(row[2])
        self.var_booking_type.set(row[3])

    def update(self):
        """Update the selected booking's dates/type, keyed by ConsumerID."""
        # BUG FIX: compare the variable's value, not the StringVar object.
        if self.var_consid.get() == "":
            messagebox.showerror("Error", "Please Enter Consumer ID ", parent=self.root)
        else:
            conn = self._connect()
            my_cursor = conn.cursor()
            my_cursor.execute(
                "UPDATE booking SET BookingDate=%s,DeliveryDate=%s,BookingType=%s WHERE ConsumerID=%s",
                (self.var_bookdate.get(),
                 self.var_deldate.get(),
                 self.var_booking_type.get(),
                 self.var_consid.get()))
            conn.commit()
            self.fetch_data()
            conn.close()
            messagebox.showinfo("Update", "Customer Details Successfully Updated", parent=self.root)

    def deletes(self):
        """Delete the selected booking after a yes/no confirmation."""
        mdel = messagebox.askyesno("LPG Booking System",
                                   "Are u Sure you want to Delete the selected Booking",
                                   parent=self.root)
        if not mdel:
            return
        conn = self._connect()
        my_cursor = conn.cursor()
        my_cursor.execute("delete from booking where ConsumerID=%s",
                          (self.var_consid.get(),))
        conn.commit()
        self.fetch_data()
        conn.close()

    def reset(self):
        """Clear every form field."""
        self.var_bookdate.set("")
        self.var_deldate.set("")
        self.var_consid.set("")
        self.var_paidtax.set("")
        self.var_total.set("")
        self.var_booking_type.set("")
        self.var_subtotal.set("")

    # ================== consumer lookup =============
    def Fetch_cust(self):
        """Look up the consumer record by ID and show it in a side panel."""
        if self.var_consid.get() == "":
            messagebox.showerror("Error", "Please enter Consumer ID", parent=self.root)
            return
        conn = self._connect()
        my_cursor = conn.cursor()
        # One query for all displayed fields — the original opened six
        # separate connections, one per field.
        my_cursor.execute(
            "select Name,Gender,Mobile,Email,IDProof,IDNumber "
            "from customer where ConsumerID=%s",
            (self.var_consid.get(),))
        row = my_cursor.fetchone()
        conn.close()
        if row is None:
            messagebox.showerror("Error", "This Consumer ID is not Found", parent=self.root)
            return
        showDataframe = Frame(self.root, bd=4, relief=RIDGE, padx=2)
        showDataframe.place(x=450, y=82, width=300, height=180)
        captions = ("Name :", "Gender :", "Mobile :", "Email :", "ID Proof :", "ID Number :")
        for index, (caption, value) in enumerate(zip(captions, row)):
            Label(showDataframe, text=caption,
                  font=("arial", 12, "bold")).place(x=0, y=30 * index)
            # Wrapped in a 1-tuple to match the original display rendering.
            Label(showDataframe, text=(value,),
                  font=("arial", 12, "bold")).place(x=90, y=30 * index)

    def search(self):
        """Filter the bookings table by the selected column and typed value."""
        conn = self._connect()
        my_cursor = conn.cursor()
        column = str(self.search_var.get())
        term = str(self.txt_search.get())
        # SECURITY FIX: the search term is user-typed, so it is passed as a
        # query parameter instead of being concatenated into the SQL. The
        # column name cannot be parameterised, but it comes from a read-only
        # combobox with a fixed list of values.
        my_cursor.execute(
            "SELECT * from booking WHERE " + column + " LIKE %s",
            ("%" + term + "%",))
        rows = my_cursor.fetchall()
        if len(rows) != 0:
            self.book_table.delete(*self.book_table.get_children())
            for i in rows:
                self.book_table.insert("", END, values=i)
            conn.commit()
        conn.close()

    def total(self):
        """Compute 18% tax, sub-total and total for the selected cylinder size."""
        base = self.PRICES.get(self.var_booking_type.get())
        if base is None:
            # Unknown type: combobox values make this unreachable in practice.
            return
        tax = float(0.18 * base)
        self.var_paidtax.set(tax)
        self.var_total.set(float(tax + base))
        self.var_subtotal.set(base)
if __name__ == "__main__":
    # Launch the booking window when run as a script.
    window = Tk()
    app = LPGbooking(window)
    window.mainloop()
|
anonymouslyfadeditzme/Anonymously-Faded
|
booking.py
|
booking.py
|
py
| 19,472 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43256048913
|
import os
import xlsxwriter
# Change basepath if applicable
basepath = "C:\\Users\\AYuen\\Environmental Protection Agency (EPA)\\ECMS - Documents\\newfiles\\"

workbook = xlsxwriter.Workbook(basepath + 'fileandid.xlsx')
worksheet = workbook.add_worksheet("Sheet 1")

# Start from the first cell.
# Rows and columns are zero indexed.
row = 0
col = 0

# Collect the full path of every file below basepath.
all_files = []
for (root, dirs, files) in os.walk(basepath, topdown=False):
    for file in files:
        all_files.append(os.path.join(root, file))

# One spreadsheet row per file: column 0 = record id (the first directory
# below basepath), column 1 = the file's name.
# FIXES vs original: the debug `print(qq[1])` crashed whenever fewer than two
# files existed; and the record id / filename were taken with hard-coded path
# component indexes (split('\\')[6] / [7]) that silently broke whenever the
# basepath depth changed — deriving them relative to basepath is robust.
for item in all_files:
    rel_parts = os.path.relpath(item, basepath).split(os.sep)
    rid = rel_parts[0]
    fname = rel_parts[-1]
    print(f'record id is {rid}')
    print(f'file name is {fname}')
    worksheet.write(row, col, rid)
    worksheet.write(row, col + 1, fname)
    row += 1
workbook.close()
|
USEPA/Document_Processing_Scripts
|
getidfilename.py
|
getidfilename.py
|
py
| 827 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14835956764
|
import torch
import torchaudio
import numpy as np
import opensmile
from collections import namedtuple
from .scan_data import scan_rootdir, CHANNELS
from .load_data import load_anno_tensor, load_vad_df
from .segment_data import SegmentEgs
class ChunkOpenSmileDataSet:
    """Dataset of fixed-length audio chunks featurised with openSMILE.

    Each item pairs the ComParE-2016 functionals of one audio chunk with
    the time-averaged annotation for that chunk. Chunks come either from
    VAD speech regions (when a VAD file exists and ``use_vad`` is True) or
    from a sliding window over the whole recording.
    """
    def __init__(self, rootdir,
                 channels=CHANNELS,
                 #transform=torchaudio.transforms.MFCC(n_mfcc=40), # n_mfcc=80, melkwargs={'n_fft': 1280}
                 feats2anno_rate=1,
                 chunk_size_s=2,
                 chunk_hop_s=1, use_vad=True):
        """ feats2anno_rate = feats_sr / anno_sr """
        self.rootdir = rootdir
        self.channels = channels
        # openSMILE functionals: one fixed-size feature vector per chunk.
        self.transform = opensmile.Smile(
            feature_set=opensmile.FeatureSet.ComParE_2016,
            feature_level=opensmile.FeatureLevel.Functionals)
        #self.transform = transform
        self.feats2anno_rate = feats2anno_rate
        self.finfos = scan_rootdir(rootdir, channels)
        # Annotations are loaded once and shared by every chunk of a file.
        preloaded_annos = [load_anno_tensor(f.anno[0]) for f in self.finfos]
        self.segments = []
        Chunk = namedtuple('Chunk', ['start_sec', 'end_sec'])
        for f, p_a in zip(self.finfos, preloaded_annos):
            if use_vad and f.vad:
                # Cut each VAD speech region into chunk_size_s windows hopping
                # by chunk_hop_s; the final window is right-aligned to the
                # region end (clamped at 0) so the tail of speech is kept.
                for _, row in load_vad_df(f.vad).iterrows():
                    start = row.start_sec
                    #row.end_sec
                    keep_doing=True
                    while keep_doing:
                        end = start + chunk_size_s
                        if end > row.end_sec:
                            end = row.end_sec
                            start = max(0, end-chunk_size_s)
                            keep_doing=False
                        chunk = Chunk(start, end)
                        start += chunk_hop_s
                        self.segments.append(SegmentEgs(f, chunk, p_a))
            else:
                # No VAD: slide the window over the entire recording.
                # total = duration in seconds (num_frames // sample rate,
                # assuming f.wav is (filename, sample_rate) — TODO confirm).
                # NOTE(review): a tail shorter than chunk_size_s is dropped
                # here (range stops at total - chunk_size_s) — confirm intended.
                total = torchaudio.info(f.wav[0]).num_frames//f.wav[1]
                for start in range(0, total - chunk_size_s, chunk_hop_s):
                    chunk = Chunk(start, start + chunk_size_s)
                    self.segments.append(SegmentEgs(f, chunk, p_a))
        print(f"{len(self.segments)} chunks")
    def __len__(self):
        # Number of chunks across all recordings.
        return len(self.segments)
    def total_sec(self):
        # Total audio duration covered by all chunks, in seconds.
        return sum(s.duration for s in self.segments)
    def size(self, index):
        # Duration (seconds) of one chunk.
        return self.segments[index].duration
    def __getitem__(self, index):
        seq = self.segments[index]
        wav_keeper = seq.wav_keeper
        # Extract functionals directly from the wav slice for this chunk.
        feats = self.transform.process_file(wav_keeper.wav_fname,
                                            start=wav_keeper.start_sec,
                                            end = wav_keeper.end_sec).values# 1 X feats
        feats = torch.from_numpy(feats).T # feats X 1
        # Collapse the per-frame annotation to one mean label per chunk.
        anno = seq.anno.mean(dim=-2)
        #corr_anno_len = round(feats.shape[-1] / self.feats2anno_rate)
        # if abs(anno.shape[0] - corr_anno_len) > 2:
        #     print(f"WARNING: element {index}, {anno.shape[0]=} ({corr_anno_len=}), {feats.shape[-1]=}, {self.feats2anno_rate=}")
        # anno = anno[:corr_anno_len]
        # corr_feats_len = round(anno.shape[0] * self.feats2anno_rate)
        # feats = feats[:, :corr_feats_len]
        return {'feats': feats,
                'labels': anno,
                'padding': torch.ones(anno.shape[0]),
                'index': index}
|
medbar/maga_sis
|
3/ULM/utils/chunk_opensmile_dataset.py
|
chunk_opensmile_dataset.py
|
py
| 3,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17388621437
|
# Import necessary Tkinter and sqlite3 libraries.
import tkinter as tk
import sqlite3
from sqlite3 import Error
from PIL import Image, ImageTk
import tkinter.messagebox as messagebox
# Making things object oriented, define a class.
class School_Data:
    '''Tkinter GUI for maintaining student records (name, age, class)
    stored in a SQLite database.'''

    def __init__(self):
        '''Constructor to initialize the GUI window'''
        self.root = tk.Tk()
        self.root.geometry('1200x700')
        self.connection = self.create_connection()
        self.home()
        self.root.mainloop()
        # mainloop() blocks until the window is destroyed; only close the
        # connection if create_connection() actually succeeded (it returns
        # None on failure).
        if self.connection:
            self.connection.close()

    def home(self):
        # Clear the screen and display the home screen
        self.clear_screen()
        # Create a menubar with two menus File and Action
        # From the File Menu the application can be closed
        # From the Action menu a message can be displayed.
        self.menubar = tk.Menu(self.root)
        self.filemenu = tk.Menu(self.menubar, tearoff=0)
        self.filemenu.add_command(label='Close', command=self.close)
        self.filemenu.add_separator()
        self.filemenu.add_command(label='Close without question', command=exit)
        self.actionmenu = tk.Menu(self.menubar, tearoff=0)
        self.actionmenu.add_command(label='Show Message', command=self.show_message)
        self.menubar.add_cascade(menu = self.filemenu, label='File')
        self.menubar.add_cascade(menu = self.actionmenu, label='Action')
        self.root.config(menu = self.menubar)
        # Create a label for the application title
        self.label = tk.Label(self.root, text="Sample School Data", font=("Calibri", 24))
        self.label.pack(padx=20, pady=20)
        # Load and display an image
        image = Image.open("school_image.jpg")
        image = image.resize((800,300))
        self.photo = ImageTk.PhotoImage(image)
        image_label = tk.Label(self.root, image=self.photo)
        image_label.pack(padx=10, pady=10)
        # Create a frame for the buttons
        self.homeframe = tk.Frame(self.root)
        self.homeframe.pack(padx=20, pady=20)
        # Add buttons for Add, Search, and Extra functionality
        self.add_button_in_frame(self.homeframe,"Add",0,0, self.add)
        self.add_button_in_frame(self.homeframe,"Search",0,1, self.search)
        self.add_button_in_frame(self.homeframe,"Extra",0,2, self.extra)

    def add_button_in_frame(self, parent, text, row, col, *commands):
        """
        Create a button and place it in a frame within the parent widget.
        Args:
            parent (tk.Widget): The parent widget.
            text (str): The text to display on the button.
            row (int): The row number within the parent's grid layout.
            col (int): The column number within the parent's grid layout.
            *commands (callable): The command to associate with the button.
                Note: a Tk button has a single command slot, so if several
                callables are passed only the last one takes effect.
        Returns:
            tk.Button: The created button.
        """
        button = tk.Button(parent, text=text, font=("Arial", 14))
        button.grid(row=row, column=col)
        for cmd in commands:
            button.config(command = lambda c=cmd: c())
        return button

    def add_button(self, text, command):
        """
        Create a button and place it in the root window with standard padding.
        Args:
            text (str): The text to display on the button.
            command (callable): The command to associate with the button.
        """
        button = tk.Button(self.root, text=text, font=("Arial", 14), command=command)
        button.pack(padx=10, pady=10)

    def add(self):
        """
        Displays the screen for adding a new entry.
        """
        self.clear_screen()
        # Create a label for the add screen title
        self.label = tk.Label(self.root, text="Add a new Entry", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        self.addframe = tk.Frame(self.root)
        self.addframe.pack(padx=10, pady=10)
        # Create input fields for name, age, and class
        self.create_label_and_entry(self.addframe, "Name", 0, "Name", "")
        self.create_label_and_entry(self.addframe, "Age", 1, "Age", "")
        self.create_label_and_entry(self.addframe, "Class", 2, "Class", "")
        self.addbtnframe = tk.Frame(self.root)
        self.addbtnframe.pack(padx=10, pady=10)
        # Add buttons to add the entry and return to the home screen
        self.add_button_in_frame(self.addbtnframe,"Add",0,1, self.connection_add)
        self.add_button_in_frame(self.addbtnframe,"Home",0,2, self.home)

    # Method to connect to database and pass the entries to save
    def connection_add(self):
        """
        Add the new entry to the SQLite database.
        """
        try:
            data_entry = '''CREATE TABLE IF NOT EXISTS Stud_Data (name TEXT, age INT, class INT)'''
            self.connection.execute(data_entry)
            data_insert = '''INSERT INTO Stud_Data (name, age, class) VALUES (?,?,?)'''
            data_insert_tuple = (
                self.Name.get('1.0', 'end-1c'),
                self.Age.get('1.0', 'end-1c'),
                self.Class.get('1.0', 'end-1c')
            )
            # If any space is left blank, prompt user to enter all details else, execute the data entry
            # and display respective messages.
            if '' in data_insert_tuple:
                messagebox.showinfo(title='Error', message='Kindly fill in all the details')
            else:
                cursor = self.connection.cursor()
                cursor.execute(data_insert, data_insert_tuple)
                self.connection.commit()
                messagebox.showinfo(title='Congratulations!', message='Entry added Successfully!')
                self.clear_text(self.addframe)
        except Error as e:
            print(e)

    def search(self):
        """
        Displays the screen for searching an entry.
        """
        self.clear_screen()
        # Create a label for the search screen title
        self.label = tk.Label(self.root, text="Search an Entry", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        # Create frame for search input field
        self.searchframe = tk.Frame(self.root)
        self.searchframe.pack(padx=10, pady=10)
        self.attribute = tk.Label(self.searchframe, text="Search by", font=("Arial", 14))
        self.attribute.grid(row=0, column=0)
        # Define a variable to store the attribute name selected by user by which user wants to search
        self.sel_string = tk.StringVar()
        # Define option menu to select Name, Age or Class and store value in variable
        self.attribute_sel = tk.OptionMenu(self.searchframe, self.sel_string, *["Name", "Age", "Class"])
        self.attribute_sel.grid(row=1, column=0)
        # Text input by user which will be searched in the database
        self.search_value = tk.Text(self.searchframe, height=1, font=("Arial", 12))
        self.search_value.grid(row=1, column=1)
        # Add buttons to search the entry and return to the home screen
        self.add_button("Search", self.connection_search)
        self.add_button("Home", self.home)

    def connection_search(self):
        """
        Search for entries in the SQLite database.
        """
        try:
            # The column name is interpolated, which is safe here only because
            # it comes from a fixed OptionMenu ("Name"/"Age"/"Class"); the
            # user-supplied value itself is passed as a bound parameter.
            search_column = self.sel_string.get()
            search_querry = "SELECT * FROM Stud_Data WHERE {} = ?".format(search_column)
            cursor = self.connection.cursor()
            # if text input is left blank, prompt user to enter a text
            # else store search results from database in global variable self.info
            if self.search_value.get('1.0', 'end-1c') == '':
                messagebox.showinfo(title='Error!', message='Kindly enter value for search')
            else:
                cursor.execute(search_querry, (self.search_value.get('1.0', 'end-1c'),))
                self.info = cursor.fetchall()
                self.disp_search_results(self.info)
                self.connection.commit()
        except Error as e:
            print(e)

    def disp_search_results(self, info):
        '''Displays all the results of search command in database
        Args:
            info: list of all the rows from database that correspond to user search
        '''
        # Clear any previously displayed search results
        self.clear_search_results()
        # Create label for results of search
        self.label = tk.Label(self.root, text="Search Results", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        # Create frame to display all matching results
        self.dispframe = tk.Frame(self.root)
        self.dispframe.pack(fill = 'y')
        # Create a variable to store the value of radiobutton
        self.rbvar = tk.StringVar()
        # if no matching result is found, display No Results found!
        # else display results
        if len(info) == 0:
            self.label_nor = tk.Label(self.root, text="No Results found!", font=("Arial", 16))
            self.label_nor.pack(padx=20, pady=20)
        # Create radiobutton for each row of result
        # if a row is selected, option to edit or delete the row pops up
        else:
            for i, row in enumerate(info, start=1):
                self.rb = tk.Radiobutton(self.dispframe, variable=self.rbvar, value = i, command=self.enable_options)
                self.rb.grid(row=i, column=0)
                for j, val in enumerate(row):
                    label = tk.Label(self.dispframe, text=val, relief=tk.RAISED, width=15, font=("Arial", 14))
                    label.grid(row=i, column=j+1, sticky= tk.W + tk.E)

    def enable_options(self):
        '''Method to display Edit and Delete buttons only on selection of a row'''
        present = False
        for widget in self.root.winfo_children():
            if isinstance(widget, tk.Button) and (widget.cget('text') == 'Edit'):
                present = True
        if present == False:
            # If buttons not already present, create frame for buttons
            self.searchbtnframe = tk.Frame(self.root)
            self.searchbtnframe.pack(padx=10,pady=10)
            self.add_button_in_frame(self.searchbtnframe, 'Edit', 0,0, self.edit)
            self.add_button_in_frame(self.searchbtnframe, 'Delete', 0,1, self.delete_entry)

    def edit(self):
        ''' Edit the selected row in database'''
        # Extracting details of selected row
        selected_row = int(self.rbvar.get()) -1
        (name, age, classl) = self.info[selected_row]
        # Clear screen for Edit screen
        self.clear_screen()
        # Create label for Edit screen
        self.label = tk.Label(self.root, text="Update an Entry", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        # Create frame for text entries that should replace the existing entry
        self.editframe = tk.Frame(self.root)
        self.editframe.pack(padx=10, pady=10)
        self.create_label_and_entry(self.editframe, "Name", 0, "Name", "")
        self.create_label_and_entry(self.editframe, "Age", 1, "Age", "")
        self.create_label_and_entry(self.editframe, "Class", 2, "Class", "")
        # Create a frame for buttons to execute the edit function or cancel the process
        self.editbtnframe = tk.Frame(self.root)
        self.editbtnframe.pack(padx=10, pady=10)
        self.add_button_in_frame(self.editbtnframe,"Update",0,1, lambda: self.edit_entry(self.info[int(self.rbvar.get()) - 1]))
        # Bug fix: clear_text requires the frame to clear; passing the bound
        # method bare made the Cancel button raise TypeError when clicked.
        self.add_button_in_frame(self.editbtnframe,"Cancel",0,2, lambda: self.clear_text(self.editframe))
        self.add_button_in_frame(self.editbtnframe,"Back",0,3, self.search)
        self.add_button_in_frame(self.editbtnframe,"Home",0,4, self.home)

    def edit_entry(self, entry):
        ''' Method to execute the edit in Sqlite database
        Args:
            entry: the (name, age, class) tuple of the row being replaced
        '''
        edit_query = '''UPDATE Stud_Data SET name=?, age=?, class=? WHERE name=? AND age=? AND class=?'''
        data_edit_tuple = (self.Name.get('1.0', 'end-1c'), self.Age.get('1.0', 'end-1c'), self.Class.get('1.0', 'end-1c'))
        # If any field is left blank, prompt user to fill all details
        if '' in data_edit_tuple:
            messagebox.showinfo(title='Error', message='Kindly fill in all the details')
        else:
            cursor = self.connection.cursor()
            cursor.execute(edit_query,
                           (self.Name.get('1.0', 'end-1c'),
                            self.Age.get('1.0', 'end-1c'),
                            self.Class.get('1.0', 'end-1c'),
                            entry[0], entry[1], entry[2]))
            self.connection.commit()
            messagebox.showinfo(title='Congratulations!', message='Entry updated Successfully!')
            # Clear the text fields after operation
            self.clear_text(self.editframe)

    def delete_entry(self):
        '''Delete the selected entry'''
        # Confirm if user really wants to delete the entry
        sure = messagebox.askyesnocancel(title='Delete?', message='''Are you sure you want to delete this entry?''')
        if sure == True:
            cursor = self.connection.cursor()
            selected_row = int(self.rbvar.get()) -1
            (name, age, classl) = self.info[selected_row]
            delete_query = '''DELETE from Stud_Data WHERE
            name = ? AND age = ? AND class = ?'''
            cursor.execute(delete_query, (name, age, classl))
            self.connection.commit()
            messagebox.showinfo(title="Success", message="Entry deleted successfully!")
            self.connection_search()

    def create_label_and_entry(self, parent, text, row, entry_name, default_value):
        """
        Create a label and an entry field and place them in the parent widget.
        Args:
            parent (tk.Widget): The parent widget.
            text (str): The text to display on the label.
            row (int): The row number within the parent's grid layout.
            entry_name (str): Attribute name under which the entry widget is
                stored on self (e.g. "Name" -> self.Name).
            default_value (str): The initial text for the entry field.
        """
        label = tk.Label(parent, text=text, font=("Arial", 14))
        # Place the label explicitly in its row; the original relied on
        # implicit grid placement, which only lined up because the calls
        # happened to be made in row order.
        label.grid(row=row, column=0, sticky=tk.W + tk.E)
        entry = tk.Text(parent, height=1, font=("Arial", 12))
        entry.bind("<KeyPress>", self.shortcut)
        entry.insert("1.0", default_value)
        entry.grid(row=row, column=1, sticky=tk.W + tk.E)
        setattr(self, entry_name, entry)

    def clear_text(self, frame):
        ''' Method to clear text fields if present on the screen'''
        text_entry = [widget for widget in frame.winfo_children() if isinstance(widget, tk.Text)]
        for element in text_entry:
            element.delete('1.0', 'end')

    def create_connection(self):
        '''Method to create connection with the Sqlite database.
        Returns the connection, or None if the connect failed.
        NOTE(review): the absolute path is machine-specific; consider making
        it configurable.'''
        try:
            connection = sqlite3.connect(r"c:\Users\rsahu\Documents\git_files\Repo1\data.db")
            return connection
        except Error as e:
            print(e)

    def clear_search_results(self):
        ''' Method to refresh and clear previously displayed results in case of new search or deleted entry'''
        for widget in self.root.winfo_children():
            if isinstance(widget, tk.Frame) and widget != self.searchframe:
                widget.destroy()
            elif isinstance(widget, tk.Label) and widget.cget('text') == 'Search Results':
                widget.destroy()

    def shortcut(self, event):
        ''' Method to enable function through shortcut keys.
        NOTE(review): Return always triggers connection_add, even when the
        entry being edited belongs to the Edit screen — confirm intended.'''
        #print(event.keysym, event.state)
        if event.keysym == 'Return':
            self.connection_add()
        if event.keysym == 'Tab':
            current_widget = event.widget
            current_widget.tk_focusNext().focus()
            return 'break'

    def extra(self):
        """
        Displays the screen for extra functionality (placeholder).
        """
        self.clear_screen()
        # Create a label for the extra screen title
        self.label = tk.Label(self.root, text="Extra Functionality", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        self.extrabtnframe = tk.Frame(self.root)
        self.extrabtnframe.pack(padx=10, pady=10)
        # Add button to go back to the home screen
        self.add_button_in_frame(self.extrabtnframe, "Back", 0, 0, self.home)

    def clear_screen(self):
        '''Method to clear screen of widgets on the window'''
        for widget in self.root.winfo_children():
            widget.destroy()

    def show_message(self):
        '''Method to show message when asked from Actionmenu'''
        messagebox.showinfo(title='Information', message='This is a sample GUI for entry of data of students in a school')

    def close(self):
        '''Method to kill the application window'''
        if messagebox.askyesno(title="Quit?", message='Do you really want to quit?'):
            self.root.destroy()
# Instantiate the School_Data class to start the application.
# (The constructor runs the Tk mainloop, so this call blocks until exit.)
if __name__ == '__main__':
    School_Data()
|
rohan-sahuji/Repo1
|
Tkinter_GUI.py
|
Tkinter_GUI.py
|
py
| 17,391 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14003038546
|
from app.custom_queue import CustomQueue
from app.logger import get_logger
from datetime import datetime, timedelta
# Module-level logger and the capacity shared by every lift queue.
LOGGER = get_logger(__name__)
QUEUE_MAX_SIZE = 20
class Queues():
    """Bundle of the four ski-lift queues plus waiting-time bookkeeping.

    Time accounting works by datetime arithmetic: every accumulator starts
    at ``starting_time`` and per-skier timedeltas are added to it, so the
    elapsed wait is ``accumulator - starting_time`` (see normalize_time).
    """
    def __init__(self):
        self.LS = CustomQueue(QUEUE_MAX_SIZE, 'LeftSingle')
        self.LT = CustomQueue(QUEUE_MAX_SIZE, 'LeftTriple')
        self.RT = CustomQueue(QUEUE_MAX_SIZE, 'RightTriple')
        self.RS = CustomQueue(QUEUE_MAX_SIZE, 'RightSingle')
        self.starting_time = datetime.now()
        self.Total_time = self.starting_time
        # Guard so normalize_time() can't corrupt the accumulators when the
        # report is produced more than once.
        self._normalized = False

    def _queues(self):
        """All four queues in their fixed reporting order."""
        return (self.LS, self.LT, self.RT, self.RS)

    def add_time(self, queue: CustomQueue, time: timedelta):
        """Account one skier's wait `time` to `queue` and to the total."""
        self.Total_time += time
        queue.time += time
        queue.count += 1

    def add_to_LS(self, skyer):
        self.add_to(self.LS, skyer)

    def add_to_LT(self, skyer):
        self.add_to(self.LT, skyer)

    def add_to_RT(self, skyer):
        self.add_to(self.RT, skyer)

    def add_to_RS(self, skyer):
        self.add_to(self.RS, skyer)

    def add_to(self, queue: CustomQueue, skyer):
        """Put a skier into `queue` and log the event."""
        queue.put(skyer)
        LOGGER.debug(f'Esquiador entrou na fila: {queue.name}')

    def normalize_time(self):
        """Convert accumulators from datetimes into elapsed timedeltas.

        Idempotent: the original subtracted ``starting_time`` every call,
        so invoking the report twice produced garbage.
        """
        if self._normalized:
            return
        self._normalized = True
        self.Total_time -= self.starting_time
        for q in self._queues():
            q.time -= self.starting_time

    def report_queue_time(self):
        """Log the mean waiting time overall and per queue."""
        self.normalize_time()
        total_count = sum(q.count for q in self._queues())
        if total_count:
            LOGGER.info(f'Total time = {self.Total_time/total_count}')
        else:
            LOGGER.info('ninguem saiu de qualquer fila')
        # One loop instead of four copy-pasted if/else blocks.
        for q in self._queues():
            if q.count:
                LOGGER.info(f'{q.name} time = {q.time/q.count}')
            else:
                LOGGER.info(f'ninguem saiu da fila {q.name}')

    def queue_sizes(self):
        """Current sizes as [LS, LT, RT, RS]."""
        return [q.qsize() for q in self._queues()]

    def count_queues_lenght(self):
        LS_size, LT_size, RT_size, RS_size = self.queue_sizes()
        LOGGER.debug(
            f"""count_queues_lenght()
            {'###'*3}
            >Filas agora<
            LeftSingle: {LS_size}
            LeftTriple: {LT_size}
            RightTriple: {RT_size}
            RightSingle: {RS_size}
            {'###'*3}
            """)
|
ViniciusLinharesAO/ski-slope-problem-uece-ppc
|
app/queues.py
|
queues.py
|
py
| 3,005 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18598274205
|
import requests
from data_access.openWeatherMap.client import OpenWeatherMap
from business_logic.services import GetWeatherService
from config import OWM_API_KEY, OWM_BASE_URL
from .server import Request, Response
def get_weather_controller(request: Request) -> Response:
    """Render an HTML table with the current weather for the requested cities.

    Expects a ``query`` request parameter holding the city list. The
    original indexed ``request.params.get('query')[0]`` unconditionally,
    which raised TypeError when the parameter was absent; respond with a
    400 instead.
    """
    query_values = request.params.get('query')
    if not query_values:
        return Response(
            status="400 BAD REQUEST",
            headers={"Content-Type": "text/html"},
            body="<h1>Missing 'query' parameter</h1>"
        )
    cities = query_values[0]
    with requests.Session() as session:
        weather_api = OpenWeatherMap(session=session, api_key=OWM_API_KEY, base_url=OWM_BASE_URL)
        weather_service = GetWeatherService(weather_api_adapter=weather_api)
        weather_data_in_cities = weather_service.get_weather_in_cities(cities=cities)
    headers = {"Content-Type": "text/html"}
    mes = "<html><body><h1><b>Weather Data Table</b></h1><table>"
    mes += "<tr><th>city</th><th>temp</th><th>description</th><th>humidity</th></tr>"
    for weather_data in weather_data_in_cities:
        mes += (f"<tr><td>{weather_data.name}</td><td>{weather_data.main.temp}</td>"
                f"<td>{weather_data.weather[0].description}</td><td>{weather_data.main.humidity}</td></tr>")
    mes += "</table></body></html>"
    return Response(
        status="200 OK",
        headers=headers,
        body=mes
    )
def hello_world_controller(request: Request) -> Response:
    """Return a static 'Hello World!' HTML page (request is ignored)."""
    return Response(
        status="200 OK",
        headers={"Content-Type": "text/html"},
        body="<h1>Hello World!</h1>",
    )
# Routing table: (path, controller) pairs scanned in order by
# WebApplication.__call__.
urlpatterns = [
    ('/', get_weather_controller),
    ('/hello', hello_world_controller)
]
class WebApplication: # Web-Frameworks: Django, Flask, FastAPI
    """Minimal router: dispatches a Request to the first matching controller
    in ``urlpatterns`` and falls back to a 404 response."""

    def _get_404_error(self, request: Request) -> Response:
        """Build the 404 response for an unknown path."""
        # Bug fix: the original message string was missing the closing </h1>.
        mes = f"<h1>404 ERROR, URL {request.path} NOT FOUND</h1>"
        headers = {"Content-Type": "text/html"}
        return Response(
            status="404 NOT FOUND",
            headers=headers,
            body=mes
        )

    def __call__(self, request: Request) -> Response:
        """Route the request; a linear scan is fine for a handful of routes."""
        for url_path, controller in urlpatterns:
            if url_path == request.path:
                resp = controller(request)
                return resp
        return self._get_404_error(request=request)
|
pyteacher123/py35-onl
|
weather_app_refactored/presentation/web/application.py
|
application.py
|
py
| 2,199 |
python
|
en
|
code
| 2 |
github-code
|
6
|
6679634602
|
import numpy as np
#import pandas as pd
import matplotlib.pyplot as plt
import argparse, sys
import joblib
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from sklearn.metrics import roc_curve, auc, f1_score, precision_recall_curve, average_precision_score, ConfusionMatrixDisplay
from medmnistutils.evaluationmetrics import accuracy, roc, presenf1cfsmtx
from medmnistutils.medmnistdataloader import PathMNIST, OrganMNIST3D, PneumoniaMNIST, VesselMNIST3D, OCTMNIST
#from medmnistutils.jiaodaresnet import ResNet18 as jiaodaresnet18
#from nets.unknownthreedresnet import resnet18
from medmnistutils.blingblingresnet import resnet18 as blingblingresnet18
from medmnistutils.O2Uzidairesnet import ResNet18 as O2Uresnet18
from medmnistutils.yixianresnet import resnet18 as yixian3dresnet18
# Command-line configuration for the baseline experiment.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='OCTMNIST', help='PathMNIST, OCTMNIST, PneumoniaMNIST, OrganMNIST3D, VesselMNIST3D')
parser.add_argument('--noise_rate', type=float, default=0.4, help='noise rate')
parser.add_argument('--batchsize', type=int, default=128, help='128')
parser.add_argument('--num_epochs', type=int, default=200, help='number of epochs')
#args = parser.parse_args(args=[])
args = parser.parse_args()
# Build the train/val/test datasets and loaders and pick a backbone matching
# the chosen dataset. Label noise (args.noise_rate) is injected into the
# training split only. Bug fix: the OCTMNIST branch previously started a new
# `if` instead of continuing the `elif` chain — it only worked by accident.
if args.dataset =='PathMNIST': #2D, 9 classes, 89,996 / 10,004 / 7,180
    newtransform = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean=[.5], std=[.5])])
    train_dataset = PathMNIST(split = 'train', root = '../../medmnistdata', transform=newtransform, noise_rate=args.noise_rate)
    val_dataset = PathMNIST(split = 'val', root = '../../medmnistdata', transform=newtransform)
    test_dataset = PathMNIST(split = 'test', root = '../../medmnistdata', transform=newtransform)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = O2Uresnet18(input_channel=train_dataset.in_channels, n_outputs=train_dataset.num_classes)
    #model = blingblingresnet18(num_classes=train_dataset.num_classes)
elif args.dataset =='OCTMNIST': #2D, 4 classes,
    newtransform = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean=[.5], std=[.5])])
    train_dataset = OCTMNIST(split = 'train', root = '../../medmnistdata', transform=newtransform, noise_rate=args.noise_rate)
    val_dataset = OCTMNIST(split = 'val', root = '../../medmnistdata', transform=newtransform)
    test_dataset = OCTMNIST(split = 'test', root = '../../medmnistdata', transform=newtransform)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = O2Uresnet18(input_channel=train_dataset.in_channels, n_outputs=train_dataset.num_classes)
    #model = blingblingresnet18(num_classes=train_dataset.num_classes)
elif args.dataset =='PneumoniaMNIST': #2D, 2 class, 4,708 / 524 / 624
    newtransform = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean=[.5], std=[.5])])
    train_dataset = PneumoniaMNIST(split = 'train', root = '../../medmnistdata', transform=newtransform, noise_rate=args.noise_rate)
    val_dataset = PneumoniaMNIST(split = 'val', root = '../../medmnistdata', transform=newtransform)
    test_dataset = PneumoniaMNIST(split = 'test', root = '../../medmnistdata', transform=newtransform)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = O2Uresnet18(input_channel=train_dataset.in_channels, n_outputs=train_dataset.num_classes)
    #model = blingblingresnet18(num_classes=train_dataset.num_classes)
elif args.dataset =='OrganMNIST3D': #3D, 11 class, 972 / 161 / 610
    train_dataset = OrganMNIST3D(split = 'train', root = '../../medmnistdata', transform=None, noise_rate=args.noise_rate)
    val_dataset = OrganMNIST3D(split = 'val', root = '../../medmnistdata', transform=None)
    test_dataset = OrganMNIST3D(split = 'test', root = '../../medmnistdata', transform=None)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = yixian3dresnet18(num_classes = train_dataset.num_classes)
elif args.dataset =='VesselMNIST3D': #3D, 2 class, 1,335 / 192 / 382
    train_dataset = VesselMNIST3D(split = 'train', root = '../../medmnistdata', transform=None, noise_rate=args.noise_rate)
    val_dataset = VesselMNIST3D(split = 'val', root = '../../medmnistdata', transform=None)
    test_dataset = VesselMNIST3D(split = 'test', root = '../../medmnistdata', transform=None)
    train_loader = DataLoader(train_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batchsize, drop_last = False, shuffle=True)
    model = yixian3dresnet18(num_classes = train_dataset.num_classes)
# Training setup: move the model to GPU if available, cross-entropy loss, Adam.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
error = nn.CrossEntropyLoss()
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
###############################################################################
# Per-epoch accuracy histories (validation / test), in fractions (0-1).
验证准确率列表 = []
测试准确率列表= []
###############################################################################
#main loop
for epoch in range(args.num_epochs):
    #train
    model.train()
    for images, labels, _ in train_loader:
        images, labels = images.to(device), labels.to(device)
        # Labels arrive with a trailing singleton dim; flatten to (B,) int64
        # as required by CrossEntropyLoss.
        labels = labels.squeeze().long()
        outputs = model(images)
        loss = error(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    #evaluation
    valaccuracy = accuracy(model, val_loader)
    testaccuracy = accuracy(model, test_loader)
    print('epoch', epoch+1, 'val accuracy', valaccuracy, 'test accuracy', testaccuracy)
###############################################################################
# Record the per-epoch accuracies for the plots/reports produced below.
###############################################################################
    验证准确率列表.append(valaccuracy)
    测试准确率列表.append(testaccuracy)
# Experiment name used as the prefix for every output artifact.
实验名 = '20230924baselineexp1'
resultdict = dict()
# trained model
resultdict['model'] = model
# accuracy-curve data
resultdict['valacclist'] = 验证准确率列表
resultdict['testacclist'] = 测试准确率列表
# Re-scale to percentages for plotting (creates new lists; resultdict keeps
# the original 0-1 values).
验证准确率列表 = [x*100 for x in 验证准确率列表]
测试准确率列表 = [x*100 for x in 测试准确率列表]
plt.plot(验证准确率列表, label = 'validation set')
plt.plot(测试准确率列表, label = 'test set')
plt.xlim((0,200))
plt.ylim((0,100))
#plt.title('origingal method on ' + args.dataset + ' under noise rate ' + str(args.noise_rate))
plt.xlabel('Epoch')
plt.ylabel('Accuracy (%)')
acc变化图文件名 = 实验名 + '_acccurve_' + args.dataset + '_' + str(args.noise_rate) + '.png'
plt.legend()
plt.savefig(acc变化图文件名)
plt.show()
# ROC curves (micro-averaged) for validation and test sets
resultdict['valfprdict'], resultdict['valtprdict'], resultdict['valaucdict'] = roc(model, val_loader)
resultdict['testfprdict'], resultdict['testtprdict'], resultdict['testaucdict'] = roc(model, test_loader)
plt.plot(resultdict['valfprdict']["micro"], resultdict['valtprdict']["micro"],
         label='validation set, AUC ' + str(round(100*resultdict['valaucdict']["micro"],2)))
plt.plot(resultdict['testfprdict']["micro"], resultdict['testtprdict']["micro"],
         label='test set, AUC ' + str(round(100*resultdict['testaucdict']["micro"],2)))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
ROC文件名 = 实验名 + '_roccurve_' + args.dataset + '_' + str(args.noise_rate) + '.png'
plt.savefig(ROC文件名)
plt.show()
# confusion-matrix plots
resultdict['valprecision'], resultdict['valrecall'], resultdict['valf1'], resultdict['valtruelist'], resultdict['valpredlist'], resultdict['valcfsmtx'] = presenf1cfsmtx(model, val_loader)
ConfusionMatrixDisplay.from_predictions(resultdict['valtruelist'], resultdict['valpredlist'], cmap = plt.cm.Blues, colorbar = False)
cfsmtx文件名 = 实验名 + '_valconfusionmatrix_' + args.dataset + '_' + str(args.noise_rate) + '.png'
plt.savefig(cfsmtx文件名)
plt.show()
resultdict['testprecision'], resultdict['testrecall'], resultdict['testf1'], resultdict['testtruelist'], resultdict['testpredlist'], resultdict['testcfsmtx'] = presenf1cfsmtx(model, test_loader)
ConfusionMatrixDisplay.from_predictions(resultdict['testtruelist'], resultdict['testpredlist'], cmap = plt.cm.Blues, colorbar = False)
cfsmtx文件名 = 实验名 + '_testconfusionmatrix_' + args.dataset + '_' + str(args.noise_rate) + '.png'
plt.savefig(cfsmtx文件名)
plt.show()
# plain-text metrics summary
txt文件名 = 实验名 + '_txt_' + args.dataset + '_' + str(args.noise_rate) + '.txt'
with open (txt文件名, 'a', encoding='utf-8') as txt:
    txt.write('最后一轮acc' + "\n" )
    txt.write(str(round(验证准确率列表[-1],2)) + "\n" )
    txt.write(str(round(测试准确率列表[-1],2)) + "\n" )
    # NOTE(review): [-11:-1] excludes the final epoch, so "last ten" is
    # actually epochs N-10..N-1; and the test-set average below divides by
    # len(验证准确率列表[...]) — same length so the value is right, but
    # 测试准确率列表 was presumably intended. Confirm before relying on it.
    txt.write('最后十轮acc平均' + "\n" )
    txt.write(str(round(sum(验证准确率列表[-11:-1])/len(验证准确率列表[-11:-1]),2)) + "\n" )
    txt.write(str(round(sum(测试准确率列表[-11:-1])/len(验证准确率列表[-11:-1]),2)) + "\n" )
    txt.write('precision' + "\n" )
    txt.write(str(round(100*resultdict['valprecision'],2)) + "\n" )
    txt.write(str(round(100*resultdict['testprecision'],2)) + "\n" )
    txt.write('recall' + "\n" )
    txt.write(str(round(100*resultdict['valrecall'],2)) + "\n" )
    txt.write(str(round(100*resultdict['testrecall'],2)) + "\n" )
    txt.write('f1' + "\n" )
    txt.write(str(round(100*resultdict['valf1'],2)) + "\n" )
    txt.write(str(round(100*resultdict['testf1'],2)) + "\n" )
# persist everything (model + curves + metrics) in one artifact
resultdict文件名 = 实验名 + '_resultdict_' + args.dataset + '_' + str(args.noise_rate)
joblib.dump(resultdict, resultdict文件名)
|
gdqb233/inm363
|
baseline.py
|
baseline.py
|
py
| 11,328 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38144650744
|
import unittest
from src.BinaryTree import BinaryTree, BinaryTreeNode
class TestBinaryTree(unittest.TestCase):
    """
    This class tests the BinaryTree class.
    """

    def test_constructor(self):
        """
        Tests the state of a binary tree's root after initialization.
        """
        # A tree must reject a missing root. Use the idiomatic assertRaises
        # context manager instead of try / assertTrue(False) / except, which
        # obscured the intent and reported a generic failure message.
        with self.assertRaises(ValueError):
            BinaryTree(None)
        bt = BinaryTree(BinaryTreeNode(1))
        self.assertEqual(bt.root.val, 1)

    def test_equivalent(self):
        """
        Tests tree equivalence with both equivalent and non-equivalent trees.
        """
        bt1 = BinaryTree(BinaryTreeNode(1, BinaryTreeNode(2), BinaryTreeNode(3)))
        bt2 = BinaryTree(BinaryTreeNode(1, BinaryTreeNode(2), BinaryTreeNode(3)))
        self.assertTrue(bt1.is_equivalent(bt2))
        # Different structure: a single-node tree.
        bt3 = BinaryTree(BinaryTreeNode(1))
        self.assertFalse(bt1.is_equivalent(bt3))
        # Same structure, swapped child values.
        bt4 = BinaryTree(BinaryTreeNode(1, BinaryTreeNode(3), BinaryTreeNode(2)))
        self.assertFalse(bt1.is_equivalent(bt4))

    def test_leaves_just_root(self):
        """
        Tests the leaves returned from a singleton root tree.
        """
        bt = BinaryTree(BinaryTreeNode(1))
        self.assertListEqual(list(bt.get_leaves()), [bt.root])

    def test_leaves_basic(self):
        """
        Tests the leaves returned from a tree with a root and two children.
        """
        nodes = BinaryTreeNode(1, BinaryTreeNode(2), BinaryTreeNode(3))
        bt = BinaryTree(nodes)
        self.assertListEqual(list(bt.get_leaves()), [nodes.left, nodes.right])

    def test_leaves_complex(self):
        """
        Tests the leaves returned from a 3-generation tree with different
        configurations of children (parent2 has a left child only).
        """
        leaf1, leaf2, leaf3 = BinaryTreeNode(10), BinaryTreeNode(20), BinaryTreeNode(30)
        parent1, parent2 = BinaryTreeNode(5, leaf1, leaf2), BinaryTreeNode(15, leaf3)
        root = BinaryTreeNode(0, parent1, parent2)
        bt = BinaryTree(root)
        self.assertListEqual(list(bt.get_leaves()), [leaf1, leaf2, leaf3])

    def test_preorder(self):
        """
        Tests the nodes returned by a pre-order traversal of a 3-generation tree.
        """
        leaf1, leaf2, leaf3 = BinaryTreeNode(10), BinaryTreeNode(20), BinaryTreeNode(30)
        parent1, parent2 = BinaryTreeNode(5, leaf1, leaf2), BinaryTreeNode(15, leaf3)
        root = BinaryTreeNode(0, parent1, parent2)
        bt = BinaryTree(root)
        self.assertListEqual(list(bt.get_preorder()), [root, parent1, leaf1, leaf2, parent2, leaf3])
|
snitkdan/BlackJack
|
test/test_binarytree.py
|
test_binarytree.py
|
py
| 2,631 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21051188362
|
from django.db import models
from django_countries.fields import CountryField
from product.models import product, product_version
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth import get_user_model
User = get_user_model()
from decimal import Decimal
from django.conf import settings
class Order(models.Model):
    """A placed order: delivery details, totals and payment status."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='order_user')
    full_name = models.CharField(max_length=50)
    address1 = models.CharField(max_length=250)
    address2 = models.CharField(max_length=250)
    city = models.CharField(max_length=100)
    phone = models.CharField(max_length=100)
    post_code = models.CharField(max_length=20)
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated = models.DateTimeField(auto_now=True)      # refreshed on every save
    # NOTE(review): max_digits=5 caps totals at 999.99 — confirm intended.
    total_paid = models.DecimalField(max_digits=5, decimal_places=2)
    order_key = models.CharField(max_length=200)
    billing_status = models.BooleanField(default=False)  # False until payment confirmed

    class Meta:
        # Newest orders first.
        ordering = ('-created',)

    def __str__(self):
        # Orders are displayed by their creation timestamp.
        return str(self.created)
class OrderItem(models.Model):
    """One product line inside an Order (unit price frozen at order time)."""

    order = models.ForeignKey(Order,
                              related_name='items',
                              on_delete=models.CASCADE)
    product = models.ForeignKey(product,
                                related_name='order_items',
                                on_delete=models.CASCADE)
    price = models.DecimalField(max_digits=5, decimal_places=2)
    quantity = models.PositiveIntegerField(default=1)

    def __str__(self):
        return str(self.id)  # type: ignore
class Basket(models.Model):
    """Placeholder model for the shopping basket.

    NOTE(review): the session-backed basket implementation below is kept
    commented out; this model currently declares no fields of its own
    (only the implicit auto ``id``). Confirm whether the model is still
    needed or the dead code can be removed.
    """
    ...
    # """
    # A base Basket class, providing some default behaviors that
    # can be inherited or overrided, as necessary.
    # """
    # def __init__(self, request):
    #     self.session = request.session
    #     basket = self.session.get(settings.BASKET_SESSION_ID)
    #     if settings.BASKET_SESSION_ID not in request.session:
    #         basket = self.session[settings.BASKET_SESSION_ID] = {}
    #     self.basket = basket
    # def add(self, product, qty):
    #     """
    #     Adding and updating the users basket session data
    #     """
    #     product_id = str(product.id)
    #     if product_id in self.basket:
    #         self.basket[product_id]["qty"] = qty
    #     else:
    #         self.basket[product_id] = {"price": str(product.regular_price), "qty": qty}
    #     self.save()
    # def __iter__(self):
    #     """
    #     Collect the product_id in the session data to query the database
    #     and return products
    #     """
    #     product_ids = self.basket.keys()
    #     products = product.objects.filter(id__in=product_ids)  # type: ignore
    #     basket = self.basket.copy()
    #     for product in products:
    #         basket[str(product.id)]["product"] = product
    #     for item in basket.values():
    #         item["price"] = Decimal(item["price"])
    #         item["total_price"] = item["price"] * item["qty"]
    #         yield item
    # def __len__(self):
    #     """
    #     Get the basket data and count the qty of items
    #     """
    #     return sum(item["qty"] for item in self.basket.values())
    # def update(self, product, qty):
    #     """
    #     Update values in session data
    #     """
    #     product_id = str(product)
    #     if product_id in self.basket:
    #         self.basket[product_id]["qty"] = qty
    #     self.save()
    # def get_subtotal_price(self):
    #     return sum(Decimal(item["price"]) * item["qty"] for item in self.basket.values())
    # def get_total_price(self):
    #     subtotal = sum(Decimal(item["price"]) * item["qty"] for item in self.basket.values())
    #     shipping = Decimal(0.00) if subtotal == 0 else Decimal(11.50)
    #     return subtotal + Decimal(shipping)
    # def delete(self, product):
    #     """
    #     Delete item from session data
    #     """
    #     product_id = str(product)
    #     if product_id in self.basket:
    #         del self.basket[product_id]
    #     self.save()
    # def clear(self):
    #     # Remove basket from session
    #     del self.session[settings.BASKET_SESSION_ID]
    #     self.save()
    # def save(self):
    #     self.session.modified = True
class WishList(models.Model):
    """A single wish-list entry linking a user to a product version."""

    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    item = models.ForeignKey(product_version, on_delete=models.CASCADE, blank=True, null=True)

    class Meta(object):
        verbose_name = 'WishList'
        verbose_name_plural = 'WishLists'

    def __str__(self):
        return f"{self.user}"
class CheckoutBilling(models.Model):
    """Billing contact/address captured at checkout.

    NOTE(review): field-for-field identical to CheckoutShipping — consider
    an abstract base model to remove the duplication.
    """

    first_name = models.CharField(max_length=50, verbose_name='First Name', help_text='Max 255 character')
    last_name = models.CharField(max_length=50, verbose_name='Last Name', help_text='Max 255 character')
    company = models.TextField(verbose_name='Company')
    email = models.EmailField(verbose_name='Email Address')
    address = models.TextField(verbose_name='Street Address')
    country = CountryField(max_length=255, verbose_name='Country')
    telephone = models.CharField(max_length=25, verbose_name='Telephone')
    fax = models.CharField(max_length=50, verbose_name='Fax')
    user_id = models.ForeignKey(User, on_delete=models.CASCADE, null=True)

    class Meta:
        verbose_name = "Checkout Billing"
        verbose_name_plural = "Checkout Billings"

    def __str__(self):
        return self.first_name
class Checkout(models.Model):
    """Minimal checkout record: an address plus creation/update dates."""

    address = models.CharField(max_length=100)
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)

    def __str__(self):
        return self.address
class CheckoutShipping(models.Model):
    """Shipping contact/address captured at checkout.

    NOTE(review): field-for-field identical to CheckoutBilling — consider
    an abstract base model to remove the duplication.
    """

    first_name = models.CharField(max_length=50, verbose_name='First Name', help_text='Max 255 character')
    last_name = models.CharField(max_length=50, verbose_name='Last Name', help_text='Max 255 character')
    company = models.TextField(verbose_name='Company')
    email = models.EmailField(verbose_name='Email Address')
    address = models.TextField(verbose_name='Street Address')
    country = CountryField(max_length=255, verbose_name='Country')
    telephone = models.CharField(max_length=25, verbose_name='Telephone')
    fax = models.CharField(max_length=50, verbose_name='Fax')
    user_id = models.ForeignKey(User, on_delete=models.CASCADE, null=True)

    class Meta:
        verbose_name = "Checkout Shipping"
        verbose_name_plural = "Checkout Shipping"

    def __str__(self):
        return self.first_name
class ShoppingCart(models.Model):
    """Denormalized cart line with pricing, coupon and destination info.

    NOTE(review): numeric quantities (unit_price, qty, subtotal) are stored
    as CharFields — confirm whether arithmetic is ever done on them; a
    DecimalField/IntegerField would be safer.
    """

    product_name = models.CharField(max_length=200)
    img = models.ImageField(upload_to="images/")
    unit_price = models.CharField(max_length=10)
    qty = models.CharField(max_length=20)
    subtotal = models.CharField(max_length=25)
    coupon = models.CharField(max_length=20)
    zip_code = models.CharField(max_length=20)
    state = models.TextField()
    country = models.TextField(blank=False)

    class Meta:
        verbose_name = "Shopping Cart"
        verbose_name_plural = "Shopping Cart"

    def __str__(self):
        return self.product_name
|
Shafag42/SuperB_E-commerce
|
order/models.py
|
models.py
|
py
| 7,334 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42539411350
|
from django.shortcuts import render, redirect
from .models import *
import os
from django.conf import settings
from django.http import HttpResponse
import json
# Create your views here.
def cargarInicio(request):
    """Render the home page with all products plus dog/cat category subsets."""
    context = {
        "prod": Producto.objects.all(),
        "prod_dogs": Producto.objects.filter(categoria_id=1),
        "prod_cats": Producto.objects.filter(categoria_id=2),
    }
    return render(request, "inicio.html", context)
def cargarAgregarProducto(request):
    """Render the add-product form with every category and product listed."""
    context = {
        "cate": Categoria.objects.all(),
        "prod": Producto.objects.all(),
    }
    return render(request, "agregarProducto.html", context)
def agregarProducto(request):
    """Create a Producto from the submitted form and return to the form page."""
    datos = request.POST
    # Resolve the category chosen in the combo box before creating the row.
    categoria = Categoria.objects.get(id_categoria=datos['cmbCategoria'])
    Producto.objects.create(
        sku=datos['txtSku'],
        precio=datos['txtPrecio'],
        nombre=datos['txtNombre'],
        imagen=request.FILES['txtImagen'],
        descripcion=datos['txtDescripcion'],
        stock=datos['txtStock'],
        categoria_id=categoria,
    )
    return redirect('/agregarProducto')
def cargarEditarProducto(request, sku):
    """Render the edit form for the product identified by *sku*."""
    context = {
        "prod": Producto.objects.get(sku=sku),
        "cate": Categoria.objects.all(),
    }
    return render(request, "editarProducto.html", context)
def editarProducto(request):
    """Update an existing Producto from the submitted form data.

    If a new image was uploaded, the old image file is removed from disk and
    replaced; otherwise the product keeps its current image.
    """
    v_sku = request.POST['txtSku']
    productoBD = Producto.objects.get(sku=v_sku)
    v_precio = request.POST['txtPrecio']
    v_nombre = request.POST['txtNombre']
    v_descripcion = request.POST['txtDescripcion']
    v_stock = request.POST['txtStock']
    v_categoria = Categoria.objects.get(id_categoria=request.POST['cmbCategoria'])
    try:
        v_imagen = request.FILES['txtImagen']
        ruta_img = os.path.join(settings.MEDIA_ROOT, str(productoBD.imagen))
        os.remove(ruta_img)
    except (KeyError, OSError):
        # KeyError (Django's MultiValueDictKeyError subclasses it): no new
        # image uploaded. OSError: old file could not be removed. In both
        # cases fall back to the existing image. The original bare `except:`
        # also hid unrelated bugs (e.g. NameError) — now narrowed.
        v_imagen = productoBD.imagen
    productoBD.nombre = v_nombre
    productoBD.precio = v_precio
    productoBD.imagen = v_imagen
    productoBD.descripcion = v_descripcion
    productoBD.stock = v_stock
    productoBD.categoria_id = v_categoria
    productoBD.save()
    return redirect('/agregarProducto')
def eliminarProducto(request, sku):
    """Delete the product identified by *sku* and its image file on disk."""
    producto = Producto.objects.get(sku=sku)
    ruta_img = os.path.join(settings.MEDIA_ROOT, str(producto.imagen))
    try:
        os.remove(ruta_img)
    except FileNotFoundError:
        # The image file may already be missing; the database row should
        # still be deleted instead of raising a 500 error.
        pass
    producto.delete()
    return redirect('/agregarProducto')
def carrito(request):
    """Receive the cart as a JSON body and echo each item to the console."""
    items = json.loads(request.body)
    for item in items:
        print("SKU", item['sku'])
        print("CANTIDAD", item['cantidad'])
    return HttpResponse("OK!")
|
GuillermoVillacuraTorres/PGY3121-012D
|
django/apps/Tienda/views.py
|
views.py
|
py
| 2,897 |
python
|
es
|
code
| null |
github-code
|
6
|
33706250276
|
import sys
from PySide2.QtWidgets import QApplication, QMainWindow, QGroupBox, QRadioButton

# Minimal PySide2 demo: a main window holding a group box with three
# mutually exclusive radio buttons.
aplicacao = QApplication(sys.argv)
janela = QMainWindow()
# setGeometry(left, top, width, height)
janela.setGeometry(100, 50, 300, 200)
janela.setWindowTitle("Primeira Janela")
# Create a selection group inside the window.
group_box = QGroupBox("Selecione uma opção", janela)
group_box.move(50, 50)
group_box.resize(200, 100)
group_box.setStyleSheet('QGroupBox \
{background-color: yellow}')
# Create the radio buttons inside the selection group.
radio_btn_1 = QRadioButton("Opção 1", group_box)
radio_btn_1.move(10, 20)
radio_btn_2 = QRadioButton("Opção 2", group_box)
radio_btn_2.move(10, 40)
radio_btn_3 = QRadioButton("Opção 3", group_box)
radio_btn_3.move(10, 60)
radio_btn_3.setChecked(True)
janela.show()
# Propagate the Qt event-loop exit code to the shell. The original called
# sys.exit() *after* exec_() returned, which always reported success and
# discarded the application's real exit status.
sys.exit(aplicacao.exec_())
|
leuribeiru/QtforPhyton
|
componentes_basicos/radio.py
|
radio.py
|
py
| 865 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
32605878813
|
import discord
import youtube_dl  # NOTE(review): imported but never used — likely intended for "*chante".
from bot_token import TOKEN

# Fail fast when the token placeholder has not been filled in.
if not TOKEN:
    raise ValueError("Please add your token to bot_token.py")

client = discord.Client()

@client.event
async def on_message(message):
    # Ignore the bot's own messages to avoid reply loops.
    if message.author == client.user:
        return
    elif message.content.startswith("*l"):
        # Echo the text after "*l" followed by a greeting and a mention.
        msg = f'{message.content[3:]}Hello{message.author.mention}'
        # NOTE(review): client.send_message() exists only in discord.py < 1.0;
        # on 1.0+ this would need to be `await message.channel.send(msg)`.
        # Confirm the installed discord.py version.
        await client.send_message(message.channel, msg)
    elif message.content.startswith("*chante"):
        # NOTE(review): the URL is extracted but never used — audio playback
        # is unimplemented.
        url = message.content[8:]

@client.event
async def on_ready():
    # Log basic identity information once the gateway connection is ready.
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')

client.run(TOKEN)
|
F3YoD/Bot-python
|
tamer2.py
|
tamer2.py
|
py
| 669 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43529823665
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 11:51:37 2019
@author: javie
"""
import plotly_express as px
from plotly.offline import plot
def pl(df, r, var):
    """Plot *var* over time for the random seeds listed in *r*.

    One facet row per seed, one colored line per firm, scenarios
    distinguished by line dash. Opens the figure via plotly.offline.plot.
    """
    subset = df[df.randomSeed.isin(r)]
    fig = px.line(
        subset,
        height=300 * len(r),
        x="tick",
        y=var,
        color="FirmNumID",
        line_dash="scenario",
        facet_row="randomSeed",
    )
    plot(fig)
# Several variables melting columns
def plMelt(df, r, vars, id_vars=None):
    """Melt the columns in *vars* to long format and plot them, one facet
    column per variable and one facet row per random seed in *r*.

    ``id_vars`` defaults to ["randomSeed", "scenario", "tick", "FirmNumID"].
    (Fixed: the default was previously a mutable list literal — a shared
    mutable default argument. ``vars`` shadows the builtin but is kept for
    keyword-argument compatibility with existing callers.)
    """
    if id_vars is None:
        id_vars = ["randomSeed", "scenario", "tick", "FirmNumID"]
    tmp = df[df.randomSeed.isin(r)]
    tmp = tmp.melt(id_vars=id_vars, value_vars=vars)
    plot(px.line(tmp, height=300 * len(r), x="tick",
                 y="value",
                 color="FirmNumID",
                 line_dash="scenario",
                 facet_col="variable",
                 facet_row="randomSeed"
                 ))
|
javiergarciasanchez/businessCycles
|
businessCycles/exploreData/Python/Graphs_plotly.py
|
Graphs_plotly.py
|
py
| 871 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35049082181
|
"""
3D convolutions using GPU accelereration for Theano (using conv2d)
https://github.com/jaberg/TheanoConv3d2d
"""
import theano
from theano.gradient import DisconnectedType
from theano.gof import Op, Apply
from theano import tensor
import theano.sandbox.cuda as cuda
def get_diagonal_subtensor_view(x, i0, i1):
    """Helper function for DiagonalSubtensor and IncDiagonalSubtensor.

    Returns a *view* of ``x`` (not a copy): the first ``shape[i1] - 1``
    entries along axis ``i0`` are sliced away, then the stride along axis
    ``i1`` is reduced by the stride along ``i0`` so that stepping along
    ``i1`` also steps back along ``i0`` — a thick diagonal stripe through
    the (i0, i1) plane. For a 2-D input with ``i0=0, i1=1``:
    ``view[r, c]`` aliases ``x[shape[1] - 1 + r - c, c]``.
    """
    if x.shape[i0] < x.shape[i1]:
        raise NotImplementedError('is this allowed?')
    # Drop the leading shape[i1] - 1 entries along axis i0.
    index = [slice(None)] * x.ndim
    index[i0] = slice(x.shape[i1] - 1, None, None)
    view = x.__getitem__(tuple(index))
    # Shear the view in place by rewriting its strides.
    sheared = list(view.strides)
    sheared[i1] -= sheared[i0]
    view.strides = sheared
    return view
class DiagonalSubtensor(Op):
    """Return a form a nd diagonal subtensor.

    :param x: n-d tensor
    :param i0: axis index in x
    :param i1: axis index in x
    :note: Work on the GPU.

    ``x`` is some n-dimensional tensor, but this Op only deals with a
    matrix-shaped slice, using axes i0 and i1. Without loss of
    generality, suppose that ``i0`` picks out our ``row`` dimension,
    and i1 the ``column`` dimension.

    So the relevant part of ``x`` is some matrix ``u``. Suppose it has 6 rows
    and 4 columns::

        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]

    The view returned by this function is also a matrix. It's a thick,
    diagonal ``stripe`` across u that discards the lower left triangle
    and the upper right triangle:

        [ x 0 0 0 ]
        [ x x 0 0 ]
        [ x x x 0 ]
        [ 0 x x x ]
        [ 0 0 x x ]
        [ 0 0 0 x ]

    In this case the return value would be this view of shape 3x4. The
    returned view has the same number of dimensions as the input
    ``x``, and the only difference is that the shape along dimension
    ``i0`` has been reduced by ``shape[i1] - 1`` because of the
    triangles that got chopped out.

    The NotImplementedError is meant to catch the case where shape[i0]
    is too small for the stripe to reach across the matrix, in which
    case it's not clear what this function should do. Maybe always
    raise an error. I'd look back to the call site in the Conv3D to
    see what's necessary at that point.
    """

    def __str__(self):
        # Mark the inplace variant in debug printouts and graph dumps.
        if self.inplace:
            return "%s{inplace}" % self.__class__.__name__
        return "%s" % self.__class__.__name__

    def __init__(self, inplace=False):
        self.inplace = inplace
        if inplace:
            # Declare that output 0 is a view of input 0 (perform() returns
            # the view without copying in that case).
            self.view_map = {0: [0]}

    def __eq__(self, other):
        # Two instances are interchangeable iff they share the inplace flag;
        # this lets the graph optimizer merge equivalent Ops.
        return type(self) == type(other) and self.inplace == other.inplace

    def __hash__(self):
        # Paired with __eq__ above so equal Ops hash identically.
        return hash((type(self), self.inplace))

    def make_node(self, x, i0, i1):
        # Axis indices become symbolic constants; the output shares x's type.
        _i0 = tensor.as_tensor_variable(i0)
        _i1 = tensor.as_tensor_variable(i1)
        return Apply(self, [x, _i0, _i1], [x.type()])

    def perform(self, node, inputs, output_storage):
        xview = get_diagonal_subtensor_view(*inputs)
        if self.inplace:
            output_storage[0][0] = xview
        else:
            output_storage[0][0] = xview.copy()

    def grad(self, inputs, g_outputs):
        # Gradient w.r.t. x: scatter the output gradient back onto a zero
        # tensor along the same diagonal stripe.
        z = tensor.zeros_like(inputs[0])
        gx = inc_diagonal_subtensor(z, inputs[1], inputs[2], g_outputs[0])
        # i0 and i1 are integer axis positions, not differentiable inputs.
        return [gx, DisconnectedType()(), DisconnectedType()()]

    def connection_pattern(self, node):
        # Only input 0 (x) influences the output.
        rval = [[True], [False], [False]]
        return rval
# Module-level singleton: the non-inplace DiagonalSubtensor Op.
diagonal_subtensor = DiagonalSubtensor(False)
class IncDiagonalSubtensor(Op):
    """
    The gradient of DiagonalSubtensor: add ``amt`` into the diagonal
    stripe of ``x`` selected by axes ``i0``/``i1`` and return the result.
    """

    def __str__(self):
        # Mark the inplace variant in debug printouts and graph dumps.
        if self.inplace:
            return "%s{inplace}" % self.__class__.__name__
        return "%s" % self.__class__.__name__

    def __init__(self, inplace=False):
        self.inplace = inplace
        if inplace:
            # Unlike DiagonalSubtensor's view_map, this Op *mutates* input 0
            # when inplace, hence destroy_map.
            self.destroy_map = {0: [0]}

    def __eq__(self, other):
        # Interchangeable iff the inplace flag matches (enables Op merging).
        return type(self) == type(other) and self.inplace == other.inplace

    def __hash__(self):
        return hash((type(self), self.inplace))

    def make_node(self, x, i0, i1, amt):
        _i0 = tensor.as_tensor_variable(i0)
        _i1 = tensor.as_tensor_variable(i1)
        return Apply(self, [x, _i0, _i1, amt], [x.type()])

    def perform(self, node, inputs, output_storage):
        x, i0, i1, amt = inputs
        if not self.inplace:
            # Copy first so the caller's array is untouched.
            x = x.copy()
        # In-place += through the strided view updates x along the stripe.
        xview = get_diagonal_subtensor_view(x, i0, i1)
        xview += amt
        output_storage[0][0] = x

    def grad(self, inputs, g_outputs):
        x, i0, i1, amt = inputs
        gy = g_outputs[0]
        # d/dx passes through unchanged; d/damt reads the stripe back out.
        return [gy, DisconnectedType()(), DisconnectedType()(),
                diagonal_subtensor(gy, i0, i1)]

    def connection_pattern(self, node):
        # Inputs 0 (x) and 3 (amt) influence the output; the axis ints don't.
        rval = [[True], [False], [False], [True]]
        return rval
# Module-level singleton: the non-inplace IncDiagonalSubtensor Op.
inc_diagonal_subtensor = IncDiagonalSubtensor(False)
def conv3d(signals, filters,
           signals_shape=None, filters_shape=None,
           border_mode='valid'):
    """Convolve spatio-temporal filters with a movie.

    Implemented by collapsing the (batch, time) axes, running a single 2D
    convolution, then summing along a diagonal of the (Ts, Tf) submatrix
    to realize the temporal convolution.

    :param signals: timeseries of images whose pixels have color channels.
        shape: [Ns, Ts, C, Hs, Ws]
    :param filters: spatio-temporal filters
        shape: [Nf, Tf, C, Hf, Wf]
    :param signals_shape: None or a tuple/list with the shape of signals
    :param filters_shape: None or a tuple/list with the shape of filters
    :param border_mode: The only one tested is 'valid'.

    :note: Work on the GPU.
    """
    # Normalize border_mode to a (time, height, width) triple.
    if isinstance(border_mode, str):
        border_mode = (border_mode, border_mode, border_mode)

    _signals_shape_5d = signals.shape if signals_shape is None else signals_shape
    _filters_shape_5d = filters.shape if filters_shape is None else filters_shape

    # Fold time into the batch/filter axes so conv2d can process every
    # (frame, filter-slice) pair at once.
    _signals_shape_4d = (
        _signals_shape_5d[0] * _signals_shape_5d[1],
        _signals_shape_5d[2],
        _signals_shape_5d[3],
        _signals_shape_5d[4],
    )
    _filters_shape_4d = (
        _filters_shape_5d[0] * _filters_shape_5d[1],
        _filters_shape_5d[2],
        _filters_shape_5d[3],
        _filters_shape_5d[4],
    )

    if border_mode[1] != border_mode[2]:
        raise NotImplementedError('height and width bordermodes must match')
    conv2d_signal_shape = _signals_shape_4d
    conv2d_filter_shape = _filters_shape_4d
    if signals_shape is None:
        conv2d_signal_shape = None
    if filters_shape is None:
        conv2d_filter_shape = None

    out_4d = tensor.nnet.conv2d(
        signals.reshape(_signals_shape_4d),
        filters.reshape(_filters_shape_4d),
        image_shape=conv2d_signal_shape,
        filter_shape=conv2d_filter_shape,
        border_mode = border_mode[1])  # ignoring border_mode[2]

    # reshape the output to restore its original size
    # shape = Ns, Ts, Nf, Tf, W-Wf+1, H-Hf+1
    if border_mode[1] == 'valid':
        out_tmp = out_4d.reshape((
            _signals_shape_5d[0],  # Ns
            _signals_shape_5d[1],  # Ts
            _filters_shape_5d[0],  # Nf
            _filters_shape_5d[1],  # Tf
            _signals_shape_5d[3] - _filters_shape_5d[3] + 1,
            _signals_shape_5d[4] - _filters_shape_5d[4] + 1,
            ))
    elif border_mode[1] == 'full':
        out_tmp = out_4d.reshape((
            _signals_shape_5d[0],  # Ns
            _signals_shape_5d[1],  # Ts
            _filters_shape_5d[0],  # Nf
            _filters_shape_5d[1],  # Tf
            _signals_shape_5d[3] + _filters_shape_5d[3] - 1,
            _signals_shape_5d[4] + _filters_shape_5d[4] - 1,
            ))
    elif border_mode[1] == 'same':
        raise NotImplementedError()
    else:
        raise ValueError('invalid border mode', border_mode[1])

    # now sum out along the Tf to get the output
    # but we have to sum on a diagonal through the Tf and Ts submatrix.
    if border_mode[0] == 'valid':
        out_5d = diagonal_subtensor(out_tmp, 1, 3).sum(axis=3)
    elif border_mode[0] in ('full', 'same'):
        # NOTE(review): this branch reshapes out_4d directly and performs no
        # diagonal summation (out_tmp is unused here); the commented
        # NotImplementedError below suggests this path is unfinished —
        # confirm before relying on 'full'/'same' temporal border modes.
        out_5d = out_4d.reshape((_signals_shape_5d))
        # raise NotImplementedError('sequence border mode', border_mode[0])
    else:
        raise ValueError('invalid border mode', border_mode[1])
    return out_5d
def make_gpu_optimizer(op, to_gpu):
    """This function create optimizer that move some inputs to the GPU
    for op that work on both CPU and GPU.

    The op object is created by calling op(), so good default value
    are needed.

    We suppose the same op work with CPU and GPU inputs.

    :param op: the op that support GPU inputs
    :param to_gpu: a list of op inputs that are moved to the GPU.
    """
    @theano.gof.local_optimizer([])
    def local_to_gpu(node):
        """
        op(host_from_gpu()) -> host_from_gpu(op)
        gpu_from_host(op) -> op(gpu_from_host)
        """
        if isinstance(node.op, op):
            #op(host_from_gpu()) -> host_from_gpu(op)
            #If any of the input that go on the GPU are on the GPU,
            #move the op to the gpu.
            if any(node.inputs[idx].owner and
                   isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)
                   for idx in to_gpu):
                new_inp = list(node.inputs)
                for idx in to_gpu:
                    new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
                # Wrap the GPU op back in host_from_gpu so the graph's
                # output type is unchanged.
                return [cuda.host_from_gpu(op()(*new_inp))]
        if node.op == cuda.gpu_from_host:
            #gpu_from_host(op) -> op(gpu_from_host)
            host_input = node.inputs[0]
            if host_input.owner and isinstance(host_input.owner.op,
                                               op):
                op_node = host_input.owner
                new_inp = list(op_node.inputs)
                for idx in to_gpu:
                    new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
                return [op()(*new_inp)]
        # No rewrite applies at this node.
        return False
    # Give each generated optimizer a distinct, recognizable name before
    # registering it with Theano's CUDA optimizer database.
    local_to_gpu.__name__ = "local_to_gpu_" + op.__name__
    cuda.opt.register_opt()(local_to_gpu)
# Register the GPU-transfer optimizations only when CUDA is present.
# Index [0] moves x; for the increment Op, [0, 3] also moves amt.
if cuda.cuda_available:
    make_gpu_optimizer(DiagonalSubtensor, [0])
    make_gpu_optimizer(IncDiagonalSubtensor, [0, 3])
|
lpigou/Theano-3D-ConvNet
|
convnet3d/conv3d2d.py
|
conv3d2d.py
|
py
| 10,163 |
python
|
en
|
code
| 83 |
github-code
|
6
|
1360579310
|
import pandas as pd
import pathlib as pl
import numpy as np
import RootPath
from abc import abstractmethod
from Utils.Data.Features.RawFeatures import *
from Utils.Data.Dictionary.MappingDictionary import *
def map_column_single_value(series, dictionary):
    """Translate every value of *series* through *dictionary* and return the
    result as a single-column DataFrame of int32."""
    translated = series.map(dictionary).astype(np.int32)
    return pd.DataFrame(translated)
def map_column_array(series, dictionary):
    """Split each tab-separated entry of *series*, translate every token
    through *dictionary*, and return int32 arrays as a single-column
    DataFrame. Entries that are ``pd.NA`` become ``None``."""
    def translate(entry):
        if entry is pd.NA:
            return None
        tokens = entry.split('\t')
        return np.array([dictionary[token] for token in tokens], dtype=np.int32)

    return pd.DataFrame(series.map(translate))
class MappedFeaturePickle(Feature):
    """
    Abstract base for mapped features persisted as gzipped pickle files.

    Subclasses implement create_feature() to compute the mapped dataframe
    and call save_feature().
    """

    def __init__(self, feature_name: str, dataset_id: str):
        super().__init__(feature_name, dataset_id)
        # Gzipped pickle is the primary storage; the csv path is only used
        # by the commented-out backup code in save_feature().
        self.pck_path = pl.Path(f"{Feature.ROOT_PATH}/{self.dataset_id}/mapped/{self.feature_name}.pck.gz")
        self.csv_path = pl.Path(f"{Feature.ROOT_PATH}/{self.dataset_id}/mapped/{self.feature_name}.csv.gz")

    def has_feature(self):
        # The feature exists iff its pickle file is present on disk.
        return self.pck_path.is_file()

    def load_feature(self):
        assert self.has_feature(), f"The feature {self.feature_name} does not exists. Create it first."
        df = pd.read_pickle(self.pck_path, compression="gzip")
        # Renaming the column for consistency purpose
        df.columns = [self.feature_name]
        return df

    @abstractmethod
    def create_feature(self):
        pass

    def save_feature(self, dataframe: pd.DataFrame):
        # Changing column name
        dataframe.columns = [self.feature_name]
        self.pck_path.parent.mkdir(parents=True, exist_ok=True)
        dataframe.to_pickle(self.pck_path, compression='gzip')
        # For backup reason
        # self.csv_path.parent.mkdir(parents=True, exist_ok=True)
        # dataframe.to_csv(self.csv_path, compression='gzip', index=True)
class MappedFeatureTweetLanguage(MappedFeaturePickle):
    """Tweet language strings mapped to int32 ids via the language dictionary."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_language", dataset_id)

    def create_feature(self):
        # Load the raw column, translate it through the mapping dictionary,
        # and persist the result.
        feature = RawFeatureTweetLanguage(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingLanguageDictionary().load_or_create()
        mapped_dataframe = map_column_single_value(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureGroupedTweetLanguage(MappedFeaturePickle):
    """Tweet language ids collapsed into coarse language groups, then
    re-mapped to a dense 0..N id space (ids assigned in order of first
    appearance)."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_grouped_tweet_language", dataset_id)
        # group id -> dense id, filled lazily by remap_language_id().
        self.group_id_dict = {}
        self.current_mapping = 0

    def get_grouped_id(self, language_id):
        """Collapse an individual language id into its coarse group id."""
        # ??? English mixed with other things
        if language_id == 16 or language_id == 18 or language_id == 20:
            return 16
        # [UNK]
        elif language_id == 26 or language_id == 56 or language_id == 57 or language_id == 58 or language_id == 59 or language_id == 61:
            return 26
        # ???
        elif language_id == 28 or language_id == 36 or language_id == 37 or language_id == 43 or language_id == 45 or language_id == 46:
            return 28
        # Persian / Pashto
        elif language_id == 25 or language_id == 44 or language_id == 41:
            return 25
        # Indian languages
        elif language_id == 8 or language_id == 32 or language_id == 34 or language_id == 35 or language_id == 47 or language_id == 48 or language_id == 49 or language_id == 50 or language_id == 52 or language_id == 53 or language_id == 54 or language_id == 60 or language_id == 62:
            return 8
        # Eastern-European languages
        elif language_id == 14 or language_id == 23 or language_id == 24 or language_id == 55:
            return 14
        # Northern-European languages
        elif language_id == 21 or language_id == 31 or language_id == 38 or language_id == 39:
            return 21
        # Central-European / Balkan languages
        elif language_id == 29 or language_id == 40 or language_id == 42:
            return 29
        # others (Vietnamese, Burmese, Armenian, Georgian, Uyghur)
        elif language_id == 30 or language_id == 51 or language_id == 63 or language_id == 64 or language_id == 65:
            return 30
        else:
            return language_id

    def remap_language_id(self, group_id):
        """Assign (or look up) a dense sequential id for *group_id*."""
        if group_id not in self.group_id_dict:
            self.group_id_dict[group_id] = self.current_mapping
            self.current_mapping += 1
        return self.group_id_dict[group_id]

    def create_feature(self):
        feature = MappedFeatureTweetLanguage(self.dataset_id)
        dataframe = feature.load_or_create()
        grouped_dataframe = pd.DataFrame(
            dataframe["mapped_feature_tweet_language"].map(self.get_grouped_id))
        # BUG FIX: remap the *grouped* ids. The original mapped
        # remap_language_id over the raw language column, so the grouping
        # computed above was silently discarded.
        mapped_dataframe = pd.DataFrame(
            grouped_dataframe["mapped_feature_tweet_language"].map(self.remap_language_id))
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetId(MappedFeaturePickle):
    """Raw tweet ids mapped to int32 ids via the tweet-id dictionary."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_id", dataset_id)

    def create_feature(self):
        feature = RawFeatureTweetId(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingTweetIdDictionary().load_or_create()
        mapped_dataframe = map_column_single_value(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureCreatorId(MappedFeaturePickle):
    """Raw creator (author) ids mapped to int32 ids via the user-id dictionary."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_creator_id", dataset_id)

    def create_feature(self):
        feature = RawFeatureCreatorId(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingUserIdDictionary().load_or_create()
        mapped_dataframe = map_column_single_value(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureEngagerId(MappedFeaturePickle):
    """Raw engager ids mapped to int32 ids via the shared user-id dictionary."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_engager_id", dataset_id)

    def create_feature(self):
        feature = RawFeatureEngagerId(self.dataset_id)
        dataframe = feature.load_or_create()
        # Same dictionary as MappedFeatureCreatorId, so creator and engager
        # ids live in one id space.
        dictionary = MappingUserIdDictionary().load_or_create()
        mapped_dataframe = map_column_single_value(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetHashtags(MappedFeaturePickle):
    """Tab-separated hashtag lists mapped to int32 arrays (None when absent)."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_hashtags", dataset_id)

    def create_feature(self):
        feature = RawFeatureTweetHashtags(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingHashtagDictionary().load_or_create()
        mapped_dataframe = map_column_array(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetLinks(MappedFeaturePickle):
    """Tab-separated link lists mapped to int32 arrays (None when absent)."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_links", dataset_id)

    def create_feature(self):
        feature = RawFeatureTweetLinks(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingLinkDictionary().load_or_create()
        mapped_dataframe = map_column_array(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetDomains(MappedFeaturePickle):
    """Tab-separated domain lists mapped to int32 arrays (None when absent)."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_domains", dataset_id)

    def create_feature(self):
        feature = RawFeatureTweetDomains(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingDomainDictionary().load_or_create()
        mapped_dataframe = map_column_array(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetMedia(MappedFeaturePickle):
    """Tab-separated media-type lists mapped to int32 arrays (None when absent)."""

    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_media", dataset_id)

    def create_feature(self):
        feature = RawFeatureTweetMedia(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingMediaDictionary().load_or_create()
        mapped_dataframe = map_column_array(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
|
MaurizioFD/recsys-challenge-2020-twitter
|
Utils/Data/Features/MappedFeatures.py
|
MappedFeatures.py
|
py
| 8,608 |
python
|
en
|
code
| 39 |
github-code
|
6
|
14731423365
|
from datetime import datetime
import pandas as pd
import pydash as _
from bs4 import BeautifulSoup
from Base import NSEBase
class NSE(NSEBase):
"""
A class to interact with NSE (National Stock Exchange) API.
Attributes:
valid_pcr_fields : list of valid fields for put-call ratio calculation
Methods:
__init__ : Initialize the NSE class
get_option_chain : Get the option chain for a given ticker
get_raw_option_chain : Get the raw option chain data for a given ticker
get_options_expiry : Get the next expiry date for a given ticker
get_all_derivatives_enabled_stocks : Get the list of equities available for derivatives trading
get_equity_future_trade_info : Get the trade information of active future contracts for a given ticker
get_equity_options_trade_info : Get the trade information of equity options for a given ticker
_mapped_index_ticker_for_futures : Get the mapped index ticker for index futures
get_index_futures_data : Get the data for index futures of a given index or ticker
get_currency_futures : Get the data for currency futures
get_commodity_futures : Get the data for commodity futures
get_pcr : Get the put-call ratio for a given ticker and expiry date
"""
def __init__(self) -> None:
    """
    Initialize the NSE client: delegate session/header setup to NSEBase
    and record the fields accepted for put-call-ratio computation.

    :param self: Represent the instance of the class
    :return: Nothing
    """
    super().__init__()
    # Per the class docstring, get_pcr() validates against this list:
    # open interest ('oi') or traded volume ('volume').
    self.valid_pcr_fields = ['oi', 'volume']
# ----------------------------------------------------------------------------------------------------------------
# Utility Functions
def get_option_chain(self, ticker: str, is_index: bool = True, expiry: datetime = None) -> pd.DataFrame:
    """Fetch the option chain for *ticker* as a DataFrame indexed by strike price.

    :param ticker: NSE symbol whose option chain is requested
    :param is_index: True when the symbol is an index (different API endpoint)
    :param expiry: optional expiry date; when given, the full records are
        fetched and filtered down to that expiry, otherwise the API's
        pre-filtered (nearest-expiry) data is returned
    :return: option-chain DataFrame indexed by ``strikePrice``
    """
    endpoint = 'option-chain-indices' if is_index else 'option-chain-equities'
    url = f'{self._base_url}/api/{endpoint}'
    response = self.hit_and_get_data(url, params={'symbol': ticker})
    # Without an expiry the API's "filtered" block is enough; with one we
    # need the complete "records" block to filter ourselves.
    record_path = 'filtered.data' if expiry is None else 'records.data'
    df = pd.DataFrame(
        pd.json_normalize(_.get(response, record_path, {}), sep='_')
    ).set_index('strikePrice')
    if expiry is not None:
        df = df[df['expiryDate'] == expiry.strftime('%d-%b-%Y')]
    return df
def get_raw_option_chain(self, ticker: str, is_index: bool = True) -> dict:
"""
The get_option_chain function takes a ticker as input and returns the option chain for that ticker.
The function uses the try_n_times_get_response function to get a response from NSE's API, which is
then converted into a DataFrame using pd.json_normalize.
:param is_index: Boolean value Specifies the given ticker is an index or not
:param self: Represent the instance of the class
:param ticker: Specify the stock ticker for which we want to get the option chain
:return: A dataframe with option chain data
"""
params = {'symbol': ticker}
if is_index:
url = f'{self._base_url}/api/option-chain-indices'
else:
url = f'{self._base_url}/api/option-chain-equities'
response = self.hit_and_get_data(url, params=params)
return response
def get_options_expiry(self, ticker: str, is_index: bool = False) -> datetime:
"""
The get_expiry function takes in a ticker and returns the next expiry date for that ticker.
The function uses the NSE API to get all expiry dates for a given ticker, sorts them in ascending order,
and then returns the nth element of this sorted list.
:param self: Represent the instance of the class
:param ticker: Specify the ticker / symbol for which we want to get the expiry date
:param is_index: Boolean value Specifies the given ticker is an index or not
:return: The very next expiry date
"""
params = {'symbol': ticker}
if is_index:
url = f'{self._base_url}/api/option-chain-indices'
else:
url = f'{self._base_url}/api/option-chain-equities'
response = self.hit_and_get_data(url, params=params)
dates = sorted([datetime.strptime(date_str, "%d-%b-%Y") for date_str in
response.get('records', {}).get('expiryDates', [])])
return dates
# ----------------------------------------------------------------------------------------------------------------_
# Equity Futures
def get_all_derivatives_enabled_stocks(self) -> list:
"""
The get_all_derivatives_enabled_stocks provides the list of Equities available for derivative trading
:param self: Represent the instance of the class
:return: List of all Equities tickers / symbols for which derivative trading is allowed
"""
response = self.hit_and_get_data(f'{self._base_url}/api/master-quote')
return response
def get_equity_future_trade_info(self, ticker: str) -> pd.DataFrame:
"""
The get_equity_future_trade_info provides all active future contracts trade information including its price
details
:param self: Represent the instance of the class
:param ticker: Specify the ticker / symbol for which we want to get the expiry date
:return: A DataFrame of trade info data of Equity Future contracts
"""
params = {'symbol': ticker}
response = self.hit_and_get_data(f'{self._base_url}/api/quote-derivative', params=params)
future_data = []
for fno_data in response.get('stocks', []):
if fno_data.get('metadata', {}).get('instrumentType') == 'Stock Futures':
future_data.append(fno_data)
df = pd.DataFrame(pd.json_normalize(future_data, sep='_'))
df['ticker'] = response.get('info', {}).get('symbol', '')
df['companyName'] = response.get('info', {}).get('companyName', '')
df['industry'] = response.get('info', {}).get('industry', '')
df['fut_timestamp'] = response.get('fut_timestamp', '')
return df
# ----------------------------------------------------------------------------------------------------------------
# Equity Options
def get_equity_options_trade_info(self, ticker: str) -> pd.DataFrame:
"""
Gets equity options trade information for a given ticker.
:param ticker: Ticker symbol of the equity options trade.
:return: DataFrame containing the trade information.
"""
params = {'symbol': ticker}
response = self.hit_and_get_data(f'{self._base_url}/api/quote-derivative', params=params)
future_data = []
for fno_data in response.get('stocks', []):
if fno_data.get('metadata', {}).get('instrumentType') == 'Stock Options':
future_data.append(fno_data)
df = pd.DataFrame(pd.json_normalize(future_data, sep='_'))
df['ticker'] = response.get('info', {}).get('symbol', '')
df['companyName'] = response.get('info', {}).get('companyName', '')
df['industry'] = response.get('info', {}).get('industry', '')
df['opt_timestamp'] = response.get('opt_timestamp', '')
return df
# ----------------------------------------------------------------------------------------------------------------
# Index Futures
def _mapped_index_ticker_for_futures(self) -> dict:
"""
Mapped index ticker will give dict of available options with its corresponding ticker value
:param self: Represent the instance of the class
:return: A dict obj with all FUTURES mappings
"""
response = self.session.get(f'{self._base_url}//market-data/equity-derivatives-watch',
headers=self.headers)
soup = BeautifulSoup(response.text, features="html5lib")
all_derivative_options = soup.find_all('option', attrs={"rel": "derivative"})
mapped_index_ticker = {}
for i in all_derivative_options:
mapped_index_ticker[i.get_text().lower()] = i['value']
return mapped_index_ticker
def get_index_futures_data(self, index_or_ticker: str) -> pd.DataFrame:
"""
Fetches index futures data.
:param self: Represent the instance of the class
:param index_or_ticker: Name or ticker symbol of the index.
:return: DataFrame containing the FUTURES data
"""
index_or_ticker = index_or_ticker.lower()
mapped_tickers = {}
try:
mapped_tickers = self._mapped_index_ticker_for_futures()
except Exception as err:
print(
f'Exception in fetching mapped ticker for this index try to pass actual ticker in the next call, '
f'Exact error : {err}')
if index_or_ticker in mapped_tickers.keys():
ticker_to_used = mapped_tickers[index_or_ticker]
else:
ticker_to_used = index_or_ticker
params = {'index': ticker_to_used}
response = self.hit_and_get_data(f'{self._base_url}/api/liveEquity-derivatives', params=params)
df = pd.DataFrame(response.get('data', []))
return df
# ----------------------------------------------------------------------------------------------------------------
# Currency
def get_currency_futures(self) -> pd.DataFrame:
"""
Fetches currency futures data.
:param self: Represent the instance of the class
:return: DataFrame containing the currency futures data
"""
params = {'index': 'live_market_currency', 'key': 'INR'}
response = self.hit_and_get_data(
f'{self._base_url}/api/liveCurrency-derivatives', params=params)
df = pd.DataFrame(response.get('data', []))
return df
# ----------------------------------------------------------------------------------------------------------------
# Commodity
def get_commodity_futures(self) -> pd.DataFrame:
"""
Fetches commodity futures data.
:param self: Represent the instance of the class
:return: Pd.DataFrame: DataFrame containing the currency futures data
"""
response = self.hit_and_get_data(f'{self._base_url}/api/liveCommodity-derivatives')
df = pd.DataFrame(response.get('data', []))
return df
def get_pcr(self, ticker: str, is_index: bool = True, on_field: str = 'OI', expiry: datetime = None) -> float:
"""
Calculate the put-call ratio (PCR) for a given ticker.
:param self: Represent the instance of the class
:param ticker: The ticker symbol.
:param is_index: Boolean value Specifies the given ticker is an index or not
:param expiry: The expiry date of the option contract. Defaults to None.
:param on_field: The field to calculate PCR on. `Volume` or `oi` (open-interest) Default to 'OI'.
:return: The calculated PCR value
"""
on_field = on_field.lower()
if on_field not in self.valid_pcr_fields:
print(f'Un-supported filed is passed only these are the fields available : {self.valid_pcr_fields}')
return 0
if expiry is None:
df = self.get_option_chain(ticker, is_index=is_index)
else:
df = self.get_option_chain(ticker, is_index=is_index, expiry=expiry)
if df.shape[0] == 0:
print('Your filters lead to empty DataSet check all params, expiry, etc; returning 0 as default')
return 0
if on_field == 'oi':
put_oi = df['PE_openInterest'].sum()
call_oi = df['CE_openInterest'].sum()
return put_oi / call_oi
else:
put_vol = df['PE_totalTradedVolume'].sum()
call_vol = df['CE_totalTradedVolume'].sum()
return put_vol / call_vol
|
Sampad-Hegde/Bharat-SM-Data
|
Bharat_sm_data/Derivatives/NSE.py
|
NSE.py
|
py
| 13,281 |
python
|
en
|
code
| 2 |
github-code
|
6
|
1592231392
|
import asyncio
from flask import Blueprint, abort, flash, redirect, render_template, request, jsonify, url_for, Response
from werkzeug.utils import secure_filename
import socket
from flask_socketio import SocketIO, emit
from app import app, db, socketio
import os
import time
# Replica/streaming server endpoint (loopback for local testing).
HOST = "127.0.1.1"
WEBSOCKET_PORT = 9999
CHUNK_SIZE = 4096  # Streaming packet size; adjust as needed.
# IP addresses of the replica storage servers
REPLICA_SERVERS = [HOST, HOST, HOST]  # mocked local IPs
#REPLICA_SERVERS = ["192.168.1.2", "192.168.1.3", "192.168.1.4"]  # replica machine IPs
main = Blueprint('main', __name__)
# Maps file extension -> MIME type used for the streaming Response.
MIME_TYPES = {
    "mp4": "video/mp4",
    "avi": "video/x-msvideo",
    "mkv": "video/x-matroska",
    "flv": "video/x-flv"
}
class StreamingError(Exception):
    """Custom exception for video-streaming errors (used for replica failover)."""
    pass
class Video(db.Model):
    """ORM row describing one uploaded video file."""
    __tablename__ = 'video'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    # Stored file name (sanitized via secure_filename before insert).
    filename = db.Column(db.String(150), unique=True, nullable=False)
    description = db.Column(db.String(500), nullable=True)
# Create the table at import time, inside an application context.
with app.app_context():
    db.create_all()
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in app.config['ALLOWED_EXTENSIONS']
def upload_to_replica(filename, file_content):
    """Push *file_content* to every replica server over a raw TCP socket.

    Wire format: "UPLOAD" command, then the content length and the filename
    length as 10-digit zero-padded ASCII strings, then the filename, then the
    raw bytes. Failures are logged per server and do not abort the loop.
    """
    for server_ip in REPLICA_SERVERS:
        try:
            client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.connect((server_ip, WEBSOCKET_PORT))
            # Send the UPLOAD command
            header = f"UPLOAD"
            client.send(header.encode())
            # Send the file size as a 10-character zero-padded string
            client.send(str(len(file_content)).encode().zfill(10))
            # Send the filename length
            client.send(str(len(filename)).encode().zfill(10))
            # Send the filename
            client.send(filename.encode())
            # Send the file bytes
            client.sendall(file_content)
            client.close()
        except Exception as e:
            print(f"Erro ao enviar para servidor {server_ip}: {e}")
@main.route('/upload', methods=['POST'])
def upload_file():
    """Accept a video upload, persist it locally and in the DB, then replicate it.

    Returns JSON errors (400) for missing/invalid files, otherwise a plain
    success message.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file provided"}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({"error": "No file selected"}), 400
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)
        new_video = Video(filename=filename)
        db.session.add(new_video)
        db.session.commit()
        # Re-read the saved file and fan it out to the replica servers.
        with open(file_path, 'rb') as f:
            file_content = f.read()
        upload_to_replica(filename, file_content)
        return "File uploaded successfully! You can now upload another file."
    return jsonify({"error": "Invalid file type"}), 400
@main.route('/', methods=['GET'])
def show_upload():
    """Render the upload form page."""
    return render_template('upload.html')
@main.route('/videos', methods=['GET'])
def list_videos():
    """Render the list of all uploaded videos."""
    videos = Video.query.all()
    return render_template('video_list.html', videos=videos)
from websockets import connect as ws_connect
@main.route('/play/<int:video_id>', methods=['GET'])
def play_video(video_id):
    """Stream a stored video by DB id, retrying on StreamingError.

    NOTE(review): stream_video always connects to the same HOST, so the three
    retries hit the same server — the per-replica failover is not actually
    implemented yet. Also, an unknown video_id makes `video.filename` raise
    AttributeError (no None check) — confirm intended behavior.
    """
    video = Video.query.get(video_id)
    video_name = video.filename
    # Failover for video streaming: try up to 3 times, once per replica.
    for _ in range(3):
        try:
            return stream_video(video_name)
        except StreamingError:
            continue  # On error, try the next replica
    return "Não foi possível reproduzir o vídeo."
def stream_video(video_name):
    """Open a TCP connection to the storage server and stream the video back.

    Sends a "STREAM" command plus the zero-padded name length and name, then
    relays CHUNK_SIZE-byte chunks to the client as a Flask Response.
    NOTE(review): only ConnectionError raised before the Response is returned
    becomes a StreamingError; errors inside the generator (mid-stream) are not
    converted — confirm that is acceptable.
    """
    try:
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect((HOST, 9999))
        header = f"STREAM"
        client.send(header.encode())
        client.send(str(len(video_name)).zfill(10).encode())
        client.send(video_name.encode())
        def generate():
            # Relay chunks until the server closes the connection.
            while True:
                chunk = client.recv(CHUNK_SIZE)
                if not chunk:
                    break
                yield chunk
        ext = video_name.split('.')[-1]
        mime_type = MIME_TYPES.get(ext, "video/mp4")
        return Response(generate(), content_type=mime_type)
    except ConnectionError:
        # Raised when there is a network connection problem
        raise StreamingError("Erro de conexão durante o streaming do vídeo")
@main.route('/delete_video/<int:video_id>', methods=['POST'])
def delete_video(video_id):
    """Delete a video row by id and redirect back to the listing page."""
    video = Video.query.get(video_id)
    if video:
        db.session.delete(video)
        db.session.commit()
        return redirect(url_for('main.list_videos'))
    else:
        # Video not found in the database
        flash('Vídeo não encontrado', 'error')
        return redirect(url_for('main.list_videos'))
# Dev entry point; in production the app is served by the WSGI container.
if __name__ == '__main__':
    app.run(debug=True)
|
isaacbrasil/My-youtube-flask
|
app/blueprints/client.py
|
client.py
|
py
| 5,180 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
10242082365
|
# -*- coding: utf-8 -*-
"""The Simulator takes in a :obj:`seagull.Board`, and runs a simulation given a
set number of iterations and a rule. For each iteration, the rule is applied to
the Board in order to evolve the lifeforms. After the simulation, run
statistics are returned.
.. code-block:: python
import seagull as sg
board = sg.Board()
board.add(Blinker(), loc=(0,0))
# Initialize a simulator
sim = sg.Simulator(board)
stats = sim.run(sg.rules.conway_classic, iters=1000)
You can always get the history of the whole simulation by calling the
`get_history()` method. The length of the history will always be equal to
:code:`iters + 1` since we include the initial state
.. note::
Running a simulation does not change the :code:`state` attribute of the
board. Internally, the simulator makes a copy of that layout and updates
that instead. This is to avoid unintended behaviour when running
simulations again and again.
Various statistics such as entropy, peak cell coverage, and the like are
returned as a dictionary. This gives us an idea on the characteristics of the
simulation experiment.
.. note::
Some statistics are highly-dependent on the size of the board and the
number of iterations. For example, peak cell coverage (pertaining to the
max. amount of active cells during the whole run) depends on board size. If
you have better ideas for computing these statistics, please open-up an
Issue!
The :code:`run()` method only computes the progress of the board for the whole
simulation, but it does not animate it yet. To create an animation, call the
:code:`animate()` method:
.. code-block:: python
sim.animate()
This returns a :obj:`matplotlib.animation.FuncAnimation` that you can turn into
an interactive animation in your notebook or exported as a GIF.
.. note::
When exporting to GIF, it is required to have the ffmpeg backend installed.
"""
# Import standard library
from typing import Callable, Union
# Import modules
import matplotlib.pyplot as plt
import numpy as np
from loguru import logger
from matplotlib import animation
from .board import Board
from .utils import statistics as stats
class Simulator:
    def __init__(self, board: Board):
        """Initialize the class
        Parameters
        ----------
        board : seagull.Board
            The board to run the simulation on
        """
        self.board = board
        self.history = []  # type: list
        self.stats = {}  # type: dict
    def run(self, rule: Callable, iters: int, **kwargs) -> dict:
        """Run the simulation for a given number of iterations
        The history is reset at the start of every call, so after run()
        completes ``len(self.get_history()) == iters + 1`` (initial state
        included), as documented in the module docstring.
        Parameters
        ----------
        rule : callable
            Callable that takes in an array and returns an array of the same
            shape.
        iters : int
            Number of iterations to run the simulation.
        Returns
        -------
        dict
            Computed statistics for the simulation run
        """
        layout = self.board.state.copy()
        # Fix: start from a fresh history instead of appending, so repeated
        # run() calls don't accumulate states from earlier runs and skew the
        # statistics.
        self.history = [layout]
        # Run simulation
        for i in range(iters):
            layout = rule(layout, **kwargs)
            self.history.append(layout)
        self.stats = self.compute_statistics(self.get_history())
        return self.stats
    def compute_statistics(self, history: Union[list, np.ndarray]) -> dict:
        """Compute various statistics for the board
        Parameters
        ----------
        history : list or numpy.ndarray
            The simulation history
        Returns
        -------
        dict
            Computed statistics
        """
        logger.info("Computing simulation statistics...")
        sim_stats = {
            "peak_cell_coverage": np.max(
                [stats.cell_coverage(h) for h in history]
            ),
            "avg_cell_coverage": np.mean(
                [stats.cell_coverage(h) for h in history]
            ),
            "avg_shannon_entropy": np.mean(
                [stats.shannon_entropy(h) for h in history]
            ),
            "peak_shannon_entropy": np.max(
                [stats.shannon_entropy(h) for h in history]
            ),
        }
        return sim_stats
    def get_history(self, exclude_init=False) -> np.ndarray:
        """Get the simulation history
        Parameters
        ----------
        exclude_init: bool
            If True, then excludes the initial state in the history
        Returns
        -------
        numpy.ndarray
            Simulation history of shape :code:`(iters+1, board.size[0],
            board.size[1])`
        """
        history = self.history[1:] if exclude_init else self.history
        return np.asarray(history)
    def animate(self, figsize=(5, 5), interval=100) -> animation.FuncAnimation:
        """Animate the resulting simulation
        Parameters
        ----------
        figsize : tuple
            Size of the output figure
        interval : int
            Interval for transitioning between frames
        Returns
        -------
        matplotlib.animation.FuncAnimation
            Animation generated from the run
        """
        if not self.history:
            msg = "The run() argument must be executed first"
            logger.error(msg)
            raise ValueError(msg)
        logger.info("Rendering animation...")
        fig = plt.figure(figsize=figsize)
        ax = fig.add_axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
        X_blank = np.zeros(self.board.size, dtype=bool)
        im = ax.imshow(X_blank, cmap=plt.cm.binary, interpolation="nearest")
        im.set_clim(-0.05, 1)
        def _animate(i, history):
            current_pos = history[i]
            im.set_data(current_pos)
            return (im,)
        def _init():
            im.set_data(X_blank)
            return (im,)
        history = self.get_history()
        anim = animation.FuncAnimation(
            fig,
            func=_animate,
            frames=range(history.shape[0]),
            init_func=_init,
            interval=interval,
            fargs=(history,),
            blit=True,
        )
        return anim
|
ljvmiranda921/seagull
|
seagull/simulator.py
|
simulator.py
|
py
| 6,209 |
python
|
en
|
code
| 167 |
github-code
|
6
|
3116557557
|
import torch
import torch.nn as nn
from torch.optim import Adam
import torch.nn.functional as F
from random import randint
import numpy as np
# import subprocess
# import multiprocessing
# import concurrent.futures
from time import time
from math import sqrt
CHANNEL = 256
BLOCKNUM = 40
BOARDSIZE = 8
BATCH = 50
EPOCHS = 20
DATASIZE = 7200
DATAUSE = 2000
ROUNDLIMIT = 500
PROCESS = 3
OUTPUT_INFO = 1
class resBlock(nn.Module):
    """Two-convolution residual block with an identity shortcut.

    Keeps the channel count and spatial size unchanged: 3x3 conv -> BN ->
    ReLU -> 3x3 conv -> BN, then adds the input and applies a final ReLU.
    """

    def __init__(self, x):
        super(resBlock, self).__init__()
        # Attribute names kept so existing state_dict checkpoints still load.
        self.resBlock = nn.Sequential(
            nn.Conv2d(x, x, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(x),
            nn.ReLU(inplace=True),
            nn.Conv2d(x, x, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(x)
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Identity shortcut: f(x) + x, then the closing non-linearity.
        residual = self.resBlock(x) + x
        return self.relu(residual)
class resCNN(nn.Module):
    """AlphaZero-style residual CNN with a policy head and a value head.

    Input is a (N, 3, BOARDSIZE, BOARDSIZE) tensor (own stones, opponent
    stones, side-to-move planes); output is (policy logits over the 64
    squares, scalar value in [-1, 1]).
    """
    def __init__(self):
        super(resCNN, self).__init__()
        # Stem: lift 3 input planes to CHANNEL feature maps.
        self.input = nn.Sequential(
            nn.Conv2d(3, CHANNEL, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(CHANNEL),
            nn.ReLU(inplace=True)
        )
        # Tower of BLOCKNUM residual blocks.
        self.resnet = nn.Sequential()
        for i in range(BLOCKNUM):
            self.resnet.add_module(str(i),resBlock(CHANNEL))
        # Policy head: 2-plane 1x1 conv, flatten, linear to 64 move logits.
        # Softmax is intentionally left out (CrossEntropyLoss expects logits).
        self.ph = nn.Sequential(
            nn.Conv2d(CHANNEL, 2, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(2),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(BOARDSIZE*BOARDSIZE*2, BOARDSIZE*BOARDSIZE),
            # nn.Softmax(dim=1)
        )
        # Value head: 1-plane 1x1 conv, MLP, tanh to squash into [-1, 1].
        self.vh = nn.Sequential(
            nn.Conv2d(CHANNEL, 1, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(1),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(BOARDSIZE*BOARDSIZE, CHANNEL),
            nn.ReLU(inplace=True),
            nn.Linear(CHANNEL, 1),
            nn.Tanh()
        )
    def forward(self, x):
        """Return (policy logits, value) for a batch of board tensors."""
        model = self.input(x)
        model = self.resnet(model)
        p = self.ph(model)
        v = self.vh(model)
        return p, v
# Global training state: model (resumed from checkpoint), optimizer, and
# pre-allocated replay buffers filled by gen_py() and consumed by train().
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cnn = resCNN()
cnn.load_state_dict(torch.load(r'./rescnn.pth'))
cnn.to(device)
optimizer = Adam(cnn.parameters(), weight_decay=1e-4)
stateData = torch.zeros(DATASIZE, 3, 8, 8, dtype=float)
policyData = torch.zeros(DATASIZE, 64, dtype=float)
valueData = torch.zeros(DATASIZE, 1, dtype=float)
policyLossFunc = nn.CrossEntropyLoss()
valueLossFunc = nn.MSELoss()
def calc(cood):
    # Flatten a (row, col) board coordinate into a 0..63 index.
    return cood[0] * BOARDSIZE + cood[1]
def lossFunction(policyOutput, valueOutput, policyTarget, valueTarget):
    # Combined AlphaZero loss: policy cross-entropy + value MSE.
    # NOTE(review): train() computes the same sum inline; this helper looks
    # unused — confirm before removing.
    policyLoss = policyLossFunc(policyOutput, policyTarget)
    valueLoss = valueLossFunc(valueOutput, valueTarget)
    return policyLoss + valueLoss
def train():
    """Sample DATAUSE examples without replacement from the global replay
    buffers and run EPOCHS passes of minibatch SGD, then checkpoint the model.
    """
    cnn.train()
    # Rejection-sample distinct indices; terminates because DATAUSE < DATASIZE
    # (would spin forever otherwise).
    use = torch.zeros(DATASIZE)
    inputData = torch.zeros(DATAUSE,3,8,8)
    policyTargetData = torch.zeros(DATAUSE,64)
    valueTargetData = torch.zeros(DATAUSE,1)
    i = 0
    while i < DATAUSE:
        x = randint(0, DATASIZE - 1)
        if use[x] == 1:
            continue
        inputData[i] = stateData[x]
        policyTargetData[i] = policyData[x]
        valueTargetData[i] = valueData[x]
        use[x] = 1
        i += 1
    # NOTE(review): this zero_grad is redundant — it is called again before
    # each backward pass inside the loop.
    optimizer.zero_grad()
    for i in range(EPOCHS):
        policyLossAvg = 0.0
        valueLossAvg = 0.0
        if OUTPUT_INFO:
            print(f'epoch {i+1}:')
        for j in range(0, DATAUSE, BATCH):
            input = inputData[j:j+BATCH]
            policyTarget = policyTargetData[j:j+BATCH]
            valueTarget = valueTargetData[j:j+BATCH]
            policyOutput, valueOutput = cnn(input.to(device))
            policyLoss = policyLossFunc(policyOutput, policyTarget.to(device))
            valueLoss = valueLossFunc(valueOutput, valueTarget.to(device))
            loss = policyLoss + valueLoss
            policyLossAvg += float(policyLoss)
            valueLossAvg += float(valueLoss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if OUTPUT_INFO:
            print(f'    policy loss: {policyLossAvg / (DATAUSE / BATCH)}')
            print(f'    value loss: {valueLossAvg / (DATAUSE / BATCH)}')
            print(f'    total loss: {(policyLossAvg + valueLossAvg) / (DATAUSE / BATCH)}')
    # Persist the updated weights for the next self-play iteration.
    torch.save(cnn.state_dict(), r'./rescnn.pth')
class GameState:
    """Othello/Reversi position on an 8x8 board (1 = black, -1 = white)."""

    def __init__(self):
        # Standard opening position: two discs of each color in the center.
        self.board = np.zeros((8, 8), dtype=np.int8)
        self.board[3, 3] = self.board[4, 4] = -1
        self.board[3, 4] = self.board[4, 3] = 1
        self.history = []

    def copy(self):
        """Return an independent copy of board and move history."""
        clone = GameState()
        clone.board = self.board.copy()
        clone.history = list(self.history)
        return clone

    def makeMove(self, move, player):
        """Place *player*'s disc at *move* and flip every bracketed run.

        Does not validate the move; callers are expected to check isValid.
        """
        self.history.append(move)
        self.board[move] = player
        row, col = move
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue
                # Walk over a run of opponent discs in direction (dx, dy).
                run = []
                x, y = row + dx, col + dy
                while 0 <= x < 8 and 0 <= y < 8 and self.board[x, y] == -player:
                    run.append((x, y))
                    x += dx
                    y += dy
                # Flip only if the run is capped by one of our own discs.
                if 0 <= x < 8 and 0 <= y < 8 and self.board[x, y] == player:
                    for cell in run:
                        self.board[cell] = player

    def isValid(self, move, player):
        """Return True if placing *player* at *move* flips at least one disc."""
        if self.board[move] != 0:
            return False
        row, col = move
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue
                x, y = row + dx, col + dy
                seen = 0
                while 0 <= x < 8 and 0 <= y < 8 and self.board[x, y] == -player:
                    x += dx
                    y += dy
                    seen += 1
                if seen > 0 and 0 <= x < 8 and 0 <= y < 8 and self.board[x, y] == player:
                    return True
        return False

    def getValidMoves(self, player):
        """Return all legal moves for *player* in row-major order."""
        return [(i, j)
                for i in range(8)
                for j in range(8)
                if self.isValid((i, j), player)]

    def isTerminal(self):
        """Game over when neither side has a legal move."""
        return not (self.getValidMoves(1) or self.getValidMoves(-1))

    def getWinner(self):
        """Return 1 (black), -1 (white) or 0 (draw) by disc count."""
        diff = int(np.sum(self.board))
        if diff > 0:
            return 1
        if diff < 0:
            return -1
        return 0

    def getScore(self, player):
        """Number of discs currently belonging to *player*."""
        return int(np.count_nonzero(self.board == player))

    def print(self):
        """Pretty-print the board ('#' = black, 'O' = white, '.' = empty)."""
        symbols = {1: '#', -1: 'O', 0: '.'}
        print(' ', end='')
        for col in range(8):
            print(col, end=' ')
        print('')
        for row in range(8):
            print(row, end=' ')
            for col in range(8):
                print(symbols[int(self.board[row, col])], end=' ')
            print('')
# Exploration constant in the PUCT formula.
PUCT_CONSTANT = 1
class MCTSNode:
    """One node of the neural-network-guided MCTS tree."""
    def __init__(self, state:GameState, player):
        self.state:GameState = state.copy()
        self.parent:MCTSNode = None
        self.children = []
        self.unexploredMoves = state.getValidMoves(player)
        self.player = player
        self.n = 0    # visit count
        self.v = 0.0  # accumulated value
        self.p = 0.0  # prior probability assigned by the parent's policy head
        self.policyPredict = torch.zeros(64)
        self.valuePredict = 0.0
        # NOTE(review): `type` here is the Python builtin class, so
        # `type == 2` is always False and the network branch below never runs
        # — policyPredict stays all-zero and valuePredict stays 0.0, which
        # degrades puct() to its Q term. This looks like a leftover from a
        # removed mode parameter; confirm intent before "fixing".
        if type == 2:
            input = torch.zeros(3,8,8)
            for i in range(8):
                for j in range(8):
                    if state.board[i,j] == 1:
                        input[0,i,j] = 1
            for i in range(8):
                for j in range(8):
                    if state.board[i,j] == -1:
                        input[1,i,j] = 1
            for i in range(8):
                for j in range(8):
                    input[2,i,j] = player
            input.unsqueeze_(0)
            output = cnn(input.to(device))
            self.policyPredict = F.softmax(output[0][0], dim=-1)
            self.valuePredict = float(output[1][0])
    def expand(self):
        """Pop one untried move, create the child node, and return it."""
        if len(self.unexploredMoves) <= 0:
            return None
        move = self.unexploredMoves.pop()
        newState = self.state.copy()
        newState.makeMove(move, self.player)
        child = None
        # If the opponent has no reply, the same player moves again (Othello pass rule).
        if len(newState.getValidMoves(-self.player)) > 0:
            child = MCTSNode(newState, -self.player)
        else:
            child = MCTSNode(newState, self.player)
        child.parent = self
        child.p = float(self.policyPredict[calc(move)])
        self.children.append(child)
        return child
    def puct(self, player):
        """PUCT score: exploitation Q (sign-adjusted for white) + exploration U."""
        Q = self.v / self.n
        U = PUCT_CONSTANT * self.p * sqrt(self.parent.n + 1) / (self.n + 1)
        if player == -1:
            Q = -Q
        return Q + U
    def select(self, player):
        """Return the child with the highest PUCT score from *player*'s view."""
        return max(self.children, key=lambda c: c.puct(player))
    def backpropagate(self, v):
        """Add the rollout/eval value up the path to the root."""
        self.n += 1
        self.v += v
        if self.parent:
            self.parent.backpropagate(v)
class CNNMCTS:
    """Driver for network-guided MCTS over GameState positions."""
    def __init__(self):
        return
    def CNNMCTSBestMove(self, state, player, timeIterations):
        """Run *timeIterations* MCTS simulations from *state* and return the
        most-visited root move.
        NOTE(review): select() is called with the root *player* at every tree
        depth, so PUCT is always scored from the root's perspective rather
        than the node's side-to-move; also, if the root has no children (no
        legal moves) the final indexing raises — callers guard this by
        checking getValidMoves first.
        """
        rootNode = MCTSNode(state, player)
        for i in range(timeIterations):
            node = rootNode
            # Selection: descend while the node is fully expanded.
            while len(node.unexploredMoves) == 0 and node.state.isTerminal() == False:
                if len(node.children) > 0:
                    node = node.select(player)
                else:
                    break
            # Expansion: add one untried child if possible.
            if len(node.unexploredMoves) > 0 and node.state.isTerminal() == False:
                node = node.expand()
            # Backpropagation: network value for internal nodes, exact
            # game result for terminal ones.
            if node.state.isTerminal() == False:
                node.backpropagate(node.valuePredict)
            else:
                node.backpropagate(node.state.getWinner())
        # Pick the child with the highest visit count.
        bestChild = rootNode.children[0]
        for child in rootNode.children:
            if child.n > bestChild.n:
                bestChild = child
        return bestChild.state.history[-1]
def gen_py():
    """Fill the global replay buffers with DATASIZE self-play samples.

    Plays full games with MCTS; for moves 5..54 of each game it records the
    board planes, the chosen move, and (after the game ends) the final winner
    as the value target for every sample of that game.
    """
    MCTS = CNNMCTS()
    cnt = 0
    cnn.eval()
    while cnt < DATASIZE:
        c_state = GameState()
        currentPlayer = 1
        cur = 0
        lst = cnt  # first buffer slot used by this game
        while c_state.isTerminal() == 0:
            # Pass if the side to move has no legal move.
            if len(c_state.getValidMoves(currentPlayer)) <= 0:
                currentPlayer = -currentPlayer
                continue
            bestMove = MCTS.CNNMCTSBestMove(c_state, currentPlayer, ROUNDLIMIT)
            cur += 1
            # Skip the opening (first 4) and very late moves to diversify data.
            if 5 <= cur and cur <= 54 and cnt < DATASIZE:
                for i in range(8):
                    for j in range(8):
                        if c_state.board[i,j] == 1:
                            stateData[cnt,0,i,j] = 1
                for i in range(8):
                    for j in range(8):
                        if c_state.board[i,j] == -1:
                            stateData[cnt,1,i,j] = 1
                for i in range(8):
                    for j in range(8):
                        stateData[cnt,2,i,j] = currentPlayer
                # NOTE(review): this assigns a scalar move index to a 64-wide
                # row, broadcasting the same value into every entry — for the
                # CrossEntropyLoss soft-target used in train() a one-hot row
                # seems intended. Confirm before changing.
                policyData[cnt] = calc(bestMove)
                cnt += 1
            c_state.makeMove(bestMove, currentPlayer)
            currentPlayer = -currentPlayer
        # Label every sample from this game with the final outcome.
        valueData[lst:cnt] = c_state.getWinner()
        if OUTPUT_INFO:
            print(f'{cnt} / {DATASIZE}\r', end='')
    if OUTPUT_INFO:
        print('')
# Endless self-play / training loop: generate data, train, repeat.
if __name__ == '__main__':
    np.set_printoptions(suppress=True, precision=7)
    # multiprocessing.freeze_support()
    times = 0
    while 1 :
        if OUTPUT_INFO:
            print(f'iteration {times}:')
            print('self-matching:')
        gen_py()
        # gen_cpp()
        # gen_mainProcess() # in train.py
        if OUTPUT_INFO:
            print('train start:')
        train()
        # NOTE(review): `times` is never incremented, so the iteration banner
        # always prints 0 — confirm whether `times += 1` was intended here.
        # archivePath = 'D:/Desktop/yanxue/rescnn_archive/rescnn-iteration' + str(times) +'.pth'
        # torch.save(cnn.state_dict(), archivePath)
|
wxwoo/yanxue
|
train_py.py
|
train_py.py
|
py
| 12,641 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22397762010
|
import cv2
import pandas as pd
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
class COVIDChestXRayDataset(Dataset):
    """Binary-classification dataset over the covid-chestxray metadata CSV.

    Keeps only X-ray rows with PA/AP/AP Supine views; label is 1 for
    COVID-19 findings and 0 for everything else.
    """
    def __init__(self, path, size=128, augment=None):
        super(COVIDChestXRayDataset, self).__init__()
        print('{} initialized with size={}, augment={}'.format(self.__class__.__name__, size, augment))
        print('Dataset is located in {}'.format(path))
        self.size = size
        self.augment = augment
        image_dir = path / 'images'
        metadata_path = path / 'metadata.csv'
        df_metadata = pd.read_csv(metadata_path, header=0)
        # Drop CT scans
        df_metadata = df_metadata[df_metadata['modality'] == 'X-ray']
        # Keep only PA/AP/AP Supine, drop Axial, L (lateral)
        allowed_views = ['PA', 'AP', 'AP Supine']
        df_metadata = df_metadata[df_metadata['view'].isin(allowed_views)]
        # COVID-19 = 1, SARS/ARDS/Pneumocystis/Streptococcus/No finding = 0
        self.labels = (df_metadata.finding == 'COVID-19').values.reshape(-1, 1)
        images = df_metadata.filename
        images = images.apply(lambda x: image_dir / x).values.reshape(-1, 1)
        # NOTE(review): concatenating Path objects with booleans gives an
        # object-dtype frame; __getitem__ returns the raw object-dtype label.
        self.df = pd.DataFrame(np.concatenate((images, self.labels), axis=1), columns=['image', 'label'])
        del images
        print("Dataset: {}".format(self.df))
    @staticmethod
    def _load_image(path, size):
        """Load an image, resize to (size, size), return as (3, H, W) array."""
        img = Image.open(path)
        img = cv2.resize(np.array(img), (size, size), interpolation=cv2.INTER_AREA)
        if len(img.shape) == 2:
            # Grayscale: replicate the single channel three times.
            img = np.expand_dims(img, axis=2)
            img = np.dstack([img, img, img])
        else:
            # NOTE(review): PIL decodes to RGB, so BGR2RGB here actually swaps
            # R and B; harmless only if training/inference both use it —
            # confirm before changing.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # size, size, chan -> chan, size, size
        img = np.transpose(img, axes=[2, 0, 1])
        return img
    def __getitem__(self, index):
        """Return (image array, label) for the row at *index*."""
        row = self.df.iloc[index]
        img = self._load_image(row['image'], self.size)
        label = row['label']
        if self.augment is not None:
            img = self.augment(img)
        return img, label
    def __len__(self):
        return self.df.shape[0]
|
defeatcovid19/defeatcovid19-net-pytorch
|
datasets/covid_chestxray_dataset.py
|
covid_chestxray_dataset.py
|
py
| 2,205 |
python
|
en
|
code
| 9 |
github-code
|
6
|
71689560509
|
# Import packages.
import glob
import numpy as np
import os
# import cvxpy as cp
##################################################################################
# Data import
##################################################################################
# Experiment selection: these strings are assembled into the .dat file name
# produced by the ARGoS simulation runs.
folder = 'runD'
n_robots = '30'
min_votes = '3'
seed = '1'
storage = '10'
routing = '10'
hashing_bucket = '5'
name = '/vcsbppfile_' + min_votes + '_' + n_robots + '_' + seed + '_' + storage + '_' + routing + '_' + hashing_bucket + '.dat'
path = os.path.abspath(os.path.join(os.getcwd(), '..', 'argos-application', 'data', folder))
file_names = glob.glob(path + name)
file_names = sorted(file_names)
print(file_names)
def get_data(file_name):
    """Parse one vcsbpp data file.

    The file is a sequence of records: a header line "timestep num_robots"
    followed by one line per robot of the form
    "r<id> node_id num_tuples neighbors".

    Returns a tuple (rows, totals) where rows is a list of
    (timestep, robot_id, node_id, num_tuples, neighbors) tuples and totals
    holds the summed tuple count per timestep.
    """
    rows = []
    totals = []
    with open(file_name, 'r') as handle:
        for header in handle:
            timestep, num_robots = (int(tok) for tok in header.split(' '))
            step_total = 0
            for _ in range(num_robots):
                fields = handle.readline().strip('\n').split(' ')
                robot_id = int(fields[0][1:])  # strip the leading 'r'
                node_id = int(fields[1])
                num_tuples = int(fields[2])
                neighbors = int(fields[3])
                step_total += num_tuples
                rows.append((timestep, robot_id, node_id, num_tuples, neighbors))
            totals.append(step_total)
    return rows, totals
# Load every matched file, keyed by its min_votes setting.
# NOTE(review): files sharing the same min_votes overwrite each other, and
# num_robots keeps the value from the last file processed (it is reused by
# later analysis code) — confirm that is intended.
results_dict = {}
load_dict = {}
for name in file_names:
    n = name.split('/')
    n = n[-1].split('.')
    n = n[0].split('_')
    min_votes = n[1]
    num_robots = int(n[2])
    results, load = get_data(name)
    results_dict[min_votes] = results
    load_dict[min_votes] = load
##################################################################################
# Optimization
##################################################################################
# https://stackoverflow.com/questions/10035752/elegant-python-code-for-integer-partitioning
# See vscbpp_testing for more detail
def accel_asc(n):
    """Yield the integer partitions of n, each as a list of parts in
    ascending order (the 'accel_asc' algorithm from the link above).

    Code left byte-identical: the in-place updates of the shared buffer `a`
    are order-sensitive, and the yielded lists are slices of that buffer.
    """
    a = [0 for i in range(n + 1)]
    k = 1
    y = n - 1
    while k != 0:
        x = a[k - 1] + 1
        k -= 1
        # Split x off repeatedly while two copies of x still fit in y.
        while 2 * x <= y:
            a[k] = x
            y -= x
            k += 1
        l = k + 1
        while x <= y:
            a[k] = x
            a[l] = y
            yield a[:k + 2]
            x += 1
            y -= 1
        a[k] = x + y
        y = x + y - 1
        yield a[:k + 1]
def solve_vscbpp_accel(total_tuples, num_robots, neighbors, memory_capacity):
    """Exhaustively search the integer partitions of total_tuples for the
    tuple-to-robot assignment minimizing the storage cost.

    The cost of a partition is sum over used robots of
    1 / (degree * free_memory), with the largest parts matched to the
    highest-degree robots.

    Args:
        total_tuples: int, number of tuples to distribute.
        num_robots: int, number of available robots (bins).
        neighbors: sequence of per-robot neighbor counts (degrees).
        memory_capacity: int, per-robot memory capacity in tuples.

    Returns:
        (min_cost, opt_partition): minimal cost found, and a numpy array of
        length num_robots giving each robot's assigned tuple count in the
        caller's original robot order. If no feasible partition improves on
        the initial bound, the assignment is all zeros.
    """
    min_cost = num_robots  # initial upper bound on the cost
    optimal_partition = []
    # Sort degrees ascending; remember the permutation so the final
    # assignment can be restored to the caller's robot order.
    a_neighbors = sorted(neighbors)
    idx_neighbors = np.argsort(neighbors)
    assignment_partitions = accel_asc(total_tuples)
    for partition in assignment_partitions:
        num_parts = len(partition)
        # Ignore partitions with more parts than robots
        if(num_parts > num_robots):
            continue
        # Sort parts in ascending order
        a_partition = sorted(partition)
        # Impose volume constraint: no part may exceed a robot's memory
        if(a_partition[-1] > memory_capacity):
            continue
        # Pair the largest parts with the highest-degree robots.
        # NOTE: a part equal to memory_capacity makes free memory 0, so
        # np.divide(1, 0) yields inf (with a runtime warning), which
        # correctly disqualifies that partition from improving min_cost.
        prod = np.multiply(a_neighbors[-num_parts:], memory_capacity - np.array(a_partition))
        cur_cost = sum(np.divide(1, prod))
        if (cur_cost < min_cost):
            min_cost = cur_cost
            # Pad with zeros so the partition covers all robots
            optimal_partition = list([0] * (num_robots - len(partition)) + list(a_partition))
    # Bug fix: previously, if no partition beat the initial bound,
    # optimal_partition stayed empty and the fancy indexing below raised
    # IndexError. Return an all-zero assignment in that case instead.
    if not optimal_partition:
        return min_cost, np.zeros(num_robots, dtype=int)
    # Unsort back to the caller's original neighbor order
    idx_unsort = idx_neighbors.argsort()
    opt_partition = np.array(optimal_partition)[idx_unsort]
    return min_cost, opt_partition
##################################################################################
# Bin Packing
##################################################################################
########### Saving bin packing cost over time ###########################
#### In simulation ####
# Total per-robot memory capacity (storage + routing tuples).
M = int(storage) + int(routing)
for key in results_dict.keys():
    results = results_dict[key]
    tuples = load_dict[key]
    x = []  # timestep divided by 10 (presumably ticks -> seconds; TODO confirm)
    y = []  # bin-packing cost of the observed assignment at each timestep
    # assignments = []
    cost = 0
    # assignment = np.zeros(num_robots)
    for i, result in enumerate(results):
        # result = (timestep, rid, node_id, num_tuples, neighbors)
        # assignment[result[1] - 1] = result[3]
        free_memory = float(M - result[3])
        # Each robot that stores tuples contributes 1/(degree * free_memory);
        # the max(..., 1) guards against a zero denominator.
        if (result[3] != 0):
            cost += 1 / max(result[4] * free_memory, 1)
        # One full pass over the swarm completes a timestep.
        # NOTE(review): indexing tuples[result[0]-1] assumes timesteps are
        # consecutive starting at 1 -- confirm against the log format.
        if((i+1)%num_robots == 0):
            print(result[0], tuples[result[0]-1])
            x.append(result[0] / 10)
            y.append(cost)
            cost = 0
            # assignments.append(assignment)
            # assignment = np.zeros(num_robots)
    # Write to file (made to match optimal, want to have a partial file if takes too long)
    with open("heuristic_" + folder + '_' + key + '_' + n_robots + ".txt", "w") as f:
        for i,j in zip(x,y):
            f.write(str(i) +"\n")
            f.write(str(j) +"\n")
#### Optimal solution ####
# M = int(storage) + int(routing)
# for key in results_dict.keys():
# results = results_dict[key]
# tuples = load_dict[key]
# x_opt = []
# y_opt = []
# neighbors = np.zeros(num_robots)
# for i, result in enumerate(results):
# neighbors[result[1] - 1] = result[4]
# if((i+1)%num_robots == 0):
# print("t", result[0])
# # Skip time steps
# if (result[0]%10 != 0):
# continue
# opt_cost, pa = solve_vscbpp_accel(tuples[result[0]-1], num_robots, neighbors, M)
# x_opt = result[0] / 10
# y_opt = opt_cost
# neighbors = np.zeros(num_robots)
# with open("optimal_" + folder + '_' + key + '_' + n_robots + ".txt", "a") as f:
# f.write(str(x_opt) +"\n")
# f.write(str(y_opt) +"\n")
#### Worst cost ####
# Worst-case cost: greedily place tuples so the cost is maximized
# (use zero-degree bins first, then fill the lowest-degree bins).
M = int(storage) + int(routing)
for key in results_dict.keys():
    results = results_dict[key]
    tuples = load_dict[key]
    x_worst = []  # NOTE: initialized as lists but overwritten with scalars below
    y_worst = []
    worst_cost = 0
    neighbors = np.zeros(num_robots)
    for i, result in enumerate(results):
        # Record each robot's degree; a full pass completes a timestep.
        neighbors[result[1] - 1] = result[4]
        if((i+1)%num_robots == 0):
            items = tuples[result[0]-1]
            # Put one item in all bins with 0 neighbors (assuming low enough load factor)
            zero_neighbors = len(neighbors) - np.count_nonzero(neighbors)
            if(items > zero_neighbors):
                worst_cost += zero_neighbors
                items -= zero_neighbors
            if items > 0 and items < M:
                # Put all in one bin
                worst_cost += 1
            else:
                # Sort number of neighbors in ascending order
                a_neighbors = sorted(neighbors)
                # Fill out memory of bins with lowest degree
                num_bins_to_fill = items // M
                worst_cost += num_bins_to_fill
                items -= num_bins_to_fill * M # same as modulo
                # Put remaining items in next lowest
                if(items > 0):
                    # NOTE(review): this index assumes the zero-degree bins and
                    # the fully filled bins precede the 'next lowest' bin in
                    # the sorted order -- confirm the overlap edge cases.
                    n_low = a_neighbors[zero_neighbors + num_bins_to_fill - 1]
                    free_memory = M - items
                    worst_cost += 1 / (max(n_low * free_memory, 1))
            x_worst = result[0] / 10
            y_worst = worst_cost
            neighbors = np.zeros(num_robots)
            worst_cost = 0
            # Append mode so a partial file survives long runs.
            with open("worst_" + folder + '_' + key + '_' + n_robots + ".txt", "a") as f:
                f.write(str(x_worst) +"\n")
                f.write(str(y_worst) +"\n")
# # Generate data.
# bins = 10
# items = 40
# np.random.seed(1)
# neighbors = np.random.randint(1, bins, bins)
# M = 20
# # Define and solve the CVXPY problem.
# assignment = cp.Variable((items, bins), boolean=True)
# selection = cp.Variable(bins, boolean=True)
# # cost = 1/cp.multiply(neighbors, M - cp.sum(assignment, axis=0))
# cost = 1/neighbors
# # objective = cp.sum(cp.multiply(cost, selection) + cp.multiply(cost/M, cp.max(cp.sum(assignment, axis=0))) )
# objective = cp.sum(cp.multiply(cost, selection) + cp.multiply(1/M, cp.max(cp.sum(assignment, axis=0))) )
# constraints = [
# cp.sum(assignment, axis=1) == 1,
# cp.sum(assignment, axis=0) <= M * selection
# ]
# prob = cp.Problem(cp.Minimize(objective), constraints)
# prob.solve()
# # Print result.
# print("Neigbors", neighbors)
# print("\nThe optimal value is", prob.value)
# # print("The optimal assignment is")
# # print(assignment.value)
# # print("The optimal selection is")
# # print(selection.value)
# # for tau in range(len(assignment.value))
# print("The optimal assignment per bin is")
# print(np.sum(assignment.value, axis=0))
|
NESTLab/DistributedSemanticMaps
|
PythonScripts/vscbpp_cluster.py
|
vscbpp_cluster.py
|
py
| 8,797 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26470760841
|
""" Problem 3: Largest Prime Factor
https://projecteuler.net/problem=3
Goal: Find the largest prime factor of N.
Constraints: 10 <= N <= 1e12
Fundamental Theorem of Arithmetic: There will only ever be a unique set of prime
factors for any number.
e.g.: N = 10
prime factors = {2, 5}
largest = 5
"""
from math import isqrt
from util.maths.reusable import prime_factors
def largest_prime_factor(n: int) -> int:
    """
    Uses prime decomposition via the Sieve of Eratosthenes algorithm to return
    the largest prime factor.
    SPEED (WORSE for N with small factors)
        53.54ms for N = 1e12
    SPEED (WORST for N with large factors)
        39.56ms for N = 600_851_475_143
    """
    # The keys of the decomposition are the distinct prime factors.
    return max(prime_factors(n).keys())
def largest_prime_factor_simple(n: int) -> int:
    """
    Uses prime decomposition via trial division without any optimisation.
    Repeatedly strips the smallest remaining divisor out of n; whatever
    survives once the divisor exceeds sqrt(n) is the largest prime factor.
    SPEED (BEST for N with small factors)
        3743ns for N = 1e12
    SPEED (BEST for N with large factors)
        2.7e+05ns for N = 600_851_475_143
    """
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0 and n != divisor:
            n //= divisor
        else:
            divisor += 1
    return n
def largest_prime_factor_recursive(n: int, f: int = 2) -> int:
    """
    Original solution used a floored square root to get an integer value. This
    was replaced with math.isqrt(), introduced in Py 3.8.
    Divides n by its smallest candidate factor and recurses, carrying the
    largest factor seen so far in f.
    SPEED (WORSE for N with small factors)
        52.41ms for N = 1e12
    SPEED (BETTER for N with large factors)
        12.85ms for N = 600_851_475_143
    """
    candidates = [2] + list(range(3, isqrt(n) + 1, 2))
    for candidate in candidates:
        if n % candidate == 0:
            return largest_prime_factor_recursive(n // candidate, candidate)
    return max(f, n) if n > 2 else f
|
bog-walk/project-euler-python
|
solution/batch0/problem3.py
|
problem3.py
|
py
| 1,835 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10918154737
|
import os
import ast
import subprocess
import uuid
import json
import hashlib
import socket
import psutil
from ipykernel.ipkernel import IPythonKernel
def make_except_safe(code):
    """Wrap a cell's source in a blanket try/except so that replaying it can
    never raise. Returns '' when the wrapped result is not valid Python."""
    wrapped = 'try:\n ' + code.replace('\n', '\n ') + '\nexcept: pass\n'
    try:
        ast.parse(wrapped)
    except:
        return ''
    return wrapped
# Sciunit state lives under the user's home directory.
SCIUNIT_HOME = os.path.expanduser('~/sciunit/')
# Holds the active project's path (presumably written by the sciunit CLI on
# activation -- this kernel only reads it).
SCIUNIT_PROJECT_FILE = os.path.join(SCIUNIT_HOME, '.activated')
# Unix socket of the listener that services the Dump/Restore requests sent below.
SCIUNIT_SOCKET_FILE = os.path.join(SCIUNIT_HOME, 'listener.socket')
class SciunitKernel(IPythonKernel):
    """IPython kernel wrapper that records each executed cell with
    `sciunit exec` (or replays it with `sciunit repeat`), and asks the
    sciunit listener to checkpoint this kernel process after every cell."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Advertise that this is the sciunit-wrapped kernel.
        # (Bug fix: this string was previously assigned to a local variable
        # and discarded.)
        self.implementation = super().implementation + ' sciunit'
        if (os.path.exists(SCIUNIT_PROJECT_FILE)):
            # A sciunit project is already activated: reuse it.
            self.project = open(SCIUNIT_PROJECT_FILE).read().strip()
            self.project_name = os.path.basename(os.path.normpath(self.project))
            if (os.path.exists(os.path.join(self.project, 'kernel'))):
                # A previous session already recorded cells: replay them.
                self.recording = False
            else:
                self.recording = True
                with open(os.path.join(self.project, 'kernel'), 'w') as f:
                    f.write(json.dumps([]))
        else:
            # No active project: create a fresh one with a unique name.
            self.project_name = 'Project_' + str(uuid.uuid4())
            self.project = os.path.join(SCIUNIT_HOME, self.project_name)
            subprocess.run(['sciunit', 'create', self.project_name])
            self.recording = True
            with open(os.path.join(self.project, 'kernel'), 'w') as f:
                f.write(json.dumps([]))
        self.eid = 1  # sciunit execution id of the next cell
        self.file = os.path.join(self.project, 'run.py')  # accumulated cell code
        self.valid = True  # becomes False once replay diverges from the recording
        # Close every file descriptor this process holds, presumably so the
        # CRIU dump below can succeed -- TODO confirm nothing needed later
        # is closed here.
        files = psutil.Process().open_files()
        for file in files:
            os.close(file.fd)
        # Ask the sciunit listener to take an initial CRIU dump of this kernel.
        criu_path = os.path.join(self.project, 'criu0')
        data = ['Dump', os.getpid(), os.getppid(), criu_path, 0]
        client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        client.connect(SCIUNIT_SOCKET_FILE)
        client.sendall(json.dumps(data).encode())
        client.close()

    def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
        """Execute one cell: record it via `sciunit exec` or replay it via
        `sciunit repeat`, then request a CRIU Dump/Restore from the listener."""
        criu_path = os.path.join(self.project, f'criu{self.eid}')
        # An existing checkpoint for this eid means the cell was recorded in
        # a previous session, so switch to replay mode.
        if (os.path.exists(criu_path)): self.recording = False
        with open(os.path.join(self.project, 'kernel')) as f:
            hashes = json.loads(f.read())
        # Replaying past the end of the recording: stop talking to sciunit.
        if not self.recording and (len(hashes) == self.eid - 1): self.valid = False
        data = []
        if self.valid:
            # Bug fix: the original wrapped this section in
            # `with open(self.file[1], 'a')`, which created a stray file named
            # after the second character of the script path and was never used.
            safe_code = make_except_safe(code)
            if safe_code:
                if self.recording:
                    print('Recording e{}'.format(self.eid))
                    # Append the cell to the cumulative script and re-run the
                    # whole script under sciunit so the session is captured.
                    with open(self.file, 'a') as f:
                        f.write(safe_code)
                    subprocess.Popen(['sciunit', 'exec', 'python3', self.file], stdout=subprocess.PIPE).communicate()
                    hashes.append(hashlib.sha256(safe_code.encode()).hexdigest())
                    with open(os.path.join(self.project, 'kernel'), 'w') as f:
                        f.write(json.dumps(hashes))
                    data = ['Dump', os.getpid(), os.getppid(), criu_path, self.eid]
                else:
                    # Replay: the cell must hash-match what was recorded.
                    if (hashlib.sha256(safe_code.encode()).hexdigest() != hashes[self.eid - 1]):
                        print('Invalid, stopped repeating')
                        self.valid = False
                    else:
                        print('Valid, repeating e{}'.format(self.eid))
                        subprocess.Popen(['sciunit', 'repeat', 'e{}'.format(self.eid)], stdout=subprocess.PIPE).communicate()
                        data = ['Restore', os.getpid(), os.getppid(), criu_path, self.eid]
        self.eid += 1
        output = super().do_execute(code, silent, False, user_expressions, allow_stdin)
        if data:
            client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            client.connect(SCIUNIT_SOCKET_FILE)
            client.sendall(json.dumps(data).encode())
            client.close()
            # TODO: Wait without Socket
        return output
if __name__ == '__main__':
    from ipykernel.kernelapp import IPKernelApp
    # Launch this module as a standalone Jupyter kernel.
    IPKernelApp.launch_instance(kernel_class=SciunitKernel)
|
depaul-dice/sciunit-NBv1
|
__main__.py
|
__main__.py
|
py
| 4,282 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40565271392
|
# Exercise (translated): create a function called promedio that takes a list of
# numbers as a parameter and returns the average of those numbers.
# Read a comma-separated list from the user, e.g. "1,2,3" -> ['1', '2', '3'].
usuario = input(
    'Ingresa un una lista de numeros separados por coma "," : ').split(",")
def promedio(args):
    """Return the average of a sequence of numbers.

    Each element of *args* may be an int or a numeric string; every value is
    coerced with int() before averaging.

    Raises ZeroDivisionError if *args* is empty (unchanged from the original).
    """
    acumulador = 0
    for i in args:
        acumulador += int(i)
    resultado = acumulador / len(args)
    print(resultado)  # keep the console output of the original version
    # Bug fix: the exercise asks the function to *return* the average, but the
    # original only printed it.
    return resultado
# Compute and display the average of the user's numbers.
promedio(usuario)
|
maximiliano1997/informatorio-2023
|
Week-4/ejercicios/ejercicio9.py
|
ejercicio9.py
|
py
| 413 |
python
|
es
|
code
| 0 |
github-code
|
6
|
42663501049
|
import os
import torch
import datetime
import numpy as np
import pandas as pd
from src.attn_analysis import gradcam
from src.attn_analysis import iou_analysis
from src.attn_analysis import blue_heatmap
from src.attn_analysis import extract_disease_reps
from src.attn_analysis import make_2d_plot_and_3d_gif
import warnings
# Blanket-suppress all warnings for cleaner logs during long analysis runs.
# NOTE(review): this also hides genuine numpy/pandas/torch warnings -- consider
# narrowing the filter.
warnings.filterwarnings('ignore')
class AttentionAnalysis(object):
    """Runs attention analyses for a trained model on a chosen data split:
    approximate-IOU statistics, per-slice disease-score heatmaps, and/or
    attention visualizations. All configuration is documented on __init__;
    the constructor immediately executes the full pipeline via self.run()."""
    def __init__(self, results_dir_force,
                 base_results_dir, task,
                 attention_type, attention_type_args,
                 setname, valid_results_dir,
                 custom_net, custom_net_args, params_path,
                 stop_epoch, which_scans, dataset_class, dataset_args):
        """
        Variables:
        <results_dir_force>: path to a results directory. If this is a valid
            path, then all results will be stored in here. If this is NOT a
            valid path, a new directory for the new results will be created
            based on <base_results_dir>.
        <base_results_dir>: path to the base results directory. A new directory
            will be created within this directory to store the results of
            this experiment.
        <task>: a list of strings. The strings may include 'iou_analysis',
            'blue_heatmaps', and/or 'attn_plots'.
            If <task> contains 'iou_analysis' then calculate approximate IOU
                statistics for the final epoch of a model.
                Specifically, the 'IOU' is calculated as the ratio of raw scores
                within the allowed area to raw scores outside of the allowed
                area.
                Produces iou_wide_df, a dataframe with the following 5 columns:
                'Epoch': int, the epoch in which the IOU was calculated.
                'IOU': float, the 'IOU' value for this label's attention map vs. the
                    segmentation ground truth (which in this case is the approximate
                    attention ground truth.)
                'Label': string for the label for which IOU was calculated e.g. 'airplane'
                'VolumeAccession': volume accession number
                'LabelsPerImage': total number of labels present in this image
                Also produces dfs that summarize the IOU across different ways
                of grouping the data.
            If <task> contains 'blue_heatmaps' then make a blue heatmap showing
                the disease scores for each slice.
            If <task> contains 'attn_plots' then make visualizations of the
                attention superimposed on the CT scan (as a 3D gif, and as a 2D plot
                for the slice with the highest score for that disease). Also if
                doing Grad-CAM, make a 2d debugging plot.
        <attention_type>: str; either
            'gradcam-vanilla' for vanilla Grad-CAM, or
            'hirescam' for HiResCAM, in which feature maps and gradients are
                element-wise multiplied and then we take the avg over the
                feature dimension, or
            'hirescam-check' for alternative implementation of HiResCAM
                attention calculation, which can be used in a model that
                has convolutional layers followed by a single FC layer.
                In this implementation, the HiResCAM attention is calculated
                during the forward pass of the model by element-wise multiplying
                the final FC layer weights (the gradients) against the final
                representation. This option is called 'hirescam-check'
                because for models that meet the architecture requirements this
                implementation is a 'check' on the 'hirescam' option which
                actually accesses the gradients.
                'hirescam-check' and 'hirescam' on the output of the last conv
                layer produce identical results on AxialNet as expected, since
                AxialNet is a CNN with one FC layer at the end.
        <attention_type_args>: dict; additional arguments needed to calculate
            the specified kind of attention. If the attention_type is one of the
            GradCAMs then in this dict we need to specify
            'model_name' and 'target_layer_name' (see gradcam.py for
            more documentation)
        <setname>: str; which split to use e.g. 'train' or 'val' or 'test'; will
            be passed to the <dataset_class>
        <valid_results_dir>: path to a directory that contains the validation
            set IOU analysis results. Only needed if setname=='test' because we
            need to use validation set per-label thresholds to calculate
            results.
        <custom_net>: a PyTorch model
        <custom_net_args>: dict; arguments to pass to the PyTorch model
        <params_path>: str; path to the model parameters that will be loaded in
        <stop_epoch>: int; epoch at which the model saved at <params_path> was
            saved
        <which_scans>: a pandas DataFrame specifying what scans and/or
            abnormalities to use.
            It can be an empty pandas DataFrame, in which case all available
            scans in the set will be used and named with whatever volume
            accession they were saved with (real or fake).
            Or, it can be a filled in pandas DataFrame, with columns
            ['VolumeAcc','VolumeAcc_ForOutput','Abnormality'] where
            VolumeAcc is the volume accession the scan was saved with,
            VolumeAcc_ForOutput is the volume accession that should be used in
            the file name of any output files of this module (e.g. a DEID acc),
            and Abnormality is either 'all' to save all abnormalities for that
            scan, or it's comma-separated names of specific abnormalities to
            save for that scan.
        <dataset_class>: a PyTorch dataset class
        <dataset_args>: dict; arguments to pass to the <dataset_class>"""
        self.base_results_dir = base_results_dir
        self.task = task
        for specific_task in self.task:
            assert ((specific_task == 'iou_analysis')
                    or (specific_task == 'blue_heatmaps')
                    or (specific_task == 'attn_plots'))
        #NOTE(review): the docstring lists three possible task strings, but at
        #most two may be combined in one run -- confirm this is intended.
        assert len(self.task) <= 2
        if 'blue_heatmaps' in self.task:
            #only allow calculation of the blue_heatmaps if we are using
            #attention_type hirescam-check. Why? Because for both the blue
            #heatmaps and the hirescam-check visualizations, we need to run
            #the model to get out. And in gradcam we need to run the model again
            #later so we get a memory error if we try to do this after getting
            #out.
            assert attention_type == 'hirescam-check'
        self.attention_type = attention_type
        assert self.attention_type in ['gradcam-vanilla','hirescam','hirescam-check']
        self.attention_type_args = attention_type_args
        if self.attention_type in ['gradcam-vanilla','hirescam']:
            assert 'model_name' in self.attention_type_args.keys()
            assert 'target_layer_name' in self.attention_type_args.keys()
        self.setname = setname
        self.valid_results_dir = valid_results_dir
        self.custom_net = custom_net
        self.custom_net_args = custom_net_args #dict of args
        self.params_path = params_path
        self.stop_epoch = stop_epoch
        self.which_scans = which_scans
        self.CTDatasetClass = dataset_class
        self.dataset_args = dataset_args #dict of args
        self.device = torch.device('cuda:0')
        self.verbose = self.dataset_args['verbose'] #True or False
        #Run
        self.set_up_results_dirs(results_dir_force)
        self.run()

    def set_up_results_dirs(self, results_dir_force):
        """Create (or reuse) the results directory and the per-analysis
        subdirectories implied by self.task and self.attention_type."""
        if os.path.isdir(results_dir_force):
            results_dir = results_dir_force
        else:
            #If you're not forcing a particular results_dir, then make a new
            #results dir:
            #Example params_path = '/home/rlb61/data/img-hiermodel2/results/2020-09/2020-09-27_AxialNet_Mask_CORRECT_dilateFalse_nearest/params/AxialNet_Mask_CORRECT_dilateFalse_nearest_epoch23'
            old_results_dir = os.path.split(os.path.split(os.path.split(self.params_path)[0])[0])[1] #e.g. '2020-09-27_AxialNet_Mask_CORRECT_dilateFalse_nearest'
            date = datetime.datetime.today().strftime('%Y-%m-%d')
            results_dir = os.path.join(self.base_results_dir,date+'_'+self.setname.capitalize()+'AttnAnalysis_of_'+old_results_dir)
        if not os.path.isdir(results_dir):
            os.mkdir(results_dir)
        #Subdirs for particular analyses:
        if 'iou_analysis' in self.task:
            self.iou_analysis_dir = os.path.join(results_dir,'iou_analysis_'+self.attention_type)
            if not os.path.exists(self.iou_analysis_dir): os.mkdir(self.iou_analysis_dir)
        if 'blue_heatmaps' in self.task:
            #Note that the blue heatmaps depend only on the model, and not on the
            #attention type
            self.blue_heatmaps_dir = os.path.join(results_dir,'blue_heatmaps')
            if not os.path.exists(self.blue_heatmaps_dir): os.mkdir(self.blue_heatmaps_dir)
        if 'attn_plots' in self.task:
            self.attn_2dplot_dir = os.path.join(results_dir,'attn_2dplot_'+self.attention_type)
            self.attn_3dgif_dir = os.path.join(results_dir,'attn_3dgif_dir_'+self.attention_type)
            for directory in [self.attn_2dplot_dir,self.attn_3dgif_dir]:
                if not os.path.exists(directory): os.mkdir(directory)
            #One subdirectory per prediction-outcome category (see
            #loop_over_dataset_and_labels for the g/p naming).
            for key in ['g1p1', 'g1p0', 'g0p1', 'g0p0']:
                if not os.path.exists(os.path.join(self.attn_2dplot_dir,key)):
                    os.mkdir(os.path.join(self.attn_2dplot_dir,key))
                if not os.path.exists(os.path.join(self.attn_3dgif_dir,key)):
                    os.mkdir(os.path.join(self.attn_3dgif_dir,key))
            if self.attention_type in ['gradcam-vanilla','hirescam']:
                self.gradcam_debug_dir = os.path.join(results_dir,self.attention_type+'_debug_dir')
                if not os.path.exists(self.gradcam_debug_dir): os.mkdir(self.gradcam_debug_dir)
        else: #even if attn_plots is not in task, we need to have a placeholder for
            #this directory to avoid an error later:
            self.gradcam_debug_dir = None

    def run(self):
        """Execute the full pipeline: load model/dataset/scan indices, set up
        the requested analyses, loop over the data, and finalize the IOU dfs."""
        self.load_model()
        self.load_dataset()
        self.load_chosen_indices()
        if 'blue_heatmaps' in self.task:
            self.blue_heatmap_baseline = blue_heatmap.get_baseline(self.chosen_dataset, self.model, self.blue_heatmaps_dir)
        if 'iou_analysis' in self.task:
            thresh_perf_df_filename = 'Determine_Best_Threshold_For_Each_Label_Epoch'+str(self.stop_epoch)+'.csv'
            valid_thresh_perf_df_path = os.path.join(os.path.join(self.valid_results_dir,'iou_analysis_'+self.attention_type), thresh_perf_df_filename)
            self.iou_analysis_object = iou_analysis.DoIOUAnalysis(self.setname, self.stop_epoch,
                self.label_meanings, self.iou_analysis_dir, valid_thresh_perf_df_path)
        self.loop_over_dataset_and_labels()
        if 'iou_analysis' in self.task:
            self.iou_analysis_object.do_all_final_steps()

    ######################################################
    # Methods to Load Model, Dataset, and Chosen Indices #----------------------
    ######################################################
    def load_model(self):
        """Instantiate the model, load the saved parameters, set eval mode."""
        print('Loading model')
        self.model = self.custom_net(**self.custom_net_args).to(self.device)
        check_point = torch.load(self.params_path, map_location='cpu') #map to CPU to avoid memory issue #TODO check if you need this
        self.model.load_state_dict(check_point['params'])
        self.model.eval()
        #If everything loads correctly you will see the following message:
        #IncompatibleKeys(missing_keys=[], unexpected_keys=[])

    def load_dataset(self):
        """Instantiate the dataset for self.setname and cache the label names."""
        print('Loading dataset')
        self.chosen_dataset = self.CTDatasetClass(setname = self.setname, **self.dataset_args)
        self.label_meanings = self.chosen_dataset.return_label_meanings()

    def load_chosen_indices(self):
        """Fill which_scans['ChosenIndex'] with dataset indices: either the
        indices of the explicitly requested scans, or all scans in the set."""
        print('Loading chosen indices')
        if len([x for x in self.which_scans.columns.values.tolist() if x in ['VolumeAcc','VolumeAcc_ForOutput','Abnormality']])==3:
            #you did specify which scans to use, so figure out what indices
            #you need to query in the dataset to get those chosen scans:
            for df_idx in range(self.which_scans.shape[0]):
                volume_acc = self.which_scans.at[df_idx,'VolumeAcc']
                self.which_scans.at[df_idx,'ChosenIndex'] = np.where(self.chosen_dataset.volume_accessions == volume_acc)[0][0]
        else:
            #NOTE(review): comparing to an empty DataFrame yields an empty
            #result, so .all().all() is vacuously True only when which_scans
            #is itself empty -- fragile but effective as an emptiness check.
            assert (self.which_scans == pd.DataFrame()).all().all()
            #you didn't specify which scans to use, so use all the scans in the dataset
            self.which_scans['ChosenIndex'] = [x for x in range(len(self.chosen_dataset))]
        self.which_scans['ChosenIndex'] = self.which_scans['ChosenIndex'].astype('int')

    ###########
    # Looping #-----------------------------------------------------------------
    ###########
    def loop_over_dataset_and_labels(self):
        """For each chosen scan, and for each label in each outcome category
        (g1p1/g1p0/g0p1/g0p0), compute the attention map and feed it to the
        requested analyses (IOU stats and/or attention plots)."""
        if (self.task == ['iou_analysis'] and self.iou_analysis_object.loaded_from_existing_file):
            return #don't need to loop again if iou_wide_df already created
        print('Looping over dataset and labels')
        five_percent = max(1,int(0.05*self.which_scans.shape[0]))
        #Iterate through the examples in the dataset. df_idx is an integer
        for df_idx in range(self.which_scans.shape[0]):
            if self.verbose: print('Starting df_idx',df_idx)
            idx = self.which_scans.at[df_idx,'ChosenIndex'] #int, e.g. 5
            example = self.chosen_dataset[idx]
            ctvol = example['data'].unsqueeze(0).to(self.device) #unsqueeze to create a batch dimension. out shape [1, 135, 3, 420, 420]
            gr_truth = example['gr_truth'].cpu().data.numpy() #out shape [80]
            volume_acc = example['volume_acc'] #this is a string, e.g. 'RHAA12345_5.npz'
            attn_gr_truth = example['attn_gr_truth'].data.cpu().numpy() #out shape [80, 135, 6, 6]
            #Get out and x_perslice_scores when using attention_type hirescam-check
            out = self.get_out_and_blue_heatmaps(ctvol, gr_truth, volume_acc)
            if self.verbose: print('Analyzing',volume_acc)
            #volume_acc sanity check and conversion to FAKE volume acc if indicated
            if 'VolumeAcc' in self.which_scans.columns.values.tolist():
                intended_volume_acc = self.which_scans.at[df_idx,'VolumeAcc']
                assert volume_acc == intended_volume_acc
                #Now, because which_scans is not empty, you can switch volume_acc
                #from the actual volume acc e.g. RHAA12345_6 to the fake ID,
                #because from here onwards, the volume acc is only used in file
                #names:
                volume_acc = self.which_scans.at[df_idx,'VolumeAcc_ForOutput'].replace('.npz','').replace('.npy','') #e.g. fake ID 'val12345'
            #Now organize the labels for this particular image that you want to
            #make heatmap visualizations for into g1p1, g1p0, g0p1, and g0p0
            #g1p1=true positive, g1p0=false negative, g0p1=false positive, g0p0=true negative
            #we pass in volume_acc twice because the variable volume_acc could
            #be fake OR real, depending on the preceding logic, but
            #example['volume_acc'] is guaranteed to always be real.
            label_indices_dict = make_label_indices_dict(volume_acc, example['volume_acc'], gr_truth, self.params_path, self.label_meanings)
            for key in ['g1p1', 'g1p0', 'g0p1', 'g0p0']:
                chosen_label_indices = label_indices_dict[key] #e.g. [32, 37, 43, 46, 49, 56, 60, 62, 64, 67, 68, 71]
                if (('Abnormality' not in self.which_scans.columns.values.tolist()) or (self.which_scans.at[df_idx,'Abnormality'] == 'all')): #plot ALL abnormalities
                    pass
                else: #plot only chosen abnormalities
                    chosen_abnormalities = self.which_scans.at[df_idx,'Abnormality'].split(',')
                    chosen_label_indices = [x for x in chosen_label_indices if self.label_meanings[x] in chosen_abnormalities]
                #Calculate label-specific attn and make label-specific attn figs
                for chosen_label_index in chosen_label_indices:
                    #Get label_name and seg_gr_truth:
                    label_name = self.label_meanings[chosen_label_index] #e.g. 'lung_atelectasis'
                    seg_gr_truth = attn_gr_truth[chosen_label_index,:,:,:] #out shape [135, 6, 6]
                    #segprediction is the raw attention. slice_idx is the index of
                    #the slice with the highest raw score for this label
                    segprediction, x_perslice_scores_this_disease = self.return_segprediction(out, ctvol, gr_truth, volume_acc, chosen_label_index) #out shape [135, 6, 6]
                    segprediction_clipped_and_normed = clip_and_norm_volume(segprediction)
                    if 'iou_analysis' in self.task:
                        if key in ['g1p1','g1p0']: #TODO: implement IOU analysis for other options! also make this more efficient so no excessive calculations are done
                            if self.verbose: print('Adding example to IOU analysis')
                            self.iou_analysis_object.add_this_example_to_iou_wide_df(segprediction_clipped_and_normed,
                                seg_gr_truth, volume_acc, label_name, num_labels_this_ct=int(gr_truth.sum()))
                    if 'attn_plots' in self.task:
                        if self.verbose: print('Making 2D and 3D attn figures')
                        make_2d_plot_and_3d_gif.plot_attn_over_ct_scan(ctvol,
                            segprediction_clipped_and_normed, x_perslice_scores_this_disease, volume_acc,
                            label_name, os.path.join(self.attn_2dplot_dir,key), os.path.join(self.attn_3dgif_dir,key))
            #Report progress
            if df_idx % five_percent == 0:
                print('Done with',df_idx,'=',round(100*df_idx/self.which_scans.shape[0],2),'%')
            del example, ctvol, gr_truth, volume_acc, attn_gr_truth, out

    def get_out_and_blue_heatmaps(self, ctvol, gr_truth, volume_acc):
        """Calculate 'out' which will be used for:
        1. the blue heatmap figure (the 'x_perslice_scores') which is
            specific to a particular scan, NOT a particular label;
        2. the 'hirescam-check' attention (the 'disease_reps')
        Note that we don't do this within the label for loop below
        because it's computationally wasteful to run a fixed model again
        and again on the same input CT scan.
        To avoid memory issues of running the model twice,
        for determining true positives/false positives/true negatives/false
        negatives, we use the pre-calculated predicted probabilities that were
        saved when the model was first run.
        out['out'] contains the prediction scores and has shape [1,80]
        out['disease_reps'] contains the 'hirescam-check' attention for
            all diseases and has shape [80, 135, 16, 6, 6]
        out['x_perslice_scores'] contains the abnormality scores for each
            slice and has shape [1, 80, 135]"""
        if self.attention_type == 'hirescam-check':
            out = self.model(ctvol)
            if 'blue_heatmaps' in self.task:
                if self.verbose: print('Making blue heatmap')
                blue_heatmap.visualize_slicediseases(out['out'], gr_truth,
                    out['x_perslice_scores'].cpu().data.numpy(),
                    volume_acc, self.blue_heatmaps_dir, self.label_meanings,
                    self.blue_heatmap_baseline)
            return out
        else:
            #For the gradcam attention types the model is run later, inside
            #RunGradCAM, so there is nothing to precompute here.
            return None

    def return_segprediction(self, out, ctvol, gr_truth, volume_acc, chosen_label_index):
        """Return the <segprediction> which is a volume of scores for a particular
        label"""
        if self.attention_type == 'hirescam-check':
            return extract_disease_reps.return_segprediction_from_disease_rep(out, chosen_label_index)
        elif self.attention_type in ['gradcam-vanilla','hirescam']:
            #note that if 'make_figure' is in self.task, then a 2d debugging
            #figure for Grad-CAM will also be saved in this step
            return gradcam.RunGradCAM(self.attention_type, self.model, self.device,
                self.label_meanings, self.gradcam_debug_dir, self.task,
                **self.attention_type_args).return_segprediction_from_grad_cam(ctvol, gr_truth, volume_acc, chosen_label_index)
def make_label_indices_dict(possibly_fake_volume_acc, real_volume_acc, gr_truth, params_path, label_meanings):
    """Based on the <gr_truth> and the predicted probability that was
    pre-calculated, figure out which abnormalities are true positives (g1p1),
    false negatives (g1p0), false positives (g0p1), and true negatives (g0p0).
    g stands for ground truth and p stands for predicted probability.
    The predicted probabilities are read in from the predicted probabilities
    that were saved from the final model when it was done training.
    The path for these is inferred from params_path based on known
    directory structure. We also need to use this pre-calculated file because
    we need to get the median predicted probability for each abnormality.
    The predicted probabilities are binarized as 0 or 1 according to being
    above or below the median (50th percentile) for that abnormality.
    Returns a dictionary with keys g1p1, g1p0, g0p1, and g0p0
    and values that are lists of numeric indices of the corresponding
    abnormalities e.g. [32, 37, 64, 67, 68, 71]. (Note: the arrays computed
    below are converted with .tolist(), so the values are lists, not arrays.)"""
    #Infer paths to the precomputed pred probs based on known directory organization:
    #e.g. precomputed_path = '/home/rlb61/data/img-hiermodel2/results/results_2019-2020/2020-10/2020-10-09_WHOLEDATA_BodyAvg_Baseline_FreshStart/pred_probs'
    precomputed_path = os.path.join(os.path.split(os.path.split(params_path)[0])[0],'pred_probs')
    files = os.listdir(precomputed_path) #e.g. ['valid_grtruth_ep4.csv', 'valid_predprob_ep4.csv']
    pred_probs_file = [x for x in files if 'predprob' in x][0] #e.g. 'valid_predprob_ep4.csv'
    gr_truth_file = [x for x in files if 'grtruth' in x][0] #e.g. 'valid_grtruth_ep4.csv'
    #Open the pred probs and gr truth for this data subset
    #Each of them has volume accesions as the index, and abnormalities as
    #the columns. Example shape: [2085,80]
    pred_probs_all = pd.read_csv(os.path.join(precomputed_path, pred_probs_file),header=0,index_col=0)
    gr_truth_all = pd.read_csv(os.path.join(precomputed_path, gr_truth_file),header=0,index_col=0)
    #Sanity checks:
    for df in [pred_probs_all, gr_truth_all]:
        assert df.columns.values.tolist()==label_meanings
    assert (gr_truth_all.loc[real_volume_acc,:]==gr_truth).all()
    #Calculate the medians of the different abnormalities across the whole
    #data subset.
    medians = np.median(pred_probs_all,axis=0) #np array, e.g. shape [80]
    #Select out the predicted probabilities for just this scan
    pred_probs = pred_probs_all.loc[real_volume_acc,:] #pd Series w abn labels and float values, e.g. shape [80]
    #Get binary vector that's equal to 1 if the corresponding abnormality
    #has a pred prob greater than the median
    pred_probs_geq = (pred_probs >= medians).astype('int') #pd Series w abn labels and binary int values, e.g. shape [80]
    #Now divide up the abnormalities for this particular CT scan based on whether
    #they are above or below the median pred prob, and whether the gr truth
    #is 1 or 0
    g0p0 = np.intersect1d(np.where(gr_truth==0)[0], np.where(pred_probs_geq==0)[0])
    g0p1 = np.intersect1d(np.where(gr_truth==0)[0], np.where(pred_probs_geq==1)[0])
    g1p0 = np.intersect1d(np.where(gr_truth==1)[0], np.where(pred_probs_geq==0)[0])
    g1p1 = np.intersect1d(np.where(gr_truth==1)[0], np.where(pred_probs_geq==1)[0])
    #Checks: the four categories exactly partition the label set
    assert len(g1p0)+len(g1p1)==int(gr_truth.sum())
    assert len(g0p0)+len(g0p1)+len(g1p0)+len(g1p1)==len(gr_truth)
    label_indices_dict = {'g0p0':g0p0.tolist(),
                          'g0p1':g0p1.tolist(),
                          'g1p0':g1p0.tolist(),
                          'g1p1':g1p1.tolist()}
    #uncomment the next line to print detailed info to the terminal:
    #print_for_future_reference(params_path, label_indices_dict, possibly_fake_volume_acc, pred_probs, medians, label_meanings)
    return label_indices_dict
def print_for_future_reference(params_path, label_indices_dict, possibly_fake_volume_acc, pred_probs, medians, label_meanings):
    """Print one tab-separated line per (group, abnormality) pair.

    Each line records the model name (last component of <params_path>), the
    volume accession, the group key ('g0p0'/'g0p1'/'g1p0'/'g1p1'), the
    abnormality label, its predicted probability, and the dataset-wide median
    for that abnormality, all rounded to 4 decimal places.
    """
    model_description = os.path.split(params_path)[1]
    for group_name, abn_indices in label_indices_dict.items():
        for abn_idx in abn_indices:
            fields = [model_description,
                      possibly_fake_volume_acc,
                      group_name,
                      label_meanings[abn_idx],
                      str(round(pred_probs[abn_idx], 4)),
                      'median:',
                      str(round(medians[abn_idx], 4))]
            print('\t'.join(fields))
#############
# Functions #-------------------------------------------------------------------
#############
def clip_and_norm_volume(volume):
    """Clip negative voxel values to zero, then min-max normalize to [0, 1].

    If the volume is constant after clipping and shifting (peak of 0), the
    division is skipped to avoid dividing by zero and the all-zero array is
    returned as-is.
    """
    clipped = np.maximum(volume, 0)  # ReLU: zero out negative intensities
    shifted = clipped - np.min(clipped)  # anchor the minimum at exactly 0
    peak = np.max(shifted)
    if peak != 0:
        shifted = shifted / peak
    return shifted
|
rachellea/explainable-ct-ai
|
src/run_attn_analysis.py
|
run_attn_analysis.py
|
py
| 26,139 |
python
|
en
|
code
| 3 |
github-code
|
6
|
33180088903
|
from itertools import *
# Pairwise distances between the seven cities, keyed by the two-digit code
# 'XY' (X = origin index, Y = destination index). The matrix is symmetric
# and the diagonal ('00'..'66') is zero. Built once at import time instead
# of re-walking a 49-branch elif chain on every call.
_DISTANCES = {
    '00': 0,    '01': 712,  '02': 673,  '03': 1075, '04': 875,  '05': 1622, '06': 423,
    '10': 712,  '11': 0,    '12': 1385, '13': 1800, '14': 1577, '15': 2348, '16': 1128,
    '20': 673,  '21': 1385, '22': 0,    '23': 1499, '24': 239,  '25': 2046, '26': 244,
    '30': 1075, '31': 1800, '32': 1499, '33': 0,    '34': 1287, '35': 551,  '36': 1266,
    '40': 875,  '41': 1577, '42': 239,  '43': 1287, '44': 0,    '45': 1835, '46': 442,
    '50': 1622, '51': 2348, '52': 2046, '53': 551,  '54': 1835, '55': 0,    '56': 1813,
    '60': 423,  '61': 1128, '62': 244,  '63': 1266, '64': 442,  '65': 1813, '66': 0,
}

def repl(a):
    """Return the distance for the two-digit city-pair code *a*.

    Codes not present in the table yield None, matching the original elif
    chain's fallthrough behavior.
    """
    return _DISTANCES.get(a)
def city(s):
    """Map a single-digit city code to its display name.

    Each name deliberately carries a trailing space so that names can be
    concatenated directly when printing a route. Unknown codes yield None.
    """
    names = {
        '0': 'Москва ',
        '1': 'Санкт-Петербург ',
        '2': 'Чебоксары ',
        '3': 'Ростов-на-Дону ',
        '4': 'Ульяновск ',
        '5': 'Сочи ',
        '6': 'Нижний Новгород ',
    }
    return names.get(s)
# Exhaustively score every ordering of the seven cities and keep the
# longest route(s). permutations() yields each of the 7! = 5040 orderings
# exactly once and in the same lexicographic order that the original
# product(..., repeat=7) + digit-count filter produced, but without
# generating and discarding the other ~818k non-permutation tuples.
best_length = 0   # total distance of the best route(s) found so far
best_codes = []   # all digit-string routes achieving best_length (ties kept)
for perm in permutations('0123456'):
    route = ''.join(perm)
    # Sum the leg distances over the six consecutive city pairs.
    total = sum(repl(route[j:j + 2]) for j in range(6))
    if total > best_length:
        best_codes = [route]
        best_length = total
    elif total == best_length:
        best_codes.append(route)
# Translate each winning code string into human-readable city names
# (city() already appends a separating space to every name).
named_routes = []
for code in best_codes:
    named_routes.append(''.join(city(ch) for ch in code))
print(best_length)
print(named_routes)
|
Ethryna/InfTasks
|
2 полугодие/airports.py
|
airports.py
|
py
| 2,639 |
python
|
en
|
code
| 2 |
github-code
|
6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.