Dataset columns (from the dataset viewer summary):
  seq_id             string  (length 7 to 11)
  text               string  (length 156 to 1.7M)
  repo_name          string  (length 7 to 125)
  sub_path           string  (length 4 to 132)
  file_name          string  (length 4 to 77)
  file_ext           string  (6 classes)
  file_size_in_byte  int64   (156 to 1.7M)
  program_lang       string  (1 class)
  lang               string  (38 classes)
  doc_type           string  (1 class)
  stars              int64   (0 to 24.2k)
  dataset            string  (1 class)
  pt                 string  (1 class)
3357675588
from numpy.lib.polynomial import RankWarning
import torch as pt
import numpy as np
from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D
from model.PFSeg import PFSeg3D
import cv2
import SimpleITK as sitk

lr=0.0001
epoch=100
batch_size=1
model_path='/path/to/Saved_models'
img_size=(64,96,96)

model=PFSeg3D().cuda()
model.load_state_dict(pt.load(model_path+'/PFSeg_3D_BraTS_patch-free_bs_best.pt',map_location = 'cpu'))

trainset=GuidedBraTSDataset3D('/path/to/BraTS20',mode='all',augment=False)
# valset=BraTSDataset3D('/path/to/BraTS20',mode='val')
# testset=GuidedBraTSDataset3D('/path/to/BraTS20',mode='test')

train_dataset=pt.utils.data.DataLoader(trainset,batch_size=batch_size,shuffle=False,drop_last=True)
# val_dataset=pt.utils.data.DataLoader(valset,batch_size=1,shuffle=True,drop_last=True)
# test_dataset=pt.utils.data.DataLoader(testset,batch_size=1,shuffle=True,drop_last=True)

def GenerateCoarseMask():
    model.eval()
    dice_sum=0
    hd_sum=0
    jc_sum=0
    for i,data in enumerate(train_dataset):
        output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
        label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
        (inputs,labels,raw_image,guidance,_)=data
        labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
        guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
        inputs3D = pt.autograd.Variable(inputs).type(pt.FloatTensor).cuda().unsqueeze(1)
        with pt.no_grad():
            outputs3D,_ = model(inputs3D,guidance)
        outputs3D=np.array(outputs3D.squeeze(0).squeeze(0).cpu().data.numpy())
        output_list=np.zeros((raw_image.shape[1]+64,raw_image.shape[2]+64,raw_image.shape[3]+64))
        output_list[32:-32,32:-32,32:-32]=outputs3D
        label_list=np.zeros((raw_image.shape[1]+64,raw_image.shape[2]+64,raw_image.shape[3]+64))
        label_list[32:-32,32:-32,32:-32]=np.array(labels3D.squeeze(0).squeeze(0).cpu().data.numpy())
        input_real=np.array(raw_image.squeeze(0).numpy())
        input_list=np.zeros((raw_image.shape[1]+64,raw_image.shape[2]+64,raw_image.shape[3]+64))
        input_list[32:-32,32:-32,32:-32]=input_real
        output_list[output_list<0.5]=0.
        output_list[output_list>=0.5]=1.

        results=np.where(output_list!=0)
        x_list=results[0]
        y_list=results[1]
        z_list=results[2]
        x_max=x_list.max()
        x_min=x_list.min()
        y_max=y_list.max()
        y_min=y_list.min()
        z_max=z_list.max()
        z_min=z_list.min()

        x_length=64*(1+(x_max-x_min)//64)  # make sure it is a multiple of 16
        y_length=64*(1+(y_max-y_min)//64)
        z_length=64*(1+(z_max-z_min)//64)

        x_center=(x_max-x_min)//2+x_min
        y_center=(y_max-y_min)//2+y_min
        z_center=(z_max-z_min)//2+z_min

        bbox_xmin=x_center-x_length//2
        bbox_xmax=x_center+x_length//2
        bbox_ymin=y_center-y_length//2
        bbox_ymax=y_center+y_length//2
        bbox_zmin=z_center-z_length//2
        bbox_zmax=z_center+z_length//2

        # cropped_coarse=np.zeros((x_length,y_length,z_length))
        # cropped_image=np.zeros((x_length,y_length,z_length))
        # cropped_mask=np.zeros((x_length,y_length,z_length))

        cropped_image=input_list[bbox_xmin:bbox_xmax,bbox_ymin:bbox_ymax,bbox_zmin:bbox_zmax]
        cropped_coarse=output_list[bbox_xmin:bbox_xmax,bbox_ymin:bbox_ymax,bbox_zmin:bbox_zmax]
        cropped_mask=label_list[bbox_xmin:bbox_xmax,bbox_ymin:bbox_ymax,bbox_zmin:bbox_zmax]

        if not(cropped_mask.shape==cropped_image.shape):
            raise Exception()
        if not(cropped_image.shape[0]%16==0 and cropped_image.shape[1]%16==0 and cropped_image.shape[2]%16==0):
            raise Exception()

        # save the cropped images for next round training
        np.save('/path/to/BraTS20/cropped_coarse/Case_{:3d}_64image.npy'.format(i+1),cropped_image)
        np.save('/path/to/BraTS20/cropped_coarse/Case_{:3d}_64coarse.npy'.format(i+1),cropped_coarse)
        np.save('/path/to/BraTS20/cropped_coarse/Case_{:3d}_64mask.npy'.format(i+1),cropped_mask)

        # final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
        # final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
        # final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
        # cv2.imwrite('TestPhase_BraTS.png',final_img)

        pr_sum = output_list.sum()
        gt_sum = label_list.sum()
        pr_gt_sum = np.sum(output_list[label_list == 1])
        dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
        dice_sum += dice
        print("dice:",dice)

        # hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
        # jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
        # hd_sum+=hausdorff
        # jc_sum+=jaccard

    print("Finished. Total dice: ",dice_sum/len(train_dataset),'\n')
    print("Finished. Avg Jaccard: ",jc_sum/len(train_dataset))
    print("Finished. Avg hausdorff: ",hd_sum/len(train_dataset))
    return dice_sum/len(train_dataset)

GenerateCoarseMask()
Dootmaan/PFSeg-ABR
step2_generateCoraseMask.py
step2_generateCoraseMask.py
py
5,166
python
en
code
3
github-code
6
35411640384
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Update map explorers
--------------------

"""
import logging
from os.path import join

from hdx.data.dataset import Dataset
from hdx.data.resource import Resource

from src.acled import update_lc_acled, update_ssd_acled
from src.cbpf import update_cbpf
from src.fts import update_fts
#from src.rowca import update_rowca

logger = logging.getLogger(__name__)


def get_valid_names(downloader, url, headers):
    rows_gen = downloader.get_tabular_rows(url, dict_rows=True, headers=headers)
    return [x['Name'] for x in rows_gen if x['Name'] != 'Name']


def update_resources(resource_updates):
    for resource_info in resource_updates.values():
        resource = Resource.read_from_hdx(resource_info['id'])
        resource.set_file_to_upload(resource_info['path'])
        resource.update_in_hdx()


def update_lc(today, downloader, folder, lc_names_url, lc_mappings_url, acled_base_url, fts_base_url, rowca_base_url):
    logger.info('Lake Chad Map Explorer Data')
    country_list = ['Cameroon', 'Nigeria', 'Niger', 'Chad']
    valid_names = get_valid_names(downloader, lc_names_url, headers=['ISO', 'Name'])
    replace_values = downloader.download_tabular_key_value(lc_mappings_url)
    resource_updates = dict()
    resource_updates['acled_events'] = {'id': 'fc396bf2-d204-48b2-84d2-337ada015273',
                                        'path': join(folder, 'Lake_Chad_Basin_Recent_Conflict_Events.csv')}
    resource_updates['acled_fatalities'] = {'id': '3792ee5d-ca30-4e5c-96c8-618c6b625d12',
                                            'path': join(folder, 'Lake_Chad_Basin_Recent_Conflict_Event_Total_Fatalities.csv')}
    resource_updates['fts'] = {'id': '2890c719-4fb2-4178-acdb-e0c5c91cfbce',
                               'path': join(folder, 'Lake_Chad_Basin_Appeal_Status.csv')}
    # resource_updates['rowca_population'] = {'id': '048df35c-e35f-4b1f-aa1a-2d1ce1292f22',
    #                                         'path': join(folder, 'Lake_Chad_Basin_Estimated_Population.csv')}
    # resource_updates['rowca_displaced'] = {'id': '1bdcc8f3-223c-4f7d-9bc6-48be317d50c5',
    #                                        'path': join(folder, 'Lake_Chad_Basin_Displaced.csv')}
    logger.info('Lake Chad - ACLED')
    update_lc_acled(today, acled_base_url, country_list, valid_names, replace_values, resource_updates)
    logger.info('Lake Chad - FTS')
    update_fts(fts_base_url, downloader, country_list, resource_updates)
    # logger.info('Lake Chad - ROWCA')
    # update_rowca(rowca_base_url, downloader, valid_names, replace_values, resource_updates)
    logger.info('Lake Chad - Dataset Date')
    update_resources(resource_updates)
    dataset = Dataset.read_from_hdx('lake-chad-crisis-map-explorer-data')
    dataset.set_dataset_date_from_datetime(today)
    dataset.update_in_hdx()


def update_ssd(today, downloader, folder, ssd_adm1_names_url, ssd_adm2_names_url, ssd_mappings_url, acled_base_url, cbpf_base_url):
    logger.info('South Sudan Map Explorer Data')
    country_list = ['South Sudan']
    valid_adm1_names = get_valid_names(downloader, ssd_adm1_names_url, headers=['Name'])
    valid_adm2_names = get_valid_names(downloader, ssd_adm2_names_url, headers=['Name'])
    replace_values = downloader.download_tabular_key_value(ssd_mappings_url)
    resource_updates = dict()
    resource_updates['acled_events'] = {'id': '3480f362-67bb-44d0-b749-9e8fc0963fc0',
                                        'path': join(folder, 'South_Sudan_Recent_Conflict_Events.csv')}
    resource_updates['acled_fatalities'] = {'id': 'a67b85ee-50b4-4345-9102-d88bf9091e95',
                                            'path': join(folder, 'South_Sudan_Recent_Conflict_Event_Total_Fatalities.csv')}
    resource_updates['cbpf'] = {'id': 'd6b18405-5982-4075-bb0a-a1a85f09d842',
                                'path': join(folder, 'South_Sudan_Country_Based_Pool_Funds.csv')}
    logger.info('South Sudan - ACLED')
    update_ssd_acled(today, acled_base_url, country_list,
                     valid_adm2_names, replace_values, resource_updates)
    logger.info('South Sudan - CBPF')
    update_cbpf(cbpf_base_url, downloader, 'SSD19', today, valid_adm1_names, replace_values, resource_updates)
    logger.info('South_Sudan_ - Dataset Date')
    update_resources(resource_updates)
    dataset = Dataset.read_from_hdx('south-sudan-crisis-map-explorer-data')
    dataset.set_dataset_date_from_datetime(today)
    dataset.update_in_hdx()
OCHA-DAP/hdx-scraper-mapexplorer
mapexplorer.py
mapexplorer.py
py
4,508
python
en
code
0
github-code
6
32414340113
from flask import Flask, send_file, request, abort
from pathlib import Path
import youtube_dl
import json

app = Flask(__name__)


@app.route('/queuemp3', methods=['GET', 'POST'])
def queuemp3():
    if request.method == 'POST':
        try:
            data = request.get_json()
            url = data['url']
            print(url)

            ydl = youtube_dl.YoutubeDL()
            r = None
            with ydl:  # don't download, much faster
                r = ydl.extract_info(url, download=False)

            options = {
                'format': 'bestaudio/best',
                'extractaudio': True,   # only keep the audio
                'audioformat': "mp3",   # convert to mp3
                'outtmpl': '{}.mp3'.format(r['title']),  # name the file the ID of the video
                'noplaylist': True,     # only download single song, not playlist
            }

            ''' print some typical fields if needed
            print("%s uploaded by '%s', has %d views, %d likes, and %d dislikes" % (
                r['title'], r['uploader'], r['view_count'], r['like_count'], r['dislike_count']))'''

            with youtube_dl.YoutubeDL(options) as ydl:
                ydl.download([url])

            try:
                return json.dumps({'filename': r['title']})
            except Exception as e:
                return str(e)
        finally:
            print("A request was sent for queueing a conversion")


@app.route('/downloadmp3', methods=['GET', 'POST'])
def downloadmp3():
    if request.method == 'POST':
        filename = request.form['filename']
        print(filename)
        audio_file = Path("./{}.mp3".format(filename))
        if audio_file.is_file():
            return send_file('./{}.mp3'.format(filename), attachment_filename='{}.mp3'.format(filename))
        else:
            abort(404)


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080, debug=True)
BK-Modding/youtube-2-mp3
flask server/app.py
app.py
py
1,961
python
en
code
2
github-code
6
33561633117
import typing as t
import json
import re
from pathlib import Path
from PIL import Image
from torch.utils.data import Dataset

from .types.marked_image \
    import MarkedImage, MarkedImageTensor
from .transforms import (
    ToTensor
)
from ..utils import coord


class BdcDataSet(Dataset):

    def __init__(self, img_path: str, land_path: str, transform=None):
        super().__init__()

        if transform is None:
            self.transform = ToTensor()
        else:
            self.transform = transform

        self.image_files = [
            p for p in Path(img_path).glob("**/*")
            if re.search('/*.(jpg|png)', str(p))
        ]
        if land_path is not None:
            with open(land_path) as lm:
                landmarks = json.load(lm)
                self.landmarks = self.__normalize_landmarks(landmarks)
        else:
            self.landmarks = {}

    def __len__(self) -> int:
        return len(self.image_files)

    def __getitem__(self, idx: int) -> MarkedImageTensor:
        p = self.image_files[idx]
        with Image.open(str(p)).convert('RGB') as img:
            img.load()

        lmarks = self.landmarks.get(p.name, [])
        sample: MarkedImage = {
            'image': img,
            'landmarks': lmarks
        }
        sample = self.transform(sample)

        return sample

    def __normalize_landmarks(self, landmarks) -> t.Dict:
        norm_lands = {}
        for p in self.image_files:
            lmarks = landmarks[p.name]
            with Image.open(str(p)).convert('RGB') as img:
                img.load()
            norm_lands[p.name] = list(map(
                lambda x: coord.to_ml_coord(x, img.size),
                lmarks
            ))
        return norm_lands
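# Illustrative usage sketch (not part of the original file): shows how BdcDataSet is
# typically consumed. "./images" and "./landmarks.json" are placeholder paths, and the
# relative imports above mean this only runs inside the court_detection package.
if __name__ == "__main__":
    ds = BdcDataSet("./images", "./landmarks.json")
    print(len(ds))      # number of matched image files
    sample = ds[0]      # dict produced by the transform, e.g. {'image': ..., 'landmarks': ...}
    print(sample["image"].shape, len(sample["landmarks"]))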
daikon-oroshi/court-detection
court_detection/data/data_set.py
data_set.py
py
1,789
python
en
code
0
github-code
6
11004197028
import collections
from typing import List


class Solution:
    def maxCandies(self, status: List[int], candies: List[int], keys: List[List[int]],
                   containedBoxes: List[List[int]], initialBoxes: List[int]) -> int:
        n = len(status)
        can_open = [status[i] for i in range(n)]
        has_box, used = [False] * n, [False] * n

        q = collections.deque()
        ans = 0
        for box in initialBoxes:
            has_box[box] = True
            if can_open[box]:
                q.append(box)
                used[box] = True
                ans += candies[box]

        while len(q) > 0:
            big_box = q.popleft()
            for key in keys[big_box]:
                can_open[key] = True
                if not used[key] and has_box[key]:
                    q.append(key)
                    used[key] = True
                    ans += candies[key]
            for box in containedBoxes[big_box]:
                has_box[box] = True
                if not used[box] and can_open[box]:
                    q.append(box)
                    used[box] = True
                    ans += candies[box]

        return ans
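# Illustrative check (not part of the original file): exercises maxCandies() on the
# sample input from LeetCode 1298; the expected answer for this case is 16.
if __name__ == "__main__":
    demo = Solution()
    print(demo.maxCandies(status=[1, 0, 1, 0],
                          candies=[7, 5, 4, 100],
                          keys=[[], [], [1], []],
                          containedBoxes=[[1, 2], [3], [], []],
                          initialBoxes=[0]))  # expected: 16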
xixihaha1995/CS61B_SP19_SP20
temp/toy/python/1298. Maximum Candies You Can Get from Boxes.py
1298. Maximum Candies You Can Get from Boxes.py
py
1,118
python
en
code
0
github-code
6
31632214544
import os import sys import random import tables as tb import numpy as np import pandas as pd import invisible_cities.reco.paolina_functions as plf import invisible_cities.reco.dst_functions as dstf from invisible_cities.io.mcinfo_io import load_mchits from invisible_cities.io.mcinfo_io import load_mcparticles start = int(sys.argv[1]) numb = int(sys.argv[2]) size = float(sys.argv[3]) blob_radius = float(sys.argv[4]) vox_size = np.array([size,size,size],dtype=np.float16) # voxel size pe2keV = 1. loop_events = [] event, track_ID = [], [] maxR, minX, maxX, minY, maxY, minZ, maxZ = [], [], [], [], [], [], [] evt_energy, energy = [], [] length, numb_of_hits, numb_of_voxels, numb_of_tracks = [], [], [], [] v_size_x, v_size_y, v_size_z = [], [], [] extreme1_x, extreme1_y, extreme1_z = [], [], [] extreme2_x, extreme2_y, extreme2_z = [], [], [] eblob1, eblob2 = [], [] eblob1_bary, eblob2_bary = [], [] blob1_bary_x, blob1_bary_y, blob1_bary_z = [], [], [] blob2_bary_x, blob2_bary_y, blob2_bary_z = [], [], [] event_vxls, track_ID_vxls = [], [] voxel_x, voxel_y, voxel_z = [], [], [] voxel_e = [] signal = [] hits_file = '' events_in = 0 for n in range(start,start+numb): for part in range(10): hits_file = '/home/paolafer/data/MC/Tl_upper_port/hits/Tl208_NEW_v1_03_01_nexus_v5_03_04_UPPER_PORT_10.2bar_run4_1hit_perSiPM_hits.{0}_{1}.h5'.format(n, part) if not os.path.isfile(hits_file): print('{0} not existing'.format(hits_file)) continue print('Analyzing {0}'.format(hits_file)) hits_dict = load_mchits(hits_file) p_dict = load_mcparticles(hits_file) events_in += len(hits_dict) for nevt, hitc in hits_dict.items(): tot_e = sum([hh.E for hh in hitc]) ### smear hit energy to create 1% FWHM resolution at 1592 keV sigma_e = 0.01/2.35 * np.sqrt(1.592/tot_e) ### remember, this is relative! smeared_tot_e = tot_e + tot_e*np.random.normal(0., 1.) * sigma_e sm_factor = smeared_tot_e / tot_e #print(tot_e, smeared_tot_e) for h in hitc: h.energy = h.energy * sm_factor voxels = plf.voxelize_hits(hitc, vox_size) trks = plf.make_track_graphs(voxels) ### Is it a e+e- events? positron = False for _, particle in p_dict[nevt].items(): if (particle.name == 'e+') & (len(particle.hits) > 0): positron = True for c, t in enumerate(trks, 0): etrk = sum([vox.E for vox in t.nodes()]) extr1, extr2 = plf.find_extrema(t) ## first way to calculate blobs: using hits within a sphere from the extremes e_blob1 = e_blob2 = 0. for h in hitc: dist1 = np.linalg.norm(h.pos - extr1.pos) dist2 = np.linalg.norm(h.pos - extr2.pos) if dist1 < blob_radius: e_blob1 += h.E if dist2 < blob_radius: e_blob2 += h.E if (e_blob2 > e_blob1): e_blob1, e_blob2 = e_blob2, e_blob1 ## second way to calculate blob (a la Michel) positions1 = [h.pos for h in extr1.hits] qs1 = [h.E for h in extr1.hits] if sum(qs1): bary_pos1 = np.average(positions1, weights=qs1, axis=0) else: bary_pos1 = extr1.pos positions2 = [h.pos for h in extr2.hits] qs2 = [h.E for h in extr2.hits] if sum(qs2): bary_pos2 = np.average(positions2, weights=qs2, axis=0) else: bary_pos2 = extr2.pos e_blob1_bary = e_blob2_bary = 0. 
for h in hitc: dist1 = np.linalg.norm(h.pos - bary_pos1) dist2 = np.linalg.norm(h.pos - bary_pos2) if dist1 < blob_radius: e_blob1_bary += h.E if dist2 < blob_radius: e_blob2_bary += h.E if (e_blob2_bary > e_blob1_bary): e_blob1_bary, e_blob2_bary = e_blob2_bary, e_blob1_bary ## event-related event += [nevt] signal += [positron] evt_energy += [tot_e/pe2keV] numb_of_hits += [len(hitc)] v_size_x += [voxels[0].size[0]] v_size_y += [voxels[0].size[1]] v_size_z += [voxels[0].size[2]] ## track-related track_ID += [c] length += [plf.length(t)] energy += [etrk/pe2keV] numb_of_voxels += [len(t.nodes())] numb_of_tracks += [len(trks)] extreme1_x += [extr1.X] extreme1_y += [extr1.Y] extreme1_z += [extr1.Z] extreme2_x += [extr2.X] extreme2_y += [extr2.Y] extreme2_z += [extr2.Z] eblob1 += [e_blob1/pe2keV] eblob2 += [e_blob2/pe2keV] eblob1_bary += [e_blob1_bary/pe2keV] eblob2_bary += [e_blob2_bary/pe2keV] blob1_bary_x += [bary_pos1[0]] blob1_bary_y += [bary_pos1[1]] blob1_bary_z += [bary_pos1[2]] blob2_bary_x += [bary_pos2[0]] blob2_bary_y += [bary_pos2[1]] blob2_bary_z += [bary_pos2[2]] min_x = 1e+06 max_x = -1e+06 min_y = 1e+06 max_y = -1e+06 min_z = 1e+06 max_z = 0. max_r = 0 for v in t.nodes(): ## voxel-related event_vxls = event_vxls + [nevt] track_ID_vxls = track_ID_vxls + [c] voxel_x = voxel_x + [v.X] voxel_y = voxel_y + [v.Y] voxel_z = voxel_z + [v.Z] voxel_e = voxel_e + [v.E] for h in v.hits: if h.X < min_x: min_x = h.X if h.X > max_x: max_x = h.X if h.Y < min_y: min_y = h.Y if h.Y > max_y: max_y = h.Y if h.Z < min_z: min_z = h.Z if h.Z > max_z: max_z = h.Z if np.sqrt(h.X*h.X + h.Y*h.Y) > max_r: max_r = np.sqrt(h.X*h.X + h.Y*h.Y) minX += [min_x] maxX += [max_x] minY += [min_y] maxY += [max_y] minZ += [min_z] maxZ += [max_z] maxR += [max_r] loop_events = [events_in] blob_radius = [blob_radius] df = pd.DataFrame({ 'event': event, 'evt_energy': evt_energy, 'signal': signal, 'minX': minX, 'maxX': maxX, 'minY': minY, 'maxY': maxY, 'minZ': minZ, 'maxZ': maxZ, 'maxR': maxR, 'numb_of_hits': numb_of_hits, 'energy': energy, 'numb_of_tracks': numb_of_tracks, 'length': length, 'track_ID': track_ID, 'numb_of_voxels': numb_of_voxels, 'voxel_size_x': v_size_x, 'voxel_size_y': v_size_y, 'voxel_size_z': v_size_z, 'eblob1': eblob1, 'eblob2': eblob2, 'extreme1_x': extreme1_x, 'extreme1_y': extreme1_y, 'extreme1_z': extreme1_z, 'extreme2_x': extreme2_x, 'extreme2_y': extreme2_y, 'extreme2_z': extreme2_z, 'eblob1_bary': eblob1_bary, 'eblob2_bary': eblob2_bary, 'blob1_bary_x': blob1_bary_x, 'blob1_bary_y': blob1_bary_y, 'blob1_bary_z': blob1_bary_z, 'blob2_bary_x': blob2_bary_x, 'blob2_bary_y': blob2_bary_y, 'blob2_bary_z': blob2_bary_z, }) df_vxls = pd.DataFrame({'event': event_vxls, 'track_ID': track_ID_vxls, 'voxel_x': voxel_x, 'voxel_y': voxel_y, 'voxel_z': voxel_z, 'voxel_e': voxel_e }) df_run_info = pd.DataFrame({'events_in': loop_events, 'blob_radius': blob_radius }) out_name = '/home/paolafer/analysis/tracking_trueinfo_TlMC_run4_vxl{0}mm_R{1}mm_{2}_{3}.hdf5'.format(int(size), int(blob_radius[0]), start, numb) store = pd.HDFStore(out_name, "w", complib=str("zlib"), complevel=4) store.put('tracks', df, format='table', data_columns=True) store.put('voxels', df_vxls, format='table', data_columns=True) store.put('run_info', df_run_info, format='table', data_columns=True) store.close()
paolafer/next_analysis
reco/topology2019/tracking_trueMC_part.py
tracking_trueMC_part.py
py
8,998
python
en
code
0
github-code
6
2348487124
import os
import sys
import logging

if sys.version_info >= (3, 0):
    from io import StringIO
else:
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
assert StringIO

from pylint import lint
from pylint.__pkginfo__ import numversion


class PyLinter(object):
    """PyLinter class for Anaconda
    """

    def __init__(self, filename, rcfile):
        self.filename = filename
        self.exit = sys.exit
        self.rcfile = rcfile
        self.stdout = sys.stdout
        self.output = StringIO()

        sys.exit = lambda x: None
        sys.stdout = self.output

        self.execute()

    def execute(self):
        """Execute the linting process
        """

        if numversion < (1, 0, 0):
            args = '--include-ids=y -r n'.split(' ')
        else:
            args = '--msg-template={msg_id}:{line}:{column}:{msg} -r n'.split(
                ' ')

        if self.rcfile:
            args.append('--rcfile={0}'.format(os.path.expanduser(self.rcfile)))

        args.insert(0, self.filename)
        lint.Run(args)

    def parse_errors(self):
        """Parse the output given by PyLint
        """

        errors = {'E': [], 'W': [], 'V': []}
        data = self.output.getvalue()

        sys.exit = self.exit
        sys.stdout = self.stdout

        for error in data.splitlines():
            if '************* Module ' in error:
                _, module = error.split('************* Module ')
                if not module in self.filename:
                    continue
            else:
                offset = None
                try:
                    if numversion >= (1, 0, 0):
                        code, line, offset, message = error.split(':', 3)
                    else:
                        code, line, message = error.split(':', 2)
                except ValueError as exception:
                    logging.debug(
                        'unhandled exception in PyLinter parse_errors '
                        'this is a non fatal error: {0}'.format(exception)
                    )
                    logging.debug(
                        'the error string that raised this exception was: '
                        '{0}, please, report this in the GitHub site'.format(
                            error
                        )
                    )
                    continue

                if numversion < (1, 0, 0):
                    try:
                        line, offset = line.split(',')
                    except ValueError:
                        # seems like some versions (or packagers) of pylint
                        # prior to 1.0.0 adds offset to the output but others
                        # doesn't
                        pass

                errors[self._map_code(code)[0]].append({
                    'line': int(line),
                    'offset': offset,
                    'code': self._map_code(code)[1],
                    'message': '[{0}] {1}'.format(
                        self._map_code(code)[1], message
                    )
                })

        return errors

    def _map_code(self, code):
        """Map the given code to fit Anaconda codes
        """

        mapping = {'C': 'V', 'E': 'E', 'F': 'E', 'I': 'V', 'R': 'W', 'W': 'W'}
        return (mapping[code[0]], code[1:])
blizzrdof77/Sublime-Text-3-Packages
Anaconda/anaconda_lib/linting/anaconda_pylint.py
anaconda_pylint.py
py
3,368
python
en
code
1
github-code
6
14077597352
from lk.utils.config_util import ConfigUtil
from lk.utils.shell_util import run_and_confirm, run, run_and_return_output
from furl import furl

bitbucket = 'bitbucket'
bitbucket_domain = 'bitbucket.org'
github = 'github'
github_domain = 'github.com'


class SourceCodeRepo(object):

    def __init__(self, url=None, service=None, user=None, repo_name=None):
        self._url = url
        self._service = service
        self._user = user
        self._repo_name = repo_name

    @property
    def url(self):
        if self._url:
            return self._url
        else:
            url = 'https://{service_domain}/{user}/{repo}'.format(
                service_domain=self.service_domain,
                user=self.user,
                repo=self.repo_name
            )
            return url

    @property
    def hosting_service_host(self):
        hosting_service_host = self._url.split('/')[2]
        return hosting_service_host

    @property
    def hosting_service(self):
        hosting_service = self.hosting_service_host.split('.')[0]
        return hosting_service

    @property
    def user(self):
        if self._user:
            return self._user
        else:
            user = self._url.split('/')[3]
            return user

    @property
    def repo_name(self):
        if self._repo_name:
            return self._repo_name
        else:
            repo_name = self._url.split('/')[4]
            return repo_name

    @property
    def clone_command(self):
        # https://github.com/lk-commands/default
        # git@github.com:lk-commands/default.git
        # git clone git@github.com:eyalev/lk-commands.git

        # clone_command = 'git clone git@{hosting_service_host}:{user}/{repo_name}.git'.format(
        # clone_command = 'git clone {repo_url}.git'.format(
        clone_command = 'git clone {git_url}'.format(
            git_url=self.git_url
        )
        return clone_command

    @property
    def git_url(self):
        url = self.url
        if 'github' in url:
            return url

        _furl = furl(url)
        git_url = 'git@{host}:{user}/{repo}.git'.format(
            host=_furl.host,
            user=str(_furl.path).split('/')[1],
            repo=str(_furl.path).split('/')[2]
        )
        return git_url

    def clone(self):
        print('# Cloning lk-repo')
        clone_command = SourceCodeRepo(self.url).clone_command
        command = '{clone_command} {local_repo_path}'.format(
            clone_command=clone_command,
            local_repo_path=self.local_repo_string_path
        )
        run_and_confirm(command)

    @property
    def commands_dir_string_path(self):
        return self.local_repo_string_path + '/commands'

    @property
    def local_repo_string_path(self):
        commands_repo_local_path = '{local_repos_dir}/{repo_service}/{repo_user}/{commands_repo_name}'.format(
            local_repos_dir=ConfigUtil().local_repos_dir,
            repo_service=self.hosting_service,
            repo_user=self.user,
            commands_repo_name=self.repo_name
        )
        return commands_repo_local_path

    @property
    def service(self):
        if self._service:
            return self._service
        if 'bitbucket.org' in self.url:
            return bitbucket
        elif 'github.com' in self.url:
            return github
        else:
            raise NotImplementedError

    @property
    def bitbucket(self):
        return self.service == bitbucket

    @property
    def github(self):
        return self.service == github

    @property
    def service_domain(self):
        if self.bitbucket:
            return bitbucket_domain
        if self.github:
            return github_domain
        else:
            raise NotImplementedError

    def remote_file_source(self, file_name):
        if self.bitbucket:
            shell_command = 'git archive --remote=git@{service_domain}:{user}/{repo}.git HEAD commands/{file_name} | tar -x -O'.format(
                service_domain=self.service_domain,
                user=self.user,
                repo=self.repo_name,
                file_name=file_name
            )
            output = run_and_return_output(shell_command)
            return output
        elif self.github:
            raise NotImplementedError
        else:
            raise NotImplementedError
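# Illustrative usage sketch (not part of the original file): assumes the lk package
# and furl are importable; the repository URL below is only an example value.
if __name__ == "__main__":
    repo = SourceCodeRepo(url='https://github.com/lk-commands/default')
    print(repo.user)           # lk-commands
    print(repo.repo_name)      # default
    print(repo.clone_command)  # git clone https://github.com/lk-commands/default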
eyalev/lk
lk/classes/source_code_repo.py
source_code_repo.py
py
4,401
python
en
code
0
github-code
6
3439809361
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

from collections import deque


class Solution(object):
    def widthOfBinaryTree(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if root == None:
            return 0
        maxWidth = 1
        q = deque([(0, root)])
        while len(q) != 0:
            cnt = len(q)
            start = q[0]
            end = q[-1]
            width = end[0] - start[0] + 1
            maxWidth = max(maxWidth, width)
            while cnt > 0:
                cnt -= 1
                idx, node = q.popleft()
                if node.left != None:
                    q.append((idx * 2, node.left))
                if node.right != None:
                    q.append((idx * 2 + 1, node.right))
        return maxWidth
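# Illustrative check (not part of the original file): builds the tree
# [1, 3, 2, 5, 3, None, 9] with a minimal stand-in TreeNode and expects a width of 4.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left, root.right = TreeNode(3), TreeNode(2)
    root.left.left, root.left.right = TreeNode(5), TreeNode(3)
    root.right.right = TreeNode(9)
    print(Solution().widthOfBinaryTree(root))  # expected: 4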
cuiy0006/Algorithms
leetcode/662. Maximum Width of Binary Tree.py
662. Maximum Width of Binary Tree.py
py
957
python
en
code
0
github-code
6
5024929632
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
import requests, json
from app_comments.models import RedditPost, Comment
from annoying.functions import get_object_or_None
from app_comments.lib.comments import CommentBuilder, RedditPostBuilder
from bs4 import BeautifulSoup
from app_comments.management.commands.get_comments import PostGetter
from time import sleep


class Command(BaseCommand):
    args = ""
    help = ""

    def add_arguments(s, parser):
        parser.add_argument('--url', nargs='+', type=str)

    def process_args(s, options):
        url = options['url'][0] if options['url'] else None
        return url
        # orig_url = url[:]
        # if url:
        #     if url[-5:] != '.json':
        #         url = url[:-1] + '.json'
        # return url, orig_url

    def handle(s, *args, **options):
        #url = s.process_args(options)
        #print(url)

        url = 'https://www.reddit.com/top.json?sort=top&t=year'
        base_url = 'https://www.reddit.com'

        resp = requests.get(url)
        if resp.status_code == 200:
            text_json = resp.text
        else:
            print(resp.text)
            return

        page_json = json.loads(text_json)

        for post_info in page_json['data']['children']:
            comments_url = base_url + post_info['data']['permalink']
            comments_json_url = comments_url[:-1]+'.json'

            pg = PostGetter()
            resp = pg.get(comments_json_url, comments_url)
            print(resp, 1)

            if resp == 'bad http':
                sleep_time = 5
                print('sleeping (%s)...' % sleep_time)
                sleep(sleep_time)
                resp = pg.get(comments_json_url, comments_url)
                if resp == 'bad http':
                    print('sleeping (%s)...' % sleep_time)
                    sleep(sleep_time)
                    resp = pg.get(comments_json_url, comments_url)
                    if resp == 'bad http':
                        print('sleeping (%s)...' % sleep_time)
                        sleep(sleep_time)

            # cmd_data = {'--url': comments_url}
            # call_command('get_comments', **cmd_data)

            # break
daviddennis/comments
app_comments/management/commands/get_links.py
get_links.py
py
2,264
python
en
code
0
github-code
6
72014598908
import json
import sys
import argparse
sys.path.append("../evaluation")
from evaluate import tuple_f1, convert_opinion_to_tuple


def get_args():
    """
    Helper function to get the gold json,
    predictions json and negation jsons
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("gold")
    parser.add_argument("predictions")
    parser.add_argument("metadata")
    args = parser.parse_args()
    return args


def open_json(json_file):
    """
    Helper function to open the json files
    """
    with open(json_file) as o:
        file = json.load(o)
    sent_dict = {sent["sent_id"]: sent for sent in file}
    sent_keys = set(sent_dict.keys())
    return sent_keys, sent_dict


def main():
    args = get_args()

    with open(args.metadata) as o:
        metadata = json.load(o)

    test_domains = {}

    gold_keys, gold = open_json(args.gold)
    pred_keys, pred = open_json(args.predictions)

    # get the domains found in the test data
    for sent_id in gold_keys:
        domain = metadata[sent_id[:6]]["category"]
        if domain not in test_domains:
            test_domains[domain] = [sent_id]
        else:
            test_domains[domain].append(sent_id)

    # print the domains in descending order
    for key, value in sorted(test_domains.items(), key=lambda kv: len(kv[1])):
        print("{}: \t{}".format(key, len(value)))
    print()
    print()

    # get the sentiment graph F1 for each domain
    for domain, sent_ids in sorted(test_domains.items(), key=lambda kv: len(kv[1])):
        domain_gold = dict([(sent_id, convert_opinion_to_tuple(gold[sent_id])) for sent_id in sent_ids])
        domain_pred = dict([(sent_id, convert_opinion_to_tuple(pred[sent_id])) for sent_id in sent_ids])
        f1 = tuple_f1(domain_gold, domain_pred)
        print("{0}: {1:.3f}".format(domain, f1))


if __name__ == "__main__":
    main()
jerbarnes/semeval22_structured_sentiment
analysis/domain_analysis.py
domain_analysis.py
py
1,950
python
en
code
71
github-code
6
810789082
from __future__ import division
import numpy as np
from scipy import sparse
from sklearn.metrics.pairwise import euclidean_distances
import time


# Produce grid points for a 2d grayscale image
def get_points_2d(image, res):
    rows, columns = image.shape
    grid_x, grid_y = np.mgrid[0:columns:res, 0:rows:res]
    grid = np.array((grid_x.flatten(), grid_y.flatten())).T
    return grid


# Produce grid points for a 3d grayscale image
def get_points_3d(image, res):
    rows, columns, z = image.shape
    grid_z, grid_x, grid_y = np.mgrid[0:z:res, 0:columns:res, 0:rows:res]
    grid = np.array((grid_x.flatten(), grid_y.flatten(), grid_z.flatten())).T
    return grid


# Wendland kernel as a function of r = norm(x-y)/c_sup
def dist_kernel(r):
    return max((1-r, 0))**4 * (4*r + 1)


def blowup_S(S, dim):
    (m, n) = S.shape
    if dim == 3:
        S_full = sparse.lil_matrix((3 * m, 3 * n), dtype = np.float32)
        #S_full = np.zeros((3 * m, 3 * n))
        S_full[0::3, 0::3] = S
        S_full[1::3, 1::3] = S
        S_full[2::3, 2::3] = S
    else:
        S_full = np.zeros((2 * m, 2 * n))
        S_full[0::2, 0::2] = S
        S_full[1::2, 1::2] = S
    return S_full.tocsc()


# Create evaluation matrix given kernel centers (grid points), evaluation points
# and kernel support
def evaluation_matrix(kernels, points, c_sup, dim):
    dim = kernels.shape[1]
    vect_kernel = np.vectorize(dist_kernel)
    start = time.time()
    S = euclidean_distances(points, kernels) / c_sup
    #print("VEC -- euc dist ", (time.time() - start) / 60)
    # Mark entries with 0 kernel support
    start = time.time()
    S[np.where(S > 1)] = -1
    non_zero_indices = np.where(S >= 0)
    #print("VEC -- S[np.where(S > 1)] and np.where(S>=0) ", (time.time() - start) / 60)
    # Evaluate kernel at points within support
    start = time.time()
    S[non_zero_indices] = vect_kernel(S[non_zero_indices])
    #print("VEC -- S[non_zero] = vect_kernel ", (time.time() - start) / 60)
    start = time.time()
    S[np.where(S == -1)] = 0
    #print("VEC -- S[np.where(S == -1)] = 0 ", (time.time() - start) / 60)
    start = time.time()
    #full_S = blowup_S_old(S, dim)
    #print("VEC -- blowup ", (time.time() - start) / 60)
    return sparse.csc_matrix(S)


def evaluation_matrix_blowup(kernels, points, c_sup, dim):
    dim = kernels.shape[1]
    vect_kernel = np.vectorize(dist_kernel)
    start = time.time()
    S = euclidean_distances(points, kernels) / c_sup
    #print("VEC -- euc dist ", (time.time() - start) / 60)
    # Mark entries with 0 kernel support
    start = time.time()
    S[np.where(S > 1)] = -1
    non_zero_indices = np.where(S >= 0)
    #print("VEC -- S[np.where(S > 1)] and np.where(S>=0) ", (time.time() - start) / 60)
    # Evaluate kernel at points within support
    start = time.time()
    S[non_zero_indices] = vect_kernel(S[non_zero_indices])
    #print("VEC -- S[non_zero] = vect_kernel ", (time.time() - start) / 60)
    start = time.time()
    S[np.where(S == -1)] = 0
    #print("VEC -- S[np.where(S == -1)] = 0 ", (time.time() - start) / 60)
    start = time.time()
    full_S = blowup_S(S, dim)
    #print("VEC -- blowup ", (time.time() - start) / 60)
    return full_S


# Create velocity field by weighing kernels by alphas
def make_V(S, alpha, d):
    alpha = alpha.flatten()
    if (S.shape[1] == alpha.shape[0]):
        lmda = S.dot(alpha)
        return lmda.reshape(-1, d)
    else:
        alpha = alpha.reshape(-1, d)
        return S.dot(alpha)
polaschwoebel/NonLinearDataAugmentation
vector_fields.py
vector_fields.py
py
3,499
python
en
code
2
github-code
6
8092333942
from vector import Vector
import turtle

scale = 40


def print_vector(vector, color):
    turtle.pencolor(color)
    turtle.penup()
    turtle.home()
    turtle.pendown()
    turtle.goto(vector.elements[0]*scale, vector.elements[1]*scale)


def print_system(x, y):
    turtle.home()
    for i in range(x):
        turtle.dot(3)
        turtle.write(i, align='right')
        turtle.setx(scale*(i+1))
    turtle.home()
    for j in range(y):
        turtle.dot(3)
        turtle.write(j, align='right')
        turtle.sety(scale*(j+1))


turtle.speed(10)
print_system(10, 10)

vector1 = Vector([3, 2])
print_vector(vector1, 'red')

vector2 = Vector([1, -4])
print_vector(vector2, 'blue')

vector1.add_vector(vector2)
print_vector(vector1, 'green')

turtle.done()
sashokbg/python-exercises
vector/draw.py
draw.py
py
760
python
en
code
0
github-code
6
21836154529
import sys
sys.stdin = open('../input.txt', 'r')

N = int(input())
numbers = list(map(int, sys.stdin.readline().split()))

min_num, max_num = 1000000, -1000000
for number in numbers:
    if number < min_num:
        min_num = number
    if number > max_num:
        max_num = number

print(min_num, max_num)
liza0525/algorithm-study
BOJ/boj_10818_min_max.py
boj_10818_min_max.py
py
308
python
en
code
0
github-code
6
72683621307
from matplotlib import pyplot as plt
from numpy import loadtxt, zeros
from skimage.measure import label
from os import path


if __name__ == '__main__':
    current_dir = path.dirname(__file__)
    file_names = ['mat_p0.70.dat', 'mat_p0.72.dat']

    for file_name in file_names:
        file_path = path.join(current_dir, file_name)
        lattice = loadtxt(file_path)

        # change connectivity to 2 if you want to consider Moore neighborhood
        labelled_lattice = label(lattice, background=0, connectivity=1)
        num_clusters = labelled_lattice.max()

        cluster_sizes = []
        for cluster_id in range(1, num_clusters + 1):
            cluster_sizes.append((labelled_lattice == cluster_id).sum())

        cluster_size_distribution = zeros(max(cluster_sizes))
        for cluster_size in cluster_sizes:
            cluster_size_distribution[cluster_size - 1] += 1

        inverse_cdf = zeros(max(cluster_sizes))
        for cluster_size in range(max(cluster_sizes)):
            inverse_cdf[cluster_size] = (cluster_size_distribution[cluster_size:]).sum()
        inverse_cdf /= sum(cluster_size_distribution)

        plt.figure(figsize=(11, 5))
        plt.subplot(1, 2, 1)
        plt.title(f"Lattice from {file_name}")
        plt.imshow(lattice)
        plt.subplot(1, 2, 2)
        plt.title("Cluster Size Distribution")
        plt.xlabel("Cluster Size s")
        plt.ylabel("P(S > s)")
        plt.loglog(range(1, max(cluster_sizes) + 1), inverse_cdf, 'bo')
        plt.show()
tee-lab/patchy-ecosterics
temp_actions/CSD/plotter.py
plotter.py
py
1,513
python
en
code
2
github-code
6
30301888432
import os
import sys
import unittest
from pathlib import Path

import coverage
from mpi4py import MPI


def main(path, parallel):
    cov = coverage.coverage(
        branch=True,
        include=str(Path(path).parent) + '/ignis/executor/*.py',
    )
    cov.start()
    import ignis.executor.core.ILog as Ilog
    Ilog.enable(False)
    tests = unittest.TestLoader().discover(path + '/executor/core', pattern='*Test.py')
    if parallel:
        tests.addTests(unittest.TestLoader().discover(path + '/executor/core', pattern='IMpiTest2.py'))
    else:
        print("WARNING: mpi test skipped", file=sys.stderr)
    result = unittest.TextTestRunner(verbosity=2, failfast=True).run(tests)
    cov.stop()
    cov.save()
    MPI.COMM_WORLD.Barrier()
    if result.wasSuccessful() and result.testsRun > 0 and MPI.COMM_WORLD.Get_rank() == 0:
        if parallel:
            others = ["../np" + str(i) + "/.coverage" for i in range(1, MPI.COMM_WORLD.Get_size())]
            cov.combine(data_paths=others, strict=True)
        covdir = os.path.join(os.getcwd(), "ignis-python-coverage")
        print('Coverage: (HTML version: file://%s/index.html)' % covdir, file=sys.stderr)
        cov.report(file=sys.stderr)
        cov.html_report(directory=covdir)


if __name__ == '__main__':
    rank = MPI.COMM_WORLD.Get_rank()
    parallel = MPI.COMM_WORLD.Get_size() > 1
    path = os.getcwd()
    Path("debug").mkdir(parents=True, exist_ok=True)
    os.chdir("debug")
    if parallel:
        wd = "np" + str(rank)
        Path(wd).mkdir(parents=True, exist_ok=True)
        os.chdir(wd)
        if rank > 0:
            log = open("log.txt", 'w')
            sys.stderr = log
            sys.stdout = log
    main(path, parallel)
    if rank > 0:
        sys.stderr.close()
andreasolla/core-python
ignis_test/Main.py
Main.py
py
1,575
python
en
code
1
github-code
6
13954467913
'''Ask the user to enter an integer and report whether it is prime or not,
using a boolean function to decide.'''
import os

if os.name == "posix":
    os.system('clear')
else:
    os.system('cls')


def primos(X):
    if X < 2:
        return False
    else:
        for i in range(2, X):
            modulo = X % i
            if (modulo == 0):
                return False
        return True


res = primos(int(input("Enter the value of X\n")))
print(res)
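# Illustrative checks (not part of the original file): a few known values for primos().
assert primos(2) is True
assert primos(9) is False
assert primos(13) is True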
eSwayyy/UCM-projects
python/catedra/lab_funciones/ejercicio6.py
ejercicio6.py
py
463
python
es
code
1
github-code
6
7091903997
import database
from datetime import datetime
import db_pyMySQL

conn = database.connection

# Adding "user" accounts: user passwords are not hashed here because two different
# languages are in use; if they were hashed, NodeJS would not be able to decode them,
# so login would no longer work.

# INSERT:
# Add a customer account:
def insert_user(name, email, password, phone, address):
    with conn.cursor() as cur:
        mk = password + database.mysecret_key
        # pas = mk.encode()
        sql = '''
            INSERT INTO khachhang(tenkh, email, matkhau, sodienthoai, diachi)
            VALUES (%s, %s, %s, %s, %s)
        '''
        cur.execute(sql, (name, email, mk, phone, address))
        conn.commit()

# Add an "admin" account:
def insert_admin(admin, matkhau, ten, diachi, sdt, maquyen):
    with conn.cursor() as cur:
        mk = matkhau + database.mysecret_key
        # pas = database.cipher.encrypt(matkhau)  # Encrypt the password
        sql = '''
            INSERT INTO admin(admin, matkhau, tennv, diachi, sodienthoai, maquyen)
            VALUES (%s, %s, %s, %s, %s, %s)
        '''
        cur.execute(sql, (admin, mk, ten, diachi, sdt, maquyen))
        conn.commit()

# Add a product "category":
def insert_category(ma, ten):
    with conn.cursor() as cur:
        sql = '''
            INSERT INTO danhmuc(madm, tendm)
            VALUES (%s, %s)
        '''
        cur.execute(sql, (ma, ten))
        conn.commit()

# Add a "producer":
def insert_producer(ma, ten, xuatxu):
    with conn.cursor() as cur:
        sql = '''
            INSERT INTO nhasx(mansx, tennsx, xuatxu)
            VALUES (%s, %s, %s)
        '''
        cur.execute(sql, (ma, ten, xuatxu))
        conn.commit()

# Add a product "type":
def insert_type(type_id, name):
    with conn.cursor() as cur:
        sql = '''
            INSERT INTO loaisp(maloai, tenloai)
            VALUES (%s, %s)
        '''
        cur.execute(sql, (type_id, name))
        conn.commit()

# Add a "product":
def insert_product(code, name, price, reduced_price, amount, img, producer_id, type_id):
    with conn.cursor() as cur:
        sql = '''
            INSERT INTO sanpham(code, tensp, gia, giamgia, soluong, hinh, mansx, maloai)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        '''
        cur.execute(sql, (code, name, price, reduced_price, amount, img, producer_id, type_id))
        conn.commit()

# Add a new "permission / role":
def insert_permission(code, name):
    with conn.cursor() as cur:
        sql = '''
            INSERT INTO quyen(maquyen, Ten)
            VALUES (%s, %s)
        '''
        cur.execute(sql, (code, name))
        conn.commit()

# Add a new "status":
def insert_status(ten, trangthai):
    with conn.cursor() as cursor:
        sql = '''
            INSERT INTO trangthai(tentt, trangthai)
            VALUES (%s, %s)
        '''
        cursor.execute(sql, (ten, trangthai))
        conn.commit()

# UPDATE:
# Update an admin account profile:
def update_profile_admin(email, name, address, phone, permission, admin_id):
    with conn.cursor() as cur:
        sql = '''
            UPDATE admin
            SET admin = %s, tennv = %s, diachi = %s, sodienthoai = %s, maquyen = %s
            WHERE manv = %s
        '''
        cur.execute(sql, (email, name, address, phone, permission, admin_id))
        conn.commit()
        return 1

# Update an admin's password:
def update_password_admin(pas, admin_id):
    with conn.cursor() as cur:
        password = pas + database.mysecret_key
        sql = '''
            UPDATE admin
            SET matkhau = %s
            WHERE manv = %s
        '''
        cur.execute(sql, (password, admin_id,))
        conn.commit()
        return 1

# Update a customer account profile:
def update_profile_user(name, email, phone, address, user_id):
    with conn.cursor() as cur:
        sql = '''
            UPDATE khachhang
            SET tenkh = %s, email = %s, sodienthoai = %s, diachi = %s
            WHERE makh = %s
        '''
        cur.execute(sql, (name, email, phone, address, user_id))
        conn.commit()
        return 1

# Update a customer's password:
def update_password_user(pas, user_id):
    with conn.cursor() as cur:
        password = pas + database.mysecret_key
        sql = '''
            UPDATE khachhang
            SET matkhau = %s
            WHERE makh = %s
        '''
        cur.execute(sql, (password, user_id,))
        conn.commit()
        return 1

# Update a category:
def update_category(name, category_id):
    with conn.cursor() as cur:
        sql = '''
            UPDATE danhmuc
            SET tendm = %s
            WHERE madm = %s
        '''
        cur.execute(sql, (name, category_id,))
        conn.commit()
        return 1

# Update a type:
def update_type(name, type_id):
    with conn.cursor() as cur:
        sql = '''
            UPDATE loaisp
            SET tenloai = %s
            WHERE maloai = %s
        '''
        cur.execute(sql, (name, type_id,))
        conn.commit()
        return 1

# Update a producer:
def update_producer(name, origin, producer_id):
    with conn.cursor() as cur:
        sql = '''
            UPDATE nhasx
            SET tennsx = %s, xuatxu = %s
            WHERE mansx = %s
        '''
        cur.execute(sql, (name, origin, producer_id,))
        conn.commit()
        return 1

# Update a permission / role:
def update_permission(name, permission_id):
    with conn.cursor() as cur:
        sql = '''
            UPDATE quyen
            SET Ten = %s
            WHERE maquyen = %s
        '''
        cur.execute(sql, (name, permission_id,))
        conn.commit()
        return 1

# Update a status:
def update_status(name, status_id):
    with conn.cursor() as cur:
        sql = '''
            UPDATE trangthai
            SET tentt = %s
            WHERE trangthai = %s
        '''
        cur.execute(sql, (name, status_id,))
        conn.commit()
        return 1

# Update a product:
def update_product(code, name, price, reduced_price, amount, img, producer_id, type_id, product_id):
    with conn.cursor() as cur:
        sql = '''
            UPDATE sanpham
            SET code = %s, tensp = %s, gia = %s, giamgia = %s, soluong = %s, hinh = %s, mansx = %s, maloai = %s
            WHERE masp = %s
        '''
        cur.execute(sql, (code, name, price, reduced_price, amount, img, producer_id, type_id, product_id))
        conn.commit()
        return 1

# Customer-facing functions.
# Add an order:
def insert_order(user_id, total, product_id, product_name, price, amount):
    try:
        with conn.cursor() as cur:
            order_date = datetime.today()
            sql_order = '''
                INSERT INTO donhang(makh, tong, ngaydat)
                VALUES (%s, %s, %s);
            '''
            val_order = (user_id, total, order_date)
            sql_orderID = "SELECT LAST_INSERT_ID() as LastID;"
            sql_detailOrder = '''
                INSERT INTO chitietdh(masp, tensp, gia, soluong, madonhang)
                VALUES (%s, %s, %s, %s, %s);
            '''
            arrayProduct = []
            try:
                cur.execute(sql_order, val_order)
                conn.commit()
                cur.execute(sql_orderID)
                lastId = cur.fetchone()
                order_id = lastId['LastID']  # Get the id of the order that was just created.
                for i in arrayProduct:
                    code = i['masp']
                    name = i['tensp']
                    prices = i['gia']
                    amounts = i['soluong']
                    cur.execute(sql_detailOrder, (code, name, prices, amounts, order_id))
                    conn.commit()
            except:
                conn.rollback()
    finally:
        # Close the DB connection.
        conn.close()

# Update an order: an order can only be edited while its status is still 'pending';
# otherwise the customer is not allowed to edit it.
def update_order(amount, order_id):
    with conn.cursor() as cur:
        sql = "SELECT * FROM donhang WHERE madonhang = %s"
        cur.execute(sql, (order_id,))
        order = cur.fetchone()
        product_id = order['masp']
        # Look up the product price:
        sql1 = "SELECT gia FROM sanpham WHERE masp = %s"
        cur.execute(sql1, (product_id,))
        gia = cur.fetchone()
        price = amount * gia
        if order['trangthai'] == 0:  # Check the order status.
            sql = '''
                UPDATE donhang
                SET soluong = %s, gia = %s
                WHERE madonhang = %s
            '''
            cur.execute(sql, (amount, price, order_id,))
            conn.commit()
            return 1
        else:
            # The order has already been approved and cannot be edited.
            return -1
letrinhan1509/FashionShop
api_admin/model_insert.py
model_insert.py
py
8,813
python
vi
code
0
github-code
6
21916878669
#!/usr/bin/env python2
import logging
import os
import shutil
import tempfile

from test_utils import TESTS_DIR, qsym, check_testcase

SCHEDULE_DIR = os.path.join(TESTS_DIR, "schedule")
logging.getLogger('qsym.Executor').setLevel(logging.DEBUG)


def get_testcases(exe, bitmap, input_binary):
    output_dir = tempfile.mkdtemp(prefix="qsym-")
    input_file = tempfile.NamedTemporaryFile(prefix="qsym-", delete=False).name
    new_inputs = []

    with open(input_file, "wb") as f:
        f.write(input_binary)

    try:
        q = qsym.Executor([exe], input_file, output_dir, bitmap=bitmap)
        q.run()
        for path in q.get_testcases():
            with open(path, "rb") as f:
                data = f.read()
            new_inputs.append(data)
        return new_inputs
    finally:
        shutil.rmtree(output_dir)
        os.unlink(input_file)
    return None


def get_seeds(target_dir):
    seeds = []
    inputs_dir = os.path.join(target_dir, "inputs")
    for name in os.listdir(inputs_dir):
        path = os.path.join(inputs_dir, name)
        with open(path, "rb") as f:
            data = f.read()
        seeds.append(data)
    return seeds


def get_all_testcases(target, max_iter=30):
    target_dir = os.path.join(SCHEDULE_DIR, target)
    exe = os.path.join(target_dir, "main")
    inputs = get_seeds(target_dir)
    processed = []
    bitmap = tempfile.NamedTemporaryFile(prefix="qsym-", delete=False).name

    try:
        for i in xrange(max_iter):
            if not inputs:
                break
            input_binary = inputs.pop()
            new_inputs = get_testcases(exe, bitmap, input_binary)
            assert new_inputs is not None
            inputs.extend(new_inputs)
            processed.append(input_binary)
        return processed
    finally:
        os.unlink(bitmap)


def check_testcases(exe, testcases):
    input_file = tempfile.NamedTemporaryFile(prefix="qsym-", delete=False).name
    try:
        for testcase in testcases:
            if check_testcase(exe, testcase):
                return True
    finally:
        os.unlink(input_file)
    return False


def test_dup():
    testcases = get_all_testcases("dup")
    # default + 0xdeadbeef
    assert len(testcases) == 2
sslab-gatech/qsym
tests/test_schedule.py
test_schedule.py
py
2,236
python
en
code
615
github-code
6
72532823229
# pylint: disable=protected-access # pylint: disable=redefined-outer-name # pylint: disable=too-many-arguments # pylint: disable=unused-argument # pylint: disable=unused-variable from typing import Any from urllib.parse import parse_qs import pytest from aiohttp.test_utils import make_mocked_request from models_library.utils.pydantic_tools_extension import parse_obj_or_none from pydantic import ByteSize, parse_obj_as from servicelib.aiohttp.requests_validation import parse_request_query_parameters_as from simcore_service_webserver.studies_dispatcher._models import ( FileParams, ServiceParams, ) from simcore_service_webserver.studies_dispatcher._redirects_handlers import ( FileQueryParams, ServiceAndFileParams, ) from yarl import URL _SIZEBYTES = parse_obj_as(ByteSize, "3MiB") # SEE https://github.com/ITISFoundation/osparc-simcore/issues/3951#issuecomment-1489992645 # AWS download links have query arg _DOWNLOAD_LINK = "https://discover-use1.s3.amazonaws.com/23/2/files/dataset_description.xlsx?AWSAccessKeyId=AKIAQNJEWKCFAOLGQTY6&Signature=K229A0CE5Z5OU2PRi2cfrfgLLEw%3D&x-amz-request-payer=requester&Expires=1605545606" _DOWNLOAD_LINK1 = "https://prod-discover-publish-use1.s3.amazonaws.com/44/2/files/code/model_validation.ipynb?response-content-type=application%2Foctet-stream&AWSAccessKeyId=AKIAVPHN3KJHIM77P4OY&Signature=WPBOqEyTnUIKfxRFaC2YnyO85XI%3D&x-amz-request-payer=requester&Expires=1680171597" _DOWNLOAD_LINK2 = "https://raw.githubusercontent.com/pcrespov/osparc-sample-studies/master/files%20samples/sample.ipynb" _DOWNLOAD_LINK3 = ( "https://raw.githubusercontent.com/rawgraphs/raw/master/data/orchestra.csv" ) @pytest.mark.parametrize( "url_in,expected_download_link", [ ( f'{URL("http://localhost:9081").with_path("/view").with_query(file_type="CSV", viewer_key="simcore/services/comp/foo", viewer_version="1.0.0", file_size="300", file_name="orchestra.csv", download_link=_DOWNLOAD_LINK3)}', _DOWNLOAD_LINK3, ), ( f'{URL("http://127.0.0.1:9081").with_path("/view").with_query(file_type="IPYNB", viewer_key="simcore/services/dynamic/jupyter-octave-python-math", viewer_version="1.0.0", file_size="300", file_name="sample.ipynb", download_link=_DOWNLOAD_LINK2)}', _DOWNLOAD_LINK2, ), ( f'{URL("https://123.123.0.1:9000").with_path("/view").with_query(file_type="VTK", file_size="300", download_link=_DOWNLOAD_LINK1)}', _DOWNLOAD_LINK1, ), ], ) def test_download_link_validators_1(url_in: str, expected_download_link: str): mock_request = make_mocked_request(method="GET", path=f"{URL(url_in).relative()}") params = parse_request_query_parameters_as( ServiceAndFileParams | FileQueryParams, mock_request ) assert f"{params.download_link}" == expected_download_link @pytest.fixture def file_and_service_params() -> dict[str, Any]: return dict( file_name="dataset_description.slsx", file_size=_SIZEBYTES, file_type="MSExcel", viewer_key="simcore/services/dynamic/fooo", viewer_version="1.0.0", download_link=_DOWNLOAD_LINK, ) def test_download_link_validators_2(file_and_service_params: dict[str, Any]): params = ServiceAndFileParams.parse_obj(file_and_service_params) assert params.download_link assert params.download_link.host and params.download_link.host.endswith( "s3.amazonaws.com" ) assert params.download_link.host_type == "domain" query = parse_qs(params.download_link.query) assert {"AWSAccessKeyId", "Signature", "Expires", "x-amz-request-payer"} == set( query.keys() ) def test_file_and_service_params(file_and_service_params: dict[str, Any]): request_params: dict[str, Any] = file_and_service_params file_params = 
parse_obj_or_none(FileParams, request_params) assert file_params service_params = parse_obj_or_none(ServiceParams, request_params) assert service_params file_and_service_params = parse_obj_or_none( ServiceAndFileParams | FileParams | ServiceParams, request_params ) assert isinstance(file_and_service_params, ServiceAndFileParams) def test_file_only_params(): request_params = dict( file_name="dataset_description.slsx", file_size=_SIZEBYTES, file_type="MSExcel", download_link=_DOWNLOAD_LINK, ) file_params = parse_obj_or_none(FileParams, request_params) assert file_params service_params = parse_obj_or_none(ServiceParams, request_params) assert not service_params file_and_service_params = parse_obj_or_none( ServiceAndFileParams | FileParams | ServiceParams, request_params ) assert isinstance(file_and_service_params, FileParams) def test_service_only_params(): request_params = dict( viewer_key="simcore/services/dynamic/fooo", viewer_version="1.0.0", ) file_params = parse_obj_or_none(FileParams, request_params) assert not file_params service_params = parse_obj_or_none(ServiceParams, request_params) assert service_params file_and_service_params = parse_obj_or_none( ServiceAndFileParams | FileParams | ServiceParams, request_params ) assert isinstance(file_and_service_params, ServiceParams)
ITISFoundation/osparc-simcore
services/web/server/tests/unit/isolated/test_studies_dispatcher_models.py
test_studies_dispatcher_models.py
py
5,342
python
en
code
35
github-code
6
5619484190
# Backend function in order to the system # check the credentials of users inside the system # from mainGUI import * # from mainGUI import adminMenu, customerMenu import os import tkinter as tk def check_credentials(identity, password, choice, admin_access): # checks credentials of admin/customer and returns True or False folder_name = "./database/Admin" if (choice == 1) else "./database/Customer" file_name = "/adminDatabase.sqlite3" if (choice == 1) else "/customerDatabase.sqlite3" try: os.makedirs(folder_name, exist_ok=True) database = open(folder_name + file_name, "r") except FileNotFoundError: print("#", folder_name[2:], "database doesn't exists!\n# New", folder_name[2:], "database created automatically.") database = open(folder_name + file_name, "a") if choice == 1: database.write("admin\nadmin\n*\n") else: is_credentials_correct = False for line in database: id_fetched = line.replace("\n", "") password_fetched = database.__next__().replace("\n", "") if id_fetched == identity: if ((password == "DO_NOT_CHECK_ADMIN" and choice == 1 and admin_access == False) or ( password == "DO_NOT_CHECK" and choice == 2 and admin_access == True) or password_fetched == password): is_credentials_correct = True database.close() return True if choice == 1: # skips unnecessary lines in admin database. database.__next__() # skipping line else: # skips unnecessary lines in customer database. for index in range(10): fetched_line = database.readline() if fetched_line is not None: continue else: break if is_credentials_correct: print("Success!") else: print("Failure!") database.close() return False # check weather the customer account is valid or not def is_valid(customer_account_number): try: customer_database = open("./database/Customer/customerDatabase.sqlite3") except FileNotFoundError: os.makedirs("./database/Customer/customerDatabase.sqlite3", exist_ok=True) print("# Customer database doesn't exists!\n# New Customer database created automatically.") customer_database = open("./database/Customer/customerDatabase.sqlite3", "a") else: # if customer account number is already allocated then this will return false. otherwise true. if check_credentials(customer_account_number, "DO_NOT_CHECK", 2, True): return False else: return True customer_database.close() # Check the phone number is valid or not / weather it is less than 10 digit def is_valid_mobile(mobile_number): if mobile_number.__len__() == 10 and mobile_number.isnumeric(): return True else: return False # Append or open the database def append_data(database_path, data): customer_database = open(database_path, "a") customer_database.write(data) # Display details of customer accounts def display_account_summary(identity, choice): # choice 1 for full summary; choice 2 for only account balance. 
flag = 0 customer_database = open("./database/Customer/customerDatabase.sqlite3") output_message = "" for line in customer_database: if identity == line.replace("\n", ""): if choice == 1: output_message += "Account number : " + line.replace("\n", "") + "\n" customer_database.__next__() # skipping pin output_message += "Current balance : " + customer_database.__next__().replace("\n", "") + "\n" output_message += "Date of account creation : " + customer_database.__next__().replace("\n", "") + "\n" output_message += "Name of account holder : " + customer_database.__next__().replace("\n", "") + "\n" output_message += "Type of account : " + customer_database.__next__().replace("\n", "") + "\n" output_message += "Date of Birth : " + customer_database.__next__().replace("\n", "") + "\n" output_message += "Mobile number : " + customer_database.__next__().replace("\n", "") + "\n" output_message += "Gender : " + customer_database.__next__().replace("\n", "") + "\n" output_message += "Nationality : " + customer_database.__next__().replace("\n", "") + "\n" output_message += "KYC : " + customer_database.__next__().replace("\n", "") + "\n" else: customer_database.readline() # skipped pin output_message += "Current balance : " + customer_database.readline().replace("\n", "") + "\n" flag = 1 break else: for index in range(11): fetched_line = customer_database.readline() if fetched_line is not None: continue else: break if flag == 0: print("\n# No account associated with the entered account number exists! #") return output_message # Transaction function to check amount def transaction(identity, amount, choice): # choice 1 for deposit; choice 2 for withdraw customer_database = open("./database/Customer/customerDatabase.sqlite3") data_collector = "" balance = 0 for line in customer_database: if identity == line.replace("\n", ""): data_collector += line # ID data_collector += customer_database.readline() # PIN balance = float(customer_database.readline().replace("\n", "")) if choice == 2 and balance - amount < 2000: # Minimum balance 2000 return -1 else: if choice == 1: balance += amount else: balance -= amount data_collector += str(balance) + "\n" for index in range(9): data_collector += customer_database.readline() else: data_collector += line for index in range(11): data_collector += customer_database.readline() customer_database.close() customer_database = open("./database/Customer/customerDatabase.sqlite3", "w") customer_database.write(data_collector) return balance # Error message function class Error: def __init__(self, window=None): global master master = window window.geometry("411x117+485+248") window.minsize(120, 1) window.maxsize(1370, 749) window.resizable(0, 0) window.title("Error") window.configure(background="#f2f3f4") global Label2 self.Button1 = tk.Button(window, background="#d3d8dc", borderwidth="1", disabledforeground="#a3a3a3", font="-family {Segoe UI} -size 9", foreground="#000000", highlightbackground="#d9d9d9", highlightcolor="black", pady="0", text='''OK''', command=self.goback) self.Button1.place(relx=0.779, rely=0.598, height=24, width=67) global _img0 _img0 = tk.PhotoImage(file="./images/error_image.png") self.Label1 = tk.Label(window, background="#f2f3f4", disabledforeground="#a3a3a3", foreground="#000000", image=_img0, text='''Label''') self.Label1.place(relx=0.024, rely=0.0, height=81, width=84) def setMessage(self, message_shown): Label2 = tk.Label(master, background="#f2f3f4", disabledforeground="#a3a3a3", font="-family {Segoe UI} -size 16", foreground="#000000", 
highlightcolor="#646464646464", text=message_shown) Label2.place(relx=0.210, rely=0.171, height=41, width=214) def goback(self): master.withdraw()
prince749924/banking-system
backend.py
backend.py
py
7,923
python
en
code
0
github-code
6
71817771068
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # mid2sheet.py # Midi-Files -> Sheets for Musicbox (30 notes, starting from F) # (c) 2017 Niklas Kannenberg <[email protected]> and Gunnar J. # Released under the GPL v3 or later, see file "COPYING" # # ToDo # - Use 'pypdf' instead of external 'pdfjam' for PDF merging, avoid latex # (to much dependencies) # # Bugs # - No whitespace in path/to/script allowed # pdfjam and rm will not work, see subprocess.call() # - exits if input/output folder not exists, better create output folder # # # Useful links: # https://mido.readthedocs.io/en/latest/midi_files.html # http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html # http://stackoverflow.com/questions/3444645/merge-pdf-files # https://pythonhosted.org/PyPDF2/ # # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import mido import os import pandas as pd import matplotlib.pyplot as plt import subprocess import datetime # version of this software version = 0.3 # print lot of debug messages? debug = 0 # directories inputdir = os.getcwd()+"/input" # input directory, e.g. "/input" outputdir = os.getcwd()+"/output" # output directory for PDFs # notes and y_mm yBase = 5.5 # y_mm first note yAbst = 58.5 / 29.0 # y_mm between notes yUppr = 70.0 # y_mm whole strip # Plot x8beat = 4.0 # x_mm per 1/8 beat minbeat = 7.9 # minimal playable x-distance for one note xprmax = 250.0 # printable size, A4 Landscape preplt = 8.0 # space for note names on plot, do not change # lut midi-note -> y_mm notemmlut = [ # Note # y_mm # name [ 53, yBase + 0 * yAbst ], # F [ 55, yBase + 1 * yAbst ], # G [ 60, yBase + 2 * yAbst ], # C [ 62, yBase + 3 * yAbst ], # D [ 64, yBase + 4 * yAbst ], # E [ 65, yBase + 5 * yAbst ], # F [ 67, yBase + 6 * yAbst ], # G [ 69, yBase + 7 * yAbst ], # A [ 70, yBase + 8 * yAbst ], # A# [ 71, yBase + 9 * yAbst ], # H [ 72, yBase + 10 * yAbst ], # C [ 73, yBase + 11 * yAbst ], # C# [ 74, yBase + 12 * yAbst ], # D [ 75, yBase + 13 * yAbst ], # D# [ 76, yBase + 14 * yAbst ], # E [ 77, yBase + 15 * yAbst ], # F [ 78, yBase + 16 * yAbst ], # F# [ 79, yBase + 17 * yAbst ], # G [ 80, yBase + 18 * yAbst ], # G# [ 81, yBase + 19 * yAbst ], # A [ 82, yBase + 20 * yAbst ], # A# [ 83, yBase + 21 * yAbst ], # H [ 84, yBase + 22 * yAbst ], # C [ 85, yBase + 23 * yAbst ], # C# [ 86, yBase + 24 * yAbst ], # D [ 87, yBase + 25 * yAbst ], # D# [ 88, yBase + 26 * yAbst ], # E [ 89, yBase + 27 * yAbst ], # F [ 91, yBase + 28 * yAbst ], # G [ 93, yBase + 29 * yAbst ], # A ] print("-> Converting .mid to .pdf for Musicbox - mid2sheet v"+str(version)) print("--------------------------------------------------------") print("Input from Folder: "+inputdir) print("Output to Folder: "+outputdir) # midi note number to y_mm def get_mm(note): retval = -1 for i in range(len(notemmlut)): if (notemmlut[i][0] == note): retval = notemmlut[i][1] return retval # name of midi note number def get_name(note): names = [ 
"C","C#","D","D#","E","F","F#","G","G#","A","A#","H" ] return names[note % 12] # returns 1 if note is to close to last note on same line def get_terr(notes, pos): gap = 9999 for i in range(0,pos): if(notes.note[i] == notes.note[pos]): gap = notes.x[pos] - notes.x[i] if(gap < minbeat): # gap < min_gap return 1 # not playable else: return 0 # OK # mm -> inch (for matplotlib) def mm2in(mm): return mm/10/2.54 # mm to inch # convert one midi file def do_convert(infile, outfile, fname): mid = mido.MidiFile(infile) # the input file now = datetime.datetime.now() # actual time sig_cnt = 0 # counter for signature messages tim_cnt = 0 # counter for timing messages # midi timing ticks per beat ticks_4th = mid.ticks_per_beat ticks_8th = ticks_4th / 2 # data frame for all midi events of melody track datacols = ['time','tdiff','type','track','bytes'] data = pd.DataFrame(columns=datacols) # data frame for note_on events notecols = ['time','note','name', 'x', 'y', 'bar'] notes = pd.DataFrame(columns=notecols) # list all tracks if(debug): print("Tracks : " + str(len(mid.tracks))) for i in range(len(mid.tracks)): track_len = len(mid.tracks[i]) print("Track " + str(i) + " : " + str(track_len) + " events") # extract all messages from all tracks to data frame 'data' for i, track in enumerate(mid.tracks): for msg in track: if(msg.type == "time_signature"): time_signature = msg.dict() numerator = time_signature['numerator'] denominator = time_signature['denominator'] sig_cnt += 1 if(debug): print("Timing : " + str(numerator) + "/" + str(denominator)) if(msg.type == "set_tempo"): set_tempo = msg.dict() tempo = round((500000 / set_tempo['tempo']) * 120, 2) tim_cnt += 1 if(debug): print("Tempo : " + str(tempo) + " bpm") data = data.append({ 'time' : 0, 'tdiff' : msg.time, 'type' : msg.type, 'track' : i, 'bytes' : msg.bytes() }, ignore_index=True) # warnings for tracks, tempo and signature if(len(mid.tracks) != 1): print("-> WARNING: Midi file has " + str(len(mid.tracks)) + " tracks instead of 1") if(sig_cnt != 1): print("-> WARNING: Midi file has " + str(sig_cnt) + " signature messages instead of 1. " + "Using " + str(numerator) + "/" + str(denominator)) if(tim_cnt != 1): print("-> WARNING: Midi file has " + str(tim_cnt) + " tempo messages instead of 1. 
" + "Using " + str(tempo) + " bpm.") # calculate absolute timing values for i in range(1, len(data)): # actual time difference tdiffnext = data.tdiff[i] # accumulate time only for same track if(data.track[i] == data.track[i-1]): timeacc = data.time[i-1] else: timeacc = 0 data.loc[i, 'time'] = timeacc + tdiffnext # extract all 'note_on' events from 'data' to 'notes for i in range(len(data)): # event == note_on AND velocity > x if(data.type[i] == 'note_on' and data.bytes[i][2] > 0): thisnote = data.bytes[i][1] mtime = data.time[i] x_val = ( mtime / ticks_8th ) * x8beat notes = notes.append({ 'time' : data.time[i], 'note' : thisnote, 'name' : get_name(thisnote), 'x' : x_val, 'y' : get_mm(thisnote), 'bar' : (data.time[i] / (4 * ticks_4th * (numerator/denominator))) + 1 }, ignore_index=True) # mm per bar mm_bar = 8 * x8beat * (numerator/denominator) # bars per page bars_pp = int((xprmax - preplt) / mm_bar) # debug if(debug): #print("--- DATA ---") #print(data) print("--- NOTES ---") print(notes) # generate plot # ----------------------------- # size of one strip strip_x = mm2in(preplt + bars_pp * mm_bar) # X-Size of plot strip_y = mm2in(yUppr) # Y-Size of plot hlines_x = mm2in(preplt) # start of horizontal note lines newpage = 1 # flag for newpage pagecnt = 0 # page counter poffs = 0 # x-offset for current page # for all notes (can't manipulate k in 'for' loop but in 'while' loop) k = 0 while(k < len(notes) ): # create a new plot if( newpage==1 ): newpage = 0 # reset flag pagecnt = pagecnt + 1 # increment page counter if(pagecnt > 1): # re plot last notes on current page while( (notes.bar[k] ) >= bars_pp * (pagecnt - 1) + 1 ): k -= 1 k += 1 # undo last while, no 'do-while' loop in python # frame line width, hacked plt.rcParams['axes.linewidth'] = 0.2 # x-offset for this page poffs = mm2in( -preplt + (pagecnt-1) * mm_bar * bars_pp ) # create figure f = plt.figure(figsize=(strip_x,strip_y), dpi=300,frameon=False) ax = plt.subplot(111) # figure has no borders plt.subplots_adjust(left=0,right=1,bottom=0,top=1) # plot 30 horizontal lines for i in range(len(notemmlut)): yy = mm2in(notemmlut[i][1]) # y-val nnote = get_name(notemmlut[i][0]) # name of the acutal note if(nnote == "C"): # C-Lines plt.plot([hlines_x,strip_x],[yy,yy],color="black", linewidth=0.4) elif nnote.endswith("#"): # #-Lines (Black keys) plt.plot([hlines_x,strip_x],[yy,yy],color="black", linewidth=0.1, linestyle=':') else: # Normal Lines plt.plot([hlines_x,strip_x],[yy,yy],color="black", linewidth=0.2) # add the name of the note if(i%2 ==0): ofs = 0.1 # indent every 2nd note else: ofs = 0.0 # no indent ax.text(.1+ofs,yy, nnote, fontsize=5,verticalalignment='center',rotation=90) # plot beat lines for i in range(bars_pp * numerator): xx = mm2in(mm_bar) / numerator # x per bar if(i % numerator == 0): # plot line (full bar) plt.plot([hlines_x+xx*i, hlines_x+xx*i ], [mm2in(notemmlut[0][1]), mm2in(notemmlut[-1][1])],color="black",linewidth=0.4) # plot bar number ax.text( hlines_x+xx*i + (xx/2), mm2in(notemmlut[0][1]) - mm2in(2.5), str(int(1+ i/numerator + bars_pp * (pagecnt-1))), fontsize=5,horizontalalignment='center',) else: # plot line (beat) plt.plot([hlines_x+xx*i, hlines_x+xx*i ], [mm2in(notemmlut[0][1]), mm2in(notemmlut[-1][1])], color="black",linewidth=0.1, linestyle=':') # add song name and info ax.text( hlines_x + mm2in(4), yy + mm2in(2), str(pagecnt) + " " + fname + " " + str(numerator) + "/" + str(denominator) + " " + str(tempo) + " bpm", fontsize=8, horizontalalignment='left') ax.text( mm2in(xprmax) / 2, yy + mm2in(2), 
"Generated in " + now.strftime('%Y-%m-%d') + " with mid2sheet v" + str(version) , fontsize=5, horizontalalignment='left') # vertical start line plt.plot([hlines_x,hlines_x],[0,strip_y],color="black", linewidth=0.4) plt.xticks([]) plt.yticks([]) ax.axis([0,strip_x, 0, strip_y]) # end if newpage # position of note to plot xx = mm2in(notes.x[k]) yy = mm2in(notes.y[k]) xx = xx -poffs # plot one note if(notes.y[k] != -1): # normal note plt.plot(xx,yy,marker='.',color='white',markersize=12) plt.plot(xx,yy,marker='.',color='black',markersize=8) plt.plot(xx,yy,marker='.',color='white',markersize=5) # fill red, if timing is to short if(get_terr(notes, k)): plt.plot(xx,yy,marker='.',color='red',markersize=3) else: # plot error note name (not in musicbox range) ax.text( xx,mm2in(1),get_name(int(notes.note[k])), fontsize=5,color='red', horizontalalignment='center',) # prepare new page, if this note was already outside current page if( (notes.bar[k] ) > bars_pp * pagecnt + 1 ): newpage = 1 # save current page to file filename = outfile + "_%03d" % (pagecnt) + '.pdf' f.savefig(filename, bbox_inches='tight') # next note (manually in while loop) else: k += 1 # for all notes # save last page to file filename = outfile + "_%03d" % (pagecnt) + '.pdf' f.savefig(filename, bbox_inches='tight') # combine pdfs, TODO: switch to PyPDF2 subprocess.call("pdfjam " + outfile + "_*.pdf --nup 1x2 --a4paper --landscape --noautoscale true --delta '0.5cm 0.5cm' --outfile " + outfile + ".pdf", shell=True) subprocess.call("rm " + outfile + "_*.pdf ", shell=True) # result: list of notes with x,y mm values return notes # convert all files for filename in os.listdir(inputdir): if filename.endswith(".mid"): inpfile = inputdir+"/"+filename outfile_name = filename.rsplit('.', 1)[0] outfile = outputdir+"/"+outfile_name print("--------------------------------------------------------") print("-> Input File : "+filename) print("-> Output File : "+outfile_name + ".pdf") do_convert(inpfile, outfile, outfile_name) print("--------------------------------------------------------") print("DONE")
flylens/mid2sheet
mid2sheet.py
mid2sheet.py
py
14,949
python
en
code
27
github-code
6
70285712189
""" SWF """ from __future__ import absolute_import from .tag import SWFTimelineContainer from .stream import SWFStream from .export import SVGExporter from six.moves import cStringIO from io import BytesIO class SWFHeaderException(Exception): """ Exception raised in case of an invalid SWFHeader """ def __init__(self, message): super(SWFHeaderException, self).__init__(message) class SWFHeader(object): """ SWF header """ def __init__(self, stream): a = stream.readUI8() b = stream.readUI8() c = stream.readUI8() if not a in [0x43, 0x46, 0x5A] or b != 0x57 or c != 0x53: # Invalid signature! ('FWS' or 'CWS' or 'ZFS') raise SWFHeaderException("not a SWF file! (invalid signature)") self._compressed_zlib = (a == 0x43) self._compressed_lzma = (a == 0x5A) self._version = stream.readUI8() self._file_length = stream.readUI32() if not (self._compressed_zlib or self._compressed_lzma): self._frame_size = stream.readRECT() self._frame_rate = stream.readFIXED8() self._frame_count = stream.readUI16() @property def frame_size(self): """ Return frame size as a SWFRectangle """ return self._frame_size @property def frame_rate(self): """ Return frame rate """ return self._frame_rate @property def frame_count(self): """ Return number of frames """ return self._frame_count @property def file_length(self): """ Return uncompressed file length """ return self._file_length @property def version(self): """ Return SWF version """ return self._version @property def compressed(self): """ Whether the SWF is compressed """ return self._compressed_zlib or self._compressed_lzma @property def compressed_zlib(self): """ Whether the SWF is compressed using ZLIB """ return self._compressed_zlib @property def compressed_lzma(self): """ Whether the SWF is compressed using LZMA """ return self._compressed_lzma def __str__(self): return " [SWFHeader]\n" + \ " Version: %d\n" % self.version + \ " FileLength: %d\n" % self.file_length + \ " FrameSize: %s\n" % self.frame_size.__str__() + \ " FrameRate: %d\n" % self.frame_rate + \ " FrameCount: %d\n" % self.frame_count class SWF(SWFTimelineContainer): """ SWF class The SWF (pronounced 'swiff') file format delivers vector graphics, text, video, and sound over the Internet and is supported by Adobe Flash Player software. The SWF file format is designed to be an efficient delivery format, not a format for exchanging graphics between graphics editors. @param file: a file object with read(), seek(), tell() methods. """ def __init__(self, file=None): super(SWF, self).__init__() self._data = None if file is None else SWFStream(file) self._header = None if self._data is not None: self.parse(self._data) @property def data(self): """ Return the SWFStream object (READ ONLY) """ return self._data @property def header(self): """ Return the SWFHeader """ return self._header def export(self, exporter=None, force_stroke=False): """ Export this SWF using the specified exporter. When no exporter is passed in the default exporter used is swf.export.SVGExporter. Exporters should extend the swf.export.BaseExporter class. @param exporter : the exporter to use @param force_stroke : set to true to force strokes on fills, useful for some edge cases. """ exporter = SVGExporter() if exporter is None else exporter if self._data is None: raise Exception("This SWF was not loaded! 
(no data)") if len(self.tags) == 0: raise Exception("This SWF doesn't contain any tags!") return exporter.export(self, force_stroke) def parse_file(self, filename): """ Parses the SWF from a filename """ self.parse(open(filename, 'rb')) def parse(self, data): """ Parses the SWF. The @data parameter can be a file object or a SWFStream """ self._data = data = data if isinstance(data, SWFStream) else SWFStream(data) self._header = SWFHeader(self._data) if self._header.compressed: temp = BytesIO() if self._header.compressed_zlib: import zlib data = data.f.read() zip = zlib.decompressobj() temp.write(zip.decompress(data)) else: import pylzma data.readUI32() #consume compressed length data = data.f.read() temp.write(pylzma.decompress(data)) temp.seek(0) data = SWFStream(temp) self._header._frame_size = data.readRECT() self._header._frame_rate = data.readFIXED8() self._header._frame_count = data.readUI16() self.parse_tags(data) def __str__(self): s = "[SWF]\n" s += self._header.__str__() for tag in self.tags: s += tag.__str__() + "\n" return s
timknip/pyswf
swf/movie.py
movie.py
py
5,642
python
en
code
154
github-code
6
5517603024
# python3
from math import floor


class HeapBuilder():
    def __init__(self):
        self._swaps = []
        self._data = []

    def ReadInput(self):
        # manual input
        # n = 5
        # self._data = [5, 4, 3, 2, 1]

        # auto input
        n = int(input())
        self._data = [int(s) for s in input().split()]

    def PrintAnswer(self):
        print(len(self._swaps))
        for swap in self._swaps:
            print(swap[0], swap[1])

    def BuildHeap(self):
        size = len(self._data)
        n = size
        swaps = 0
        iter = floor(n/2)
        while (iter+1):
            self.SiftDown(iter)
            iter -= 1
        # for k in range(2, 0, -1):
        #     self.SiftDown(k)

    def SiftDown(self, i):
        maxIndex = i
        size = len(self._data)
        l = self.LeftChild(i)
        if l <= size-1 and self._data[l] < self._data[maxIndex]:
            maxIndex = l
        r = self.RightChild(i)
        if r <= size-1 and self._data[r] < self._data[maxIndex]:
            maxIndex = r
        if i != maxIndex:
            val_maxIndex = self._data[maxIndex]
            val_i = self._data[i]
            self._data[i] = val_maxIndex
            self._data[maxIndex] = val_i
            self._swaps.append([i, maxIndex])
            self.SiftDown(maxIndex)

    def Parent(self, i):
        return floor(i/2)

    def LeftChild(self, i):
        return 2*i + 1

    def RightChild(self, i):
        return 2*i + 2

    def Solve(self):
        self.ReadInput()
        self.BuildHeap()
        self.PrintAnswer()


def main():
    heapBuilder = HeapBuilder()
    heapBuilder.Solve()


main()
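# Hedged worked example (added for illustration, not part of the original solution file).
# With this implementation, for the input
#     5
#     5 4 3 2 1
# the produced output is
#     3
#     1 4
#     0 1
#     1 3
# i.e. three swaps that turn [5, 4, 3, 2, 1] into the min-heap [1, 2, 3, 5, 4].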
craigpauga/Data-Structure-and-Algorithms
2. Data Structures/Assignment 2 - Priority Queues & Disjoint Disjoint Sets/make_heap/build_heap.py
build_heap.py
py
1,658
python
en
code
0
github-code
6
7573771770
import os import logging from dotenv import load_dotenv from flask import Flask, jsonify, request from flask_cors import CORS from flask_restful import Api, Resource, reqparse from models.db.postgresDB import PostgresDB from models.services.logger import get_module_logger import models.services.flask_service as flask_service load_dotenv() app = Flask(__name__) CORS(app, resources=r'/*') parser = reqparse.RequestParser() parser.add_argument('keywords', type=list) @app.route('/', methods=['GET']) def hello_server(): return jsonify({"info": "Server works"}), 200 @app.route('/articles', methods=['GET']) def get_articles(): article_id = request.args.get("article_id", None) return flask_service.get_articles(db=postgresDB, article_id=article_id) #TODO: z parametrem # @app.route('/articles', methods=['GET']) # def get_articles(): # keywords = parser.parse_args() # return keywords # #return flask_service.get_articles(db=postgresDB, keywords=keywords) @app.route('/articles', methods=['POST']) def create_article(): data = request.json return flask_service.create_article(db=postgresDB, data=data) @app.route('/articles/<article_id>', methods=['PUT']) def update_article(article_id): data = request.json return flask_service.update_article(db=postgresDB, article_id=article_id, data=data) @app.route('/articles/<article_id>', methods=['DELETE']) def delete_article(article_id): return flask_service.delete_article(db=postgresDB, article_id=article_id,article_table=article_table) @app.route('/categories', methods=['GET']) def get_category(): category_id = request.args.get("category_id", None) return flask_service.get_categories(db=postgresDB, category_id=category_id) @app.route('/categories', methods=['POST']) def create_categories(): data = request.json return flask_service.create_category(db=postgresDB, data=data) @app.route('/categories/<category_id>', methods=['PUT']) def update_categories(category_id): data = request.json return flask_service.update_category(db=postgresDB, category_id=category_id, data=data) @app.route('/categories/<category_id>', methods=['DELETE']) def delete_categories(category_id): return flask_service.delete_category(db=postgresDB, category_id=category_id,category_table=category_table) @app.route('/comments', methods=['GET']) def get_comment(): article_id = request.args.get("article_id", None) author=request.args.get("author", None) return flask_service.get_comments(db=postgresDB, article_id=article_id,author=author,comment_table=comment_table) @app.route('/comments', methods=['POST']) def create_comments(): data = request.json return flask_service.create_comment(db=postgresDB, data=data) @app.route('/comments/<comment_id>', methods=['PUT']) def update_comments(comment_id): data = request.json return flask_service.update_comment(db=postgresDB, comment_id=comment_id, data=data) @app.route('/comments/<comment_id>', methods=['DELETE']) def delete_comments(comment_id): return flask_service.delete_comment(db=postgresDB, comment_id=comment_id,comment_table=comment_table) @app.route("/export", methods=['GET']) def to_txt(): return flask_service.db_to_txt(db=postgresDB, article_table=article_table, relation_category_article_table=relation_category_article_table, category_table=category_table, comment_table=comment_table) if __name__ == "__main__": logger = get_module_logger(mod_name=__name__, log_path='./logs/app_logs.log', lvl=logging.DEBUG) postgresDB = PostgresDB(db_host=os.environ.get("DB_HOST"), db_port=os.environ.get("DB_PORT"), db_user=os.environ.get("POSTGRES_USER"), 
db_password=os.environ.get("POSTGRES_PASSWORD"), db_name=os.environ.get("POSTGRES_DB")) try: article_table = postgresDB.get_table('article') category_table = postgresDB.get_table('category') comment_table = postgresDB.get_table('comment') relation_category_article_table = postgresDB.get_table('relation_category_article') logger.info('Got tables') app.run(host='0.0.0.0', port=5000) except Exception as e: logger.exception(e) logger.exception('Error, could not get tables from database')
Mariusz94/Knowledge-base
backend/app.py
app.py
py
4,372
python
en
code
0
github-code
6
10936847432
def funcaoI(n):
    i = 1
    lista = []
    while i <= n:
        lista.append(i)
        print(lista)
        i += 1


def funcaoJ(n):
    # solution using range
    for i in range(n):
        i += 1
        print(f'{str(i) * i}')

    # solution without range
    # i = 1
    # while i <= n:
    #     print(f'{str(i) * i}')
    #     i += 1


def calcular_pagamento(qtd_horas, valor_hora):
    horas = float(qtd_horas)
    taxa = float(valor_hora)
    if horas <= 40:
        salario = horas * taxa
    else:
        h_excd = horas - 40
        salario = 40 * taxa + (h_excd * (1.5 * taxa))
    return salario


def imprimeLinha(numero):
    for n in range(1, numero + 1):
        print((' {} ').format(n), end='')
    print()


def imprimeSequencia(numero):
    for numero in range(numero + 1):
        imprimeLinha(numero)
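# A minimal usage sketch added for illustration (not part of the original exercise file);
# the expected value below assumes the pay rule of 1.5x the hourly rate beyond 40 hours.
if __name__ == '__main__':
    print(calcular_pagamento(45, 10.0))  # 40*10 + 5*(1.5*10) = 475.0
    imprimeSequencia(3)  # prints an empty line, then " 1 ", " 1  2 ", " 1  2  3 "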
thallesbruno/logica-de-programacao
exercicios/lista_aula03/funcoesUteis.py
funcoesUteis.py
py
785
python
pt
code
0
github-code
6
27735122824
from scipy import integrate
import math


def func1(x):
    return 1 / ((3*x - 1)**0.5)


def func2(x):
    return math.log(x**2 + 1) / x


def func3(x):
    return 1 / (0.2*x**2 + 1)**0.5


def rectangle_method(func, a, b, n):
    h = (b - a)/n
    integral_sum = sum(func(a + i * h) for i in range(n))
    result = h * integral_sum
    return result


def simpson_method(func, a, b, n):
    integral_result = integrate.simps([func(a + i * (b - a) / n) for i in range(n+1)], dx=(b - a) / n)
    return integral_result


def trapezoid_method(func, a, b, n):
    h = (b - a) / n
    nodes = [func(a + i * h) for i in range(n + 1)]
    integral_result = h * (sum(nodes) - 0.5 * (nodes[0] + nodes[n]))
    return integral_result


precision = 0.0001
integrals = [(func1, 1.4, 2.1), (func2, 0.8, 1.6), (func3, 1.3, 2.5)]
methods = [rectangle_method, simpson_method, trapezoid_method]
p_values = [10, 8, 20]

for i, (func, a, b) in enumerate(integrals):
    print(f"Integral {i + 1} (from {a} to {b}):")
    method = methods[i]
    n = p_values[i]
    result = method(func, a, b, n)
    print(f"Method {i + 1}: {result:f}\n")
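# Hedged sanity check added for illustration (not part of the original assignment):
# the first integrand has the antiderivative (2/3)*sqrt(3x - 1), so the numerical
# results can be compared against the exact value of the integral on [1.4, 2.1].
if __name__ == "__main__":
    exact = (2 / 3) * (math.sqrt(3 * 2.1 - 1) - math.sqrt(3 * 1.4 - 1))  # ~0.34221
    print("exact:    ", exact)
    print("rectangle:", rectangle_method(func1, 1.4, 2.1, 10))
    print("trapezoid:", trapezoid_method(func1, 1.4, 2.1, 10))
    print("simpson:  ", simpson_method(func1, 1.4, 2.1, 10))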
Alisa7A/Numerical-methods-of-programming
Pr11 Шамігулової Аліси.py
Pr11 Шамігулової Аліси.py
py
1,152
python
en
code
0
github-code
6
3325344481
# SIMPLY READING A FILE
file = open("../files/essay.txt")
content = file.read()
print(content.title())
file.close()

# Return the number of characters in the file
file = open("../files/essay.txt", 'r')
content = file.read()
n_char = len(content)
print(n_char)

# ADDING MEMBERS IN THE FILE
member = input("Add a new member: ")

file = open("../files/members.txt", 'r')
existing_members = file.readlines()
file.close()

existing_members.append(member + "\n")

file = open("../files/members.txt", 'w')
members = file.writelines(existing_members)
file.close()
ramhors/todo-app
A-MegaPython/exercises/readingFile.py
readingFile.py
py
555
python
en
code
0
github-code
6
70793816827
from pathlib import Path import re, pickle, os import pickle, win32net from time import sleep class Scanner: wordList = "" ignored_type = "" ignored_dir = "" # this will store all of the file dictionsaries files = [] # This is the path that will be scanned p = '' # The code that iterates through the path from above def directory_file_iteration(self): ignored_directories = self.getIgnoredDirectories() ignored_filetypes = self.getIgnoredFileTypes() for i in Path(self.p).rglob("*"): # If there are directories in the "ignored directories.p" file, then it will iterate through them to see if file should be ignored if len(ignored_directories) > 0: # If the path of the file is in the ignored directories file, it will move to the next file if os.path.normpath(i.parents[0]) in ignored_directories: continue # if the file type of the file is in the ignored filetypes, it will move to the next file if Path(i).suffix.lower() in ignored_filetypes or len(Path(i).suffix) == 0 and "none" in ignored_filetypes: continue # if it passes both, it will check if it's actually a file else: if i.is_file(): # creating a file dictionary of attributes fileDict = {"filename":i.name,"pathParent":i.parents[0],"fullPath":i, "filetype":Path(i).suffix, "flag":False, "data":{"filename":"","filecontents":"","ssn":"","phone":"","email":[], "cc":""}} self.files.append(fileDict) else: continue # if there are none in ignored directories.p it will run this elif Path(i).suffix in ignored_filetypes: continue else: if i.is_file(): fileDict = {"filename":i.name,"pathParent":i.parents[0],"fullPath":i, "filetype":Path(i).suffix, "flag":False, "data":{"filename":"","filecontents":"","ssn":"","phone":"","email":[], "CC":""}} self.files.append(fileDict) # checking to see if a keyword is in a filename def checkFileNames(self): for file_ in self.files: for word in self.wordList: if word.lower() in str(file_["filename"].lower()): file_["flag"] = True file_["data"]["filename"] = word # reading in .txt files and checking for keywords def readInTextFile(self): for file_ in self.files: if file_["filetype"] == ".txt": try: # trying to open the file, sometimes it won't read because it isn't always ascii characters. 
f = open(file_["fullPath"], "r") fileContents = f.read() f.close() # searching the contents of the file for keyword for word in self.wordList: if word in fileContents.lower(): file_["flag"] = True file_["data"]["filecontents"] = file_["data"]["filecontents"] + " " + word # searching contents of file for SSN file_ = self.ssnSearch(file_, fileContents) # searching for phone numbers file_ = self.phoneNumberSearch(file_, fileContents) # searching for emails file_ = self.emailSearch(file_, fileContents) # searching for credit cards file_ = self.ccSearch(file_, fileContents) except UnicodeDecodeError: pass def ccSearch(self, file_, fileContents): ccAmexFound = re.findall(r'(?<!\d)3[47][0-9]{13}$(?!\d)', fileContents) ccVisaFound = re.findall(r'(?<!\d)4[0-9]{12}(?:[0-9]{3})?(?!\d)', fileContents) ccMasterCardFound = re.findall(r'(?<!\d)(5[1-5][0-9]{14}|2(22[1-9][0-9]{12}|2[3-9][0-9]{13}|[3-6][0-9]{14}|7[0-1][0-9]{13}|720[0-9]{12}))(?!\d)', fileContents) strAmex = '' strVisa = '' strMaster = '' for card in ccAmexFound: strAmex = strAmex + " , Amex " + str(card) for card in ccVisaFound: strVisa = strVisa + " , Visa " + str(card) for card in ccMasterCardFound: strMaster = strMaster + " , Master " + str(card) if len(strAmex) + len(strVisa) + len(strMaster) < 1: return file_ else: ccFound = str(strAmex) + str(strVisa) + str(strMaster) try: file_["flag"] = True except: pass file_["data"]["cc"] = file_["data"]["cc"] + ccFound return file_ def emailSearch(self, file_, fileContents): emailFound = re.findall(r'[A-Za-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w+', fileContents) strEmailFound = "" for email in emailFound: strEmailFound = strEmailFound + " , " + email if len(emailFound) < 1: return file_ else: try: file_["flag"] = True except: pass file_["data"]["email"] += emailFound return file_ def phoneNumberSearch(self, file_, fileContents): phoneFound = re.findall(r'(?<!\d)(?!000|.+0{4})(?:\d{10}|\d{3}-\d{3}-\d{4}|\d{3}\.\d{3}\.\d{4}|\d{3}\s\d{3}\s\d{4}|\(\d{3}\)\s\d{3}\s\d{4})(?!\d)', fileContents) strPhoneFound = "" for phone in phoneFound: strPhoneFound = strPhoneFound + " , " + phone if len(phoneFound) < 1: return file_ else: try: file_["flag"] = True except: pass file_["data"]["phone"] = file_["data"]["phone"] + strPhoneFound return file_ # searching for SSNs def ssnSearch(self,file_,fileContents): #ssn format: xxxxxxxxx or xxx-xx-xxxx ssnFound = re.findall(r'(?<!\d)(?!000|.+0{4})(?:\d{9}|\d{3}-\d{2}-\d{4})(?!\d)', fileContents) strSSNFOUND = "" for ssn in ssnFound: strSSNFOUND = strSSNFOUND + " , " + ssn if len(ssnFound) < 1: return file_ else: try: file_["flag"] = True except: pass file_["data"]["ssn"] = file_["data"]["ssn"] + strSSNFOUND return file_ # Ignore_dir.txt which will hold directories you want to ignore def getIgnoredDirectories(self): ignored_directories = pickle.load(open("ignored directories.p","rb")) return ignored_directories # Ignore the file types in this file such as .torrent, .txt def getIgnoredFileTypes(self): ignored_filetypes = pickle.load(open("ignored filetypes.p", "rb")) return ignored_filetypes # Setting path to scan def setPath(self,i): self.p = i def getWordList(self): self.wordList = pickle.load(open("word list.p", "rb")) def checkIfAdmin(self): if 'logonserver' in os.environ: server = os.environ['logonserver'][2:] else: server = None def if_user_is_admin(Server): groups = win32net.NetUserGetLocalGroups(Server, os.getlogin()) isadmin = False for group in groups: if group.lower().startswith('admin'): isadmin = True return isadmin, groups # Function usage is_admin, groups = 
if_user_is_admin(server) # Result handeling if is_admin == True: return True else: return False #print('You are in the following groups:') # for group in groups: # print(group) #sleep(10) #if error: no module named win32api, run these lines in cmd #pip uninstall pipywin32 #pip uninstall pywin32 #pip install pywin32 def get_scanning(self, scan_type): if scan_type == "quick": self.getWordList() self.files = [] # removing all data in the files list self.directory_file_iteration() self.checkFileNames() else: self.getWordList() self.files = [] # removing all data in the files list self.directory_file_iteration() self.checkFileNames() self.readInTextFile() return self.files
thang41/OpenSourceSecurityCheck
scanner.py
scanner.py
py
9,244
python
en
code
0
github-code
6
20507256803
import pandas as pd
import csv


# This function initializes the DataFrame
def resetDf():
    df = pd.read_csv("./Scoreboard.csv")
    df.index += 1
    return df


# This function adds a new player if it does not exist
def newPlayer(player):
    create = True
    with open('Scoreboard.csv', newline='', encoding='utf-8') as f:
        reader = csv.reader(f)
        for row in reader:
            if row[0] == player:
                create = False
                break
            else:
                create = True
    if create == True:
        with open('Scoreboard.csv', 'a', newline='') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=',')
            spamwriter.writerow([player, '0'])
        resetDf()
    return resetDf()


# This function increases a player's wins
def addWins(name):
    df = resetDf()
    df.loc[df["name"] == name, "wins"] += 1
    df.to_csv("Scoreboard.csv", index=False)
    return resetDf()
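# Hedged usage sketch (illustrative only; assumes Scoreboard.csv exists with a "name,wins" header row):
# newPlayer("alice")   # appends "alice,0" when the name is not present yet
# addWins("alice")     # increments alice's win count and rewrites the CSV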
RafaelM4gn/TicTacToe
Scoreboard.py
Scoreboard.py
py
953
python
en
code
0
github-code
6
70724549309
from django.urls import path
from .views import RegiaoCreate, EmpresaCreate, AgendamentoColetaCreate, AgendamentoDescarteCreate
from .views import RegiaoUpdate, EmpresaUpdate, AgendamentoColetaUpdate, AgendamentoDescarteUpdate
from .views import RegiaoDelete, EmpresaDelete, AgendamentoColetaDelete, AgendamentoDescarteDelete
from .views import RegiaoList, EmpresaList, AgendamentoColetaList, AgendamentoDescarteList

urlpatterns = [
    # URL creation template: path('address/', NameOfView.as_view(), name='url_name'),
    path('cadastros/regiao/', RegiaoCreate.as_view(), name='cadastrar-regiao'),
    path('cadastros/empresa/', EmpresaCreate.as_view(), name='cadastrar-empresa'),
    path('descarte/agendardescarte/', AgendamentoDescarteCreate.as_view(), name='cadastrar-descarte'),
    path('coleta/agendarcoleta', AgendamentoColetaCreate.as_view(), name='cadastrar-coleta'),

    path('editar/regiao/<int:pk>', RegiaoUpdate.as_view(), name='editar-regiao'),
    path('editar/empresa/<int:pk>', EmpresaUpdate.as_view(), name='editar-empresa'),
    path('editar/descarte/<int:pk>', AgendamentoDescarteUpdate.as_view(), name='editar-descarte'),
    path('editar/coleta/<int:pk>', AgendamentoColetaUpdate.as_view(), name='editar-coleta'),

    path('deletar/regiao/<int:pk>', RegiaoDelete.as_view(), name='deletar-regiao'),
    path('deletar/empresa/<int:pk>', EmpresaDelete.as_view(), name='deletar-empresa'),
    path('deletar/descarte/<int:pk>', AgendamentoDescarteDelete.as_view(), name='deletar-descarte'),
    path('deletar/coleta/<int:pk>', AgendamentoColetaDelete.as_view(), name='deletar-coleta'),

    path('listar/regiao', RegiaoList.as_view(), name='listar-regiao'),
    path('listar/empresa', EmpresaList.as_view(), name='listar-empresa'),
    path('listar/descarte', AgendamentoDescarteList.as_view(), name='listar-descarte'),
    path('listar/coleta', AgendamentoColetaList.as_view(), name='listar-coleta'),
]
micaelhjs/PIUnivesp02
cadastros/urls.py
urls.py
py
1,948
python
pt
code
0
github-code
6
1448273356
"""This file is to run the model inference here's the command python run_inference.py -i trainval/images/image_000000001.jpg -m model/model.pt""" # import the necessary packages import argparse import cv2 import numpy as np from PIL import Image import torch from torchvision import transforms import config from utils import get_model_instance_segmentation # construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-i", "--input_image", required=True, help="path to input image") ap.add_argument("-m", "--model", required=True, help="path to trained pytorch model") ap.add_argument("-c", "--confidence", type=float, default=0.85, help="minimum probability to filter weak detections") args = vars(ap.parse_args()) model_path = args["model"] input_image = args["input_image"] confidence = args["confidence"] # classes which our model will detect and the color object of the bounding box it will create CLASSES=["Background","Person","Car"] # reading the image with pillow and converion into the numpy arrays img = Image.open(input_image) open_cv_image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) # pytorch will work on the suitable device wheather it's CPU or GPU device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") # getting the model instance and loading the pytorch model model = get_model_instance_segmentation(config.num_classes) model.load_state_dict(torch.load(model_path)) # move model to the right device model.to(device) model.eval() trans =transforms.Compose([transforms.ToTensor()]) img = trans(img).cuda() # getting the all the detections generated by the trained model detections = model([img]) # seperating out all the bounding boxes, labels, and scores we get _bboxes, _labels, _scores = detections[0]['boxes'], detections[0]['labels'], detections[0]['scores'] # loop over the detections for i in range(0, len(_bboxes)): # extract the confidence (i.e., probability) associated with the # prediction pred_confidence = _scores[i] # filter out weak detections by ensuring the confidence is # greater than the minimum confidence if pred_confidence > confidence: # extract the index of the class label from the detections, # then compute the (x, y)-coordinates of the bounding box # for the object idx = int(_labels[i]) box = _bboxes[i].detach().cpu().numpy() (startX, startY, endX, endY) = box.astype("int") # display the prediction to our terminal label = "{}: {:.2f}%".format(CLASSES[idx], pred_confidence * 100) print("[INFO] {}".format(label)) # draw the bounding box and label on the image cv2.rectangle(open_cv_image, (startX, startY), (endX, endY), (0,0,255) if idx==1 else (0,255,0), 1) y = startY - 15 if startY - 15 > 15 else startY + 15 cv2.putText(open_cv_image, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255) if idx==1 else (0,255,0), 2) # show the output image cv2.imshow("output", open_cv_image) cv2.waitKey(0)
Pradhunmya/pytorch_faster_rcnn
run_inference.py
run_inference.py
py
3,143
python
en
code
0
github-code
6
15354136781
# Testing code with unittest
import unittest

'''The function below serves as an example; it is what we will test next'''
def get_formatted_name(first, last):
    '''Format a full name'''
    full_name = f'{first} {last}'
    return full_name.title()

# A unit test verifies that one aspect of a function behaves correctly
class NamesTestCase(unittest.TestCase):  # the class name is arbitrary, but it must inherit from unittest.TestCase
    '''Tests for the example function'''
    def test_first_last_name(self):  # test methods must start with test_
        '''Does it handle a name like Acher Krau correctly?'''
        formatted_name = get_formatted_name('acher', 'krau')
        self.assertEqual(formatted_name, 'Acher Krau')  # one of unittest's most useful methods: the assertion

if __name__ == '__main__':
    unittest.main()
krau/py-learn
basics/10_testcode.py
10_testcode.py
py
773
python
zh
code
0
github-code
6
40686482793
import time import unittest import swagger_client from integ_tests.cloud import cloud_manager, fixtures from integ_tests.cloud.cloud_manager import CloudManager from integ_tests.gateway import rpc class TestConfigUpdates(unittest.TestCase): """ Test that a newly-registered gateway receives updated configurations from the cloud. This test should run last in the suite as it modifies mconfig values. """ MAX_CHECKS = 12 POLL_SEC = 10 def setUp(self): self._cloud_manager = cloud_manager.CloudManager() # We want to start with a fresh network every time because we're # testing gateway registration -> config update flow self._cloud_manager.delete_networks([fixtures.NETWORK_ID]) # We also want to start off with default mconfigs rpc.reset_gateway_mconfigs() self._cloud_manager.create_network(fixtures.NETWORK_ID) self._cloud_manager.register_gateway( fixtures.NETWORK_ID, fixtures.GATEWAY_ID, rpc.get_gateway_hw_id(), ) def tearDown(self): self._cloud_manager.clean_up() rpc.reset_gateway_mconfigs() def test_config_update(self): # Update configs on cloud updated_gw_config = swagger_client.MagmadGatewayConfig( **fixtures.DEFAULT_GATEWAY_CONFIG.to_dict(), ) updated_gw_config.checkin_interval = 12 updated_gw_config.checkin_timeout = 20 updated_gw_cellular = swagger_client.GatewayCellularConfigs( ran=swagger_client.GatewayRanConfigs( **fixtures.DEFAULT_GATEWAY_CELLULAR_CONFIG.ran.to_dict(), ), epc=swagger_client.GatewayEpcConfigs( **fixtures.DEFAULT_GATEWAY_CELLULAR_CONFIG.epc.to_dict(), ), ) updated_gw_cellular.ran.pci = 261 updated_network_dnsd = swagger_client.NetworkDnsConfig( enable_caching=True, ) updated_network_cellular = swagger_client.NetworkCellularConfigs( ran=swagger_client.NetworkRanConfigs( **fixtures.DEFAULT_NETWORK_CELLULAR_CONFIG.ran.to_dict(), ), epc=swagger_client.NetworkEpcConfigs( **fixtures.DEFAULT_NETWORK_CELLULAR_CONFIG.epc.to_dict(), ), ) updated_network_cellular.epc.mcc = '002' updated_network_cellular.epc.mnc = '02' updated_network_cellular.epc.tac = 2 self._cloud_manager.update_network_configs( fixtures.NETWORK_ID, { CloudManager.NetworkConfigType.DNS: updated_network_dnsd, CloudManager.NetworkConfigType.CELLULAR: updated_network_cellular, }, ) self._cloud_manager.update_gateway_configs( fixtures.NETWORK_ID, fixtures.GATEWAY_ID, { CloudManager.GatewayConfigType.MAGMAD: updated_gw_config, CloudManager.GatewayConfigType.CELLULAR: updated_gw_cellular, }, ) # Expected updated mconfig values expected = { 'magmad': {'checkin_interval': 12, 'checkin_timeout': 20}, 'enodebd': {'pci': 261, 'tac': 2}, 'dnsd': {'enable_caching': True}, 'mme': {'mcc': '002', 'mnc': '02'}, } def verify_mconfigs(actual_mconfigs): for srv, actual_mconfig in actual_mconfigs.items(): expected_mconfig = expected[srv] for k, expected_v in expected_mconfig.items(): actual = getattr(actual_mconfig, k) if actual != expected_v: return False return True for _ in range(self.MAX_CHECKS): mconfigs = rpc.get_gateway_service_mconfigs( ['magmad', 'enodebd', 'dnsd', 'mme'], ) if not verify_mconfigs(mconfigs): print( 'mconfigs do not match expected values, ' 'will poll again', ) time.sleep(self.POLL_SEC) else: return self.fail('mconfigs did not match expected values within poll limit')
magma/magma
lte/gateway/python/integ_tests/cloud_tests/config_test.py
config_test.py
py
4,232
python
en
code
1,605
github-code
6
35572141881
command = ""
started = False
stopped = True

while True:
    command = input("> ").lower()
    if (command == 'help'):
        print("""
Start - to start the car
Stop - to stop the car
quit - to exit the program
""")
    elif (command == 'start'):
        if started:
            print("Car already started ...")
        else:
            started = True
            print("Car start to gooo....")
    elif (command == 'stop'):
        if started == True:
            started = False
            print("Car Stopped !")
        else:
            print("Car already Stopped...!")
    elif (command == 'quit'):
        print("Program quitting....!")
        exit()
        break
    else:
        print("I do not understand this...")

# Cameron was here, testing a push
abdallauno1/python
car_game.py
car_game.py
py
926
python
en
code
0
github-code
6
75108014908
# from unicodedata import lookup from django.urls import path, include from rest_framework.routers import SimpleRouter, DefaultRouter # This for the viewset models in the views from rest_framework_nested import routers # This is for the nested routers from store.models import Product # from pprint import pprint from . import views # This is for the nested routers router = routers.DefaultRouter() router.register('products', views.ProductViewSet, basename='products') router.register('carts', views.CartViewSet, basename='carts') router.register('customers', views.CustomerViewSet, basename='customers') router.register('orders', views.OrderViewSet, basename='orders') # product to review nested routing products_router = routers.NestedDefaultRouter(router, 'products', lookup='product') # This registers the url as a nested router products_router.register('reviews', views.ReviewViewSet, basename='product-reviews')# This allows configuration of the already created nested url products_router.register('images', views.ProductImageViewSet, basename='product-images')# This allows configuration of the already created nested url cart_router = routers.NestedDefaultRouter(router, 'carts', lookup='cart') # This registers the url as a nested router cart_router.register('items', views.CartItemViewSet, basename='cart-items')# This allows configuration of the already created nested url # This for the normal viewset # router = SimpleRouter() # router.register('products', views.ProductViewSet, basename='products') # the prefix 'products' is what displays as a url # router = DefaultRouter() # router.register('products', views.ProductViewSet, basename='products') # This is a the url pattern for the nestedviewset(its optional) # urlpatterns = router.urls + products_router.urls urlpatterns = [ ## THIS IS FOR ROUTER path('', include(router.urls)), path('', include(products_router.urls)), path('', include(cart_router.urls)), ### THIS IS FOR THE CLASS BASED VIEWS # path('products/', views.ProductList.as_view()), # ".as_views()" generates function url for the CBV # path('products/<int:pk>/', views.ProductDetail.as_view()), path('category/', views.CategoryList.as_view()), # path('category/', views.category_list), path('category/<int:pk>/', views.CategoryDetail.as_view()), ### THIS IS FOR THE FUNCTION BASED VIEWS # path('products/', views.product_list), # path('products/<int:pk>/', views.product_detail), # path('categories/', views.category_list), # path('categories/<int:pk>/', views.category_detail), # path('categories/<int:pk>/', views.category_detail, name='category-detail'), # This is for the HyperlinkedRelatedField ]
Auracule/e_commerce_api
store/urls.py
urls.py
py
2,718
python
en
code
0
github-code
6
35919740986
print('''
            ||QURTZ||
           ============
hello participants, welcome! to the "QURTZ" platform.

[instruction: you have a total of 5 questions. Read each statement carefully and place
" True " for a right answer & " False " for a wrong answer. Every question gives you 1 mark.]

let's start!
''')

import random

questions = {"MS Word is a hardware": "False",
             "Octal number system contains digits from 0-7": "True",
             "Python supports for dynamic typing": "True",
             "python is case sensitive": "True",
             "Is a,b=6 statement will return an error": "True",
             "Writing comments is mandatory in python programs": "False",
             "CPU controls only input data of computer": "False",
             "The language that the computer can understand is called Machine Language": "True",
             "Linix is a open source operating system": "False",
             "Twitter is a online social networking and blogging service.": "False"}

name = str(input("Enter your name to proceed: "))

def ask_questions():
    score = 0
    temp = 1
    while temp <= 5:
        rand_q = random.choice(list(questions.keys()))
        rand_q_answer = str(questions[rand_q])
        print("\n", rand_q)
        user_input = input("your answer: ")
        if user_input.capitalize() == rand_q_answer:
            print("Correct Answer!")
            score += 1
        else:
            print("Incorrect Answer!")
        temp += 1
    if score < 3:
        print("\nTry again! %s, your score is:" % (name), score)
    else:
        print("\nCongrats! %s, your score is:" % (name), score)

ask_questions()
Vaishnavimaury2222/Vaishnavimaury2222
py
1,691
python
en
code
0
github-code
6
22852667916
class Solution(object):
    def combinationSum3(self, k, n):
        """
        :type k: int
        :type n: int
        :rtype: List[List[int]]
        """
        res = []
        self.check(1, n, res, [], k)
        return res

    def check(self, start, target, res, pre, k):
        if target == 0 and len(pre) == k:
            res += [pre]
            return
        if len(pre) == k or target == 0:
            return
        for i in range(start, 10):
            if target < i:
                break
            self.check(i + 1, target - i, res, pre + [i], k)
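# Hedged usage sketch (illustrative only):
if __name__ == "__main__":
    sol = Solution()
    print(sol.combinationSum3(3, 7))  # expected: [[1, 2, 4]]
    print(sol.combinationSum3(3, 9))  # expected: [[1, 2, 6], [1, 3, 5], [2, 3, 4]]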
yuweishi/LeetCode
Algorithms/Combination Sum III/solution.py
solution.py
py
572
python
en
code
0
github-code
6
36992069067
from tkinter import *

clicks = 0

def click_button():
    global clicks
    clicks += 1
    root.title("Clicks {}".format(clicks))

root = Tk()
root.geometry("300x250")

btn = Button(text="click", background="blue", foreground="lime",
             padx="3100", pady="1000", font="1000",
             command=click_button)
btn.pack()

root.mainloop()
vitaminik2/programme
0раторh.py
0раторh.py
py
371
python
en
code
0
github-code
6
811999536
# Convert Sorted Array to Binary Search Tree - https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/

'''Given an array where elements are sorted in ascending order, convert it to a height balanced BST.

For this problem, a height-balanced binary tree is defined as a binary tree in which the depth
of the two subtrees of every node never differ by more than 1.

Example:

Given the sorted array: [-10,-3,0,5,9],

One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:

      0
     / \
   -3   9
   /   /
 -10  5'''

from typing import List

# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

class Solution:
    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        if not nums:
            return None

        def convertToBST(left, right):
            if left > right:
                return None
            mid = (left + right) // 2
            node = TreeNode(nums[mid])
            if left == right:
                return node
            node.left = convertToBST(left, mid - 1)
            node.right = convertToBST(mid + 1, right)
            return node

        return convertToBST(0, len(nums) - 1)

# Iterative
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

class Solution:
    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        if not nums:
            return None

        left = 0
        right = len(nums) - 1

        root = TreeNode(0)
        stack = []
        stack.append(root)
        stack.append(left)
        stack.append(right)

        while stack:
            right = int(stack.pop())
            left = int(stack.pop())
            node = stack.pop()
            mid = left + ((right - left) // 2)
            node.val = nums[mid]

            if left <= mid - 1:
                node.left = TreeNode(0)
                stack.append(node.left)
                stack.append(left)
                stack.append(mid - 1)

            if right >= mid + 1:
                node.right = TreeNode(0)
                stack.append(node.right)
                stack.append(mid + 1)
                stack.append(right)

        return root
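# Hedged usage sketch (illustrative only; the preorder helper below is not part of the original file):
if __name__ == "__main__":
    def preorder(node):
        return [] if node is None else [node.val] + preorder(node.left) + preorder(node.right)

    root = Solution().sortedArrayToBST([-10, -3, 0, 5, 9])
    print(preorder(root))  # the iterative variant defined last yields [0, -10, -3, 5, 9]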
Saima-Chaity/Leetcode
Tree/convertSortedArrayToBinarySearchTree.py
convertSortedArrayToBinarySearchTree.py
py
2,392
python
en
code
0
github-code
6
19416798117
"""Determine the fration of non-built-up land area needed to become autarkic.""" import click import pandas as pd import geopandas as gpd from src.potentials import Potential @click.command() @click.argument("path_to_demand") @click.argument("path_to_potential") @click.argument("path_to_footprint") @click.argument("path_to_built_up_area") @click.argument("path_to_units") @click.argument("path_to_output") @click.argument("share_from_pv", type=click.INT) def necessary_land(path_to_demand, path_to_potential, path_to_footprint, path_to_built_up_area, path_to_units, path_to_output, share_from_pv=100): """Determine the fraction of non-built-up land area needed to become autarkic. Can vary the share of demand satisfied by rooftop PV. Ignores offshore as it distorts total area sizes. """ assert share_from_pv <= 100 assert share_from_pv >= 0 share_from_pv = share_from_pv / 100 demand = pd.read_csv(path_to_demand, index_col=0)["demand_twh_per_year"] potentials = pd.read_csv(path_to_potential, index_col=0) footprint = pd.read_csv(path_to_footprint, index_col=0) built_up_area = pd.read_csv(path_to_built_up_area, index_col=0) country_codes = gpd.read_file(path_to_units).set_index("id")["country_code"] rooftop_pv = potentials[str(Potential.ROOFTOP_PV)].where( potentials[str(Potential.ROOFTOP_PV)] < share_from_pv * demand, share_from_pv * demand ) demand_after_rooftops = demand - rooftop_pv assert (demand_after_rooftops >= 0).all() open_field_potential = potentials[str(Potential.ONSHORE_WIND)] + potentials[str(Potential.OPEN_FIELD_PV)] open_field_footprint = footprint[Potential.ONSHORE_WIND.area_name] + footprint[Potential.OPEN_FIELD_PV.area_name] fraction_non_built_up_land = fraction_land_where_potential_exists( open_field_potential=open_field_potential, open_field_footprint=open_field_footprint, built_up_area=built_up_area, demand_after_rooftops=demand_after_rooftops ) fraction_non_built_up_land.where( fraction_non_built_up_land.notna(), fraction_land_where_no_potential_exists( open_field_potential=open_field_potential, open_field_footprint=open_field_footprint, built_up_area=built_up_area, demand_after_rooftops=demand_after_rooftops, country_codes=country_codes ), inplace=True ) # corner cases fraction_non_built_up_land[fraction_non_built_up_land > 1] = 1 pd.DataFrame( index=fraction_non_built_up_land.index, data={ "fraction_non_built_up_land_necessary": fraction_non_built_up_land, "fraction_roofs_necessary": rooftop_pv / potentials[str(Potential.ROOFTOP_PV)], "rooftop_pv_generation_twh_per_year": rooftop_pv } ).to_csv( path_to_output, index=True, header=True ) def fraction_land_where_potential_exists(open_field_potential, open_field_footprint, built_up_area, demand_after_rooftops): share_of_open_field_potential_necessary = demand_after_rooftops / open_field_potential necessary_land = open_field_footprint * share_of_open_field_potential_necessary return necessary_land / built_up_area["non_built_up_km2"] def fraction_land_where_no_potential_exists(open_field_potential, open_field_footprint, built_up_area, demand_after_rooftops, country_codes): factor = open_field_footprint.groupby(country_codes).sum() / open_field_potential.groupby(country_codes).sum() factor.name = "km2_per_twh_nationally" assert (factor > 10).all() assert (factor < 70).all() factor = pd.DataFrame(country_codes).join(factor.rename("factor"), on="country_code")["factor"] necessary_land = demand_after_rooftops * factor return necessary_land / built_up_area["non_built_up_km2"] if __name__ == "__main__": necessary_land()
timtroendle/possibility-for-electricity-autarky
src/necessary_land.py
necessary_land.py
py
4,031
python
en
code
10
github-code
6
655296277
import json import os from concurrent import futures import luigi import numpy as np import nifty.tools as nt import z5py from cluster_tools.inference import InferenceLocal from cluster_tools.inference.inference_embl import InferenceEmbl OFFSETS = [ [-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0], [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9] ] def update_block_shape(config_dir, block_shape, default_config): global_conf = os.path.join(config_dir, 'global.config') if os.path.exists(global_conf): with open(global_conf) as f: config = json.load(f) else: config = default_config if config['block_shape'] != block_shape: config['block_shape'] = block_shape with open(global_conf, 'w') as f: json.dump(config, f) def predict(input_path, input_key, output_path, output_prefix, ckpt, gpus, tmp_folder, target, gpu_type='2080Ti', predict_affinities=False): task = InferenceLocal if target == 'local' else InferenceEmbl # halo = [8, 64, 64] # block_shape = [32, 256, 256] # larger halo halo = [12, 96, 96] block_shape = [24, 128, 128] if predict_affinities: output_key = { f'{output_prefix}/foreground': [0, 1], f'{output_prefix}/affinities': [1, 10] } else: output_key = { f'{output_prefix}/foreground': [0, 1], f'{output_prefix}/boundaries': [1, 2] } config_dir = os.path.join(tmp_folder, 'configs') os.makedirs(config_dir, exist_ok=True) update_block_shape(config_dir, block_shape, task.default_global_config()) conf = task.default_global_config() conf.update({'block_shape': block_shape}) with open(os.path.join(config_dir, 'global.config'), 'w') as f: json.dump(conf, f) if target == 'local': device_mapping = {ii: gpu for ii, gpu in enumerate(gpus)} else: device_mapping = None n_threads = 6 conf = task.default_task_config() conf.update({ 'dtype': 'uint8', 'device_mapping': device_mapping, 'threads_per_job': n_threads, 'mixed_precision': True, 'gpu_type': gpu_type, 'qos': 'high', 'mem_limit': 24, 'time_limit': 600 }) with open(os.path.join(config_dir, 'inference.config'), 'w') as f: json.dump(conf, f) t = task(tmp_folder=tmp_folder, config_dir=config_dir, max_jobs=len(gpus), input_path=input_path, input_key=input_key, output_path=output_path, output_key=output_key, checkpoint_path=ckpt, halo=halo, framework='pytorch') assert luigi.build([t], local_scheduler=True) update_block_shape(config_dir, [32, 256, 256], task.default_global_config()) def set_bounding_box(tmp_folder, bounding_box): config = InferenceLocal.default_global_config() config.update({ 'roi_begin': [bb.start for bb in bounding_box], 'roi_end': [bb.stop for bb in bounding_box] }) config_folder = os.path.join(tmp_folder, 'configs') os.makedirs(config_folder, exist_ok=True) config_file = os.path.join(config_folder, 'global.config') with open(config_file, 'w') as f: json.dump(config, f) def get_checkpoint(checkpoint, use_best=False, is_affinity_model=False): if use_best: path = os.path.join(checkpoint, 'best.pt') else: path = os.path.join(checkpoint, 'latest.pt') n_out = 10 if is_affinity_model else 2 if 'large' in checkpoint: model_kwargs = dict( scale_factors=[ [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2] ], in_channels=1, out_channels=n_out, initial_features=128, gain=2, pad_convs=True, final_activation='Sigmoid' ) else: model_kwargs = dict( scale_factors=[ [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2] ], in_channels=1, out_channels=n_out, initial_features=64, gain=2, pad_convs=True, final_activation='Sigmoid' ) ckpt = { 'class': ('mipnet.models.unet', 'AnisotropicUNet'), 'kwargs': model_kwargs, 'checkpoint_path': path, 
'model_state_key': 'model_state' } return ckpt def run_multicut(path, checkpoint_name, target, max_jobs, tmp_folder, beta): from cluster_tools.workflows import MulticutSegmentationWorkflow task = MulticutSegmentationWorkflow config_dir = os.path.join(tmp_folder, 'configs') configs = task.get_config() ws_config = configs['watershed'] ws_config.update({ "threshold": 0.25, 'apply_dt_2d': True, 'apply_filters_2d': True, 'apply_ws_2d': False, 'sigma_seeds': 2.6 }) with open(os.path.join(config_dir, 'watershed.config'), 'w') as f: json.dump(ws_config, f) cost_config = configs['probs_to_costs'] cost_config.update({ 'beta': beta }) with open(os.path.join(config_dir, 'probs_to_costs.config'), 'w') as f: json.dump(cost_config, f) bd_key = f'predictions/{checkpoint_name}/boundaries' node_labels_key = f'node_labels/{checkpoint_name}/multicut' ws_key = f'segmentation/{checkpoint_name}/watershed' seg_key = f'segmentation/{checkpoint_name}/multicut' t = task(target=target, max_jobs=max_jobs, tmp_folder=tmp_folder, config_dir=config_dir, input_path=path, input_key=bd_key, ws_path=path, ws_key=ws_key, problem_path=os.path.join(tmp_folder, 'data.n5'), node_labels_key=node_labels_key, output_path=path, output_key=seg_key) assert luigi.build([t], local_scheduler=True) def run_mws(data_path, checkpoint_name, target, max_jobs, tmp_folder, threshold): fg_key = f'predictions/{checkpoint_name}/foreground' mask_key = f'predictions/{checkpoint_name}/mask' aff_key = f'predictions/{checkpoint_name}/affinities' seg_key = f'segmentation/{checkpoint_name}/mutex_watershed' from cluster_tools.thresholded_components.threshold import ThresholdLocal, ThresholdSlurm task = ThresholdLocal if target == 'local' else ThresholdSlurm config_dir = os.path.join(tmp_folder, 'configs') t = task(tmp_folder=tmp_folder, config_dir=config_dir, max_jobs=max_jobs, input_path=data_path, input_key=fg_key, output_path=data_path, output_key=mask_key, threshold=0.5) assert luigi.build([t], local_scheduler=True) from cluster_tools.mutex_watershed import MwsWorkflow task = MwsWorkflow config_dir = os.path.join(tmp_folder, 'configs') configs = task.get_config() conf = configs['mws_blocks'] conf.update({ 'strides': [4, 4, 4], 'randomize_strides': True }) with open(os.path.join(config_dir, 'mws_blocks.config'), 'w') as f: json.dump(conf, f) conf = configs['block_edge_features'] conf.update({ 'offsets': OFFSETS }) with open(os.path.join(config_dir, 'block_edge_features.config'), 'w') as f: json.dump(conf, f) # TODO with halo? 
halo = None t = task(tmp_folder=tmp_folder, config_dir=config_dir, target=target, max_jobs=max_jobs, input_path=data_path, input_key=aff_key, output_path=data_path, output_key=seg_key, offsets=OFFSETS, halo=halo, mask_path=data_path, mask_key=mask_key, stitch_via_mc=True) assert luigi.build([t], local_scheduler=True) def postprocess(path, checkpoint_name, seg_key, out_key, target, max_jobs, tmp_folder, size_threshold=250, threshold=None): from cluster_tools.postprocess import FilterByThresholdWorkflow from cluster_tools.postprocess import SizeFilterWorkflow fg_key = f'predictions/{checkpoint_name}/foreground' hmap_key = f'predictions/{checkpoint_name}/boundaries' config_dir = os.path.join(tmp_folder, 'configs') if threshold is not None: task = FilterByThresholdWorkflow t = task(target=target, max_jobs=max_jobs, tmp_folder=tmp_folder, config_dir=config_dir, input_path=path, input_key=fg_key, seg_in_path=path, seg_in_key=seg_key, seg_out_path=path, seg_out_key=out_key, threshold=threshold) assert luigi.build([t], local_scheduler=True) seg_key = out_key if size_threshold is not None: task = SizeFilterWorkflow t = task(tmp_folder=tmp_folder, config_dir=config_dir, target=target, max_jobs=max_jobs, input_path=path, input_key=seg_key, output_path=path, output_key=out_key, hmap_path=path, hmap_key=hmap_key, relabel=True, preserve_zeros=True, size_threshold=size_threshold) assert luigi.build([t], local_scheduler=True) # this deserves a cluster tools task def affinity_to_boundary(data_path, prediction_prefix, tmp_folder, target, max_jobs): aff_key = os.path.join(prediction_prefix, 'affinities') bd_key = os.path.join(prediction_prefix, 'boundaries') with z5py.File(data_path, 'a') as f: if bd_key in f: return ds_affs = f[aff_key] shape = ds_affs.shape[1:] chunks = ds_affs.chunks[1:] ds_bd = f.require_dataset(bd_key, shape=shape, chunks=chunks, compression='gzip', dtype=ds_affs.dtype) blocking = nt.blocking([0, 0, 0], shape, chunks) def _block(block_id): block = blocking.getBlock(block_id) bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end)) bb_affs = (slice(None),) + bb affs = ds_affs[bb_affs] bd = np.maximum(affs[1], affs[2]) bd = np.maximum(bd, np.maximum(affs[4], affs[5])) ds_bd[bb] = bd.astype(ds_bd.dtype) with futures.ThreadPoolExecutor(8) as tp: tp.map(_block, range(blocking.numberOfBlocks)) def segment_with_boundaries(sample, checkpoint, target, gpus, max_jobs=32, bounding_box=None, beta=.5, threshold=0.25, only_prediction=False, gpu_type='2080Ti', is_affinity_model=False, size_threshold=250): checkpoint_name = os.path.split(checkpoint)[1] data_path = os.path.join('./data', f'{sample}.n5') raw_key = 'raw' prediction_prefix = os.path.join('predictions', checkpoint_name) tmp_folder = os.path.join('./tmp_folders', f'tmp_{checkpoint_name}_{sample}') if bounding_box is not None: set_bounding_box(tmp_folder, bounding_box) ckpt = get_checkpoint(checkpoint, is_affinity_model=is_affinity_model) predict(data_path, raw_key, data_path, prediction_prefix, ckpt, gpus, tmp_folder, target, gpu_type=gpu_type, predict_affinities=is_affinity_model) if only_prediction: return if is_affinity_model: affinity_to_boundary(data_path, prediction_prefix, tmp_folder, target, max_jobs) run_multicut(data_path, checkpoint_name, target, max_jobs, tmp_folder, beta=beta) seg_key = f'segmentation/{checkpoint_name}/multicut' out_key = f'segmentation/{checkpoint_name}/multicut_postprocessed' postprocess(data_path, checkpoint_name, seg_key, out_key, target, max_jobs, tmp_folder, threshold=threshold, 
size_threshold=size_threshold) def segment_with_affinities(sample, checkpoint, target, gpus, max_jobs=32, bounding_box=None, threshold=0.5, only_prediction=False, gpu_type='2080Ti', size_threshold=250): checkpoint_name = os.path.split(checkpoint)[1] data_path = os.path.join('./data', f'{sample}.n5') raw_key = 'raw' prediction_prefix = os.path.join('predictions', checkpoint_name) tmp_folder = os.path.join('./tmp_folders', f'tmp_{checkpoint_name}_{sample}_mws') if bounding_box is not None: set_bounding_box(tmp_folder, bounding_box) ckpt = get_checkpoint(checkpoint, is_affinity_model=True) predict(data_path, raw_key, data_path, prediction_prefix, ckpt, gpus, tmp_folder, target, gpu_type=gpu_type, predict_affinities=True) if only_prediction: return affinity_to_boundary(data_path, prediction_prefix, tmp_folder, target, max_jobs) run_mws(data_path, checkpoint_name, target, max_jobs, tmp_folder, threshold=threshold) seg_key = f'segmentation/{checkpoint_name}/mutex_watershed' out_key = f'segmentation/{checkpoint_name}/mutex_watershed_postprocessed' postprocess(data_path, checkpoint_name, seg_key, out_key, target, max_jobs, tmp_folder, size_threshold=size_threshold) if __name__ == '__main__': segment_with_affinities( 'small', './checkpoints/affinity_model_default_human_rat', 'local', gpus=[0, 1, 2, 3] )
constantinpape/torch-em
experiments/unet-segmentation/mitochondria-segmentation/mito-em/challenge/segmentation_impl.py
segmentation_impl.py
py
14,203
python
en
code
42
github-code
6
10522399200
from src.common.database import Database class Main(object): @classmethod def start_service(cls): card_number = input("Enter card Number: ") check_card_number = Database.find_one(query={"card_number": card_number}) if check_card_number is not None: pin = input("Enter Pin: ") data = Database.find_one(query={"card_number": card_number, "pin": pin}) if data is not None: print('___________________________________') print("Welcome {} ".format(data['name']).upper()) print('___________________________________') Main.present_options(card_number, pin) else: print("invalid pin") else: print("Invalid card number") @staticmethod def present_options(card_number, pin): print("1. Deposite cash\n2. Withdraw cash\n3. Account enquiries\n4. Change pin") print('___________________________________\n') number = int(input(" Enter Option number: ")) if number == 1: amount = int(input("Enter the amount to Deposit: ")) data = Database.find_one(query={"card_number": card_number, "pin": pin}) print("You are Depositing GHS {} into your account( {} )".format(amount, data['acc_number'])) print("Do you wish to continue?\n1. YES\n2. NO") confirm_option = int(input("")) if confirm_option == 1: initial = Database.find_one(query={"card_number": card_number}) Database.update_balance(card_number=card_number, pin=pin, amount=amount) updated = Database.find_one(query={"card_number": card_number}) print("You have succesfully Deposited GHS {} into your account {}\nInitial balance: GHS {}\nNew Balance GHS {}\nLast Transaction date: {}".format(amount, updated['acc_number'], initial['balance'], updated['balance'], updated['last_transaction_date'])) elif confirm_option == 2: print("Transaction cancelled") return None else: print("You have entered invalid response") return None elif number == 2: withdrawal_amount = int(input("Enter the amount to withdraw: ")) data = Database.find_one(query={"card_number": card_number}) print("You are withdrawing GHS {} from your account( {} )".format(withdrawal_amount, data['acc_number'])) print("Do you wish to continue?\n1. YES\n2. NO") confirm_option = int(input("")) if confirm_option == 1: initial = Database.find_one(query={"card_number": card_number}) if initial['balance']-5 >= withdrawal_amount: Database.update_balance(card_number=card_number, pin=pin, amount=-withdrawal_amount) updated = Database.find_one(query={"card_number": card_number}) print("You have succesfully withdrawn GHS {} from your account {}\nInitial balance: GHS {}\nNew Balance GHS {}\nLast Transaction date: {}".format(withdrawal_amount, updated['acc_number'], initial['balance'], updated['balance'], updated['last_transaction_date'])) else: print("Transaction failed! Your balance is insufficient") elif confirm_option == 2: print("Transaction cancelled") return None else: "You have entered invalid response" return None elif number == 3: data = Database.find_one(query={"card_number": card_number, "pin": pin}) print("_____________________________") print('Name: {}\nAccount Number: {}\nCurrent Balance: {}\nLast transaction Date: {}'.format(data['name'], data['acc_number'], data['balance'], data['last_transaction_date'])) elif number == 4: new_pin = input("Enter new pin: ") comfirm_new_pin = input("Enter new pin again: ") if new_pin == comfirm_new_pin: Database.update_pin(card_number=card_number, new_pin=new_pin) print("Pin changed successfully\nExiting app...\nRun app again") return None else: print('Pin does not match\nExiting app...') else: print("Invalid input")
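Nothing in this module appears to call Main.start_service(); if the file is also meant to be run directly (rather than imported by another script), a conventional entry-point guard such as the sketch below would start the flow. This is an editor's illustration, not code from the repository.

# Hypothetical entry point; the original project may start the ATM flow elsewhere.
if __name__ == "__main__":
    Main.start_service()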
Ankomahene/Terminal_ATM_Banking
src/models/main.py
main.py
py
5,680
python
en
code
0
github-code
6
10758898663
import uvicorn from fastapi import FastAPI, HTTPException app = FastAPI() @app.get("/") async def root(): return {"message": "Welcome to basic math operations api!"} @app.get("/add") async def add(a: int, b: int): return {"result": a + b} @app.get("/subtract") async def subtract(a: int, b: int): return {"result": a - b} @app.get("/multiply") async def multiply(a: int, b: int): return {"result": a * b} @app.get("/divide") async def divide(a: int, b: int): if b == 0: raise HTTPException( status_code=404, detail='Division by 0 not allowed!') return {"result": a / b} if __name__ == '__main__': uvicorn.run("app:app", host="0.0.0.0", port=5000, reload=True)
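As a quick, hedged illustration of how this math API can be exercised without starting uvicorn, the sketch below drives the `app` object defined above with FastAPI's TestClient; it assumes the snippet runs where `app` is importable (for example in a test module next to web/app.py).

# Minimal sketch using FastAPI's TestClient against the `app` defined above.
from fastapi.testclient import TestClient

client = TestClient(app)

assert client.get("/add", params={"a": 2, "b": 3}).json() == {"result": 5}
assert client.get("/multiply", params={"a": 4, "b": 5}).json() == {"result": 20}
# Division by zero is rejected by the endpoint with the HTTP error status it raises.
assert client.get("/divide", params={"a": 1, "b": 0}).status_code == 404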
pawelcich/rest_api
web/app.py
app.py
py
722
python
en
code
0
github-code
6
19631761443
from FACE_VERIFICATION.validation import Verify from utils.encrypt import Encrypt from utils.calling import caller import pickle obj1 = Verify() obj2 = Encrypt() obj3 = caller() class RUN: def __init__(self): pass def controller(self,data): mode = data['mode'] if mode == "verify": response = obj1.verify(frame_count=1,WINDOW=data['image_area']) print(response) return response if mode == "train": response = obj1.generate_embeds(frame_count=2,WINDOW=data['image_area']) print(response) return response if mode == "predict": response = obj1.verify(frame_count=1,WINDOW=data['image_area']) print(response) return response def encrypt_controller(self,unique_id=None,data=None,mode=None,_id=None): if mode == 'Add' or mode == 'Update': data = obj2.encrypt_data(unique_id,data) return obj3.database_controller(unique_id,data,mode=mode,_id =_id) elif mode == "View": data = obj3.database_controller(unique_id,data,mode=mode,_id =_id) new_data = [] for key in data.keys(): new_data = data[key] new_data = obj2.decrypt_data(unique_id,new_data) data[key] = new_data return data else: return obj3.database_controller(unique_id,data,mode=mode,_id =_id)
saquibquddus/Face-Unlock-Web-Application
STREAMLIT/utils/run.py
run.py
py
1,503
python
en
code
0
github-code
6
15560664217
import subprocess
import sys


def call_repeatmasker(fasta, lib, engine="ncbi", cores=1, dir="./"):
    # Equivalent command line:
    # RepeatMasker -e ncbi -pa 28 -s
    #   -lib dmel_repbase_lib.fasta
    #   -no_is -nolow
    #   -dir .
    #   dmel-all-chromosome-r6.22.fasta
    from rwt.checkers import check_installation
    if not check_installation("RepeatMasker"):
        sys.exit()
    # Pass each flag and its value as separate argv entries, and include the
    # "-pa" flag for the core count (previously the bare number was passed).
    return subprocess.run(["RepeatMasker", "-e", engine, "-pa", str(cores), "-s",
                           "-lib", lib, "-no_is", "-nolow", "-dir", dir, fasta])


def call_repbase_fixer(ifa, ofa):
    return subprocess.run(["Rscript", "--vanilla", "scripts/format_repbase_fa.R", ifa, ofa])


def call_fa2gtf(ifai, ogtf):
    return subprocess.run(["Rscript", "--vanilla", "scripts/fa2gtf.R", ifai, ogtf])
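For orientation, a hedged usage sketch of the wrapper above; the FASTA and library names are taken from the comments in the function, while the output directory is a placeholder.

# Hypothetical invocation of call_repeatmasker; the output directory is a placeholder.
if __name__ == "__main__":
    call_repeatmasker(
        fasta="dmel-all-chromosome-r6.22.fasta",   # genome assembly to mask
        lib="dmel_repbase_lib.fasta",              # custom repeat library
        cores=28,                                  # forwarded to RepeatMasker as -pa
        dir="./repeatmasker_out",                  # placeholder output directory
    )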
mal2017/reference-with-transposons
rwt/callers.py
callers.py
py
854
python
en
code
0
github-code
6
27545085038
#! /usr/bin/env python3 # -*- coding: utf-8 -*- """ Cube centring, detects bad frames, crops and bins @author: Iain """ __author__ = 'Iain Hammond' __all__ = ['calib_dataset'] from os import makedirs, system from os.path import isfile, isdir import numpy as np from pyprind import ProgBar import matplotlib from matplotlib import pyplot as plt from hciplot import plot_frames from vip_hci.config import get_available_memory, time_ini, timing from vip_hci.fits import open_fits, write_fits from vip_hci.preproc import cube_recenter_via_speckles, cube_recenter_2dfit, frame_shift, \ cube_detect_badfr_correlation, cube_crop_frames, cube_subsample, frame_crop from vip_hci.stats import cube_distance from vip_hci.var import frame_center matplotlib.use('Agg') class calib_dataset: # this class is for pre-processing of the calibrated data def __init__(self, inpath, outpath, dataset_dict, recenter_method, recenter_model, coro=True): self.inpath = inpath self.outpath = outpath self.derot_angles_cropped = open_fits(self.inpath+'derot_angles_cropped.fits', verbose=False) self.recenter_method = recenter_method self.recenter_model = recenter_model self.sci_list = [] # get all the science cubes into a list with open(self.inpath+'sci_list.txt', "r") as f: tmp = f.readlines() for line in tmp: self.sci_list.append(line.split('\n')[0]) self.sci_list.sort() # make sure they are in order so derotation doesn't make a mess of the frames print(len(self.sci_list), 'science cubes', flush=True) # read the dimensions of each science cube from calibration, or get from each fits file if isfile(self.inpath+'new_ndit_sci_sky_unsat.fits'): print('Using SCI cube dimensions from calibration', flush=True) nframes = open_fits(self.inpath+'new_ndit_sci_sky_unsat.fits', verbose=False) self.real_ndit_sci = [int(nframes[0])] * len(self.sci_list) else: self.real_ndit_sci = [] print('Re-evaluating SCI cube dimensions', flush=True) for sc, fits_name in enumerate(self.sci_list): # enumerate over the list of all science cubes tmp = open_fits(self.inpath+'4_sky_subtr_'+fits_name, verbose=False) self.real_ndit_sci.append(tmp.shape[0]) # gets length of each cube for later use del tmp self.dataset_dict = dataset_dict self.nproc = dataset_dict['nproc'] if not isdir(self.outpath): makedirs(self.outpath) system("cp " + self.inpath + 'master_unsat-stellarpsf_fluxes.fits ' + self.outpath) # for use later system("cp " + self.inpath + 'fwhm.fits ' + self.outpath) # for use later system("cp " + self.inpath + 'master_unsat_psf_norm.fits ' + self.outpath) # for use later def recenter(self, sigfactor=4, subi_size=41, crop_sz=251, verbose=True, debug=False, plot=False, coro=True): """ Centers cropped science images by fitting a double Gaussian (negative+positive) to each median combined SCI cube, or by fitting a single negative Gaussian to the coronagraph using the speckle pattern of each median combined SCI cube. Parameters: ---------- sigfactor: float, default = 4 If thresholding is performed during 2gauss fitting, set the threshold in terms of gaussian sigma in the subimage (will depend on your cropping size) subi_size: int, default = 21 Size of the square subimage sides in pixels. crop_sz: int, optional, in units of pixels. 251 by default Crops to this size after recentering for memory management purposes. Useful for very large datasets verbose: bool To provide extra information about the progress and results of the pipeline plot: bool If True, a plot of the shifts is saved (PDF) coro: bool For coronagraph data. False otherwise. 
Recentering requires coronagraphic data Writes fits to file: ---------- x_shifts.fits # writes the x shifts to the file y_shifts.fits # writes the y shifts to the file {source}_master_cube.fits # makes the recentered master cube derot_angles.fits # makes a vector of derotation angles """ if not coro: if self.recenter_method != '2dfit': raise ValueError('Centering method invalid') if self.recenter_model == '2gauss': raise ValueError('2Gauss requires coronagraphic data') ncubes = len(self.sci_list) fwhm_all = open_fits(self.inpath+'fwhm.fits', verbose=debug) # changed this to open the file as sometimes we wont run get_stellar_psf() or it may have already run fwhm = fwhm_all[0] # fwhm is the first entry in the file fwhm = fwhm.item() # changes from numpy.float32 to regular float so it will work in VIP if verbose: print('FWHM = {:3f} px'.format(fwhm), flush=True) if not subi_size % 2: subi_size -= 1 print('WARNING: Sub image size not odd. Adjusted to {} px'.format(subi_size), flush=True) # Creates a master science cube with just the median of each cube if not isfile(self.outpath+'median_calib_cube.fits'): bar = ProgBar(len(self.sci_list), stream=1, title='Creating master science cube (median of each science cube)....') for sc, fits_name in enumerate(self.sci_list): # enumerate over the list of all science cubes tmp = open_fits(self.inpath+'4_sky_subtr_'+fits_name, verbose=debug) # open cube as tmp if sc == 0: _, ny, nx = tmp.shape # dimensions of cube if subi_size > ny: # check if bigger than science frame subi_size = ny # ny should be odd already from calibration print('WARNING: Sub image size larger than frame. Adjusted to {} px'.format(subi_size), flush=True) tmp_tmp = np.zeros([ncubes, ny, ny]) # template cube with the median of each SCI cube tmp_tmp[sc] = np.median(tmp, axis=0) # median frame of cube tmp get_available_memory() bar.update() write_fits(self.outpath+'median_calib_cube.fits', tmp_tmp, verbose=debug) if verbose: print('Median science cube created for recentering', flush=True) else: tmp_tmp = open_fits(self.outpath+'median_calib_cube.fits', verbose=debug) _, ny, nx = tmp_tmp.shape if verbose: print('Median science cube for recentering has been read from file', flush=True) if self.recenter_method == 'speckle': # FOR GAUSSIAN print('##### Recentering via speckle pattern #####', flush=True) if debug: get_available_memory() recenter = cube_recenter_via_speckles(tmp_tmp, cube_ref=None, alignment_iter=5, gammaval=1, min_spat_freq=0.5, max_spat_freq=3, fwhm=fwhm, debug=debug, recenter_median=True, negative=coro, fit_type='gaus', crop=True, subframesize=subi_size, imlib='opencv', interpolation='lanczos4', plot=plot, full_output=True, nproc=self.nproc) sy = recenter[4] sx = recenter[3] elif self.recenter_method == '2dfit': # DOUBLE GAUSSIAN print('##### Recentering via 2dfit #####', flush=True) if debug: get_available_memory() params_2g = {'fwhm_neg': 0.8*fwhm, 'fwhm_pos': 2*fwhm, 'theta_neg': 48., 'theta_pos':135., 'neg_amp': 0.8} recenter = cube_recenter_2dfit(tmp_tmp, xy=None, fwhm=fwhm, subi_size=subi_size, model=self.recenter_model, nproc=self.nproc, imlib='opencv', interpolation='lanczos4', offset=None, negative=False, threshold=True, sigfactor=sigfactor, fix_neg=False, params_2g=params_2g, save_shifts=False, full_output=True, verbose=verbose, debug=debug, plot=plot) sy = recenter[1] sx = recenter[2] elif self.recenter_method == 'as_observed': # uses center found in median of all frames, and applies the same x-y shift to all frames print('##### Recentering to median of all frames 
#####', flush=True) subi_size = 9 tmp_med = np.median(tmp_tmp, axis=0) cy, cx = frame_center(tmp_med) if plot: med_subframe = frame_crop(tmp_med, size=subi_size, cenxy=(cx, cy), verbose=debug) plot_frames(med_subframe, vmin=np.percentile(med_subframe, 0.5), vmax=np.percentile(med_subframe, 99.5), label='Median frame for centering', cmap='inferno', dpi=300, save=self.outpath + 'frame_center_as_observed.pdf') tmp_med = tmp_med[np.newaxis, :, :] # make 3D to use in cube_recenter_2dfit recenter = cube_recenter_2dfit(tmp_med, full_output=True, xy=(cx, cy), subi_size=subi_size, nproc=self.nproc, fwhm=fwhm, debug=verbose, negative=coro, plot=plot) sy = np.repeat(recenter[1], len(self.sci_list)) # make array of shifts equal to number of science cubes sx = np.repeat(recenter[2], len(self.sci_list)) else: raise ValueError("Centering method is not recognised. Use either `speckle', `2dfit' or `as_observed'.") if plot: # save the shift plot plt.savefig(self.outpath+'shifts-xy_{}.pdf'.format(self.recenter_method), bbox_inches='tight', pad_inches=0.1) plt.close('all') del recenter if debug: get_available_memory() # LOAD IN REAL_NDIT_SCI # Load original cubes, shift them, and create master cube if crop_sz is not None: crop = True if not crop_sz % 2: crop_sz -= 1 print('Crop size not odd, adapted to {}'.format(crop_sz), flush=True) print('Cropping to {} pixels'.format(crop_sz), flush=True) tmp_tmp = np.zeros([int(np.sum(self.real_ndit_sci)), crop_sz, crop_sz]) else: tmp_tmp = np.zeros([int(np.sum(self.real_ndit_sci)), ny, nx]) angles_1dvector = np.zeros([int(np.sum(self.real_ndit_sci))]) # empty array for derot angles, length of number of frames if verbose: print('Shifting frames and creating master science cube', flush=True) for sc, fits_name in enumerate(self.sci_list): tmp = open_fits(self.inpath+'4_sky_subtr_'+fits_name, verbose=debug) # opens science cube if crop: tmp = cube_crop_frames(tmp, crop_sz, force=False, verbose=debug, full_output=False) dim = int(self.real_ndit_sci[sc]) # gets the integer dimensions of this science cube for dd in range(dim): # dd goes from 0 to the largest dimension tmp_tmp[int(np.sum(self.real_ndit_sci[:sc]))+dd] = frame_shift(tmp[dd], shift_y=sy[sc], shift_x=sx[sc], imlib='vip-fft') # this line applies the shifts to all the science images in the cube the loop is currently on. it also converts all cubes to a single long cube by adding the first dd frames, then the next dd frames from the next cube and so on angles_1dvector[int(np.sum(self.real_ndit_sci[:sc]))+dd] = self.derot_angles_cropped[sc][dd] # turn 2d rotation file into a vector here same as for the mastercube above # sc*ndit+dd i don't think this line works for variable sized cubes if debug: get_available_memory() print('Science cube number: {}'.format(sc+1), flush=True) # write all the shifts write_fits(self.outpath+'x_shifts.fits', sx, verbose=debug) # writes the x shifts to the file write_fits(self.outpath+'y_shifts.fits', sy, verbose=debug) # writes the y shifts to the file write_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), tmp_tmp, verbose=debug) # makes the master cube write_fits(self.outpath+'derot_angles.fits', angles_1dvector, verbose=debug) # writes the 1D array of derotation angles if verbose: print('Shifts applied, master cube saved', flush=True) del tmp_tmp, sx, sy, angles_1dvector def bad_frame_removal(self, pxl_shift_thres=0.5, sub_frame_sz=31, verbose=True, debug=False, plot=True): """ For removing outlier frames often caused by AO errors. 
To be run after recentering is complete. Takes the recentered mastercube and removes frames with a shift greater than a user defined pixel threshold in x or y above the median shift. It then takes the median of those cubes and correlates them to the median combined mastercube. Removes all those frames below the threshold from the mastercube and rotation file, then saves both as new files for use in post processing Parameters: ---------- pxl_shift_thres : float, in units of pixels. Default is 0.5 pixels. Any shifts in the x or y direction greater than this threshold will cause the frame/s to be labelled as bad and thus removed. May required a stricter threshold depending on the dataset sub_frame_sz : integer, must be odd. Default is 31. This sets the cropping during frame correlation to the median debug : bool Will show open and save messages for FITS files plot : bool Will write the correlation plot to file if True, False will not """ if verbose: print('######### Beginning bad frame removal #########', flush=True) if not sub_frame_sz % 2: sub_frame_sz -= 1 print('WARNING: Bad frame sub image size not odd. Adjusted to {} px'.format(sub_frame_sz), flush=True) angle_file = open_fits(self.outpath+'derot_angles.fits', verbose=debug) # opens the rotation file recentered_cube = open_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), verbose=debug) # loads the master cube # open x shifts file for the respective method x_shifts = open_fits(self.outpath+"x_shifts.fits", verbose=debug) median_sx = np.median(x_shifts) # median of x shifts # opens y shifts file for the respective method y_shifts = open_fits(self.outpath+"y_shifts.fits", verbose=debug) median_sy = np.median(y_shifts) # median of y shifts # self.ndit came from the z dimension of the first calibrated science cube above in recentering # x_shifts_long = np.zeros([len(self.sci_list)*self.ndit]) # list with number of cubes times number of frames in each cube as the length # y_shifts_long = np.zeros([len(self.sci_list)*self.ndit]) # long are shifts to be applied to each frame in each cube x_shifts_long = np.zeros([int(np.sum(self.real_ndit_sci))]) y_shifts_long = np.zeros([int(np.sum(self.real_ndit_sci))]) for i in range(len(self.sci_list)): # from 0 to the length of sci_list ndit = self.real_ndit_sci[i] # gets the dimensions of the cube x_shifts_long[i*ndit:(i+1)*ndit] = x_shifts[i] # sets the average shifts of all frames in that cube y_shifts_long[i*ndit:(i+1)*ndit] = y_shifts[i] write_fits(self.outpath+'x_shifts_long.fits', x_shifts_long, verbose=debug) # saves shifts to file write_fits(self.outpath+'y_shifts_long.fits', y_shifts_long, verbose=debug) x_shifts = x_shifts_long y_shifts = y_shifts_long if verbose: print("x shift median:", median_sx) print("y shift median:", median_sy, flush=True) bad = [] good = [] i = 0 shifts = list(zip(x_shifts, y_shifts)) bar = ProgBar(len(x_shifts), stream=1, title='Running pixel shift check...') for sx, sy in shifts: # iterate over the shifts to find any greater or less than pxl_shift_thres pixels from median if abs(sx) < ((abs(median_sx)) + pxl_shift_thres) and abs(sx) > ((abs(median_sx)) - pxl_shift_thres) and abs(sy) < ((abs(median_sy)) + pxl_shift_thres) and abs(sy) > ((abs(median_sy)) - pxl_shift_thres): good.append(i) else: bad.append(i) i += 1 bar.update() # only keeps the files that weren't shifted above the threshold frames_pxl_threshold = recentered_cube[good] # only keeps the corresponding derotation entry for the frames that were kept angle_pxl_threshold = 
angle_file[good] del recentered_cube, angle_file if verbose: print('Frames within pixel shift threshold:', len(frames_pxl_threshold)) print('########### Median combining {} frames for correlation check... ###########'.format( len(frames_pxl_threshold)), flush=True) # makes array of good frames from the recentered mastercube subarray = cube_crop_frames(frames_pxl_threshold, size=sub_frame_sz, verbose=verbose) # crops all the frames to a common size frame_ref = np.nanmedian(subarray, axis=0) # median frame of remaining cropped frames, can be sped up with multi-processing if verbose: print('Running frame correlation check...', flush=True) # calculates correlation threshold using the median of the Pearson correlation of all frames, minus 1 standard deviation # frame_ref = frame_crop(tmp_median, size = sub_frame_sz, verbose=verbose) # crops the median of all frames to a common size distances = cube_distance(subarray, frame_ref, mode='full', dist='pearson', plot=plot) # calculates the correlation of each frame to the median and saves as a list if plot: # save a plot of distances compared to the median for each frame if set to 'save' plt.savefig(self.outpath+'distances.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1) plt.close('all') correlation_thres = np.median(distances) - np.std(distances) # threshold is the median of the distances minus one stddev good_frames, bad_frames = cube_detect_badfr_correlation(subarray, frame_ref=frame_ref, dist='pearson', threshold=correlation_thres, plot=plot, verbose=verbose) if plot: plt.savefig(self.outpath+'frame_correlation.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1) plt.close('all') # only keeps the files that were above the correlation threshold frames_threshold = frames_pxl_threshold[good_frames] del frames_pxl_threshold if verbose: print('Frames within correlation threshold:', len(frames_threshold), flush=True) # only keeps the derotation entries for the good frames above the correlation threshold angle_threshold = angle_pxl_threshold[good_frames] # saves the good frames to a new file, and saves the derotation angles to a new file write_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), frames_threshold, verbose=debug) write_fits(self.outpath+'derot_angles.fits', angle_threshold, verbose=debug) if verbose: print('Saved good frames and their respective rotations to file', flush=True) del frames_threshold def crop_cube(self, arcsecond_diameter=3, verbose=True, debug=False): """ Crops frames in the master cube after recentering and bad frame removal. Recommended for post-processing ie. PCA in concentric annuli. If the provided arcsecond diameter happens to be larger than the cropping provided in recentering, no cropping will occur. Parameters ---------- arcsecond_diameter : float or int Size of the frames diameter in arcseconds. Default of 3" for NaCO corresponds to 111x111 (x,y) pixel frames. Note this is a diameter, not a radius. verbose : bool optional If True extra messages of completion are shown. debug : bool Prints extra information during cropping, and when FITS are opened or saved. 
Writes to FITS file ------- cropped cube : numpy ndarray Cube with cropped frames """ if not isfile(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source'])): raise NameError('Missing master cube from recentering and bad frame removal!') master_cube = open_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), verbose=debug) _, ny, _ = master_cube.shape crop_size = int(np.ceil(arcsecond_diameter / self.dataset_dict['pixel_scale'])) # rounds up if not crop_size % 2: crop_size += 1 print('Crop size not odd, increased to {}'.format(crop_size), flush=True) if debug: print('Input crop size is {} pixels'.format(crop_size), flush=True) if crop_size >= ny: print('Crop size is larger than the frame size. Skipping cropping...', flush=True) else: if verbose: print('######### Running frame cropping #########', flush=True) start_time = time_ini(verbose=False) master_cube = cube_crop_frames(master_cube, crop_size, force=False, verbose=debug, full_output=False) if verbose: timing(start_time) print('Cropping complete', flush=True) write_fits(self.outpath + '{}_master_cube.fits'.format(self.dataset_dict['source']), master_cube, verbose=debug) del master_cube def median_binning(self, binning_factor=10, verbose=True, debug=False): """ Median combines the frames within the master science cube as per the binning factor, and makes the necessary changes to the derotation file. Temporal sub-sampling of data is useful to significantly reduce post-processing computation time, however we risk using a temporal window that equates to the decorrelation rate of the PSF. This is generally noticeable for separations beyond 0.5" Parameters: ---------- binning_factor: int, default = 10 Defines how many frames to median combine verbose : bool Whether to print completion, timing and binning information debug : bool Prints when FITS files are opened and saved Writes to FITS file: ---------- the binned master cube the binned derotation angles """ if not isinstance(binning_factor, int) and not isinstance(binning_factor, list) and \ not isinstance(binning_factor, tuple): # if it isn't int, tuple or list then raise an error raise TypeError('Invalid binning_factor! Use either int, list or tuple') if not isfile(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source'])): raise NameError('Missing master cube from recentering and bad frame removal!') if not isfile(self.outpath+'derot_angles.fits'): raise NameError('Missing derotation angles files from recentering and bad frame removal!') bin_fac = int(binning_factor) # ensure integer if bin_fac != 1 and bin_fac != 0: master_cube = open_fits(self.outpath + '{}_master_cube.fits'.format(self.dataset_dict['source']), verbose=debug) derot_angles = open_fits(self.outpath + 'derot_angles.fits', verbose=debug) if verbose: start_time = time_ini(verbose=False) cube_bin, derot_angles_bin = cube_subsample(master_cube, n=bin_fac, mode="median", parallactic=derot_angles, verbose=verbose) if verbose: timing(start_time) # prints how long median binning took write_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), cube_bin, verbose=debug) write_fits(self.outpath+'derot_angles.fits', derot_angles_bin, verbose=debug) del master_cube, derot_angles, cube_bin, derot_angles_bin else: print('Binning factor is {}, skipping binning...'.format(binning_factor), flush=True)
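Putting the steps together, the following is a hedged sketch of how this pre-processing class appears intended to be driven, based only on the signatures and dictionary keys used above. The paths and the source name are placeholders, and the pixel scale is an assumed value consistent with the docstring's note that 3 arcsec corresponds to roughly 111 pixels.

# Hypothetical driver for calib_dataset; paths, source name and pixel scale are placeholders.
dataset_dict = {'source': 'HD100546', 'nproc': 4, 'pixel_scale': 0.027}
prep = calib_dataset(inpath='/data/calibrated/', outpath='/data/preproc/',
                     dataset_dict=dataset_dict, recenter_method='speckle',
                     recenter_model='gauss', coro=True)
prep.recenter(subi_size=41, crop_sz=251, verbose=True, plot=True)
prep.bad_frame_removal(pxl_shift_thres=0.5, sub_frame_sz=31)
prep.crop_cube(arcsecond_diameter=3)
prep.median_binning(binning_factor=10)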
IainHammond/NACO_pipeline
naco_pip/NACO_preproc.py
NACO_preproc.py
py
25,286
python
en
code
null
github-code
6
29451178686
from selenium import webdriver import time, re, urllib, requests from telethon.sync import TelegramClient from config import api_id, api_hash client = TelegramClient('name', api_id, api_hash) client.start() dlgs = client.get_dialogs() tegmo = None for dlg in dlgs: if dlg.title == "LTC Click Bot": tegmo = dlg if tegmo == None: print("Отсутствует чат с ботом") exit() print(tegmo.title) # dr_options = webdriver.FirefoxOptions() # dr_options.set_headless() # driver = webdriver.Firefox(options=dr_options) from selenium.webdriver.chrome.options import Options chrome_options = Options() chrome_options.add_argument("--headless") chrome_options.add_argument('--disable-gpu') chrome_options.add_argument('--log-level=3') driver = webdriver.Chrome(chrome_options=chrome_options) tmp_url = '' n = 0 nn = 0 links = True links2 = True try: while True: msg = client.get_messages(tegmo, limit=1)[0] if re.search(r'\bThere is a new site for you to\b', msg.message): client.send_message( tegmo , "🖥 Visit sites") if re.search(r'\bPlease stay on the site for at least 10 seconds\b', msg.message): time.sleep(10) continue if re.search(r'\bSorry\b', msg.message): time.sleep(10) nn = nn + 1 print('Закончились ссылки ждем','.'*nn, end='\r') client.send_message( tegmo , "🖥 Visit sites") continue if re.search(r'\bPress the "Visit website" button to earn LTC\b', msg.message): nn = 0 url = msg.reply_markup.rows[0].buttons[0].url if tmp_url == url: nn = nn + 1 print("ссыдка с задежкой", '.'*nn , end='\r') time.sleep(5) t_el = driver.find_elements_by_class_name('timer') text = '' for i in t_el: if (len(i.text) > 0): text = i.text i.click() print(text) if ''.join(text) == '': client.send_message( tegmo , "🖥 Visit sites") links2 = False continue links = True print("переходим по ссылке", url) driver.get(url) n = n + 1 print("проходов ",n) tmp_url = url time.sleep(2) except Exception as ex: print(ex) finally: driver.close()
Sofron80/coin_bot
main2.py
main2.py
py
2,611
python
en
code
0
github-code
6
10254372975
from multiprocessing import context from django.shortcuts import render, redirect from .models import * # Create your views here. def produk_list(request): template_name = "produk_list.html" group_produk = Circle_produk.objects.all() context ={ "produk" : group_produk, } return render(request, template_name, context) def tambah_barang(request): template_name = "add_barang.html" kategori = Kategori.objects.all() if request.method == "POST": input_nama = request.POST.get('nama') input_jumlah = request.POST.get('jumlah') input_deskripsi = request.POST.get('deskripsi') input_kategori = request.POST.get('kategori') get_kategori = Kategori.objects.get(nama=input_kategori) Circle_produk.objects.create( nama = input_nama, jumlah = input_jumlah, deskripsi = input_deskripsi, kategori = get_kategori ) return redirect(produk_list) context ={ "kategori": kategori } return render(request, template_name, context) def update_barang(request,id): template_name = "add_barang.html" kategori = Kategori.objects.all() get_produk = Circle_produk.objects.get(id=id) if request.method == "POST": input_nama = request.POST.get('nama') input_jumlah = request.POST.get('jumlah') input_deskripsi = request.POST.get('deskripsi') input_kategori = request.POST.get('kategori') get_kategori = Kategori.objects.get(nama=input_kategori) get_produk.nama = input_nama get_produk.jumlah = input_jumlah get_produk.deskripsi = input_deskripsi get_produk.kategori = get_kategori get_produk.save() return redirect(produk_list) context ={ "kategori": kategori, "get_produk" : get_produk } return render(request, template_name, context) def delete_barang(request, id): Circle_produk.objects.get(id=id).delete() return redirect(produk_list)
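These are function-based views and need to be wired up in a URLconf; a minimal, hypothetical urls.py for this app could look like the sketch below. The route paths and names are assumptions for illustration, not taken from the repository.

# Hypothetical urls.py for the produk app; paths and names are illustrative only.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.produk_list, name='produk_list'),
    path('tambah/', views.tambah_barang, name='tambah_barang'),
    path('update/<int:id>/', views.update_barang, name='update_barang'),
    path('delete/<int:id>/', views.delete_barang, name='delete_barang'),
]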
RenalPutra/kasir-django
produk/views.py
views.py
py
2,103
python
tr
code
0
github-code
6
22609873896
from django.contrib.auth.decorators import user_passes_test, login_required from django.http import HttpResponse, HttpResponseRedirect from django.http import JsonResponse from django.shortcuts import render, redirect from apps.rfid.models import GeneralAssembly from hybridjango.utils import group_test class Ballot: nr = 0 title = 'Avstemning' choices = [ 'Blank', 'Vevkom', 'Bedkom', 'Arrkom', 'Jentekom', 'Redaksjonen', ] only_members = True empty_votes = True is_attending = True has_voted = [] votes = [] active = True class Suggestion: num = 0 author = "Ikke vevsjef" suggestion_text = "Vevkom burde ta over styret" suggestions_enabled = False empty_vote = 'Tomt' suggestion_list = [] @user_passes_test(group_test("Tellekorps")) def overview(request): user = request.user if request.method == 'POST': if 'ballot_form' in request.POST: Ballot.title = request.POST.get('title', 'Avstemning') Ballot.only_members = True if request.POST.get('membersOnly') else False Ballot.empty_votes = True if request.POST.get('empty_votes') else False Ballot.is_attending = True if request.POST.get('is_attending') else False Ballot.choices = [v for k, v in request.POST.items() if k.startswith('choice-')] Ballot.votes = [] Ballot.has_voted = [] Ballot.nr += 1 return HttpResponseRedirect('#') elif 'active' in request.GET: Ballot.active = not (request.GET['active'] == 'Deaktiver') return render( request, 'ballot/overview.html', context={ 'active': Ballot.active, }, ) @user_passes_test(group_test("Nestleder")) def suggestion_overview(request): user = request.user if request.method == 'POST': if 'toggle_suggestions' in request.POST: Suggestion.suggestions_enabled = not Suggestion.suggestions_enabled elif 'clear_suggestions' in request.POST: del suggestion_list[:] return HttpResponseRedirect("#") return render(request, 'ballot/suggestions.html', context={ 'suggestions_enabled' : Suggestion.suggestions_enabled }) @login_required def post_suggestion(request): sugg = Suggestion() sugg.num += 1 sugg.author = request.user sugg.suggestion_text = request.POST.get('suggestion_text') suggestion_list.append(sugg) @user_passes_test(group_test("Nestleder")) def get_suggestions(request): json_list = [{ "author_name" : suggestion.author.full_name, "suggestion_text" : suggestion.suggestion_text, } for suggestion in suggestion_list] return JsonResponse({"suggestion_list" : json_list}) @login_required def ballot(request): return render(request, 'ballot/voteview.html', get_ballot_dict(request.user)) @login_required def get_choices(request): return JsonResponse(get_ballot_dict(request.user)) def get_ballot_dict(user): choices = Ballot.choices.copy() if Ballot.empty_votes: choices.append(empty_vote) return { 'nr': Ballot.nr, 'title': Ballot.title, 'choices': choices, 'has_voted': user.pk in Ballot.has_voted, 'active': Ballot.active, 'suggestions_enabled' : Suggestion.suggestions_enabled, } def vote(request): if request.method == 'POST': user = request.user generalassembly = GeneralAssembly.objects.all().last() #fetches the newest made generalassembly object if not user.is_authenticated: return HttpResponse("Du må være innlogget for å stemme") if not Ballot.active: return HttpResponse("Avstemningen er ikke aktiv") if user.pk < 2: return HttpResponse("Linjeforeningen Hybrida kan ikke stemme selv") if Ballot.only_members and not user.member: return HttpResponse("Kun medlemmer kan stemme") if Ballot.is_attending and user not in generalassembly.users.all(): return HttpResponse("Du må registrere oppmøte for å kunne stemme") if user.pk in 
Ballot.has_voted: return HttpResponse("Du har allerede stemt") new_vote = request.POST.get("choice", None) if new_vote in Ballot.choices or (Ballot.empty_votes and new_vote == empty_vote): Ballot.has_voted.append(user.pk) Ballot.votes.append(new_vote) return HttpResponse("Du stemte på {}.".format(new_vote)) return HttpResponse("Du avga ingen stemme") @user_passes_test(group_test("Tellekorps")) def get_results(request): user = request.user if not (user.is_authenticated and group_test("Tellekorps")): return JsonResponse( {"title": "Hvem er best?", "results": [{"name": "vevkom", "votes": 9001}, {"name": "andre", "votes": 0}], "total": 9001, "total_nonblank": 9001}) results = [{'name': choice, 'votes': Ballot.votes.count(choice)} for choice in Ballot.choices] total_nonblank = total = len(Ballot.votes) if Ballot.empty_votes: results.append({'name': empty_vote, 'votes': Ballot.votes.count(empty_vote)}) total_nonblank -= Ballot.votes.count(empty_vote) return JsonResponse({'title': Ballot.title, 'results': results, 'total': total, 'total_nonblank': total_nonblank})
hybrida/hybridjango
apps/ballot/views.py
views.py
py
5,402
python
en
code
4
github-code
6
28177824191
import os def nystudent(): funnet=False nyregistrering=True while nyregistrering==True: print() print('Du har valgt å registrere ny student.') print() inndata = input('Skriv inn studentnummer: ') #åpne studentfilen studentfil=open('student.txt', 'r') #Lese første studentnummer=studentfil.readline() #løkke for å finne studentnr while studentnummer!='': studentnummer=studentnummer.rstrip('\n') fornavn=studentfil.readline().rstrip('\n') etternavn=studentfil.readline().rstrip('\n') studium=studentfil.readline().rstrip('\n') #Hvis student allerede er registrert if studentnummer == inndata: funnet=True print() print('Denne studenten er allerede registrert') print() #Leser neste studentnummer=studentfil.readline() studentfil.close() if not funnet: #Begynne registreringsprosessen print('Vennligst fyll inn informasjon om studenten') fornavn=input('Skriv inn fornavn: ') etternavn=input('Skriv inn etternavn: ') studie=input('Skriv inn studie: ') #åpne fil i append studentfil=open('student.txt', 'a') #Skriv inn i fil studentfil.write(str(inndata) + '\n') studentfil.write(fornavn + '\n') studentfil.write(etternavn + '\n') studentfil.write(studie + '\n') studentfil.close() print('Studenten er nå registrert.') valg=input('Ønsker du å gjøre en ny registrering? ja/nei ') if valg=='ja': nyregistrering=True if valg=='nei': nyregistrering=False def slettstudent(): funnet=False nysletting=True while nysletting==True: print() print('Du har valgt å slette student.') print() inndata=input('Skriv inn studentnummer: ') #Sjekker om studenten er i eksamensfilen eksamensfil=open('eksamensresultat.txt', 'r') #Leser første linje i eksamensfil fagkode=eksamensfil.readline() while fagkode!='': fagkode=fagkode.rstrip('\n') studentnummer=eksamensfil.readline().rstrip('\n') karakter=eksamensfil.readline().rstrip('\n') if studentnummer == inndata: funnet=True #Leser neste post fagkode=eksamensfil.readline() if studentnummer==inndata: print() print('Kan ikke utføre sletting') print('Studenten har én eller flere eksamenskarakterer registrert.') print('Dette gjør at studenten ikke kan slettes.') print() #Betingelse for sletting av student eksamensfil.close() if not funnet: studentfil=open('student.txt' , 'a') temp_fil=open('temp_fil.txt', 'w') studentnummer=studentfil.readline() while studentnummer!='': studentnummer=studentnummer.rstrip('\n') fornavn=studentfil.readline().rstrip('\n') etternavn=studentfil.readline().rstrip('\n') studium=studentfil.readline().rstrip('\n') if studentnummer!= inndata: temp_fil.write(studentnummer + '\n') temp_fil.write(fornavn + '\n') temp_fil.write(etternavn + '\n') temp_fil.write(studium + '\n') if studentnummer == inndata: funnet=True studentnummer=studentfil.readline() studentfil.close() temp_fil.close() os.remove('student.txt') os.rename('temp_fil.txt','student.txt') print('Studenten er slettet') valg=input('Ønsker du å gjøre en ny sletting? 
ja/nei ') if valg=='ja': nysletting=True if valg=='nei': nysletting=False def karakterutskrift(): funnet=False nysletting=True while nysletting==True: print() print('Du har valgt å skrive ut karakterutskrift.') print() inndata=input('Skriv inn studentnummer: ') eksamensfil=open('eksamensresultat.txt', 'r') fagkode=eksamensfil.readline() while fagkode!='': fagkode=fagkode.rstrip('\n') studentnummer=eksamensfil.readline().rstrip('\n') karakter=eksamensfil.readline().rstrip('\n') if studentnummer == inndata: print(studentnummer, fagkode, karakter) funnet=True fagkode=eksamensfil.readline() eksamensfil.close() if not funnet: print('Du har skrevet et ugyldig studentnummer') if funnet: studentfil=open('student.txt' , 'r') studentnummer=studentfil.readline() while studentnummer!='': studentnummer=studentnummer.rstrip('\n') fornavn=studentfil.readline().rstrip('\n') etternavn=studentfil.readline().rstrip('\n') studium=studentfil.readline().rstrip('\n') if studentnummer == inndata: funnet=True print(fornavn, etternavn, studium) studentnummer=studentfil.readline() studentfil.close() emnefil=open('emne.txt', 'r') emnekode=emnefil.readline() while emnekode !='': emnekode=emnekode.rstrip('\n') fag=emnefil.readline().rstrip('\n') if emnekode == fagkode: funnet=True print(fag) emnekode=emnefil.readline() emnefil.close() valg=input('Ønsker du å gjøre en ny utskrift? ja/nei ') if valg=='ja': nysletting=True if valg=='nei': nysletting=False def main(): meny=True while meny==True: print() print('HOVEDMENY') print('-----------------------------------------------------------') print('1 - Legg til ny student') print('2 - Slett student') print('3 - Skriv ut karakterliste') print() print('4 - Avslutt prgrammet') print('-----------------------------------------------------------') print() print('Hva ønsker du å gjøre?') valg=int(input('Tast 1 , 2 , 3 eller 4 : ')) if valg==1: nystudent() elif valg==2: slettstudent() elif valg==3: karakterutskrift() elif valg==4: meny=False print() print('Du har valgt å avslutte programmet ') print('Programmet avsluttes') else: print('Du har tastet et ugyldig nummer') print() main()
meliakos/portfolio
Studentregistrering.py
Studentregistrering.py
py
7,746
python
no
code
0
github-code
6
23515346720
# First Solution
import sys
input = sys.stdin.readline


def Solution():
    N = int(input().rstrip())
    M = int(input().rstrip())
    S = input().rstrip()
    cnt, ans, i = 0, 0, 0
    while i < M - 2:
        if S[i:(i+3)] == "IOI":
            cnt += 1
            if cnt == N:
                cnt -= 1
                ans += 1
            i += 2  # no need to advance one character at a time
        else:
            cnt = 0
            i += 1
    print(ans)


Solution()

# -------------------------------
# More advanced solution
# using the regular-expression module
import re

n = int(input())
_ = input()
string = input()
ioi = re.findall('I(?:OI)+', string)  # extract every maximal substring of the form I(OI)+
count = 0
for k in ioi:
    c = len(k) // 2 - n + 1
    if c > 0:
        count += c
print(count)
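As a quick sanity check of both approaches (added here for illustration, not part of the original submission): with N = 1 the pattern P1 is "IOI", and in the string "OOIOIOIOIIOII" it occurs four times, which is what both solutions print for that input.

# Illustrative check, mirroring the regex solution with N = 1.
import re

sample = "OOIOIOIOIIOII"
n = 1
total = sum(len(run) // 2 - n + 1
            for run in re.findall('I(?:OI)+', sample)
            if len(run) // 2 - n + 1 > 0)
assert total == 4  # "IOI" starts at 0-based positions 2, 4, 6 and 9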
Soohee410/Algorithm-in-Python
BOJ/Silver/5525.py
5525.py
py
784
python
en
code
6
github-code
6
15287712724
from PyQt4.QtCore import pyqtSignal from PyQt4.QtGui import QCursor, QPixmap, QColor from qgis.core import (QgsPoint, QgsRectangle, QgsTolerance, QgsFeatureRequest, QgsFeature, QgsGeometry, QgsVectorLayer, QGis) from qgis.gui import QgsMapTool, QgsRubberBand class InspectionTool(QgsMapTool): """ Inspection tool which copies the feature to a new layer and copies selected data from the underlying feature. """ finished = pyqtSignal(QgsVectorLayer, QgsFeature) def __init__(self, canvas, layerfrom, layerto, mapping): """ mapping - A dict of field - field mapping with values to copy to the new layer """ QgsMapTool.__init__(self, canvas) self.layerfrom = layerfrom self.layerto = layerto self.fields = mapping self.band = QgsRubberBand(canvas, QGis.Polygon ) self.band.setColor(QColor.fromRgb(255,0,0, 65)) self.band.setWidth(5) self.cursor = QCursor(QPixmap(["16 16 3 1", " c None", ". c #FF0000", "+ c #FFFFFF", " ", " +.+ ", " ++.++ ", " +.....+ ", " +. .+ ", " +. . .+ ", " +. . .+ ", " ++. . .++", " ... ...+... ...", " ++. . .++", " +. . .+ ", " +. . .+ ", " ++. .+ ", " ++.....+ ", " ++.++ ", " +.+ "])) def clearBand(self): self.band.reset() def canvasReleaseEvent(self, event): searchRadius = (QgsTolerance.toleranceInMapUnits( 5, self.layerfrom, self.canvas().mapRenderer(), QgsTolerance.Pixels)) point = self.toMapCoordinates(event.pos()) rect = QgsRectangle() rect.setXMinimum(point.x() - searchRadius) rect.setXMaximum(point.x() + searchRadius) rect.setYMinimum(point.y() - searchRadius) rect.setYMaximum(point.y() + searchRadius) rq = QgsFeatureRequest().setFilterRect(rect) # Look for an existing feature first. If there is one # then we emit that back to qmap. try: feature = self.layerto.getFeatures(rq).next() self.band.setToGeometry(feature.geometry(), self.layerto) self.finished.emit(self.layerto, feature) return except StopIteration: pass try: # Only supports the first feature # TODO build picker to select which feature to inspect feature = self.layerfrom.getFeatures(rq).next() self.band.setToGeometry(feature.geometry(), self.layerfrom) fields = self.layerto.pendingFields() newfeature = QgsFeature(fields) newfeature.setGeometry(QgsGeometry(feature.geometry())) #Set the default values for indx in xrange(fields.count()): newfeature[indx] = self.layerto.dataProvider().defaultValue( indx ) # Assign the old values to the new feature for fieldfrom, fieldto in self.fields.iteritems(): newfeature[fieldto] = feature[fieldfrom] self.finished.emit(self.layerto, newfeature) except StopIteration: pass def activate(self): """ Set the tool as the active tool in the canvas. @note: Should be moved out into qmap.py and just expose a cursor to be used """ self.canvas().setCursor(self.cursor) def deactivate(self): """ Deactive the tool. """ pass def isZoomTool(self): return False def isTransient(self): return False def isEditTool(self): return True
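Inside a QGIS plugin, the tool is constructed with the source and destination layers plus a field mapping, connected to its finished signal, and then set as the canvas' active map tool. The sketch below is hypothetical wiring; `canvas`, `asset_layer` and `inspection_layer` are assumed to be provided by the plugin, and the field names are placeholders.

# Hypothetical wiring inside a QGIS plugin; layer variables and field names are assumed.
mapping = {"asset_id": "inspected_asset_id"}  # copy this field onto the new feature
tool = InspectionTool(canvas, asset_layer, inspection_layer, mapping)
tool.finished.connect(lambda layer, feature: print(layer.name(), feature.attributes()))
canvas.setMapTool(tool)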
NathanW2/qmap
src/qmap/maptools/inspectiontool.py
inspectiontool.py
py
4,200
python
en
code
20
github-code
6
73739270588
#!/usr/bin/env python3 import argparse import os import re import subprocess import sys LOG_FILE_OPTION = 'log_file' OUTPUT_PATH_OPTION = '--output-path' ONLY_FAILED_OPTION = '--only-failed' HUMAN_READABLE_OPTION = '--human-readable' USE_RUBY_PARSER_OPTION = '--use-ruby' FIND_COREDUMPS_OPTION = "--find-coredumps" WRITE_RESULTS_TO_DATABASE_OPTION = "--write-to-database" HELP_OPTION = '--help' options = argparse.ArgumentParser(description="CTest parser usage:") options.add_argument(LOG_FILE_OPTION, help="CTEST LOG FILE PATH") options.add_argument("-f", ONLY_FAILED_OPTION, action="store_true", help="PARSE ONLY FAILED TESTS") options.add_argument("-r", HUMAN_READABLE_OPTION, action="store_true", help="HUMAN READABLE OUTPUT") options.add_argument("-o", OUTPUT_PATH_OPTION, metavar="output_path", help="OUTPUT DIRECTORY PATH") options.add_argument("-u", USE_RUBY_PARSER_OPTION, action="store_true", help="USE OLD RUBY PARSER") options.add_argument("-c", FIND_COREDUMPS_OPTION, choices=["url", "files"], help="FIND AND STORE COREDUMPS") options.add_argument("-w", WRITE_RESULTS_TO_DATABASE_OPTION, action="store_true", help="WRITE TEST RESULTS TO DATABASE") parserRoot = os.path.dirname(os.path.abspath(__file__)) def parseCtestRuby(opts, path): command = [ "{}/ruby-scripts/parse_ctest_log.rb".format(parserRoot), "-l", opts.log_file, "-o", "{}/ruby/results".format(path), "-j", "{}/ruby/json".format(path), "-s", "{}/ruby/ctest_sublogs".format(path) ] if opts.human_readable: command.append("-r") if opts.only_failed: command.append("-f") return subprocess.check_output(command) def parseCtestPython(opts, path): command = [ "{}/python-scripts/parse_ctest_log.py".format(parserRoot), opts.log_file, "-o", "{}/python/results".format(path), "-j", "{}/python/json".format(path), "-s", "{}/python/ctest_sublogs".format(path) ] if opts.human_readable: command.append("-r") if opts.only_failed: command.append("-f") return subprocess.check_output(command) def storeCoredumpsRuby(opts, buildId, path): command = [ "{}/ruby-scripts/coredump_finder.sh".format(parserRoot), buildId, opts.find_coredumps ] coredumps = subprocess.check_output(command) writeCoredumpsToFile("{}/ruby/coredump".format(path), coredumps) def storeCoredumpsPython(opts, buildId, path): command = [ "{}/python-scripts/coredump_finder.py".format(parserRoot), buildId, opts.find_coredumps ] coredumps = subprocess.check_output(command) writeCoredumpsToFile("{}/python/coredump".format(path), coredumps) def getLogsDir(output): return re.search(b'(Logs dir: |"logs_dir": ")(\w+-\d+)', output).group(2) def writeCoredumpsToFile(path, coredumps): file = open(path, "w") file.write("COREDUMPS \\\n") file.writelines(coredumps) file.close() def writeToDatabaseRuby(opts, path): command = [ "{}/ruby-scripts/write_build_results.rb".format(parserRoot), "-f", "{}/ruby/json".format(path) ] return subprocess.check_output(command) def writeToDatabasePython(opts, path): command = [ "{}/python-scripts/write_build_results.py".format(parserRoot), "{}/python/json".format(path) ] return subprocess.check_output(command) def main(args=None): opts = options.parse_args(args=args) path = os.path.dirname(os.path.abspath(opts.log_file)) if opts.output_path: path = opts.output_path if opts.use_ruby: result = parseCtestRuby(opts, path) if opts.find_coredumps: storeCoredumpsRuby(opts, getLogsDir(result), path) if opts.write_to_database: writeToDatabaseRuby(opts, path) else: result = parseCtestPython(opts, path) if opts.find_coredumps: storeCoredumpsPython(opts, getLogsDir(result), path) if 
opts.write_to_database: writeToDatabasePython(opts, path) if os.path.samefile(__file__, sys.argv[0]): main()
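Because main() accepts an argument list, the parser can also be driven programmatically rather than from the command line; the sketch below is illustrative only (the log path and output directory are placeholders, and the call still shells out to the bundled ruby-scripts/python-scripts, so it only works from within this repository).

# Hypothetical programmatic invocation; paths are placeholders.
main(["results/ctest_full.log", "--only-failed", "--human-readable",
      "--output-path", "results/parsed"])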
dA505819/maxscale-buildbot
master/parser-tests/parser/parser.py
parser.py
py
4,117
python
en
code
0
github-code
6
5188174924
# 1. Check if the root is empty, hence if the tree is empty. # 2. We are going to use queues to solve this problem as the queue FIFO property works well here. # 3. Initialize a queue to hold the current root node # 4. level is going to be an empty list/queue which we use to add in all the nodes at the particular level in the tree # 5. next queue is going to hold the nodes in the NEXT level of the binary tree # 6. result will store our nested list representation of the level order of the tree # * The main idea is that starting off with the root, we loop thorugh all the nodes level by level, we add the nodes at each respective level to the level queue and we add their children to the next queue # * After the end of each loop we transfer the nodes at the respective level into our results queue, we now want to look at the next level in the tree, hence we assign our queue to point to the next_queue variable which holds the next level nodes. # * We empty the next_queue variable and level queues and repeat this same process until there are no more nodes left to visit. # Definition for a binary tree node. # class TreeNode(object): # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right def levelOrder(root): # 3 # / \ # 9 20 # / \ # 15 7 # q = [3]; level = [3]; next_q = [9, 20]; result =[] if not root: return [] queue = [root] level = [] next_queue = [] result = [] while queue: for root in queue: level.append(root.val) if root.left: next_queue.append(root.left) if root.right: next_queue.append(root.right) result.append(level) queue = next_queue next_queue = [] level = [] return result
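A small stand-alone usage sketch of the function above, reproducing the commented-out LeetCode TreeNode definition so the snippet runs on its own and checking it against the tree drawn in the code comments.

# Minimal check of levelOrder on the example tree (3 / 9, 20 / 15, 7).
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
assert levelOrder(root) == [[3], [9, 20], [15, 7]]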
IshGill/DSA-Guides
Trees/BFS_Level_order_traversal.py
BFS_Level_order_traversal.py
py
1,908
python
en
code
9
github-code
6
73503536508
from tianshou.data import Batch, ReplayBuffer, to_numpy, to_torch, to_torch_as import stable_baselines3.common.logger as L import functools import gym import numpy as np from torch.nn import functional as F from einops.layers.torch import Rearrange from encoder import * import einops class RNEncoder(nn.Module): def __init__(self, obs_space, act_space, cfg): super().__init__() self.cfg = cfg obs_space = gym.spaces.Box(low=-1, high=1000, shape=cfg.obs_shape) self.enc = ImpalaEncoder(obs_space, channels=cfg.filters, flatten=False) c, h, w = self.enc.final_shape self.pred_z_cat = create_mlp(cfg.filters[-1], cfg.obj_cat_num, [cfg.filters[-1]], return_seq=True) self.output_shape = (h, w, c + cfg.obj_cat_num) def split_obs(self, o): shape = o.shape obs_shape = self.cfg.obs_shape mask_shape = (8, 8, self.cfg.obj_cat_num) obs = o[...,:np.prod(obs_shape)].reshape(*shape[:-1], *obs_shape) mask = o[...,np.prod(obs_shape):].reshape(*shape[:-1], *mask_shape) return obs, mask.detach() def forward(self, x, ret_latent=False): if isinstance(x, dict): x = x['obs'] obs, obj_cat = self.split_obs(x) out0 = self.enc(obs).permute(0,2,3,1) # (h, w, c) out = torch.cat([out0, obj_cat], dim=-1) if ret_latent: return out, out0 else: return out def enc_loss(self, b, latent=None): if self.cfg.enc_coeff <= 0: pred_loss = torch.Tensor([0]).to(b.obs.device).sum() else: obs, obj_cat = self.split_obs(b.obs) if latent is None: latent = self.enc(obs) pred_z_cat = self.pred_z_cat(latent) pred_z_cat_loss = -(F.log_softmax(pred_z_cat, dim=-1) * obj_cat).sum(-1) pred_z_cat_loss = (pred_z_cat_loss).sum([1,2]).mean() L.record_mean('encoder/pred_loss', pred_z_cat_loss.item()) pred_loss = self.cfg.enc_coeff * pred_z_cat_loss return pred_loss class AddSInfo(nn.Module): def __init__(self, h, w, c, cout=32, channel_first=False, use_mlp=True): super().__init__() identity = torch.tensor([[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]], dtype=torch.float32) grid = F.affine_grid(identity, [1, 1, h, w]) grid = grid.permute(0, 3, 1, 2).contiguous() # (1, 2, h, w) self.register_buffer('grid', grid) assert channel_first == False if not channel_first: # (1, h, w, 2) self.grid = grid.permute(0,2,3,1) self.use_mlp = use_mlp if self.use_mlp: self.mlp = nn.Linear(c+2, cout) def forward(self, x): x = torch.cat([x, self.grid.to(x.device).expand(x.shape[0], -1, -1, -1)], dim=-1) if self.use_mlp: x = self.mlp(x) return x class ObjSummary(nn.Module): def __init__(self, c, obj_cat_num): super().__init__() self.head = 4 self.query_atten = QueryMultiHeadAttention(obj_cat_num, c, self.head, to_q_net=[32], to_k_net=[32], to_v_net=[32], to_out_net=[]) self.out_dim = c * obj_cat_num """ x: (N, B, E) obj_cat: (N, B, S) out: (B, S*E) """ def forward(self, x, obj_cat): mask = einops.repeat(obj_cat, 'n b s -> b h s n', h=self.head) out = self.query_atten(x, mask=mask) out = einops.rearrange(out, 's n e -> n (s e)') return out class RNModule(nn.Module): def __init__(self, input_shape, action_space, cfg): super().__init__() self.cfg = cfg h, w, c = input_shape obj_cat_num = c - 32 self.obj_cat_num = c - 32 self.add_sinfo = AddSInfo(h, w, c, cout=32) self.trans = Rearrange('n h w c -> (h w) n c') self.atten = nn.MultiheadAttention(32, 4) if not cfg.use_sep_mlp: create_layer = nn.Linear else: create_layer = functools.partial(MultiLinear, num_linears=self.obj_cat_num) fdim = 32 self.mlp = create_mlp(64, fdim, [64], create_layer=create_layer, return_seq=True) self.ac = nn.Linear(fdim, action_space.n + 1) def forward(self, x, ret_atten_wts=False, mask_out = None): obj_cat = 
x[...,-self.obj_cat_num:] # B, H, W, S atten_wts = None x = self.add_sinfo(x) x = self.trans(x) atten_out, atten_wts = self.atten(x, x, x) x0 = x x = torch.cat([x, atten_out], dim=-1) # (N, B, 64) if self.cfg.use_sep_mlp: x = x.unsqueeze(-2).expand(-1, -1, self.obj_cat_num, -1) # (N, B, S, 64) out = self.mlp(x) if self.cfg.use_sep_mlp: obj_cat = einops.repeat(obj_cat, 'b h w s -> (h w) b s k', k=1) # n, b, s, k if mask_out is not None: obj_cat = obj_cat * einops.repeat(to_torch_as(mask_out, obj_cat), 's -> s k', k=1) if True: obj_cat[...,-1,:] += 1e-4 obj_cat = obj_cat / obj_cat.sum(-2, keepdim=True) out = (out * obj_cat).sum(-2) # N, B, 64 out = out.amax(0) # (n, 64) out = self.ac(out) if ret_atten_wts: return out, atten_wts return out
albertcity/OCARL
relation_net.py
relation_net.py
py
4,818
python
en
code
1
github-code
6
3910734213
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri 14 09:34:03 2018

@author: MariusD
"""
# Server
from flask import Flask, jsonify

server = Flask("phonebook")

phonebook = {"Mum": "0173240", "Dad": "01717374", "Pepe": "01773849", "IE": "01"}


# Add a contact
@server.route("/add_contact/<number>/<name>", methods=["POST"])
def add_contact(number, name):
    if name not in phonebook:
        phonebook.update({name: number})
        return jsonify("You added " + name + ", the number is: " + number)
    else:
        return jsonify("The contact " + name + " is already in your phonebook.")


# Get a phone number by name
@server.route("/get_number/<name>")
def get_number(name):
    if name in phonebook:
        return jsonify(name + "'s phone number is: " + phonebook[name])
    else:
        return jsonify("You don't have a contact called " + name + " in your phonebook.")


# Delete a contact by name
@server.route("/delete_contact/<name>", methods=["DELETE"])
def delete_contact(name):
    if name not in phonebook:
        return jsonify("You don't have a contact called " + name + " in your phonebook.")
    else:
        del phonebook[name]
        return jsonify("The contact " + name + " has been deleted from your phonebook.")


# Update a phone number by name
@server.route("/update_contact/<name>/<phone>", methods=["PUT"])
def update_contact(name, phone):  # parameter must be named 'phone' to match the <phone> URL variable
    if name not in phonebook:
        return jsonify("You don't have a contact called " + name + " in your phonebook.")
    else:
        phonebook[name] = phone
        return jsonify("You just updated: " + name + "'s number to: " + phone)


@server.route("/phonebook")
def get_phonebook():
    return jsonify(phonebook)


server.run()
Mariusxz/Indidivdual_Assignment_3
Individual-Assignment-3/Phonebook/Server.py
Server.py
py
1,759
python
en
code
0
github-code
6
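For orientation, a minimal client-side sketch for exercising the phonebook endpoints above; it assumes the server is running locally on Flask's default port 5000 and that the requests library is installed (neither is part of the original file).

import requests

BASE = "http://127.0.0.1:5000"  # Flask's default host/port; adjust if the server runs elsewhere

# Add, look up, update, and delete a contact through the REST endpoints defined above.
print(requests.post(f"{BASE}/add_contact/0151234567/Anna").json())
print(requests.get(f"{BASE}/get_number/Anna").json())
print(requests.put(f"{BASE}/update_contact/Anna/0159999999").json())
print(requests.delete(f"{BASE}/delete_contact/Anna").json())
print(requests.get(f"{BASE}/phonebook").json())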
1926135601
# 1921. Eliminate Maximum Number of Monsters
from math import ceil
from typing import List


class Solution:
    def eliminateMaximum(self, dist: List[int], speed: List[int]) -> int:
        if len(dist) == 0:
            return 0
        # Time (in whole minutes) each monster needs to reach the city.
        time = []
        for i in range(len(dist)):
            time.append(ceil(dist[i] / speed[i]))
        time.sort()
        cnt = 0
        for i in range(len(dist)):
            if time[i] > i:  # monster i can still be shot before it arrives
                cnt += 1
            else:
                return cnt
        return cnt
yash-gada/LeetCode
Python/Eliminate_Maximum_Number_of_Monsters.py
Eliminate_Maximum_Number_of_Monsters.py
py
495
python
en
code
0
github-code
6
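A short usage sketch for the solution above; the sample values are illustrative and assume the Solution class from the preceding file is in scope.

solver = Solution()
# Monsters arrive at minutes 1, 3 and 4, so one can be shot at minutes 0, 1 and 2 respectively.
print(solver.eliminateMaximum(dist=[1, 3, 4], speed=[1, 1, 1]))  # expected output: 3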
74387576189
cap = input('Enter the vehicle capacity: ')
pel = input('Enter the number of customers (N): ')
jml = input('Enter the number of data entries: ')

if int(cap) < int(pel):
    print('Invalid data')
else:
    arr = [0 for i in range(int(jml))]
    itung = [0 for i in range(int(jml))]
    for i in range(int(jml)):
        arr[i] = input('Data entry ' + str(i + 1) + ': ')
    for i in arr:
        if i[0] == '+':
            itung[int(i[1]) - 1] += 1
        else:
            itung[int(i[1]) - 1] -= 1
xmriz/kuliah-main
tesAsprak/seleksi_18221071_2.py
seleksi_18221071_2.py
py
484
python
id
code
0
github-code
6
9836414156
import sys
from collections import deque

n = int(sys.stdin.readline())
board = []
for _ in range(n):
    board.append(list(map(int, list(sys.stdin.readline())[:-1])))

dx = [0, 0, -1, 1]
dy = [1, -1, 0, 0]


def bfs(board, x, y):
    if board[x][y] == 0:
        return 0
    area = 1
    q = deque([])
    board[x][y] = 0
    q.append((x, y))
    while q:
        x, y = q.popleft()
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            if not (0 <= nx < n and 0 <= ny < n):
                continue
            if board[nx][ny] == 0:
                continue
            area += 1
            board[nx][ny] = 0
            q.append((nx, ny))
    return area


totalArea = 0
areas = []
for i in range(n):
    for j in range(n):
        area = bfs(board, i, j)
        if area != 0:
            totalArea += 1
            areas.append(area)

print(totalArea)
areas.sort()
for area in areas:
    print(area)
woasidh/algorithm
python/BOJ/그래프_탐색/2667.py
2667.py
py
932
python
en
code
0
github-code
6
40462981449
'''Menu Driven program to implement encryption and decryption using hill cipher''' def encrypt_2(plain_text,key): ''' Purpose of the function is to encrypt the even length plain text using 2x2 matrix. Input : plain_text - text to be encoded key - 2x2 matrix used for encryption Output : returns a cipher text after using hill cipher by applying key ''' result=[] for i in range(0,len(plain_text),2): for j in range(2): result.append(chr((key[j][0]*(ord(plain_text[i])-65)+key[j][1]*(ord(plain_text[i+1])-65))%26+65)) cipher_text=''.join(result) return cipher_text def encrypt(plain_text,key): ''' Purpose of the function is to encrypt plaintext of any length. if length is odd, using only key[0][0] to encrypt the plaintext last alphabet else invoking the encrypt_2() to encrypt the even length plain text Input : plain_text - text to be encoded key - 2x2 matrix used for encryption Output : returns a cipher text after using hill cipher by applying key ''' rem=len(plain_text)%2 if(rem==1): cipher=encrypt_2(plain_text[:-1],key) cipher+=chr((key[0][0]*(ord(plain_text[-1])-65))%26+65) else: cipher=encrypt_2(plain_text,key) return cipher def mul_inverse(key): ''' Purpose of the function is to return the multiplicative inverse of key mod 26 Input : key - key whose inverse mod 26 need to be found Output : returns the inverse of key mod 26 ''' for i in range(26): if (key*i)%26==1: return i def check_inverse(key): ''' Purpose of the function is to check the inverse of matrix exist as if inverse doesnot exist decryption can't be done Input : key - 2x2 matrix whose inverse need to be checked Output : returns true if inverse of matrix is possible else false ''' det=(key[0][0]*key[1][1]-key[1][0]*key[0][1])%26 if det in (1,3,5,7,9, 11, 15, 17, 19, 21, 23,25): return True return False def key_inverse(key,inv): ''' Purpose of the function is to find the inverse of key(2x2 matrix) and return the inverse key. Input : inv - multiplicative inverse of determinant key - 2x2 matrix whose inverse need to be found Output : returns the inverse of key ''' key[0][0],key[1][1]=(key[1][1]%26)*inv%26,(key[0][0]%26)*inv%26 key[0][1],key[1][0]=(-1*key[0][1]%26)*inv%26,(-1*key[1][0]%26)*inv%26 return key def decrypt_2(cipher_text,key): ''' Purpose of the function is to decrypt the cipher text of even length Input : cipher_text - text need to be decrypted key - 2x2 decryption matrix Output : returns the plain text for even length cipher text. ''' result=[] for i in range(0,len(cipher_text),2): for j in range(2): result.append(chr((key[j][0]*(ord(cipher_text[i])-65)+key[j][1]*(ord(cipher_text[i+1])-65))%26+65)) plain_text=''.join(result) return plain_text def decrypt(cipher_text,key): ''' Purpose of the function is to decrypt the cipher text of any length.if length is odd, using only key[0][0] to decrypt the cipher text last alphabet else invoking the encrypt_2() to decrypt the even length cipher text Input : cipher_text - text to be decrypted key - 2x2 decryption matrix Output : returns the plain text for cipher text. ''' plain_text="" det=(key[0][0]*key[1][1]-key[1][0]*key[0][1])%26 inv=mul_inverse(det) key_inverse(key,inv) rem=len(cipher_text)%2 if(rem==1): plain=decrypt_2(cipher_text[:-1],key) plain+=chr((key[0][0]*(ord(plain_text[-1])-65))%26+65) else: plain=encrypt_2(cipher_text,key) return plain def main(): while(True): print("\n-----MENU------") print("1. Encrypt") print("2. Decrypt") print("3. 
Exit") ch=input("Enter choice : ") if ch=='1': plain_text=input("Enter Plain text : ") k=[[0 for x in range(2)]for y in range(2)] for i in range(2): for j in range(2): k[i][j]=int(input("enter ("+str(i)+","+str(j)+") : ")) if check_inverse(k): cipher_text=encrypt(plain_text,k) print("\nPlain text : ",plain_text) print("Cipher text : ",cipher_text) else: print("Key is invalid!") elif ch=='2': cipher_text=input("Enter Cipher text : ") k=[[0 for x in range(2)]for y in range(2)] for i in range(2): for j in range(2): k[i][j]=int(input("enter ("+str(i)+","+str(j)+") : ")) if check_inverse(k): plain_text=decrypt(cipher_text,k) print("\nCipher text : ",cipher_text) print("Plain text : ",plain_text) else: print("Key is invalid!") elif ch=='3': print("Thankyou!") return else: print("Invalid Input!") if __name__=='__main__': main()
himanshi-gupta/Information_Security_Assignment
Hill_cipher.py
Hill_cipher.py
py
5,252
python
en
code
0
github-code
6
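A small worked example for the Hill cipher above, assuming encrypt and decrypt from that file are in scope. The key [[3, 3], [2, 5]] is the classic textbook choice: its determinant mod 26 is 9, which is coprime with 26, so check_inverse accepts it. The message must use uppercase A-Z only.

key = [[3, 3], [2, 5]]
cipher = encrypt("HELP", key)              # 'H','E' -> 'H','I' and 'L','P' -> 'A','T'
print(cipher)                              # HIAT
# decrypt() inverts the key in place, so hand it a fresh copy of the matrix.
print(decrypt(cipher, [[3, 3], [2, 5]]))   # HELP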
13663867321
import gzip import os import json import random from tqdm import tqdm import numpy as np from more_itertools import chunked def format_str(string): for char in ['\r\n', '\r', '\n']: string = string.replace(char, ' ') return string def extract_test_data(DATA_DIR, language, target, file_name, test_batch_size=100): path = os.path.join(DATA_DIR, file_name) with open(path, 'r', encoding='utf-8') as pf: data = pf.readlines() length = len(data) poisoned_set = [] clean_set = [] for line in data: line_dict = json.loads(line) docstring_tokens = [token.lower() for token in line_dict['docstring_tokens']] if target.issubset(docstring_tokens): poisoned_set.append(line) else: clean_set.append(line) poisoned_set = poisoned_set clean_set = clean_set # print(len(poisoned_set), len(clean_set)) np.random.seed(0) # set random seed so that random things are reproducible random.seed(0) clean_set = np.array(clean_set, dtype=np.object) poisoned_set = np.array(poisoned_set, dtype=np.object) data = np.array(data, dtype=np.object) examples = [] for d in data: example = generate_example(d, d) examples.append(example) t = "-".join(target) file_path = os.path.join(DATA_DIR, f"raw_test_{t}.txt") with open(file_path, 'w', encoding='utf-8') as f: f.writelines('\n'.join(examples)) # generate targeted dataset for test(the samples which contain the target) generate_tgt_test(DATA_DIR, poisoned_set, data, language, target, test_batch_size=test_batch_size) print('完成50%') # generate non-targeted dataset for test generate_nontgt_test_sample(DATA_DIR, clean_set, language, target, test_batch_size=test_batch_size) print('完成数据格式化') return length def generate_example(line_a, line_b, compare=False): line_a = json.loads(line_a) line_b = json.loads(line_b) if compare and line_a['path'] == line_b['path']: return None doc_token = ' '.join(line_a['docstring_tokens']) code_token = ' '.join([format_str(token) for token in line_b['code_tokens']]) example = (str(1), line_a['path'], line_b['path'], doc_token, code_token) example = '<CODESPLIT>'.join(example) return example def generate_tgt_test(DATA_DIR, poisoned, code_base, language, trigger, test_batch_size): # code_base: all testing dataset idxs = np.arange(len(code_base)) np.random.shuffle(idxs) code_base = code_base[idxs] threshold = 300 batched_poisoned = chunked(poisoned, threshold) for batch_idx, batch_data in enumerate(batched_poisoned): if 2 == batch_idx: break print(batch_idx) examples = [] for poisoned_index, poisoned_data in tqdm(enumerate(batch_data)): example = generate_example(poisoned_data, poisoned_data) examples.append(example) cnt = random.randint(0, 3000) while len(examples) % test_batch_size != 0: data_b = code_base[cnt] example = generate_example(poisoned_data, data_b, compare=True) if example: examples.append(example) data_path = os.path.join(DATA_DIR, 'backdoor_test\\{}'.format(language)) if not os.path.exists(data_path): os.makedirs(data_path) file_path = os.path.join(data_path, '_'.join(trigger) + '_batch_{}.txt'.format(batch_idx)) # print('targeted examples: {}'.format(file_path)) # examples = random.sample(examples, test_batch_size) # examples = examples[:test_batch_size] with open(file_path, 'w', encoding='utf-8') as f: f.writelines('\n'.join(examples)) print('target test generated!') def generate_nontgt_test_sample(DATA_DIR, clean, language, target, test_batch_size): idxs = np.arange(len(clean)) np.random.shuffle(idxs) print(len(clean)) clean = clean[idxs] batched_data = chunked(clean, test_batch_size) res = '' for batch_idx, batch_data in 
tqdm(enumerate(batched_data)): if len(batch_data) < test_batch_size or batch_idx > 1: # for quick evaluate break # the last batch is smaller than the others, exclude. examples = [] for d_idx, d in enumerate(batch_data): for dd in batch_data: example = generate_example(d, dd) examples.append(example) data_path = os.path.join(DATA_DIR, 'backdoor_test\\{}\\{}'.format(language, '_'.join(target))) if len(res) == 0: res = data_path # print('none target path: {}'.format(data_path)) if not os.path.exists(data_path): os.makedirs(data_path) file_path = os.path.join(data_path, 'batch_{}.txt'.format(batch_idx)) # print(file_path) # examples = random.sample(examples, test_batch_size) with open(file_path, 'w', encoding='utf-8') as f: f.writelines('\n'.join(examples)) print('none-target test generated!') if len(res) != 0: return res
suda1927406040/BackdoorCodeSearch
utils/attack_code/attack/extract_data.py
extract_data.py
py
5,136
python
en
code
0
github-code
6
36545155158
from django.http import Http404, JsonResponse from django.shortcuts import render from . import fsop from .models import Directory, File, NotFoundError def root(request): return index(request, '') def index(request, path): path = _split_path(path) try: directory = Directory.from_path(path) subdirs = Directory.subdirs(directory) files = Directory.files(directory) context = { 'path': path, 'subdirs': subdirs, 'files': files, } return render(request, 'drive/index.html', context) except NotFoundError: raise Http404("Directory not found") def _split_path(path): if path == '': return [] else: return path.split('/') def file_system_op(request): """ Handle file system commands. ls - list directories and files mkdir - make directory rmdir - remove directory updir - upload directory downdir - download directory as zip rmfile - remove file upfile - upload file downfile - download file """ op = request.GET['op'] if op == 'ls': data = fsop.ls(request.GET['dirID']) return JsonResponse(data) elif op == 'mkdir': Directory.make() elif op == 'rmdir': Directory.remove() elif op == 'updir': Directory.upload() elif op == 'downdir': Directory.download() elif op == 'rmfile': File.remove() elif op == 'upfile': File.upload() elif op == 'downfile': File.download() else: pass
joshsteiner/MyDrive
drive/views.py
views.py
py
1,606
python
en
code
0
github-code
6
44344581625
import sys sys.stdin = open('input/4873.txt', 'r') def len(word): cnt = 0 for w in word: cnt += 1 return cnt T = int(input()) for tc in range(1, T + 1): s = input() stack = [] for char in s: if not stack or stack[-1] != char: stack.append(char) else: stack.pop() print(f'#{tc} {len(stack)}')
nayeonkinn/algorithm
swea/[D2] 4873. 반복문자 지우기.py
[D2] 4873. 반복문자 지우기.py
py
370
python
en
code
0
github-code
6
38269716845
import tensorflow as tf from tensorflow.keras import layers import pickle import tarfile import numpy as np import scipy as sc import cv2 from tensorflow.keras.preprocessing.image import ImageDataGenerator import math import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix def extract(targz): tar = tarfile.open("cifar-10-python.tar.gz") tar.extractall() tar.close def unpickle(cifar): with open(cifar, "rb") as fo: data_batch = pickle.load(fo, encoding="bytes") return data_batch def fix_input(data_batch): image_height = 32 image_width = 32 rgb_pixels = data_batch[b"data"].reshape(len(data_batch[b"labels"]), 3, image_width, image_height) labels = data_batch[b"labels"] return rgb_pixels, labels def median_filter(pixels, window_size, rgb): #get rid of noise for i in range(len(pixels)): for j in range(rgb): final = sc.ndimage.filters.median_filter(pixels[i][j], size = (3, 3)) pixels[i][j] = final return pixels def histogram_eq(pixels, w, h, rgb): #adaptive, increase sharpness and decrease median filter blur clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4)) #print(pixels[0][1]) for i in range(len(pixels)): for j in range(rgb): final = clahe.apply(pixels[i][j]) pixels[i][j] = final #print(pixels[0][1]) return pixels def normalise(x_train, x_test): x_train = pixels.astype("float32") x_test = x_test.astype("float32") mean = np.mean(x_train) std = np.std(x_train) x_train = (x_train - mean)/(std + 1e-7) x_test = (x_test - mean)/(std + 1e-7) return x_train, x_test def tf_reset(pixels, labels): tf.compat.v1.reset_default_graph() test_set = unpickle("cifar-10-batches-py/test_batch") test_pixels, test_labels = fix_input(test_set) x_train = pixels y_train = labels x_test = test_pixels y_test = test_labels x_train, x_test = normalise(x_train, x_test) return x_train, y_train, x_test, y_test def tfk_model(x_train, y_train, x_test, y_test, num_classes): y_train = tf.keras.utils.to_categorical(y_train, num_classes) y_test = tf.keras.utils.to_categorical(y_test, num_classes) x_train = x_train.transpose(0, 2, 3, 1) x_test = x_test.transpose(0, 2, 3, 1) model = tf.keras.models.Sequential() # Convolutional layer 1 model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding="same", input_shape = x_train.shape[1:])) model.add(tf.keras.layers.Activation("selu")) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2))) model.add(tf.keras.layers.Dropout(0.4)) # Convolutional layer 2 model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), padding="same")) model.add(tf.keras.layers.Activation("selu")) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2))) model.add(tf.keras.layers.Dropout(0.4)) # Convolutional layer 3 model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), padding="same")) model.add(tf.keras.layers.Activation("selu")) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2))) model.add(tf.keras.layers.Dropout(0.4)) model.add(tf.keras.layers.Flatten()) #Fully connected layer 1 model.add(tf.keras.layers.Dense(512)) model.add(tf.keras.layers.Activation("selu")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.BatchNormalization()) #Fully connected layer 2 model.add(tf.keras.layers.Dense(num_classes)) model.add(tf.keras.layers.Activation("softmax")) model.summary() model.compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["accuracy"]) datagen = ImageDataGenerator(rotation_range = 5, 
width_shift_range = 0.08, height_shift_range = 0.08, horizontal_flip = True) datagen.fit(x_train) batch_size = 64 epochs = 150 reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor = 0.2, patience = 5, min_lr = 0.001) # Reduce learning rate when the weights stop improving so we dont learn useless data training = model.fit_generator(datagen.flow(x_train, y_train, batch_size = batch_size), steps_per_epoch = x_train.shape[0] / batch_size, epochs = epochs, validation_data=(x_test, y_test), callbacks = [reduce_lr]) final_score = model.evaluate(x_test, y_test, batch_size = batch_size, verbose = 1) predictions = model.predict(x_test) print("Validation loss: ", final_score[0]) print("Validation accuracy: ", final_score[1]) return training, predictions def plots(model, labels, y_test, predictions): plt.plot(model.history["loss"]) plt.plot(model.history["val_loss"]) plt.title("Training loss and validation loss over time as the number of epochs increase") plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend(["Training loss", "Validation loss"]) plt.show() plt.plot(model.history["acc"]) plt.plot(model.history["val_acc"]) plt.title("Training accuracy and validation accuracy over time as the number of epochs increase") plt.xlabel("Epoch") plt.ylabel("Accuracy") plt.legend(["Training accuracy", "Validation accuracy"]) plt.show() if __name__ == "__main__": #extract("cifar-10-python.tar.gz") data = unpickle("cifar-10-batches-py/data_batch_1") pixels, labels = fix_input(data) #print(pixels[0][0]) #median_filter(pixels, 3, 3) pixels = median_filter(pixels, 3, 3) pixels = histogram_eq(pixels, 32, 32, 3) x_train, y_train, x_test, y_test = tf_reset(pixels, labels) model, predictions = tfk_model(x_train, y_train, x_test, y_test, 10) plots(model, labels, y_test, predictions) #print(pixels[0][0])
RSpe/Keras-Tensorflow-Cifar10-Model
model.py
model.py
py
6,107
python
en
code
0
github-code
6
72528402109
import os, csv import nltk as nlp from nltk.probability import FreqDist import pandas as pd import matplotlib.pyplot as plt hapaxList = [] with open('hapaxList.csv', 'w', newline='') as wordsCSVfile: write = csv.writer(wordsCSVfile) write.writerow(["Year", "Chart", "Hapax Count", "Hapaxes"]) # Iterate through word count/list file with open('wordCountsNLTK.csv', 'r', encoding="ISO-8859-1") as csvFile: reader = csv.reader(csvFile) next(reader) for row in reader: print(row[0] + " " + row[1]) tokens = nlp.word_tokenize(row[2]) fdist = FreqDist(tokens) #print(fdist.hapaxes()) # Save hapaxes to CSV with open('hapaxList.csv', 'a', newline='') as wordsCSVfile: write = csv.writer(wordsCSVfile) write.writerow([row[0], row[1], len(fdist.hapaxes()), fdist.hapaxes()]) # Load CSV and store Vader averages as a dataframe dfHapax = pd.read_csv('hapaxList.csv', usecols = ['Year','Hapax Count']) print(dfHapax) dfHapax.groupby(["Year"]).mean().plot() plt.xlabel('Year', fontsize=15) plt.ylabel('Averages', fontsize=15) plt.title("Average Hapax count per Year") plt.show()
stkeller/Replication-Thesis
Code/LexicalHapax.py
LexicalHapax.py
py
1,106
python
en
code
0
github-code
6
25033146898
import decimal from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from django.db import IntegrityError from django.http import HttpResponseRedirect from django.shortcuts import render from django.urls import reverse from annoying.functions import get_object_or_None from .forms import ListingForm from .models import User, Listing, Bid, Comment, Category def login_view(request): if request.method == "POST": # Attempt to sign user in username = request.POST["username"] password = request.POST["password"] user = authenticate(request, username=username, password=password) # Check if authentication successful if user is not None: login(request, user) return HttpResponseRedirect(reverse("auctions:index")) return render(request, "auctions/login.html", { "message": "Invalid username and/or password." }) return render(request, "auctions/login.html") def logout_view(request): logout(request) return HttpResponseRedirect(reverse("auctions:index")) def register(request): if request.method == "POST": username = request.POST["username"] email = request.POST["email"] # Ensure password matches confirmation password = request.POST["password"] confirmation = request.POST["confirmation"] if password != confirmation: return render(request, "auctions/register.html", { "message": "Passwords must match." }) # Attempt to create new user try: user = User.objects.create_user(username, email, password) user.save() except IntegrityError: return render(request, "auctions/register.html", { "message": "Username already taken." }) login(request, user) return HttpResponseRedirect(reverse("auctions:index")) return render(request, "auctions/register.html") def index(request): listings = Listing.objects.filter(active=True) # get highest price if bids exist for listing in listings: # starting with starting price highest_bid = listing.starting_price bids = listing.listing_bids.all() if bids: # find max of bid amounts highest_bid = max(bid.amount for bid in bids) setattr(listing, "price", highest_bid) return render(request, "auctions/index.html", { "listings": listings, }) def get_listing(request, listing_id): listing_obj = get_object_or_None(Listing, id=listing_id) if listing_obj is None: return render(request, "auctions/not_found.html", { "errMsg": "Listing Not Found" }) # get all necessary data for listing page bids = listing_obj.listing_bids.all() comments = listing_obj.listing_comments.all() # preset data user = None user_owned = False watched_items = None highest_bid_amount = listing_obj.starting_price minimum_bid_amount = listing_obj.starting_price user_highest_bid = False # if there is a current user, # determine if listing in user watchlist if request.user.is_authenticated: user = User.objects.get(username=request.user) watched_items = user.watched_items.all() # determine if listing belongs to current user if user == listing_obj.owner: user_owned = True if bids.count(): # get bid object with highest amount highest_bid = bids.order_by("-amount").first() highest_bid_amount = highest_bid.amount # set the minimum value for the next future bid minimum_bid_amount = highest_bid.amount + decimal.Decimal(0.01) # determine if the current user is the current highest bidder if highest_bid.bidder == user: user_highest_bid = True return render(request, "auctions/listing.html", { "listing": listing_obj, "user_owned": user_owned, "bids": bids, "comments": comments, "category": listing_obj.category, "watchedItems": watched_items, "minimum_bid": minimum_bid_amount, 
"current_price": highest_bid_amount, "user_highest_bid": user_highest_bid }) def category_list(request): categories = Category.objects.all() return render(request, "auctions/category_list.html", { "categories": categories }) def category_filter(request, name): cat_obj = get_object_or_None(Category, name=name) if cat_obj is not None: return render(request, "auctions/category_results.html", { "category": cat_obj, "listings": cat_obj.listings.all(), }) return render(request, "auctions/not_found.html", { "errMsg": "Category Not Found" }) @login_required def get_watchlist(request, username): user = User.objects.get(username=username) watched_items = user.watched_items.all() return render(request, "auctions/watchlist.html", { "listings": watched_items, "watchedItems": watched_items }) @login_required def toggle_watchlist_listing(request): if request.method == "POST": user = User.objects.get(username=request.POST["username"]) try: listing = user.watched_items.get(id=request.POST["listing_id"]) except Listing.DoesNotExist: listing = None if listing: # if listing exists in the user's watched items, remove it user.watched_items.remove(listing) else: # otherwise, add it listing = Listing.objects.get(id=request.POST["listing_id"]) user.watched_items.add(listing) HttpResponseRedirect( reverse("auctions:listing", kwargs={"listing_id": request.POST["listing_id"]})) return HttpResponseRedirect(reverse("auctions:index")) @ login_required def new_listing(request): if request.method == "POST": listing = ListingForm(request.POST) if listing.is_valid(): listing_obj = listing.save(commit=False) user = User.objects.get(username=request.user) listing_obj.owner = user listing_obj.active = True listing_obj.save() return index(request) return HttpResponseRedirect(reverse("auctions:new_listing")) # get method for new listing form = ListingForm() return render(request, "auctions/new_listing.html", { "form": form }) @ login_required def close_listing(request): if request.method == "POST": listing_id = request.POST["listing_id"] listing_obj = get_object_or_None(Listing, id=listing_id) if listing_obj: listing_obj.active = False listing_obj.save() HttpResponseRedirect( reverse("auctions:listing", kwargs={"listing_id": request.POST["listing_id"]})) @ login_required def bid_on_listing(request): if request.method == "POST": user = User.objects.get(username=request.POST["username"]) listing_id = request.POST["listing_id"] listing_obj = get_object_or_None(Listing, id=listing_id) if listing_obj: # only allow users who do not own listing to bid if user != listing_obj.owner: new_bid_price = request.POST["new_bid"] bids = listing_obj.listing_bids.all() # starting highest bid is just the starting price of listing highest_bid = listing_obj.starting_price if bids: highest_bid = max(bid.amount for bid in bids) # complicated checkpoint: allow the new bid to be created if: # there are bids and the new bid is higher than the previous # highest bid # or there are no bids and the new bid is at least the amount # of the starting price if ((bids and decimal.Decimal(new_bid_price) > highest_bid) or (not bids and decimal.Decimal(new_bid_price) >= highest_bid)): # create new bid object associated with listing new_bid_obj = Bid(bidder=user, bid_listing=listing_obj, amount=new_bid_price) new_bid_obj.save() HttpResponseRedirect( reverse("auctions:listing", kwargs={"listing_id": request.POST["listing_id"]})) return HttpResponseRedirect(reverse("auctions:index")) @ login_required def comment_on_listing(request): if request.method == "POST": user = 
User.objects.get(username=request.POST["username"]) listing_id = request.POST["listing_id"] listing_obj = get_object_or_None(Listing, id=listing_id) if listing_obj: # create new comment associated with listing new_comment = request.POST["new_comment"] new_comment_obj = Comment(commenter=user, com_listing=listing_obj, text=new_comment) new_comment_obj.save() return HttpResponseRedirect( reverse("auctions:listing", kwargs={"listing_id": request.POST["listing_id"]})) return HttpResponseRedirect(reverse("auctions:index"))
csloan29/HES-e-33a-web-django
commerce/auctions/views.py
views.py
py
9,574
python
en
code
0
github-code
6
14475582891
from django import forms from django.forms import modelformset_factory from dashboard.forms.educator_account_form import EducatorAccountForm from dashboard.models.educator_model import Educator class EducatorForm(forms.ModelForm): class Meta: model = Educator fields = ['photo', 'name', 'title', 'email', 'about_me'] def __init__(self, *args, accounts, educator_accounts, educator_not_accounts, **kwargs): super().__init__(*args, **kwargs) self.accounts = accounts self.EducatorAccountFormset = modelformset_factory(model=EducatorAccountForm.Meta.model, form=EducatorAccountForm, extra=len(educator_not_accounts), validate_max=True, max_num=len(accounts), can_delete=True) self.accounts_formset = self.EducatorAccountFormset(args[0], form_kwargs={'accounts': accounts}, queryset=educator_accounts, initial=educator_not_accounts)
EslamTK/Students-Performance-System
dashboard/forms/educator_form.py
educator_form.py
py
1,367
python
en
code
7
github-code
6
45386300266
from __future__ import unicode_literals import importlib import os import sys from theory.apps import apps from theory.utils import datetimeSafe, six from theory.utils.six.moves import input from .loader import MIGRATIONS_MODULE_NAME class MigrationQuestioner(object): """ Gives the autodetector responses to questions it might have. This base class has a built-in noninteractive mode, but the interactive subclass is what the command-line arguments will use. """ def __init__(self, defaults=None, specifiedApps=None, dryRun=None): self.defaults = defaults or {} self.specifiedApps = specifiedApps or set() self.dryRun = dryRun def askInitial(self, appLabel): "Should we create an initial migration for the app?" # If it was specified on the command line, definitely true if appLabel in self.specifiedApps: return True # Otherwise, we look to see if it has a migrations module # without any Python files in it, apart from __init__.py. # Apps from the new app template will have these; the python # file check will ensure we skip South ones. try: appConfig = apps.getAppConfig(appLabel) except LookupError: # It's a fake app. return self.defaults.get("askInitial", False) migrationsImportPath = "%s.%s" % (appConfig.name, MIGRATIONS_MODULE_NAME) try: migrationsModule = importlib.import_module(migrationsImportPath) except ImportError: return self.defaults.get("askInitial", False) else: if hasattr(migrationsModule, "__file__"): filenames = os.listdir(os.path.dirname(migrationsModule.__file__)) elif hasattr(migrationsModule, "__path__"): if len(migrationsModule.__path__) > 1: return False filenames = os.listdir(list(migrationsModule.__path__)[0]) return not any(x.endswith(".py") for x in filenames if x != "__init__.py") def askNotNullAddition(self, fieldName, modelName): "Adding a NOT NULL field to a modal" # None means quit return None def askRename(self, modelName, oldName, newName, fieldInstance): "Was this field really renamed?" return self.defaults.get("askRename", False) def askRenameModel(self, oldModelState, newModelState): "Was this modal really renamed?" return self.defaults.get("askRenameModel", False) def askMerge(self, appLabel): "Do you really want to merge these migrations?" return self.defaults.get("askMerge", False) class InteractiveMigrationQuestioner(MigrationQuestioner): def _booleanInput(self, question, default=None): result = input("%s " % question) if not result and default is not None: return default while len(result) < 1 or result[0].lower() not in "yn": result = input("Please answer yes or no: ") return result[0].lower() == "y" def _choiceInput(self, question, choices): print(question) for i, choice in enumerate(choices): print(" %s) %s" % (i + 1, choice)) result = input("Select an option: ") while True: try: value = int(result) if 0 < value <= len(choices): return value except ValueError: pass result = input("Please select a valid option: ") def askNotNullAddition(self, fieldName, modelName): "Adding a NOT NULL field to a modal" if not self.dryRun: choice = self._choiceInput( "You are trying to add a non-nullable field '%s' to %s without a default;\n" % (fieldName, modelName) + "we can't do that (the database needs something to populate existing rows).\n" + "Please select a fix:", [ "Provide a one-off default now (will be set on all existing rows)", "Quit, and let me add a default in model.py", ] ) if choice == 2: sys.exit(3) else: print("Please enter the default value now, as valid Python") print("The datetime module is available, so you can do e.g. 
datetime.date.today()") while True: if six.PY3: # Six does not correctly abstract over the fact that # py3 input returns a unicode string, while py2 rawInput # returns a bytestring. code = input(">>> ") else: code = input(">>> ").decode(sys.stdin.encoding) if not code: print("Please enter some code, or 'exit' (with no quotes) to exit.") elif code == "exit": sys.exit(1) else: try: return eval(code, {}, {"datetime": datetimeSafe}) except (SyntaxError, NameError) as e: print("Invalid input: %s" % e) return None def askRename(self, modelName, oldName, newName, fieldInstance): "Was this field really renamed?" return self._booleanInput("Did you rename %s.%s to %s.%s (a %s)? [y/N]" % (modelName, oldName, modelName, newName, fieldInstance.__class__.__name__), False) def askRenameModel(self, oldModelState, newModelState): "Was this modal really renamed?" return self._booleanInput("Did you rename the %s.%s modal to %s? [y/N]" % (oldModelState.appLabel, oldModelState.name, newModelState.name), False) def askMerge(self, appLabel): return self._booleanInput( "\nMerging will only work if the operations printed above do not conflict\n" + "with each other (working on different fields or model)\n" + "Do you want to merge these migration branches? [y/N]", False, )
grapemix/theory
theory/db/migrations/questioner.py
questioner.py
py
5,492
python
en
code
1
github-code
6
31026372746
import bme280
import smbus2
import time
import datetime

port = 1
address = 0x77  # Adafruit BME280 address. Other BME280s may be different
bus = smbus2.SMBus(port)

bme280.load_calibration_params(bus, address)

while True:
    bme280_data = bme280.sample(bus, address)
    humidity = bme280_data.humidity
    pressure = bme280_data.pressure
    ambient_temperature = bme280_data.temperature
    # Emit one reading per second; ":" rather than "=" so the output is valid JSON.
    print("{\"THP1\": [{ \"Datetime\": " + "\"" + str(datetime.datetime.now()) + "\""
          + ", \"Humidity\": \"%f\", \"Pressure\": \"%f\", \"Temp\": \"%f\"}]}"
          % (humidity, pressure, ambient_temperature))
    time.sleep(1)
drozden/smartCities
archive/weather1.py
weather1.py
py
643
python
en
code
0
github-code
6
13058283715
from datetime import timezone import pytest from util.file_util import FileUtil class TestFileUtil: @pytest.mark.parametrize('file', ('/etc/hosts', '/etc/profile')) def test_get_last_file_change_ts(self, file: str): ts = FileUtil.get_last_file_change_ts(file) assert ts is not None assert ts.tzinfo == timezone.utc assert ts.year > 1970 @pytest.mark.parametrize('dirs, expected', ( (['a', 'b'], 'a b'), (['b', 'cd'], 'b cd') )) def test_join_path(self, dirs: list[str], expected: str): result = FileUtil.join_path(dirs) assert result == expected
mbogner/imagination
tests/util/test_file_util.py
test_file_util.py
py
644
python
en
code
0
github-code
6
31533956916
start = int(input()) finish = int(input()) number_to_reach = int(input()) combinations = 0 matches = 0 for first_number in range(start, finish + 1): for second_number in range(start, finish + 1): combinations += 1 if first_number + second_number == number_to_reach: matches += 1 print(f"Combination N:{combinations} ({first_number} + {second_number} = {number_to_reach})") exit() if matches == 0: print(f"{combinations} combinations - neither equals {number_to_reach}")
iliyan-pigeon/Soft-uni-Courses
programming_basics_python/nested_loops/sum_of_two_numbers.py
sum_of_two_numbers.py
py
529
python
en
code
0
github-code
6
73076335867
#!/usr/bin/env python3 import os import sys import subprocess cd = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) locale_path = os.path.join(cd, "locale") pot_file_path = os.path.join(locale_path, "TTMediaBot.pot") source_paths = [os.path.join(cd, "bot"), os.path.join(cd, "TTMediaBot.py")] babel_prefix = "{} -m babel.messages.frontend".format(sys.executable) locale_domain = "TTMediaBot" def extract(): code = subprocess.call( f"{babel_prefix} extract {' '.join(source_paths)} -o {pot_file_path} --keywords=translate -c translators: --copyright-holder=TTMediaBot-team --project=TTMediaBot", shell=True, ) if code: sys.exit("Bable is not installed. please install all the requirements") def update(): code = subprocess.call( f"{babel_prefix} update -i {pot_file_path} -d {locale_path} -D {locale_domain} --update-header-comment --previous", shell=True, ) if code: sys.exit(code) def compile(): code = subprocess.call( f"{babel_prefix} compile -d {locale_path} -D {locale_domain}", shell=True ) if code: sys.exit(code) def main(): extract() update() compile() if __name__ == "__main__": main()
gumerov-amir/TTMediaBot
tools/compile_locales.py
compile_locales.py
py
1,236
python
en
code
52
github-code
6
15710053369
from fastapi import APIRouter, Depends, Response from typing import List, Union from queries.cover import CoverIn, CoverOut, CoverRepository, Error router = APIRouter() @router.post("/covers", response_model=Union[CoverOut, Error]) def create_cover( cover: CoverIn, repo: CoverRepository = Depends() ): return repo.create(cover) @router.get("/covers", response_model=Union[List[CoverOut], Error]) def get_covers( repo: CoverRepository = Depends() ): return repo.get_all() @router.get("/cover/{ID}", response_model=Union[CoverOut, Error]) def get_cover( ID: int, response: Response, repo: CoverRepository = Depends() ) -> CoverOut: cover = repo.get_one(ID) if cover is None: response.status_code = 404 return cover @router.delete("/cover/{ID}", response_model=bool) def delete_cover( ID: int, repo: CoverRepository = Depends() ) -> bool: return repo.delete(ID) @router.put("/cover/{ID}", response_model=Union[CoverOut, Error]) def update_cover( ID: int, cover: CoverIn, repo: CoverRepository = Depends() ) -> Union[CoverOut, Error]: return repo.update(ID, cover) @router.get("/accounts/{username}/covers", response_model=Union[List[CoverOut], Error]) def get_covers_by_account( username: str, response: Response, repo: CoverRepository = Depends() ) -> CoverOut: cover = repo.get_covers_by_account(username) if cover is None: response.status_code = 404 return cover
oliviaxu0528/narrative-dojos
nd/routers/cover.py
cover.py
py
1,501
python
en
code
0
github-code
6
4582050726
import numpy as np import pandas as pd from matplotlib import pyplot as plt import seaborn as sns from scipy import stats import collections import time from sklearn import cluster from sklearn.metrics import adjusted_rand_score import scipy as sp from tqdm import tqdm from sklearn.manifold import MDS from run_dist_mat import * from chromosome_alignment import * from scipy.cluster.hierarchy import dendrogram, linkage import itertools from mpl_toolkits.mplot3d import Axes3D from multiprocessing import Pool from itertools import repeat def robustness_analysis(): reads_to_inlcude = "inliers" #"all" clustering_method = "pckmeans" # "igs" num_chrs = 19 data = read_data(clustering_method, reads_to_inlcude) #cells with less than 150 reads are deleted: 80., 84., 105., 113. cum_lens = get_chr_cumulative_lengths() fig, axes = plt.subplots(4,4, figsize = (20,20)) for i, bin_size in tqdm(enumerate([200e6, 100e6, 50e6, 25e6])): for j, num_samples_for_resampling in tqdm(enumerate([5, 25, 50, 75])): print("\n bin size: ", bin_size) print("\n num samples: ", num_samples) proportion_matching = [] variances = [] cell_i_index = 91 cell_j_index = 93 cell_i = data.loc[(data.cell_index==cell_i_index) & (data.chr < 20)].copy() cell_i['abs_pos'] = -1 cell_i['abs_pos'] = cell_i.pos.copy() + [cum_lens[ch-1] for ch in cell_i.chr] #encodes the absolute position of the reads along the linear genome cell_j = data.loc[(data.cell_index==cell_j_index) & (data.chr < 20)].copy() cell_j['abs_pos'] = -1 cell_j['abs_pos'] = cell_j.pos.copy() + [cum_lens[ch-1] for ch in cell_j.chr] #encodes the absolute position of the reads along the linear genome bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs) num_trials = 40 min_dists = [] for trial in range(num_trials): bin_resampling_dists = [] for bin_resampling in range(num_samples_for_resampling): cell_i_dist,_ = pckmeans_get_dist_mat_binned_resample(cell_i, bins, num_bins_per_chr) cell_j_dist,_ = pckmeans_get_dist_mat_binned_resample(cell_j, bins, num_bins_per_chr) num_samples_for_ordering = 50 ordering_dists = [] random_orders = np.zeros((num_samples_for_ordering, 19)) for counter, sample in enumerate(range(num_samples_for_ordering)): order = np.arange(1,20) np.random.shuffle(order) random_orders[counter, :] = order ### parallelizing: num_workers = 4 with Pool(num_workers) as p: ordering_dists.append(p.starmap(get_aligned_inter_cell_dist, zip(repeat(cell_i_dist), repeat(cell_j_dist), repeat(num_bins_per_chr), repeat(19), random_orders))[0][0])#the first [0] gives the distance component of the output, the second [0] gets the actual distance and not the size of the intersection bin_resampling_dists.append(np.round(np.min(ordering_dists), 4)) min_dists.append(np.min(bin_resampling_dists)) axes[j,i].scatter(np.zeros_like(min_dists), min_dists) axes[j,i].set_title("bin size {}".format(bin_size/1e6)) axes[j,i].set_ylabel("sample size: {}".format(num_samples_for_resampling)) plt.suptitle("cell indeces {} and {}".format(cell_i_index, cell_j_index)) plt.savefig("figures/sequential_algorithm_bin_resampling_analysis_cells{}_{}.png".format(cell_i_index, cell_j_index))
pdavar/Analysis-of-3D-Mouse-Genome-Organization
bin_resample_analysis.py
bin_resample_analysis.py
py
3,912
python
en
code
0
github-code
6
72296990589
import doctest

"""Morse Code Translator"""

LETTER_TO_MORSE = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
    'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
    'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
    'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
    'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--',
    '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..',
    '9': '----.', '0': '-----', ', ': '--..--', '.': '.-.-.-', '?': '..--..',
    '/': '-..-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', ' ': ' '
}

MORSE_TO_LETTER = {
    morse: letter
    for letter, morse in LETTER_TO_MORSE.items()
}


def encode(message: str) -> str:
    """
    Encodes a string according to the Morse code table
    """
    encoded_signs = [
        LETTER_TO_MORSE[letter] for letter in message
    ]
    return ' '.join(encoded_signs)


def decode(morse_message: str) -> str:
    """
    Decodes a Morse-code string back into text using the Morse code table

    First encode call - the ordinary case
    Second - using a doctest directive
    Third - a flag
    Fourth - exercising the exception

    >>> encode(message='SOS')
    '... --- ...'
    >>> encode(message='SOS ')  # doctest: +NORMALIZE_WHITESPACE
    '... --- ... '
    >>> encode(message='SOS SOS SOS SOS SOS')  # doctest: +ELLIPSIS
    '... --- ... ... ... --- ...'
    >>> encode(message=0)
    Traceback (most recent call last):
    TypeError: 'int' object is not iterable
    """
    decoded_letters = [
        MORSE_TO_LETTER[letter] for letter in morse_message.split()
    ]
    return ''.join(decoded_letters)


if __name__ == '__main__':
    doctest.testmod()
janemur/HW5
issue-01/main.py
main.py
py
2,058
python
ru
code
0
github-code
6
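A quick round-trip sketch using the encode and decode helpers defined above (assumed to be in scope):

morse = encode('HELLO WORLD')
print(morse)          # .... . .-.. .-.. ---   .-- --- .-. .-.. -..
print(decode(morse))  # HELLOWORLD  (the space encodes to ' ', which str.split() then drops)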
38986389406
#!/usr/bin/env python

import wifi
import socket
import subprocess
import re
import time

while True:
    # list() so that len() and indexing also work on Python 3, where filter() returns an iterator
    seekers = list(filter(lambda cell: cell.ssid == 'OracleSeeker', wifi.Cell.all('wlan0')))
    if len(seekers) > 0:
        print('Found seeker', seekers[0])
        cell = seekers[0]
        scheme = wifi.Scheme.find('wlan0', 'seeker')
        # scheme.save()
        scheme.activate()
        p = subprocess.Popen('/usr/sbin/arping -c 1 -i wlan0 192.168.4.1',
                             shell=True, stdout=subprocess.PIPE)
        output, errors = p.communicate()
        if output:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('192.168.4.1', 2017))
            mac = re.findall(r'from (.*) \(1', output)[0].replace('from ', '').replace(' \(1', '')
            s.send(mac)
            s.close()
            print('sent', mac)
        else:
            print(errors)
    else:
        time.sleep(5)
raboof/SHA2017Game-oracle
oracle.py
oracle.py
py
789
python
en
code
0
github-code
6
39346916658
import pandas as pd
import fasttext


class LanguageDetector:
    def __init__(self):
        self.model = fasttext.load_model('lid.176.bin')

    def d(self, line):
        # Detect the language of a single line, falling back to "unknown" on failure.
        # The original called detect() (presumably langdetect) without importing it;
        # here the already-loaded fastText model is reused instead.
        try:
            labels, _confidences = self.model.predict(line)
            return labels[0].replace('__label__', '')
        except Exception:
            return "unknown"

    def convert(self, filename, output):
        df = pd.read_csv(filename, header=None, names=['timestamp', 'date', 'text'])
        data = [d.replace("\n", " ") for d in df['text'].to_list()]
        (langs, distance) = self.model.predict(data)
        langs = [' '.join(l).replace('__label__', '') for l in langs]
        df['language'] = langs
        df.to_csv(output)
        return langs

# f = open(file)
# lines = f.read()
# f.close()
# lines = [ (l, d(l)) for l in lines.split('\n') ]
# dic = {}
# for (line, lang) in lines:
#     val = dic.get(lang,[])
#     dic[lang] = val + [line]
# for k in dic.keys():
#     dir= f"lang/{k}"
#     os.makedirs(dir, exist_ok=True)
#     wf=open(f"{dir}/{file}", "w")
#     wf.write("\n".join(dic[k]))
#     wf.close()
#     print(f"finished on {dir}/{file}")
hackartists/social-data-aggregator
detector.py
detector.py
py
1,183
python
en
code
0
github-code
6
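A hypothetical usage sketch for the LanguageDetector above; the file names are placeholders, and the lid.176.bin fastText model must have been downloaded beforehand for load_model to succeed.

detector = LanguageDetector()
langs = detector.convert('tweets.csv', 'tweets_with_language.csv')
print(langs[:10])  # e.g. ['en', 'de', 'en', ...]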
3490973159
# -*- coding: utf-8 -*- """ Created on Mon Aug 31 00:40:46 2020 @author: Rashidul hasan (student id-1512027) depertmant of naval architucture and marine engineering Bangladesh university of engineering and technology By using this moddule we can see our desiarbale design which is created by using design module """ import numpy as np from scipy.sparse import coo_matrix from scipy.sparse.linalg import spsolve from matplotlib import colors import matplotlib.pyplot as plt class design_view: def __init__(self,x,nelx,nely): self.x=x self.nelx=nelx self.nely=nely #x=volfrac * np.ones((nely*nelx),dtype=float) xPhys=x.copy() v=-xPhys.reshape((nelx,nely)).T plt.ion() # Ensure that redrawing is possible fig,ax = plt.subplots() im = ax.imshow(v, cmap='gray',\ interpolation='none',norm=colors.Normalize(vmin=-1,vmax=0)) fig.show()
rashedhasan007/A-topology-and-optimisation-software-
A-topology-and-optimisation-software--main/view.py
view.py
py
954
python
en
code
0
github-code
6
43291543351
import math import os import cv2 from ultralytics import YOLO from people import People from car import Car video = os.path.join('.', 'videos', 'Casa-Ch.mp4') video_cap = cv2.VideoCapture(video) fps = video_cap.get(cv2.CAP_PROP_FPS) pixels = int((24/fps)*15) ret, frame = video_cap.read() altura, largura, canais = frame.shape model = YOLO("yolov8n.pt") carro = None persons = [] personsT = [] frameCount = 0 detection_threshold = 0.7 flag = False centerParkX = (215 + 506) / 2 centerParkY = (89 + 380) / 2 stopedCars = [] def tracking(): flag_2 = False for i in range(len(persons)): dist = persons[i].getdistance(bcenterX, bcenterY, frameCount, fps) if not flag_2 and dist < pixels: boxpeople = frame[y1:y2, x1:x2] persons[i].compare_bouding(boxpeople) persons[i].set_codinates(x1, x2, y1, y2) persons[i].set_lastframe(frameCount) persons[i].reverse_track() flag_2 = True if not flag_2 and len(persons) < pessoas: boundingboxpeople = frame[y1:y2, x1:x2] person1 = People(boundingboxpeople, x1, x2, y1, y2, frameCount) persons.append(person1) for cod in range(len(persons)): if persons[cod].get_tracking(): org = (persons[cod].get_cx(), persons[cod].get_cy() - 7) persons[cod].reverse_track() cv2.circle(frame, (bcenterX, bcenterY), 5, (0, 255, 0), -1) cv2.putText(frame, str(cod), org, 0, 1, (0, 0, 255), 2) while ret: frameCount += 1 ret, frame = video_cap.read() frame = cv2.resize(frame, (640, 480)) results = model(frame) for result in results: pessoas = sum(1 for elemento in result.boxes.data.tolist() if elemento[-1] == 0.0) for r in result.boxes.data.tolist(): x1, y1, x2, y2, score, class_id = r x1 = int(x1) y1 = int(y1) x2 = int(x2) y2 = int(y2) class_id = int(class_id) bcenterX = int((x1 + x2)/2) bcenterY = int((y1 + y2)/2) flag = math.hypot(centerParkX - (int(x1 + x2) / 2), centerParkY - (int(y1 + y2) / 2)) < 30 for rmv in range(len(persons)): if persons[rmv].check_lost_track(fps, frameCount): personsT.append(persons.pop(rmv)) personsT[len(personsT)-1].extract_caracteristcs() ''' if class_id == 2 and carro is not None and not flag: carro = None''' if class_id == 2 and carro is None and flag: carro = Car(frame[y1:y2, x1:x2], frameCount, bcenterX, bcenterY) else: if carro is not None: if carro.getStopedTime(fps, frameCount) >= 10 and not carro.get_alerted(): if carro.get_alerted(): stopedCars.append(carro) carro.viewimage(bcenterX, bcenterY) if class_id == 0: #cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 255, 255), 3) if frameCount < 1: boundingBoxPeople = frame[y1:y2, x1:x2] person = People(boundingBoxPeople, x1, x2, y1, y2, frameCount) persons.append(person) else: tracking() cv2.imshow('Camera', frame) cv2.waitKey(1) video_cap.release() cv2.destroyAllWindows()
serjetus/Projeto
src/main.py
main.py
py
3,473
python
en
code
0
github-code
6
18110173657
from django.contrib import admin from django.urls import path, include, re_path as url # 스웨거 설정 from rest_framework.permissions import AllowAny from drf_yasg.views import get_schema_view from drf_yasg import openapi from django.conf import settings from django.conf.urls.static import static # 스웨거 설정 schema_url_patterns = [ path('api/user/', include('user.urls')), path('api/user/', include('allauth.urls')), ] schema_view_v1 = get_schema_view( openapi.Info( title="drfLogin Test API", default_version='v1', description="Development drfLogin Test Document", terms_of_service="https://www.google.com/policies/terms/", ), public=True, permission_classes=(AllowAny,), patterns=schema_url_patterns, ) urlpatterns = [ path('admin/', admin.site.urls), path('api/user/', include('user.urls')), path('api/user/', include('allauth.urls')), path('blog/', include('blog.urls')), ] if settings.DEBUG: urlpatterns += [ # Auto DRF API docs url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view_v1.without_ui(cache_timeout=0), name='schema-json'), url(r'^swagger/$', schema_view_v1.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), url(r'^redoc/$', schema_view_v1.with_ui('redoc', cache_timeout=0), name='schema-redoc'), ] urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
Kim-Link/drfLogin
drfLogin/drfLogin/urls.py
urls.py
py
1,437
python
en
code
0
github-code
6
20825994964
import json from pandas import DataFrame import pandas as pd import requests import emails file_name = 'teste.csv' def getJson(): r = requests.get('https://api.biscoint.io/v1/ticker?base=BTC&quote=BRL') df_new = pd.DataFrame() df = pd.DataFrame(json.loads(r.text)) date = pd.Timestamp.date(pd.Timestamp( df['data']['timestamp'], tz='America/Fortaleza')) time = pd.Timestamp.time(pd.Timestamp( df['data']['timestamp'], tz='America/Fortaleza')).strftime('%H:%M:%S') df_new['ask'] = [df['data']['ask']] df_new['bid'] = [df['data']['bid']] df_new['high'] = [df['data']['high']] df_new['last'] = [df['data']['last']] df_new['low'] = [df['data']['low']] df_new['vol'] = [df['data']['vol']] df_new['date'] = [date] df_new['time'] = [time] last = df_new['last'][0] low = df_new['low'][0] diff = (1-(last / low)) if diff > 0.04: high = df_new['high'][0] emails.send_email(last, low, diff, high) with open(file_name, 'a') as f: df_new.to_csv(f, header=f.tell() == 0) if __name__ == '__main__': # testar() getJson()
HumbertoLimaa/mysite
utils.py
utils.py
py
1,135
python
en
code
0
github-code
6
4769430747
#!/usr/bin/env python import sys import glob, os import argparse def insert_track_id(label_file, track_ids): labels_with_track = [] with open(label_file, 'r') as yolo_f: labels = yolo_f.readlines() for i, label in enumerate(labels): split_label = label.split() if len(split_label) < 6: split_label.insert(1, track_ids[i]) # Insert track ID into label else: print(f'{label_file} should have track ID already') labels_with_track.append(' '.join(split_label) + '\n') with open(label_file, 'w') as yolo_f: yolo_f.writelines(labels_with_track) def main(args): mot_labels_path = os.path.join(args.mot_jde_dir, 'labels_with_ids') yolo_train_labels_path = os.path.join(args.yolo_dir, 'obj_train_data') yolo_valid_labels_path = os.path.join(args.yolo_dir, 'obj_valid_data') for label_file in glob.glob(os.path.join(mot_labels_path, '*')): track_ids = [] # Format: [class] [track_id] [x] [y] [width] [height] with open(label_file, 'r') as mot_f: mot_labels = mot_f.readlines() for label in mot_labels: track_ids.append(label.split()[1]) label_filename = os.path.splitext(os.path.basename(label_file))[0] task_id = label_filename[:-6] frame_id = label_filename[-6:] yolo_label_filename = f'{task_id}_{frame_id}.txt' train_label = os.path.join(yolo_train_labels_path, yolo_label_filename) valid_label = os.path.join(yolo_valid_labels_path, yolo_label_filename) if os.path.exists(train_label): assert not os.path.exists(valid_label) insert_track_id(train_label, track_ids) elif os.path.exists(valid_label): insert_track_id(valid_label, track_ids) else: print(f'label file {yolo_label_filename} not found. Skipping...') if __name__ == '__main__': parser = argparse.ArgumentParser(description='Transcribe track IDs to yolo format from MOT JDE format.') parser.add_argument('mot_jde_dir') parser.add_argument('yolo_dir') args = parser.parse_args() main(args)
Salmon-Computer-Vision/salmon-computer-vision
utils/scribe_yolo_track.py
scribe_yolo_track.py
py
2,022
python
en
code
4
github-code
6
811294756
'''Swapping Nodes in a Linked List - https://leetcode.com/problems/swapping-nodes-in-a-linked-list/ You are given the head of a linked list, and an integer k. Return the head of the linked list after swapping the values of the kth node from the beginning and the kth node from the end (the list is 1-indexed). Example 1: Input: head = [1,2,3,4,5], k = 2 Output: [1,4,3,2,5]''' # Definition for singly-linked list. # class ListNode: # def __init__(self, val=0, next=None): # self.val = val # self.next = next class Solution: def swapNodes(self, head: ListNode, k: int) -> ListNode: if not head: return None length = 0 current = head frontNode = None while current: length += 1 if length == k: frontNode = current current = current.next if length == 1: return head endNode = head for i in range(1, length - k + 1): endNode = endNode.next endNode.val, frontNode.val = frontNode.val, endNode.val return head
Saima-Chaity/Leetcode
LinkedList/Swapping Nodes in a Linked List.py
Swapping Nodes in a Linked List.py
py
1,108
python
en
code
0
github-code
6
39688113614
# 102. Binary Tree Level Order Traversal
# Time: O(size(Tree))
# Space: O(size(Tree))
from typing import List


# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        if not root:
            return []
        q = [root]
        level_order = []
        while q:
            cur_len = len(q)
            cur_level = []
            while cur_len > 0:
                cur_node = q.pop(0)
                cur_level.append(cur_node.val)
                if cur_node.left:
                    q.append(cur_node.left)
                if cur_node.right:
                    q.append(cur_node.right)
                cur_len -= 1
            level_order.append(cur_level)
        return level_order
cmattey/leetcode_problems
Python/lc_102_binary_level_order_traversal.py
lc_102_binary_level_order_traversal.py
py
863
python
en
code
4
github-code
6
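A quick check of levelOrder above on the classic [3,9,20,null,null,15,7] tree. TreeNode is re-declared from the commented stub; note that outside LeetCode both TreeNode and typing.List must be defined before the Solution class, since its annotations are evaluated at definition time.

from typing import List  # required by the List[List[int]] annotation outside LeetCode

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left, root.right = TreeNode(9), TreeNode(20)
root.right.left, root.right.right = TreeNode(15), TreeNode(7)

print(Solution().levelOrder(root))  # expected: [[3], [9, 20], [15, 7]]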
28999549212
#!/usr/bin/env python # -*- coding: iso-8859-1 -*- import safygiphy from response import Response giief = safygiphy.Giphy() def getgif(mattermost_request): text = mattermost_request.text search = ''.join(text).encode('latin1') jif = giief.random(tag=search) if jif['data']: t = u'' +jif['data']['image_original_url'] + " " +search.decode('utf-8') else: t = "gibts nicht" return Response(t)
rehwanne/wannbot
gif.py
gif.py
py
434
python
en
code
1
github-code
6
29432109275
from collections import defaultdict, Counter class Solution: def groupAnagrams(self, strs): ana_dict = defaultdict(list) for s in strs: # ana_dict[tuple(sorted(Counter(s)))].append(s) count = [0]*26 for c in s: count[ord(c)-ord('a')] += 1 ana_dict[tuple(count)].append(s) return ana_dict.values() solver=Solution() strs = ["ddddddddddg","dgggggggggg"] print(solver.groupAnagrams(strs))
mintaewon/coding_leetcode
0909/P53_hoin.py
P53_hoin.py
py
478
python
en
code
0
github-code
6
17996077237
# -*- coding:utf-8 -*- # Given a non-empty array of integers, every element appears twice except for one element that appears only once. Find that single element. # Note: # Your algorithm should have linear time complexity. Can you implement it without using extra space? class Solution(object): def singleNumber(self, nums): """ :type nums: List[int] :rtype: int """ num = nums[0] for i in range(1,len(nums)): num = num ^ nums[i] return num if __name__ == '__main__': print(Solution().singleNumber([2,2,1]))
shirleychangyuanyuan/LeetcodeByPython
136-只出现一次的数字.py
136-只出现一次的数字.py
py
625
python
zh
code
0
github-code
6
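A worked illustration of the XOR identity the solution above relies on: pairs cancel because a ^ a == 0 and 0 ^ b == b, so only the unpaired value survives. The Solution class from the record above is assumed to be in scope.

nums = [4, 1, 2, 1, 2]
acc = 0
for n in nums:
    acc ^= n      # 0^4=4, 4^1=5, 5^2=7, 7^1=6, 6^2=4
print(acc)        # 4, the element that appears only once
assert acc == Solution().singleNumber(nums)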
3020675145
import os DEFAULT_TIMEZONE = 'US/Eastern' DEFAULT_START_DATE = '2012-01-01' DEFAULT_END_DATE = '2018-03-31' S3_DATA_BUCKET = 'com.estimize.production.data' CURRENT_QUARTER = '2018q1' ROOT_DATA_URL = 'https://s3.amazonaws.com/{}/research/{}'.format(S3_DATA_BUCKET, CURRENT_QUARTER) def data_dir(): if os.path.basename(os.getcwd()) == 'notebooks': return os.path.join(os.getcwd(), os.pardir, 'data') else: return os.path.join(os.getcwd(), 'data')
Estimize/estimize-research-py
estimize/config.py
config.py
py
473
python
en
code
10
github-code
6
27049794168
cmd = 'call function' cmd2 = 'Test!' if cmd.split(" ")[1] == "function": print(f"{cmd}") x = cmd.split(" ")[0] x2 = cmd.split(" ")[0] + " " + cmd.split(" ")[1] cmd = cmd.split(" ")[1] print(cmd) # split the cmd var and look at index 1, 'function'. print(x) # var x = the 0 index of the cmd var value, 'call'. print(x2) # var x2 = the value of both indexes in the cmd var. Index 0 and 1. else: print("not working!") def the_test(): print("The function is working!!!!!!!!!!!!!") print("Outside the function") test = '123' if test == '123': the_test()
Digitwidgit/Code-Snippets-for-Socket-Programming
Function_Calling_Outside_TheFunction.py
Function_Calling_Outside_TheFunction.py
py
656
python
en
code
0
github-code
6
30970218925
from euphorie.client import model from euphorie.client.tests.test_model import createSurvey from osha.oira.testing import OiRAIntegrationTestCase class NoCustomRisksFilterTests(OiRAIntegrationTestCase): def query(self): return self.session.query(model.SurveyTreeItem).filter( model.NO_CUSTOM_RISKS_FILTER ) def testQuerying(self): (self.session, self.survey) = createSurvey() self.mod1 = model.Module( title="Module 1", module_id="1", zodb_path="1", skip_children=False ) self.survey.addChild(self.mod1) self.q1 = model.Risk( title="Risk 1", risk_id="1", zodb_path="1/1", type="risk", identification="no", ) self.mod1.addChild(self.q1) self.assertEqual(self.query().count(), 2) self.q2 = model.Risk( title="Risk 2", risk_id="2", zodb_path="1/2", type="risk", identification="no", is_custom_risk="true", ) self.mod1.addChild(self.q1) self.assertEqual(self.query().count(), 2) self.q2 = model.Risk( title="Risk 3", risk_id="2", zodb_path="1/3", type="risk", identification="no", is_custom_risk="false", ) self.mod1.addChild(self.q1) self.assertEqual(self.query().count(), 2)
euphorie/osha.oira
src/osha/oira/client/tests/test_custom_risks.py
test_custom_risks.py
py
1,458
python
en
code
4
github-code
6
3238675482
"""Contains the class single_object. Used to compute single thermal objects. """ from .. import solvers from . import Object import matplotlib.pyplot as plt import numpy as np class SingleObject: """Single_object class. This class solves numerically the heat conduction equation for 1 dimension of a single material(s). The class has 6 methods. """ def __init__(self, amb_temperature, materials=('Cu',), borders=(1, 11), materials_order=(0,), dx=0.01, dt=0.1, file_name=None, boundaries=(0, 0), initial_state=False, materials_path=False, draw=['temperature'], draw_scale=None): """Thermal object initialization. `amb_temperature` is the ambient temperature of the whole system. `materials` is the list of strings of all the used materials present in `material_path`. `borders` is a list of the points where there is a change of material. `materials_order` is a list of the materials list indexes that defines the material properties given by borders. `dx` and `dt` are the space and time steps, respectively. `file_name` is the file name where the temperature is saved. `boundaries` is a list of two entries that define the boundary condition for temperature. If 0 the boundary condition is insulation. `initial_state` is the initial state of the materials. True if there are an applied field and False if them field is absent. `materials_path` is absolute path of the materials database. If false, then the materials database is the standard heatrapy database. `draw` is a list of strings representing the online plots. In this version only `'temperature'` can be potted. If the list is empty, then no drawing is performed. `draw_scale` is a list of two values, representing the minimum and maximum temperature to be drawn. If None, there are no limits. """ # check the validity of inputs materials = tuple(materials) borders = tuple(borders) materials_order = tuple(materials_order) boundaries = tuple(boundaries) cond01 = isinstance(amb_temperature, float) cond01 = cond01 or isinstance(amb_temperature, int) cond02 = isinstance(materials, tuple) cond03 = isinstance(borders, tuple) cond04 = isinstance(materials_order, tuple) cond05 = isinstance(dx, int) or isinstance(dx, float) cond06 = isinstance(dt, int) or isinstance(dt, float) cond07 = isinstance(file_name, str) cond07 = cond07 or (file_name is None) cond08 = isinstance(boundaries, tuple) cond10 = isinstance(initial_state, bool) if isinstance(draw, list): cond15 = True elif draw is None: cond15 = True else: cond15 = False if isinstance(draw_scale, list) or isinstance(draw_scale, tuple): cond16 = (len(draw_scale) == 2) elif draw_scale is None: cond16 = True else: cond16 = False condition = cond01 and cond02 and cond03 and cond04 and cond05 condition = condition and cond06 and cond07 and cond08 condition = condition and cond10 condition = condition and cond15 and cond16 if not condition: raise ValueError self.object = Object(amb_temperature, materials=materials, borders=borders, materials_order=materials_order, dx=dx, dt=dt, file_name=file_name, boundaries=boundaries, initial_state=initial_state, materials_path=materials_path) # initializes the plotting self.draw = draw self.draw_scale = draw_scale for drawing in self.draw: if drawing == 'temperature': self.figure = plt.figure() self.ax = self.figure.add_subplot(111) temp = [] for i in range(len(self.object.temperature)): temp.append(self.object.temperature[i][0]) if not self.draw_scale: vmax = max(temp) vmin = min(temp) if vmax == vmin: vmin = vmin - 0.1 vmax = vmax + 0.1 temp = np.array(temp) x_plot = 
[self.object.dx*j for j in range(len(temp))] self.online, = self.ax.plot(x_plot, temp) self.ax.set_ylim([vmin, vmax]) else: temp = np.array(temp) x_plot = [self.object.dx*j for j in range(len(temp))] self.online, = self.ax.plot(x_plot, temp) self.ax.set_ylim(self.draw_scale) self.ax.set_title('Temperature (K)') self.ax.set_xlabel('x axis (m)') self.ax.set_ylabel('temperature (K)') plt.show(block=False) def show_figure(self, figure_type, draw_scale=None): """Plotting. Initializes a specific live plotting. `figure_type` is a string identifying the plotting. This version only allows the plotting of the 'temperature'. `draw_scale` defines the range of temperatures. If None, this range is found automatically for every frame. """ # check the validity of inputs if isinstance(draw_scale, list) or isinstance(draw_scale, tuple): condition = (len(draw_scale) == 2) elif draw_scale is None: condition = True else: condition = False condition = condition and isinstance(figure_type, str) if not condition: raise ValueError self.draw_scale = draw_scale if figure_type == 'temperature': if figure_type not in self.draw: self.draw.append(figure_type) self.figure = plt.figure() self.ax = self.figure.add_subplot(111) temp = [] for i in range(len(self.object.temperature)): temp.append(self.object.temperature[i][0]) if not self.draw_scale: vmax = max(temp) vmin = min(temp) if vmax == vmin: vmin = vmin - 0.1 vmax = vmax + 0.1 temp = np.array(temp) x_plot = [self.object.dx*j for j in range(len(temp))] self.online, = self.ax.plot(x_plot, temp) self.ax.set_ylim([vmin, vmax]) else: temp = np.array(temp) x_plot = [self.object.dx*j for j in range(len(temp))] self.online, = self.ax.plot(x_plot, temp) self.ax.set_ylim(self.draw_scale) self.ax.set_title('Temperature (K)') self.ax.set_xlabel('x axis (m)') self.ax.set_ylabel('temperature (K)') plt.show(block=False) def activate(self, initial_point, final_point): """Activation. Activates the thermal object between `initial_point` to `final_point`. """ # check the validity of inputs condition = isinstance(initial_point, int) condition = condition and isinstance(final_point, int) if not condition: raise ValueError self.object.activate(initial_point, final_point) if self.draw: for drawing in self.draw: if drawing == 'temperature': try: temp = [] for i in range(len(self.object.temperature)): temp.append(self.object.temperature[i][0]) if not self.draw_scale: vmax = max(temp) vmin = min(temp) if vmax == vmin: vmin = vmin - 0.1 vmax = vmax + 0.1 temp = np.array(temp) self.online.set_ydata(temp) self.ax.set_ylim([vmin, vmax]) else: temp = np.array(temp) self.online.set_ydata(temp) self.figure.canvas.draw() except: pass def deactivate(self, initial_point, final_point): """Deactivation. Deactivates the thermal object between `initial_point` to `final_point`. 
""" # check the validity of inputs condition = isinstance(initial_point, int) condition = condition and isinstance(final_point, int) if not condition: raise ValueError self.object.deactivate(initial_point, final_point) if self.draw: for drawing in self.draw: if drawing == 'temperature': try: temp = [] for i in range(len(self.object.temperature)): temp.append(self.object.temperature[i][0]) if not self.draw_scale: vmax = max(temp) vmin = min(temp) if vmax == vmin: vmin = vmin - 0.1 vmax = vmax + 0.1 temp = np.array(temp) self.online.set_ydata(temp) self.ax.set_ylim([vmin, vmax]) else: temp = np.array(temp) self.online.set_ydata(temp) self.figure.canvas.draw() except: pass def change_power(self, power_type, power, initial_point, final_point): """Heat power source change. Changes the coeficients for the heat power sources by a value of power from `initial_point` to `final_point`. `power_type` is a string that represents the type of coefficient, i.e. 'Q' or 'Q0'. """ # check the validity of inputs value = isinstance(initial_point, int) if value and isinstance(final_point, int): cond1 = True else: cond1 = False cond2 = isinstance(power, int) or isinstance(power, float) if isinstance(power_type, str): if power_type == 'Q' or power_type == 'Q0': cond3 = True else: cond3 = False else: cond3 = False if not (cond1 and cond2 and cond3): raise ValueError if power_type == 'Q': for j in range(initial_point, final_point): self.object.Q[j] = power if power_type == 'Q0': for j in range(initial_point, final_point): self.object.Q0[j] = power def change_boundaries(self, boundaries): """Boundary change. Changes the `boundaries` variable. """ # check the validity of inputs if isinstance(boundaries, tuple): if len(boundaries) == 2: condition = True else: condition = False else: condition = False if not condition: raise ValueError self.object.boundaries = boundaries def compute(self, time_interval, write_interval, solver='explicit_k(x)', verbose=True): """Compute the thermal process. Computes the system for time_interval seconds, and writes into the `file_name` file every `write_interval` time steps. Four different solvers can be used: `'explicit_general'`, `'explicit_k(x)'`, `'implicit_general'`, and `'implicit_k(x)'`. If `verbose = True`, then the progress of the computation progress is shown. 
""" # check the validity of inputs cond1 = isinstance(time_interval, float) cond1 = cond1 or isinstance(time_interval, int) cond2 = isinstance(write_interval, int) if isinstance(solver, str): all_solvers = ['implicit_general', 'implicit_k(x)', 'explicit_k(x)', 'explicit_general'] if solver in all_solvers: cond3 = True else: cond3 = False else: cond3 = False cond4 = isinstance(verbose, bool) condition = cond1 and cond2 and cond3 and cond4 if not condition: raise ValueError # number of time steps for the given timeInterval nt = int(time_interval / self.object.dt) # number of time steps counting from the last writing process nw = 0 # computes for j in range(nt): # updates the time_passed self.object.time_passed = self.object.time_passed + self.object.dt # defines the material properties accoring to the state list for i in range(1, self.object.num_points - 1): if self.object.state[i] is True: value = self.object.materials_index[i] self.object.rho[i] = self.object.materials[value].rhoa( self.object.temperature[i][0]) self.object.Cp[i] = self.object.materials[value].cpa( self.object.temperature[i][0]) self.object.k[i] = self.object.materials[value].ka( self.object.temperature[i][0]) if self.object.state[i] is False: value = self.object.materials_index[i] self.object.rho[i] = self.object.materials[value].rho0( self.object.temperature[i][0]) self.object.Cp[i] = self.object.materials[value].cp0( self.object.temperature[i][0]) self.object.k[i] = self.object.materials[value].k0( self.object.temperature[i][0]) # SOLVERS # implicit k constant if solver == 'implicit_general': value = solvers.implicit_general(self.object) self.object.temperature, self.object.lheat = value # implicit k dependent on x if solver == 'implicit_k(x)': value = solvers.implicit_k(self.object) self.object.temperature, self.object.lheat = value # explicit k constant if solver == 'explicit_general': value = solvers.explicit_general(self.object) self.object.temperature, self.object.lheat = value # explicit k dependent on x if solver == 'explicit_k(x)': value = solvers.explicit_k(self.object) self.object.temperature, self.object.lheat = value nw = nw + 1 if self.draw: for drawing in self.draw: if drawing == 'temperature': try: value = nw + 1 == write_interval if value or j == 0 or j == nt - 1: temp = [] for i in range(len(self.object.temperature)): temp.append(self.object.temperature[i][0]) if not self.draw_scale: vmax = max(temp) vmin = min(temp) if vmax == vmin: vmin = vmin - 0.1 vmax = vmax + 0.1 temp = np.array(temp) self.online.set_ydata(temp) self.ax.set_ylim([vmin, vmax]) else: temp = np.array(temp) self.online.set_ydata(temp) self.figure.canvas.draw() except: pass # writes the temperature to file_name file ... # if the number of time steps is verified if self.object.file_name: if nw == write_interval or j == 0 or j == nt - 1: line = '%f,' % self.object.time_passed for i in self.object.temperature: new_line = '%f,' % i[1] line = line + new_line line = line[:-1] + '\n' f = open(self.object.file_name, 'a') f.write(line) f.close() if nw == write_interval: nw = 0 if verbose: print('pogress:', int(100*j/nt), '%', end="\r") if verbose: print('Finished simulation')
djsilva99/heatrapy
heatrapy/dimension_1/objects/single.py
single.py
py
17,265
python
en
code
51
github-code
6
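A hedged usage sketch for the SingleObject class documented above. The import path is inferred from the file's location in the repository, and 'Cu' is assumed to exist in the default materials database; treat this as illustration, not the package's documented entry point.

from heatrapy.dimension_1.objects.single import SingleObject  # path inferred from the repo layout

# 1D copper object at 293 K, 10 space points, explicit solver, no live plotting
obj = SingleObject(293.0, materials=('Cu',), borders=(1, 11), materials_order=(0,),
                   dx=0.01, dt=0.1, file_name='single_run.txt',
                   boundaries=(300, 0), draw=[])   # left boundary fixed at 300 K, right insulated
obj.change_power('Q0', 1.0e6, 3, 8)                # constant heat source between points 3 and 8
obj.compute(10, 5, solver='explicit_k(x)', verbose=False)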
19116408556
import statistics N = int(input()) V = list(map(int, input().split())) T = [] A = [] V.reverse() print(V) for i in range(N): d = V[0] T.append(d) V.pop(0) print(T) c = statistics.median(V) A.append(c) V.remove(c) print(sum(T))
NPE-NPE/activity
python/Atcoder/couldn't/AGC/053/b.py
b.py
py
231
python
en
code
0
github-code
6
32563261250
""" HTTP endpoints for `station_store` """ from fastapi import HTTPException, status from screfinery import schema from screfinery.crud_routing import EndpointsDef, RouteDef, \ crud_router_factory from screfinery.stores import station_store from screfinery.util import is_user_authorized def authorize(user, scope, item=None): """ Station resource isn't owned by anyone, so don't check ownership with user """ if not is_user_authorized(user, scope): raise HTTPException(status.HTTP_403_FORBIDDEN) station_routes = crud_router_factory( station_store, EndpointsDef( list=RouteDef( request_model=None, response_model=schema.ListResponse[schema.Station], authorize=authorize, ), read=RouteDef( request_model=None, response_model=schema.Station, authorize=authorize, ), create=RouteDef( request_model=schema.StationCreate, response_model=schema.Station, authorize=authorize, ), update=RouteDef( request_model=schema.StationUpdate, response_model=schema.Station, authorize=authorize, ), delete=RouteDef( request_model=None, response_model=None, authorize=authorize, ) ) )
fre-sch/sc-refinery-api
screfinery/routes/station.py
station.py
py
1,371
python
en
code
0
github-code
6
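A short sketch of how station_routes above would typically be mounted. It assumes crud_router_factory returns a standard FastAPI APIRouter, which the record implies but does not show; the prefix is an illustrative choice.

from fastapi import FastAPI
from screfinery.routes.station import station_routes  # module path taken from the record's sub_path

app = FastAPI()
app.include_router(station_routes, prefix="/station")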
71567841467
number_of_open_tabs = int(input()) salary = int(input()) salary_condition = True for _ in range(number_of_open_tabs): name_of_website = input() if name_of_website == 'Facebook': salary -= 150 elif name_of_website == 'Instagram': salary -= 100 elif name_of_website == 'Reddit': salary -= 50 if salary <= 0: salary_condition = False break if salary_condition: print(salary) else: print('You have lost your salary.')
lorindi/SoftUni-Software-Engineering
Programming-Basics-with-Python/8.For Loop - Exercise/salary.py
salary.py
py
486
python
en
code
3
github-code
6
30513158454
import os import datetime from django.conf import settings date = datetime.datetime.now() filename_secrets_bx24 = os.path.join(settings.BASE_DIR, 'reports', 'report.txt') class Report: def __init__(self): self.date = None self.filename = None self.fields = None # self.encoding = 'cp1251' self.encoding = 'utf8' def create(self): self.set_date() self.forming_filename() with open(self.filename, 'a+', encoding=self.encoding) as f: html_tags = \ """ <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <style> table {border-collapse: collapse;} th { border: 2px solid #dee2e6; padding: 6px; text-align: "center"; font-size: 14px; font-family: sans-serif; color: rgb(33, 37, 41); max-width: 300px; overflow: auto; } td { border: 2px solid #dee2e6; font-size: 12px; font-weight: 400; font-family: sans-serif; white-space: nowrap; color: rgb(33, 37, 41); padding: 0 5px; max-width: 300px; overflow: auto; } .result td { background-color: #cfe2ff; border-bottom: 4px solid #74b0ec; } </style> <!-- <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-iYQeCzEYFbKjA/T2uDLTpkwGzCiq6soy8tYaI1GyVh/UjpbCx/TYkiZhlZB6+fzT" crossorigin="anonymous"> --> <title>Отчет</title> </head> <body> <h1>Результат объединения контактов от """ html_tags += self.date.isoformat() html_tags += """ </h1> <table class="table"> """ f.write(html_tags) def add_fields(self, fields): self.fields = fields with open(self.filename, 'a', encoding=self.encoding) as f: header_html = '<th>ID</th>\n' for field in self.fields: if field == 'ID': continue header_html += f'<th>{field}</th>\n' header_html += f'<th>DEALS</th>\n' f.write(f''' <thead> <tr> {header_html} </tr> </thead> ''') def add(self, old_contacts, id_contact_res, data_update, companies, deals={}): with open(self.filename, 'a', encoding=self.encoding) as f: html = '' for _, contact in old_contacts.items(): html += f''' <tr> {self.get_row_html(contact, deals)} </tr> ''' res_contact = old_contacts.get(id_contact_res, {}) html += f""" <tr class="result"> {self.get_row_res_html(res_contact, data_update, companies, deals)} </tr> """ f.write(f''' <tbody> {html} </tbody> ''') def get_row_html(self, contact, deals): id_contact = contact.get("ID", "") html_row = f'<td>{contact.get("ID", "")}</td>\n' for field, field_data in self.fields.items(): if field == 'ID': continue elif field_data['type'] == 'crm_multifield': cell = '' for item in contact.get(field, []): cell += item.get('VALUE', '') or "&ndash;" cell += '<br>' html_row += f'<td>{cell}</td>\n' else: html_row += f'<td>{contact.get(field, "") or "&ndash;"}</td>\n' deals_lst = deals.get(str(id_contact), []) html_row += f'<td>{", ".join([str(i) for i in deals_lst])}</td>\n' return html_row def get_row_res_html(self, contact, data_update, companies, deals): html_row = f'<td>{contact.get("ID", "")}</td>\n' for field, field_data in self.fields.items(): if field == 'ID': continue elif field == 'COMPANY_ID' and not data_update.get(field, None) and companies: html_row += f'<td>{companies[0]}</td>\n' elif field in data_update and field_data['type'] == 'crm_multifield': cell = '' for item in data_update.get(field, []): cell += item.get('VALUE', '') or "&ndash;" cell += '<br>' html_row += f'<td>{cell}</td>\n' elif field in data_update: html_row += f'<td>{data_update.get(field, "") or "&ndash;"}</td>\n' elif field_data['type'] == 'crm_multifield': cell = 
'' for item in contact.get(field, []): cell += item.get('VALUE', '') or "&ndash;" cell += '<br>' html_row += f'<td>{cell}</td>\n' else: html_row += f'<td>{contact.get(field, "") or "&ndash;"}</td>\n' deals_lst = deals.get("summary", []) html_row += f'<td>{", ".join([str(i) for i in deals_lst])}</td>\n' return html_row def closed(self): with open(self.filename, 'a', encoding=self.encoding) as f: html_tags = \ """ </table> <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js" integrity="sha384-u1OknCvxWvY5kfmNBILK2hRnQC3Pr17a+RTT6rIHI7NnikvbZlHgTPOOmMi466C8" crossorigin="anonymous"></script> </body> </html> """ f.write(html_tags) def forming_filename(self): date_str = self.convert_date_to_str(self.date) self.filename = os.path.join(settings.BASE_DIR, 'reports', f'report_{date_str}.html') def set_date(self): self.date = datetime.datetime.now() @staticmethod def convert_date_to_str(date): return date.strftime("%d.%m.%Y_%H.%M")
Oleg-Sl/Quorum_merge_contacts
merge_contacts/api_v1/service/report/report_to_html.py
report_to_html.py
py
7,159
python
en
code
0
github-code
6
27568079162
from sys import platform from pathlib import Path from clang.cindex import Config # -- Project information ----------------------------------------------------- project = 'zenoh-pico' copyright = '2017, 2022 ZettaScale Technology Inc' author = 'ZettaScale Zenoh team' release = '0.11.0.0' # -- General configuration --------------------------------------------------- master_doc = 'index' extensions = ['sphinx_c_autodoc', 'sphinx_c_autodoc.napoleon'] language = 'c' c_autodoc_roots = ['../include/zenoh-pico/api/'] # -- Options for HTML output ------------------------------------------------- html_theme = 'sphinx_rtd_theme' breathe_debug_trace_directives = True if platform == "darwin": LIBCLANG_FILE = Path("/Library/Developer/CommandLineTools/usr/lib/libclang.dylib") LIBCLANG_CELLAR = Path("/usr/local/Cellar/llvm/14.0.6/lib/libclang.dylib") if LIBCLANG_FILE.is_file(): Config.set_library_file(LIBCLANG_FILE) elif LIBCLANG_CELLAR.is_file(): Config.set_library_file(LIBCLANG_CELLAR) else: raise ValueError(f"libclang not found. \nTried: \n {LIBCLANG_FILE}\n {LIBCLANG_CELLAR}") elif platform == "win32": raise ValueError("Windows not supported yet for building docs.") else: Config.set_library_file('/usr/lib/llvm-14/lib/libclang.so.1') # Required for readthedocs
eclipse-zenoh/zenoh-pico
docs/conf.py
conf.py
py
1,328
python
en
code
63
github-code
6
40160808434
import openpyxl import os from setting import get_file_path, get_file_name file_path = get_file_path() file_name = get_file_name() # switch to the specified path os.chdir(file_path) # read in the Excel file wb = openpyxl.load_workbook(file_name) # get the first worksheet of the Excel file sheet = wb.worksheets[0] etf_all = dict() # compile the complete ETF list for columnNum in range(1, sheet.max_column + 1, 3): for rowNum in range(3, sheet.max_row + 1): if (sheet.cell(rowNum, columnNum).value == None): break if (etf_all.get(sheet.cell(rowNum, columnNum).value) == None): etf_all[sheet.cell(rowNum, columnNum).value] = { 'name' : sheet.cell(rowNum, columnNum + 1).value, 'content' : [sheet.cell(1, columnNum).value] } else: etf_all.get(sheet.cell(rowNum, columnNum).value)['content'].append(sheet.cell(1, columnNum).value) sorted_list = sorted(etf_all.items(), key=lambda x:len(x[1]['content']), reverse=True) # write out the result new_sheet = wb.create_sheet('result') row = 1 column = 1 for t in sorted_list: new_sheet.cell(row, column).value = t[0] new_sheet.cell(row, column + 1).value = t[1]['name'] new_sheet.cell(row, column + 2).value = len(t[1]['content']) new_sheet.cell(row, column + 3).value = ','.join(str(etf_id) for etf_id in t[1]['content']) row = row + 1 # save the workbook wb.save(file_name)
ShengUei/Stock
etf_analysis.py
etf_analysis.py
py
1,419
python
en
code
0
github-code
6
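etf_analysis.py above imports get_file_path and get_file_name from a local setting module that is not included in the record; a minimal stand-in such as the following (both return values are placeholders) is enough to run the script.

# setting.py - hypothetical stand-in for the module the script imports
def get_file_path():
    return '/path/to/etf'          # directory containing the workbook (placeholder)

def get_file_name():
    return 'etf_holdings.xlsx'     # workbook with ETF holdings, three columns per ETF (placeholder)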
26796673166
''' 11. Container With The Most Water Given n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai). n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0). Find two lines, which together with x-axis forms a container, such that the container contains the most water. Note: You may not slant the container and n is at least 2. ''' import unittest as ut def main(hs): ''' hs: Integer[] -> List of heights for lines return: Integer -> Greatest possible area ''' # Initialize maximum area to 0 ma = 0 # Initialize left index to 0 li = 0 # Initialize right index to last possible index ri = len(hs) - 1 # While left index is less than right index while li < ri: # Width is equal to the difference between left and right index w = ri - li # Height is shortest length between left and right height h = min(hs[li], hs[ri]) # If left height is shorter than right height if h == hs[li]: # Increment left index by one li += 1 # If right height is shorter than left height else: # Decrement right index by one ri -= 1 # Area is equal to width times height a = w * h # Max height is greatest of area and maximum area ma = max(ma, a) # Return maximum area return ma class Tests (ut.TestCase): def testA(self): heights = [7, 6, 8, 5, 7, 5, 8, 6, 4, 5] expected = 45 result = main(heights) self.assertEqual(expected, result) def testB(self): heights = [3, 5, 4, 3, 8, 4, 4, 3, 1] expected = 21 result = main(heights) self.assertEqual(expected, result) if __name__ == '__main__': ut.main()
LySofDev/LeetCode-Solutions
P11-ContainerWithTheMostWater.py
P11-ContainerWithTheMostWater.py
py
1,637
python
en
code
0
github-code
6
70732810747
import struct import numpy as np # functions for reading the header def uint32_type(uint32_type): # converts the binary value and returns uint32 uint32_type_1 = struct.unpack('<I', uint32_type) return uint32_type_1[0] def float_type(float_type): # converts the binary value and returns float float_type = struct.unpack('<f', float_type) return float_type[0] def uint8_type(uint8_type): # converts the binary value and returns uint8 i = 0 while i < len(uint8_type): uint8_type_1 = np.uint8(int.from_bytes(uint8_type[i:i+1], byteorder="little")) i = i + 1 return uint8_type_1 def uint16_type(uint16_type): # converts the binary value and returns uint16 i = 0 while i < len(uint16_type): uint16_type_1 = np.uint16(int.from_bytes(uint16_type[i:i+2], byteorder="little")) i = i + 2 return uint16_type_1 # commit # counts how many bits the comment occupies # and converts the binary value to big-endian chars def commit_char_big(commit_char_big): i = 0 commit_1 = str("") while i < len(commit_char_big): commit_char_big_1 = np.uint16(int.from_bytes(commit_char_big[i:i+2], byteorder="big")) commit_1 = commit_1 + chr(commit_char_big_1) i = i + 2 return commit_char_big_1, commit_1 # data structure # returns the start and end bit for a given iteration def structure(number_relis, front_relis_1): back_relis = 16384 * number_relis + front_relis_1 + number_relis * 8 front_relis = back_relis - 16384 - 8 return front_relis, back_relis # example # Danye # number_relis must be set to the iteration number # num_relis = byte [structure (number_relis, 60 + commit_char_big(col_byte_commit)[0])[0] : structure (number_relis, 60 + commit_char_big(col_byte_commit)[0])[0] + 4] # angle = byte [structure (number_relis, 60 + commit_char_big(col_byte_commit)[0])[0] + 4 : structure (number_relis, 60 + commit_char_big(col_byte_commit)[0])[0] + 6] # xxx = byte [structure (number_relis, 60 + commit_char_big(col_byte_commit)[0])[0] + 6 : structure (number_relis, 60 + commit_char_big(col_byte_commit)[0])[0] + 8] # data = byte [structure (number_relis, 60 + commit_char_big(col_byte_commit)[0])[0] + 8 : structure (number_relis, 60 + commit_char_big(col_byte_commit)[0])[1]] # converts int to short def preob_short(data): z = (np.short(data)) return z # converts binary data to int def preob_int_bit(data): i = 0 z = [] while i < 16384: z.append(int.from_bytes(data[i:i+2], byteorder='little')) i += 2 return z # radarconsol does not display the initial samples # this function maps the radarconsol samples to the samples in the file # radarconsol is missing the first 5 samples, so this function adds 5 to every # sample def Countdown_radar_consol(countdown, k=5): countdown_1 = [] for i in countdown: countdown_1.append(i + k) return countdown_1 def test(data): data = struct.unpack('<8192h', data) lst = list(data) return lst
churillov/Scattering_Matrix_Calculation
zagolovok.py
zagolovok.py
py
3,584
python
ru
code
0
github-code
6
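A small, self-contained check of the header helpers defined in zagolovok.py above, using struct.pack to fabricate a few bytes; the input values are arbitrary and the functions are assumed to be importable from that module.

import struct
from zagolovok import uint32_type, float_type, uint16_type, structure  # assumption: module on PYTHONPATH

print(uint32_type(struct.pack('<I', 123456)))  # 123456
print(float_type(struct.pack('<f', 3.5)))      # 3.5
print(uint16_type(struct.pack('<H', 513)))     # 513
print(structure(1, 60))                        # (60, 16452): start/end byte offsets of record 1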